author	G, Manjunath Kondaiah <manjugk@ti.com>	2010-12-01 10:58:10 +0530
committer	G, Manjunath Kondaiah <manjugk@ti.com>	2010-12-01 11:01:02 +0530
commit	463e3f2ef6c93c3180bd9017d8c0ce8e4548196f (patch)
tree	7f2b0902590e074714bfdf11818701295ba11070
parent	e4ad07e201e656e85852bef1f657efc9d7d1548d (diff)
parent	8bf2afec86f8f2f71ed7803f1a06581afa8f82c8 (diff)
Merge remote branch 'for-integ' into L24.11 for p4 release
-rw-r--r-- arch/arm/mach-omap2/include/mach/dmm.h | 4
-rw-r--r-- arch/arm/mach-omap2/include/mach/tiler.h | 411
-rw-r--r-- drivers/media/Kconfig | 2
-rw-r--r-- drivers/media/video/Makefile | 3
-rw-r--r-- drivers/media/video/dmm/Kconfig | 6
-rw-r--r-- drivers/media/video/dmm/Makefile | 3
-rw-r--r-- drivers/media/video/dmm/dmm.c | 338
-rw-r--r-- drivers/media/video/dmm/tmm_pat.c | 331
-rw-r--r-- drivers/media/video/tiler/Kconfig | 126
-rw-r--r-- drivers/media/video/tiler/Makefile | 11
-rw-r--r-- drivers/media/video/tiler/_tiler.h | 148
-rw-r--r-- drivers/media/video/tiler/dmm.c | 231
-rw-r--r-- drivers/media/video/tiler/tcm.h (renamed from drivers/media/video/tiler/tcm/tcm.h) | 155
-rw-r--r-- drivers/media/video/tiler/tcm/Makefile | 2
-rw-r--r-- drivers/media/video/tiler/tcm/_tcm-sita.h | 65
-rw-r--r-- drivers/media/video/tiler/tcm/_tcm_sita.h | 91
-rw-r--r-- drivers/media/video/tiler/tcm/tcm-sita.c | 934
-rw-r--r-- drivers/media/video/tiler/tcm/tcm-sita.h (renamed from drivers/media/video/tiler/tcm/tcm_sita.h) | 6
-rw-r--r-- drivers/media/video/tiler/tcm/tcm-utils.h (renamed from drivers/media/video/tiler/tcm/tcm_utils.h) | 21
-rw-r--r-- drivers/media/video/tiler/tcm/tcm_sita.c | 1359
-rw-r--r-- drivers/media/video/tiler/tiler-geom.c | 372
-rw-r--r-- drivers/media/video/tiler/tiler-iface.c | 828
-rw-r--r-- drivers/media/video/tiler/tiler-main.c | 1269
-rw-r--r-- drivers/media/video/tiler/tiler-reserve.c | 550
-rw-r--r-- drivers/media/video/tiler/tiler.c | 1603
-rw-r--r-- drivers/media/video/tiler/tiler_def.h | 158
-rw-r--r-- drivers/media/video/tiler/tiler_pack.c | 269
-rw-r--r-- drivers/media/video/tiler/tiler_rot.c | 239
-rw-r--r-- drivers/media/video/tiler/tmm-pat.c | 300
-rw-r--r-- drivers/media/video/tiler/tmm.h (renamed from drivers/media/video/dmm/tmm.h) | 16
30 files changed, 5194 insertions, 4657 deletions
diff --git a/arch/arm/mach-omap2/include/mach/dmm.h b/arch/arm/mach-omap2/include/mach/dmm.h
index 700f08aefbc4..68b798a22c41 100644
--- a/arch/arm/mach-omap2/include/mach/dmm.h
+++ b/arch/arm/mach-omap2/include/mach/dmm.h
@@ -1,7 +1,9 @@
/*
* dmm.h
*
- * DMM driver support functions for TI OMAP processors.
+ * DMM driver support functions for the TI DMM-TILER hardware block.
+ *
+ * Author: David Sin <davidsin@ti.com>
*
* Copyright (C) 2009-2010 Texas Instruments, Inc.
*
diff --git a/arch/arm/mach-omap2/include/mach/tiler.h b/arch/arm/mach-omap2/include/mach/tiler.h
index 3e6c820dc7aa..d72f322696c5 100644
--- a/arch/arm/mach-omap2/include/mach/tiler.h
+++ b/arch/arm/mach-omap2/include/mach/tiler.h
@@ -1,7 +1,10 @@
/*
* tiler.h
*
- * TILER driver support functions for TI OMAP processors.
+ * TILER driver support functions for the TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
*
* Copyright (C) 2009-2010 Texas Instruments, Inc.
*
@@ -17,70 +20,65 @@
#ifndef TILER_H
#define TILER_H
-#define TILER_PAGE 0x1000
-#define TILER_WIDTH 256
-#define TILER_HEIGHT 128
-#define TILER_BLOCK_WIDTH 64
-#define TILER_BLOCK_HEIGHT 64
-#define TILER_LENGTH (TILER_WIDTH * TILER_HEIGHT * TILER_PAGE)
+#include <linux/mm.h>
-#define TILER_MAX_NUM_BLOCKS 16
+/*
+ * ----------------------------- API Definitions -----------------------------
+ */
-#define TILIOC_GBUF _IOWR('z', 100, u32)
-#define TILIOC_FBUF _IOWR('z', 101, u32)
-#define TILIOC_GSSP _IOWR('z', 102, u32)
-#define TILIOC_MBUF _IOWR('z', 103, u32)
-#define TILIOC_UMBUF _IOWR('z', 104, u32)
-#define TILIOC_QBUF _IOWR('z', 105, u32)
-#define TILIOC_RBUF _IOWR('z', 106, u32)
-#define TILIOC_URBUF _IOWR('z', 107, u32)
-#define TILIOC_QUERY_BLK _IOWR('z', 108, u32)
+/* return true if physical address is in the tiler container */
+bool is_tiler_addr(u32 phys);
enum tiler_fmt {
- TILFMT_MIN = -1,
- TILFMT_INVALID = -1,
- TILFMT_NONE = 0,
- TILFMT_8BIT = 1,
- TILFMT_16BIT = 2,
- TILFMT_32BIT = 3,
- TILFMT_PAGE = 4,
- TILFMT_MAX = 4
+ TILFMT_MIN = -2,
+ TILFMT_INVALID = -2,
+ TILFMT_NONE = -1,
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT = 1,
+ TILFMT_32BIT = 2,
+ TILFMT_PAGE = 3,
+ TILFMT_MAX = 3,
+ TILFMT_8AND16 = 4, /* used to mark NV12 reserve block */
};
-struct area {
- u16 width;
- u16 height;
+/* tiler block info */
+struct tiler_block_t {
+ u32 phys; /* system space (L3) tiler addr */
+ u32 width; /* width */
+ u32 height; /* height */
+ u32 key; /* secret key */
+ u32 id; /* unique block ID */
};
-struct tiler_block_info {
- enum tiler_fmt fmt;
- union {
- struct area area;
- u32 len;
- } dim;
- u32 stride;
- void *ptr;
- u32 ssptr;
+/* tiler (image/video frame) view */
+struct tiler_view_t {
+ u32 tsptr; /* tiler space addr */
+ u32 width; /* width */
+ u32 height; /* height */
+ u32 bpp; /* bytes per pixel */
+ s32 h_inc; /* horizontal increment */
+ s32 v_inc; /* vertical increment */
};
-struct tiler_buf_info {
- s32 num_blocks;
- struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
- s32 offset;
-};
+/* get the tiler format for a physical address or TILFMT_INVALID */
+enum tiler_fmt tiler_fmt(u32 phys);
-struct tiler_view_orient {
- u8 rotate_90;
- u8 x_invert;
- u8 y_invert;
-};
+/* get the modified (1 for page mode) bytes-per-pixel for a tiler block */
+u32 tiler_bpp(const struct tiler_block_t *b);
+
+/* get tiler block physical stride */
+u32 tiler_pstride(const struct tiler_block_t *b);
+
+/* get tiler block virtual stride */
+static inline u32 tiler_vstride(const struct tiler_block_t *b)
+{
+ return PAGE_ALIGN((b->phys & ~PAGE_MASK) + tiler_bpp(b) * b->width);
+}
-/* utility functions */
-static inline u32 tilfmt_bpp(enum tiler_fmt fmt)
+/* returns the virtual size of the block (for mmap) */
+static inline u32 tiler_size(const struct tiler_block_t *b)
{
- return fmt == TILFMT_8BIT ? 1 :
- fmt == TILFMT_16BIT ? 2 :
- fmt == TILFMT_32BIT ? 4 : 0;
+ return b->height * tiler_vstride(b);
}
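
A concrete reading of the two inline helpers above (editor's sketch, not part of this patch; the block values are hypothetical and assume the address falls in the 16-bit container, so tiler_bpp() returns 2):

/* hypothetical 176x144 16-bit block starting on a page boundary */
struct tiler_block_t blk = { .phys = 0x60000000, .width = 176, .height = 144 };
/* tiler_vstride(&blk) == PAGE_ALIGN(0 + 2 * 176) == 4096 */
/* tiler_size(&blk)    == 144 * 4096 == 589824 bytes to mmap */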
/* Event types */
@@ -93,7 +91,7 @@ static inline u32 tilfmt_bpp(enum tiler_fmt fmt)
*
* @return error status
*/
-int tiler_reg_notifier(struct notifier_block *nb);
+s32 tiler_reg_notifier(struct notifier_block *nb);
/**
* Un-registers a notifier block with TILER driver.
@@ -102,40 +100,74 @@ int tiler_reg_notifier(struct notifier_block *nb);
*
* @return error status
*/
-int tiler_unreg_notifier(struct notifier_block *nb);
+s32 tiler_unreg_notifier(struct notifier_block *nb);
/**
* Reserves a 1D or 2D TILER block area and memory for the
* current process with group ID 0.
*
- * @param fmt TILER bit mode
- * @param width block width
- * @param height block height (must be 1 for 1D)
- * @param sys_addr pointer where system space (L3) address
- * will be stored.
+ * @param blk pointer to tiler block data. This must be set up ('phys' member
+ * must be 0) with the tiler block information. 'height' must be 1
+ * for 1D block.
+ * @param fmt TILER block format
+ * @param align block alignment (default: normally PAGE_SIZE)
+ * @param offs block offset
*
* @return error status
*/
-s32 tiler_alloc(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr);
+s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 align,
+ u32 offs);
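
A minimal allocation sketch against this API (not part of the patch; dimensions are hypothetical, and passing 0 for align/offs to request the defaults is an assumption based on the "(default: ...)" notes above):

static s32 alloc_sketch(void)
{
	struct tiler_block_t blk = {
		.phys = 0,	/* must be 0 before allocation */
		.width = 640,
		.height = 480,	/* would be 1 for a 1D block */
	};

	if (tiler_alloc(&blk, TILFMT_32BIT, 0, 0))
		return -ENOMEM;
	/* ... use blk.phys ... */
	tiler_free(&blk);	/* declared below; clears phys and id */
	return 0;
}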
/**
- * Reserves a 1D or 2D TILER block area and memory with extended
- * arguments.
+ * Reserves a 1D or 2D TILER block area and memory for a set process and group
+ * ID.
*
- * @param fmt TILER bit mode
- * @param width block width
- * @param height block height (must be 1 for 1D)
- * @param align block alignment (default: PAGE_SIZE)
- * @param offs block offset
- * @param gid group ID
- * @param pid process ID
- * @param sys_addr pointer where system space (L3) address
- * will be stored.
+ * @param blk pointer to tiler block data. This must be set up ('phys' member
+ * must be 0) with the tiler block information. 'height' must be 1
+ * for 1D block.
+ * @param fmt TILER block format
+ * @param align block alignment (default: normally PAGE_SIZE)
+ * @param offs block offset
+ * @param gid group ID
+ * @param pid process ID
*
* @return error status
*/
-s32 tiler_allocx(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, pid_t pid, u32 *sys_addr);
+s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 align,
+ u32 offs, u32 gid, pid_t pid);
+
+/**
+ * Mmaps a portion of a tiler block to a virtual address. Use this method in
+ * your driver's mmap function to potentially combine multiple tiler blocks as
+ * one virtual buffer.
+ *
+ * @param blk pointer to tiler block data
+ * @param offs offset from where to map (must be page aligned)
+ * @param size size of area to map (must be page aligned)
+ * @param vma VMM memory area to map to
+ * @param voffs offset (from vm_start) in the VMM memory area to start
+ * mapping at
+ *
+ * @return error status
+ */
+s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ struct vm_area_struct *vma, u32 voffs);
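
A sketch of the intended use from a driver's mmap hook (the driver name and the private_data convention are hypothetical):

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct tiler_block_t *blk = file->private_data;	/* hypothetical */
	u32 size = vma->vm_end - vma->vm_start;

	/* map the whole block at offset 0 of the VMA */
	return tiler_mmap_blk(blk, 0, size, vma, 0);
}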
+
+/**
+ * Ioremaps a portion of a tiler block. Use this method in your driver instead
+ * of ioremap to potentially combine multiple tiler blocks as one virtual
+ * buffer.
+ *
+ * @param blk pointer to tiler block data
+ * @param offs offset from where to map (must be page aligned)
+ * @param size size of area to map (must be page aligned)
+ * @param addr virtual address
+ * @param mtype ioremap memory type (e.g. MT_DEVICE)
+ *
+ * @return error status
+ */
+s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size, u32 addr,
+ u32 mtype);
/**
* Maps an existing buffer to a 1D or 2D TILER area for the
@@ -143,95 +175,222 @@ s32 tiler_allocx(enum tiler_fmt fmt, u32 width, u32 height,
*
* Currently, only 1D area mapping is supported.
*
- * @param fmt TILER bit mode
- * @param width block width
- * @param height block height (must be 1 for 1D)
- * @param sys_addr pointer where system space (L3) address
- * will be stored.
+ * NOTE: alignment is always PAGE_SIZE and offset is 0 as full pages are mapped
+ * into tiler container.
+ *
+ * @param blk pointer to tiler block data. This must be set up
+ * ('phys' member must be 0) with the tiler block
+ * information. 'height' must be 1 for 1D block.
+ * @param fmt TILER format
* @param usr_addr user space address of existing buffer.
*
* @return error status
*/
-s32 tiler_map(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr,
- u32 usr_addr);
+s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr);
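
A pinning sketch for an existing user buffer (a guess at usage, not part of the patch; whether 'width' carries the 1D length in bytes or in pages is not stated in this header, so treat that as an assumption):

static s32 pin_user_buf(u32 usr_addr, u32 len)
{
	struct tiler_block_t blk = {
		.phys = 0,
		.width = len,	/* assumed: 1D length; units not stated here */
		.height = 1,	/* required for 1D mapping */
	};

	return tiler_map(&blk, TILFMT_PAGE, usr_addr);
}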
/**
- * Maps an existing buffer to a 1D or 2D TILER area with
- * extended arguments.
+ * Maps an existing buffer to a 1D or 2D TILER area for a set process and group
+ * ID.
*
* Currently, only 1D area mapping is supported.
*
- * NOTE: alignment is always PAGE_SIZE and offset is 0
+ * NOTE: alignment is always PAGE_SIZE and offset is 0 as full pages are mapped
+ * into tiler container.
*
- * @param fmt TILER bit mode
- * @param width block width
- * @param height block height (must be 1 for 1D)
- * @param gid group ID
- * @param pid process ID
- * @param sys_addr pointer where system space (L3) address
- * will be stored.
+ * @param blk pointer to tiler block data. This must be set up
+ * ('phys' member must be 0) with the tiler block
+ * information. 'height' must be 1 for 1D block.
+ * @param fmt TILER format
+ * @param gid group ID
+ * @param pid process ID
* @param usr_addr user space address of existing buffer.
*
* @return error status
*/
-s32 tiler_mapx(enum tiler_fmt fmt, u32 width, u32 height,
- u32 gid, pid_t pid, u32 *sys_addr, u32 usr_addr);
+s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 gid, pid_t pid, u32 usr_addr);
+
+/**
+ * Frees TILER memory. Since there may be multiple references for the same area
+ * if duplicated by tiler_dup, the area is only actually freed if all references
+ * have been freed.
+ *
+ * @param blk pointer to a tiler block data as filled by tiler_alloc,
+ * tiler_map or tiler_dup. 'phys' and 'id' members will be set to
+ * 0 on success.
+ */
+void tiler_free(struct tiler_block_t *blk);
/**
- * Free TILER memory.
+ * Reserves tiler area for n identical blocks for the current process. Use this
+ * method to get optimal placement of multiple identical tiler blocks; however,
+ * it may not reserve area if tiler_alloc is equally efficient.
*
- * @param sys_addr system space (L3) address.
+ * @param n number of identical sets of blocks
+ * @param fmt TILER format
+ * @param width block width
+ * @param height block height (must be 1 for 1D)
+ * @param align block alignment (default: PAGE_SIZE)
+ * @param offs block offset
+ */
+void tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height, u32 align,
+ u32 offs);
+
+/**
+ * Reserves tiler area for n identical blocks. Use this method to get optimal
+ * placement of multiple identical tiler blocks; however, it may not reserve
+ * area if tiler_alloc is equally efficient.
*
- * @return an error status.
+ * @param n number of identical sets of blocks
+ * @param fmt TILER format
+ * @param width block width
+ * @param height block height (must be 1 for 1D)
+ * @param align block alignment (default: PAGE_SIZE)
+ * @param offs block offset
+ * @param gid group ID
+ * @param pid process ID
*/
-s32 tiler_free(u32 sys_addr);
+void tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid, pid_t pid);
/**
- * Reserves tiler area for n identical set of blocks (buffer)
- * for the current process. Use this method to get optimal
- * placement of multiple related tiler blocks; however, it may
+ * Reserves tiler area for n identical NV12 blocks for the current process. Use
+ * this method to get optimal placement of multiple identical NV12 tiler blocks;
+ * however, it may not reserve area if tiler_alloc is equally efficient.
+ *
+ * @param n number of identical sets of blocks
+ * @param width block width (Y)
+ * @param height block height (Y)
+ * @param align block alignment (default: PAGE_SIZE)
+ * @param offs block offset
+ */
+void tiler_reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs);
+
+/**
+ * Reserves tiler area for n identical NV12 blocks. Use this method to get
+ * optimal placement of multiple identical NV12 tiler blocks; however, it may
* not reserve area if tiler_alloc is equally efficient.
*
- * @param n number of identical set of blocks
- * @param b information on the set of blocks (ptr, ssptr and
- * stride fields are ignored)
+ * @param n number of identical sets of blocks
+ * @param width block width (Y)
+ * @param height block height (Y)
+ * @param align block alignment (default: PAGE_SIZE)
+ * @param offs block offset
+ * @param gid group ID
+ * @param pid process ID
+ */
+void tiler_reservex_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, pid_t pid);
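
A reservation sketch (not part of the patch; sizes are hypothetical): reserve first, then allocate the identical blocks so they land in the reserved area.

static s32 grab_frames(struct tiler_block_t *blks)
{
	int i;

	tiler_reserve(8, TILFMT_16BIT, 320, 240, 0, 0);
	for (i = 0; i < 8; i++) {
		blks[i].phys = 0;
		blks[i].width = 320;
		blks[i].height = 240;
		if (tiler_alloc(&blks[i], TILFMT_16BIT, 0, 0))
			return -ENOMEM;
	}
	return 0;
}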
+
+/**
+ * Create a view based on a tiler address and width and height
*
- * @return error status
+ * This method should only be used as a last resort, e.g. if tilview object
+ * cannot be passed because of incoherence with other view 2D objects that must
+ * be supported.
+ *
+ * @param view Pointer to a view where the information will be stored
+ * @param phys MUST BE a tiler address
+ * @param width view width
+ * @param height view height
*/
-s32 tiler_reserve(u32 n, struct tiler_buf_info *b);
+void tilview_create(struct tiler_view_t *view, u32 phys, u32 width, u32 height);
/**
- * Reserves tiler area for n identical set of blocks (buffer) fo
- * a given process. Use this method to get optimal placement of
- * multiple related tiler blocks; however, it may not reserve
- * area if tiler_alloc is equally efficient.
+ * Obtains the view information for a tiler block
*
- * @param n number of identical set of blocks
- * @param b information on the set of blocks (ptr, ssptr and
- * stride fields are ignored)
- * @param pid process ID
+ * @param view Pointer to a view where the information will be stored
+ * @param blk Pointer to an existing allocated tiler block
+ */
+void tilview_get(struct tiler_view_t *view, struct tiler_block_t *blk);
+
+/**
+ * Crops a tiler view to a rectangular portion. Crop area must be fully within
+ * the original tiler view: 0 <= left <= left + width <= view->width, also:
+ * 0 <= top <= top + height <= view->height.
*
- * @return error status
+ * @param view Pointer to tiler view to be cropped
+ * @param left x of top-left corner
+ * @param top y of top-left corner
+ * @param width crop width
+ * @param height crop height
+ *
+ * @return error status. The view will be reduced to the crop region if the
+ * crop region is valid. Otherwise, no modifications are made.
+ */
+s32 tilview_crop(struct tiler_view_t *view, u32 left, u32 top, u32 width,
+ u32 height);
+
+/**
+ * Rotates a tiler view clockwise by a specified degree.
+ *
+ * @param view Pointer to tiler view to be rotated
+ * @param rotation Degree of rotation (clockwise). Must be a multiple of
+ * 90.
+ * @return error status. View is not modified on error; otherwise, it is
+ * updated in place.
*/
-s32 tiler_reservex(u32 n, struct tiler_buf_info *b, pid_t pid);
+s32 tilview_rotate(struct tiler_view_t *view, s32 rotation);
-u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient);
+/**
+ * Mirrors a tiler view horizontally and/or vertically.
+ *
+ * @param view Pointer to tiler view to be mirrored
+ * @param flip_x Mirror horizontally (left-to-right)
+ * @param flip_y Mirror vertically (top-to-bottom)
+ *
+ * @return error status. View is not modified on error; otherwise, it is
+ * updated in place.
+ */
+s32 tilview_flip(struct tiler_view_t *view, bool flip_x, bool flip_y);
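
Chaining the view operations above (a sketch; 'blk' is an already-allocated block, and each call returns 0 on success, leaving the view untouched on error):

static void view_sketch(struct tiler_block_t *blk)
{
	struct tiler_view_t view;

	tilview_get(&view, blk);
	if (!tilview_crop(&view, 16, 16, 128, 128))	/* 128x128 at (16,16) */
		tilview_rotate(&view, 90);		/* then rotate clockwise */
}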
-u32 tiler_get_natural_addr(void *sys_ptr);
+/*
+ * ---------------------------- IOCTL Definitions ----------------------------
+ */
-u32 tiler_reorient_topleft(u32 tsptr, struct tiler_view_orient orient,
- u32 width, u32 height);
+/* ioctls */
+#define TILIOC_GBLK _IOWR('z', 100, struct tiler_block_info)
+#define TILIOC_FBLK _IOW('z', 101, struct tiler_block_info)
+#define TILIOC_GSSP _IOWR('z', 102, u32)
+#define TILIOC_MBLK _IOWR('z', 103, struct tiler_block_info)
+#define TILIOC_UMBLK _IOW('z', 104, struct tiler_block_info)
+#define TILIOC_QBUF _IOWR('z', 105, struct tiler_buf_info)
+#define TILIOC_RBUF _IOWR('z', 106, struct tiler_buf_info)
+#define TILIOC_URBUF _IOWR('z', 107, struct tiler_buf_info)
+#define TILIOC_QBLK _IOWR('z', 108, struct tiler_block_info)
+#define TILIOC_PRBLK _IOW('z', 109, struct tiler_block_info)
+#define TILIOC_URBLK _IOW('z', 110, u32)
-u32 tiler_stride(u32 tsptr);
+struct area {
+ u16 width;
+ u16 height;
+};
-void tiler_rotate_view(struct tiler_view_orient *orient, u32 rotation);
+/* userspace tiler block info */
+struct tiler_block_info {
+ enum tiler_fmt fmt;
+ union {
+ struct area area;
+ u32 len;
+ } dim;
+ u32 stride; /* stride is not maintained for 1D blocks */
+ void *ptr; /* userspace address for mapping existing buffer */
+ u32 id;
+ u32 key;
+ u32 group_id;
+ u32 align; /* alignment requirements for ssptr */
+ u32 offs; /* offset (ssptr & (align - 1) will equal offs) */
+ u32 ssptr; /* physical address, may not be exposed by default */
+};
-void tiler_alloc_packed(s32 *count, enum tiler_fmt fmt, u32 width, u32 height,
- void **sysptr, void **allocptr, s32 aligned);
+#define TILER_MAX_NUM_BLOCKS 16
-void tiler_alloc_packed_nv12(s32 *count, u32 width, u32 height, void **y_sysptr,
- void **uv_sysptr, void **y_allocptr,
- void **uv_allocptr, s32 aligned);
+/* userspace tiler buffer info */
+struct tiler_buf_info {
+ u32 num_blocks;
+ struct tiler_block_info blocks[TILER_MAX_NUM_BLOCKS];
+ u32 offset;
+ u32 length; /* also used as number of buffers for reservation */
+};
#endif
-
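
From userspace, the same allocation path is reached through the ioctls above. A sketch (the /dev/tiler node name and the availability of these definitions to userspace are assumptions):

#include <fcntl.h>
#include <sys/ioctl.h>
/* plus a userspace copy of the struct/ioctl definitions above */

int main(void)
{
	struct tiler_block_info bi = { 0 };
	int fd = open("/dev/tiler", O_RDWR);	/* node name assumed */

	bi.fmt = TILFMT_8BIT;
	bi.dim.area.width = 176;
	bi.dim.area.height = 144;
	if (fd >= 0 && ioctl(fd, TILIOC_GBLK, &bi) == 0) {
		/* bi.id and bi.key identify the block; bi.ssptr if exposed */
		ioctl(fd, TILIOC_FBLK, &bi);	/* release it again */
	}
	return 0;
}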
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index de714546fc09..115168113905 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -144,8 +144,6 @@ config USB_DABUSB
module will be called dabusb.
endif # DAB
-source "drivers/media/video/dmm/Kconfig"
-
source "drivers/media/video/tiler/Kconfig"
endif # MEDIA_SUPPORT
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index fdd8f5842d79..2be33fc5a6da 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -177,8 +177,7 @@ obj-$(CONFIG_VIDEO_SAA7164) += saa7164/
obj-$(CONFIG_VIDEO_IR_I2C) += ir-kbd-i2c.o
-obj-$(CONFIG_DMM_OMAP) += dmm/
-obj-$(CONFIG_TILER_OMAP) += tiler/
+obj-$(CONFIG_TI_TILER) += tiler/
obj-$(CONFIG_ARCH_OMAP) += omap/
diff --git a/drivers/media/video/dmm/Kconfig b/drivers/media/video/dmm/Kconfig
deleted file mode 100644
index 4af47ea0aef6..000000000000
--- a/drivers/media/video/dmm/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config DMM_OMAP
- tristate "OMAP DMM support"
- default y
- help
- DMM driver for OMAP based boards.
-
diff --git a/drivers/media/video/dmm/Makefile b/drivers/media/video/dmm/Makefile
deleted file mode 100644
index 494e70569aab..000000000000
--- a/drivers/media/video/dmm/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_DMM_OMAP) += dmm_omap.o
-dmm_omap-objs = dmm.o tmm_pat.o
-
diff --git a/drivers/media/video/dmm/dmm.c b/drivers/media/video/dmm/dmm.c
deleted file mode 100644
index 54b5e000ef40..000000000000
--- a/drivers/media/video/dmm/dmm.c
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * dmm.c
- *
- * DMM driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cdev.h> /* struct cdev */
-#include <linux/kdev_t.h> /* MKDEV() */
-#include <linux/fs.h> /* register_chrdev_region() */
-#include <linux/device.h> /* struct class */
-#include <linux/platform_device.h> /* platform_device() */
-#include <linux/err.h> /* IS_ERR() */
-#include <linux/io.h> /* ioremap() */
-#include <linux/errno.h>
-#include <linux/slab.h>
-
-#include <mach/dmm.h>
-
-#undef __DEBUG__
-#define BITS_32(in_NbBits) ((((u32)1 << in_NbBits) - 1) | ((u32)1 << in_NbBits))
-#define BITFIELD_32(in_UpBit, in_LowBit)\
- (BITS_32(in_UpBit) & ~((BITS_32(in_LowBit)) >> 1))
-#define BF BITFIELD_32
-
-#ifdef __DEBUG__
-#define DEBUG(x, y) printk(KERN_NOTICE "%s()::%d:%s=(0x%08x)\n", \
- __func__, __LINE__, x, (s32)y);
-#else
-#define DEBUG(x, y)
-#endif
-
-static s32 dmm_major;
-static s32 dmm_minor;
-
-struct dmm_dev {
- struct cdev cdev;
-};
-
-static struct dmm_dev *dmm_device;
-static struct class *dmmdev_class;
-
-static struct platform_driver dmm_driver_ldm = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "dmm",
- },
- .probe = NULL,
- .shutdown = NULL,
- .remove = NULL,
-};
-
-s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
-{
- void __iomem *r = NULL;
- u32 v = -1, w = -1;
-
- /* Only manual refill supported */
- if (mode != MANUAL)
- return -EFAULT;
-
- /*
- * Check that the DMM_PAT_STATUS register
- * has not reported an error.
- */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_STATUS__0);
- v = __raw_readl(r);
- if ((v & 0xFC00) != 0) {
- while (1)
- printk(KERN_ERR "dmm_pat_refill() error.\n");
- }
-
- /* Set "next" register to NULL */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_DESCR__0);
- v = __raw_readl(r);
- w = (v & (~(BF(31, 4)))) | ((((u32)NULL) << 4) & BF(31, 4));
- __raw_writel(w, r);
-
- /* Set area to be refilled */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_AREA__0);
- v = __raw_readl(r);
- w = (v & (~(BF(30, 24)))) | ((((s8)pd->area.y1) << 24) & BF(30, 24));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(23, 16)))) | ((((s8)pd->area.x1) << 16) & BF(23, 16));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(14, 8)))) | ((((s8)pd->area.y0) << 8) & BF(14, 8));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(7, 0)))) | ((((s8)pd->area.x0) << 0) & BF(7, 0));
- __raw_writel(w, r);
- wmb();
-
-#ifdef __DEBUG__
- printk(KERN_NOTICE "\nx0=(%d),y0=(%d),x1=(%d),y1=(%d)\n",
- (char)pd->area.x0,
- (char)pd->area.y0,
- (char)pd->area.x1,
- (char)pd->area.y1);
-#endif
-
- /* First, clear the DMM_PAT_IRQSTATUS register */
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS);
- __raw_writel(0xFFFFFFFF, r);
- wmb();
-
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
- v = 0xFFFFFFFF;
-
- while (v != 0x0) {
- v = __raw_readl(r);
- DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
- }
-
- /* Fill data register */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_DATA__0);
- v = __raw_readl(r);
-
- /* Apply 4 bit left shft to counter the 4 bit right shift */
- w = (v & (~(BF(31, 4)))) | ((((u32)(pd->data >> 4)) << 4) & BF(31, 4));
- __raw_writel(w, r);
- wmb();
-
- /* Read back PAT_DATA__0 to see if write was successful */
- v = 0x0;
- while (v != pd->data) {
- v = __raw_readl(r);
- DEBUG("DMM_PAT_DATA__0", v);
- }
-
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_CTRL__0);
- v = __raw_readl(r);
-
- w = (v & (~(BF(31, 28)))) | ((((u32)pd->ctrl.ini) << 28) & BF(31, 28));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(16, 16)))) | ((((u32)pd->ctrl.sync) << 16) & BF(16, 16));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(9, 8)))) | ((((u32)pd->ctrl.lut_id) << 8) & BF(9, 8));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(6, 4)))) | ((((u32)pd->ctrl.dir) << 4) & BF(6, 4));
- __raw_writel(w, r);
-
- v = __raw_readl(r);
- w = (v & (~(BF(0, 0)))) | ((((u32)pd->ctrl.start) << 0) & BF(0, 0));
- __raw_writel(w, r);
- wmb();
-
- /*
- * Now, check if PAT_IRQSTATUS_RAW has been
- * set after the PAT has been refilled
- */
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
- v = 0x0;
- while ((v & 0x3) != 0x3) {
- v = __raw_readl(r);
- DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
- }
-
- /* Again, clear the DMM_PAT_IRQSTATUS register */
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS);
- __raw_writel(0xFFFFFFFF, r);
- wmb();
-
- r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
- v = 0xFFFFFFFF;
-
- while (v != 0x0) {
- v = __raw_readl(r);
- DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
- }
-
- /* Again, set "next" register to NULL to clear any PAT STATUS errors */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_DESCR__0);
- v = __raw_readl(r);
- w = (v & (~(BF(31, 4)))) | ((((u32)NULL) << 4) & BF(31, 4));
- __raw_writel(w, r);
-
- /*
- * Now, check that the DMM_PAT_STATUS register
- * has not reported an error before exiting.
- */
- r = (void __iomem *)((u32)dmm->base | DMM_PAT_STATUS__0);
- v = __raw_readl(r);
- if ((v & 0xFC00) != 0) {
- while (1)
- printk(KERN_ERR "dmm_pat_refill() error.\n");
- }
-
- return 0;
-}
-EXPORT_SYMBOL(dmm_pat_refill);
-
-static s32 dmm_open(struct inode *ip, struct file *filp)
-{
- return 0;
-}
-
-static s32 dmm_release(struct inode *ip, struct file *filp)
-{
- return 0;
-}
-
-static const struct file_operations dmm_fops = {
- .open = dmm_open,
- .release = dmm_release,
-};
-
-struct dmm *dmm_pat_init(u32 id)
-{
- u32 base = 0;
- struct dmm *dmm = NULL;
- switch (id) {
- case 0:
- /* only support id 0 for now */
- base = DMM_BASE;
- break;
- default:
- return NULL;
- }
-
- dmm = kmalloc(sizeof(*dmm), GFP_KERNEL);
- if (!dmm)
- return NULL;
-
- dmm->base = ioremap(base, DMM_SIZE);
- if (!dmm->base) {
- kfree(dmm);
- return NULL;
- }
-
- __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__0);
- __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__1);
- __raw_writel(0x80808080, dmm->base + DMM_PAT_VIEW_MAP__0);
- __raw_writel(0x80000000, dmm->base + DMM_PAT_VIEW_MAP_BASE);
- __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__0);
- __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__1);
-
- return dmm;
-}
-EXPORT_SYMBOL(dmm_pat_init);
-
-/**
- * Clean up the physical address translator.
- * @param dmm Device data
- * @return an error status.
- */
-void dmm_pat_release(struct dmm *dmm)
-{
- if (dmm) {
- iounmap(dmm->base);
- kfree(dmm);
- }
-}
-EXPORT_SYMBOL(dmm_pat_release);
-
-static s32 __init dmm_init(void)
-{
- dev_t dev = 0;
- s32 r = -1;
- struct device *device = NULL;
-
- if (!cpu_is_omap44xx())
- return 0;
-
- if (dmm_major) {
- dev = MKDEV(dmm_major, dmm_minor);
- r = register_chrdev_region(dev, 1, "dmm");
- } else {
- r = alloc_chrdev_region(&dev, dmm_minor, 1, "dmm");
- dmm_major = MAJOR(dev);
- }
-
- dmm_device = kmalloc(sizeof(*dmm_device), GFP_KERNEL);
- if (!dmm_device) {
- unregister_chrdev_region(dev, 1);
- return -ENOMEM;
- }
- memset(dmm_device, 0x0, sizeof(struct dmm_dev));
-
- cdev_init(&dmm_device->cdev, &dmm_fops);
- dmm_device->cdev.owner = THIS_MODULE;
- dmm_device->cdev.ops = &dmm_fops;
-
- r = cdev_add(&dmm_device->cdev, dev, 1);
- if (r)
- printk(KERN_ERR "cdev_add():failed\n");
-
- dmmdev_class = class_create(THIS_MODULE, "dmm");
-
- if (IS_ERR(dmmdev_class)) {
- printk(KERN_ERR "class_create():failed\n");
- goto EXIT;
- }
-
- device = device_create(dmmdev_class, NULL, dev, NULL, "dmm");
- if (device == NULL)
- printk(KERN_ERR "device_create() fail\n");
-
- r = platform_driver_register(&dmm_driver_ldm);
-
-EXIT:
- return r;
-}
-
-static void __exit dmm_exit(void)
-{
- platform_driver_unregister(&dmm_driver_ldm);
- cdev_del(&dmm_device->cdev);
- kfree(dmm_device);
- device_destroy(dmmdev_class, MKDEV(dmm_major, dmm_minor));
- class_destroy(dmmdev_class);
-}
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("davidsin@ti.com");
-module_init(dmm_init);
-module_exit(dmm_exit);
diff --git a/drivers/media/video/dmm/tmm_pat.c b/drivers/media/video/dmm/tmm_pat.c
deleted file mode 100644
index 4ee59bde6e60..000000000000
--- a/drivers/media/video/dmm/tmm_pat.c
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * tmm_pat.c
- *
- * DMM driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
-#include <asm/cacheflush.h>
-#include <linux/mutex.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-
-#include "tmm.h"
-
-/**
- * Number of pages to allocate when
- * refilling the free page stack.
- */
-#define MAX 16
-#define DMM_PAGE 0x1000
-
-/* Max pages in free page stack */
-#define PAGE_CAP (256 * 128)
-
-/* Number of pages currently allocated */
-static unsigned long count;
-
-/**
- * Used to keep track of mem per
- * dmm_get_pages call.
- */
-struct fast {
- struct list_head list;
- struct mem **mem;
- u32 *pa;
- u32 num;
-};
-
-/**
- * Used to keep track of the page struct ptrs
- * and physical addresses of each page.
- */
-struct mem {
- struct list_head list;
- struct page *pg;
- u32 pa;
-};
-
-/**
- * TMM PAT private structure
- */
-struct dmm_mem {
- struct fast fast_list;
- struct mem free_list;
- struct mem used_list;
- struct mutex mtx;
- struct dmm *dmm;
-};
-
-static void dmm_free_fast_list(struct fast *fast)
-{
- struct list_head *pos = NULL, *q = NULL;
- struct fast *f = NULL;
- s32 i = 0;
-
- /* mutex is locked */
- list_for_each_safe(pos, q, &fast->list) {
- f = list_entry(pos, struct fast, list);
- for (i = 0; i < f->num; i++)
- __free_page(f->mem[i]->pg);
- kfree(f->pa);
- kfree(f->mem);
- list_del(pos);
- kfree(f);
- }
-}
-
-static u32 fill_page_stack(struct mem *mem, struct mutex *mtx)
-{
- s32 i = 0;
- struct mem *m = NULL;
-
- for (i = 0; i < MAX; i++) {
- m = kmalloc(sizeof(*m), GFP_KERNEL);
- if (!m)
- return -ENOMEM;
- memset(m, 0x0, sizeof(*m));
-
- m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
- if (!m->pg) {
- kfree(m);
- return -ENOMEM;
- }
-
- m->pa = page_to_phys(m->pg);
-
- /**
- * Note: we need to flush the cache
- * entry for each page we allocate.
- */
- dmac_flush_range((void *)page_address(m->pg),
- (void *)page_address(m->pg) + DMM_PAGE);
- outer_flush_range(m->pa, m->pa + DMM_PAGE);
-
- mutex_lock(mtx);
- count++;
- list_add(&m->list, &mem->list);
- mutex_unlock(mtx);
- }
- return 0x0;
-}
-
-static void dmm_free_page_stack(struct mem *mem)
-{
- struct list_head *pos = NULL, *q = NULL;
- struct mem *m = NULL;
-
- /* mutex is locked */
- list_for_each_safe(pos, q, &mem->list) {
- m = list_entry(pos, struct mem, list);
- __free_page(m->pg);
- list_del(pos);
- kfree(m);
- }
-}
-
-static void tmm_pat_deinit(struct tmm *tmm)
-{
- struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
-
- mutex_lock(&pvt->mtx);
- dmm_free_fast_list(&pvt->fast_list);
- dmm_free_page_stack(&pvt->free_list);
- dmm_free_page_stack(&pvt->used_list);
- mutex_destroy(&pvt->mtx);
-}
-
-static u32 *tmm_pat_get_pages(struct tmm *tmm, s32 n)
-{
- s32 i = 0;
- struct list_head *pos = NULL, *q = NULL;
- struct mem *m = NULL;
- struct fast *f = NULL;
- struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
-
- if (n <= 0 || n > 0x8000)
- return NULL;
-
- if (list_empty_careful(&pvt->free_list.list))
- if (fill_page_stack(&pvt->free_list, &pvt->mtx))
- return NULL;
-
- f = kmalloc(sizeof(*f), GFP_KERNEL);
- if (!f)
- return NULL;
- memset(f, 0x0, sizeof(*f));
-
- /* array of mem struct pointers */
- f->mem = kmalloc(n * sizeof(*f->mem), GFP_KERNEL);
- if (!f->mem) {
- kfree(f); return NULL;
- }
- memset(f->mem, 0x0, n * sizeof(*f->mem));
-
- /* array of physical addresses */
- f->pa = kmalloc(n * sizeof(*f->pa), GFP_KERNEL);
- if (!f->pa) {
- kfree(f->mem); kfree(f); return NULL;
- }
- memset(f->pa, 0x0, n * sizeof(*f->pa));
-
- /*
- * store the number of mem structs so that we
- * know how many to free later.
- */
- f->num = n;
-
- for (i = 0; i < n; i++) {
- if (list_empty_careful(&pvt->free_list.list))
- if (fill_page_stack(&pvt->free_list, &pvt->mtx))
- goto cleanup;
-
- mutex_lock(&pvt->mtx);
- pos = NULL;
- q = NULL;
- m = NULL;
-
- /*
- * remove one mem struct from the free list and
- * add the address to the fast struct mem array
- */
- list_for_each_safe(pos, q, &pvt->free_list.list) {
- m = list_entry(pos, struct mem, list);
- list_del(pos);
- break;
- }
- mutex_unlock(&pvt->mtx);
-
- if (m != NULL) {
- f->mem[i] = m;
- f->pa[i] = m->pa;
- } else {
- goto cleanup;
- }
- }
-
- mutex_lock(&pvt->mtx);
- list_add(&f->list, &pvt->fast_list.list);
- mutex_unlock(&pvt->mtx);
-
- if (f != NULL)
- return f->pa;
-cleanup:
- for (; i > 0; i--) {
- mutex_lock(&pvt->mtx);
- list_add(&f->mem[i - 1]->list, &pvt->free_list.list);
- mutex_unlock(&pvt->mtx);
- }
- kfree(f->pa);
- kfree(f->mem);
- kfree(f);
- return NULL;
-}
-
-static void tmm_pat_free_pages(struct tmm *tmm, u32 *list)
-{
- struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
- struct list_head *pos = NULL, *q = NULL;
- struct fast *f = NULL;
- s32 i = 0;
-
- mutex_lock(&pvt->mtx);
- pos = NULL;
- q = NULL;
- list_for_each_safe(pos, q, &pvt->fast_list.list) {
- f = list_entry(pos, struct fast, list);
- if (f->pa[0] == list[0]) {
- for (i = 0; i < f->num; i++) {
- if (count < PAGE_CAP) {
- list_add(
- &((struct mem *)f->mem[i])->list,
- &pvt->free_list.list);
- } else {
- __free_page(
- ((struct mem *)f->mem[i])->pg);
- count--;
- }
- }
- list_del(pos);
- kfree(f->pa);
- kfree(f->mem);
- kfree(f);
- break;
- }
- }
- mutex_unlock(&pvt->mtx);
-}
-
-static s32 tmm_pat_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
-{
- struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
- struct pat pat_desc = {0};
-
- /* send pat descriptor to dmm driver */
- pat_desc.ctrl.dir = 0;
- pat_desc.ctrl.ini = 0;
- pat_desc.ctrl.lut_id = 0;
- pat_desc.ctrl.start = 1;
- pat_desc.ctrl.sync = 0;
- pat_desc.area = area;
- pat_desc.next = NULL;
-
- /* must be a 16-byte aligned physical address */
- pat_desc.data = page_pa;
- return dmm_pat_refill(pvt->dmm, &pat_desc, MANUAL);
-}
-
-struct tmm *tmm_pat_init(u32 pat_id)
-{
- struct tmm *tmm = NULL;
- struct dmm_mem *pvt = NULL;
-
- struct dmm *dmm = dmm_pat_init(pat_id);
- if (dmm)
- tmm = kmalloc(sizeof(*tmm), GFP_KERNEL);
- if (tmm)
- pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
- if (pvt) {
- /* private data */
- pvt->dmm = dmm;
- INIT_LIST_HEAD(&pvt->free_list.list);
- INIT_LIST_HEAD(&pvt->used_list.list);
- INIT_LIST_HEAD(&pvt->fast_list.list);
- mutex_init(&pvt->mtx);
-
- count = 0;
- if (list_empty_careful(&pvt->free_list.list))
- if (fill_page_stack(&pvt->free_list, &pvt->mtx))
- goto error;
-
- /* public data */
- tmm->pvt = pvt;
- tmm->deinit = tmm_pat_deinit;
- tmm->get = tmm_pat_get_pages;
- tmm->free = tmm_pat_free_pages;
- tmm->map = tmm_pat_map;
- tmm->clear = NULL; /* not yet supported */
-
- return tmm;
- }
-
-error:
- kfree(pvt);
- kfree(tmm);
- dmm_pat_release(dmm);
- return NULL;
-}
-EXPORT_SYMBOL(tmm_pat_init);
-
diff --git a/drivers/media/video/tiler/Kconfig b/drivers/media/video/tiler/Kconfig
index fabbb59a6c8d..8ff8ede9164f 100644
--- a/drivers/media/video/tiler/Kconfig
+++ b/drivers/media/video/tiler/Kconfig
@@ -1,6 +1,126 @@
-config TILER_OMAP
- tristate "OMAP TILER support"
+config HAVE_TI_TILER
+ bool
+ default y
+ depends on ARCH_OMAP4
+
+menuconfig TI_TILER
+ tristate "TI TILER support"
default y
+ depends on HAVE_TI_TILER
help
- TILER driver for OMAP based boards.
+ TILER and TILER-DMM driver for TI chips. The TI TILER device
+ enables video rotation on certain TI chips such as OMAP4 or
+ Netra. Video rotation will be limited without TILER support.
+
+config TILER_GRANULARITY
+ int "Allocation granularity (2^n)"
+ range 1 4096
+ default 128
+ depends on TI_TILER
+ help
+ This option sets the default TILER allocation granularity. It can
+ be overridden by the tiler.grain boot argument.
+
+ The allocation granularity is the smallest TILER block size (in
+ bytes) managed distinctly by the TILER driver. TILER blocks of any
+ size are managed in chunks of at least this size.
+
+ Must be a 2^n in the range of 1 to 4096; however, the TILER driver
+ may use a larger supported granularity.
+
+ Supported values are: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
+ 2048, 4096.
+
+config TILER_ALIGNMENT
+ int "Allocation alignment (2^n)"
+ range 1 4096
+ default 4096
+ depends on TI_TILER
+ help
+ This option sets the default TILER allocation alignment. It can
+ be overridden by the tiler.align boot argument.
+
+ Must be a 2^n in the range of 1 to 4096; however, it is naturally
+ aligned to the TILER granularity.
+
+ Supported values are: 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024,
+ 2048, 4096.
+
+config TILER_CACHE_LIMIT
+ int "Memory limit to cache free pages in MBytes"
+ range 0 128
+ default 40
+ depends on TI_TILER
+ help
+ This option sets the minimum amount of memory that TILER retains
+ even if less TILER-allocated memory is in use. The unused memory
+ is instead kept in a cache to speed up allocation and freeing of
+ physical pages.
+
+ This option can be overridden by the tiler.cache boot argument.
+
+ Initially, TILER will use less memory than this limit (starting
+ from 0) and will not release any memory it uses until it reaches
+ this limit. Thereafter, TILER will release any unused memory
+ immediately, as long as the total stays above this threshold.
+
+config TILER_SECURITY
+ int "Process security"
+ range 0 1
+ default 1
+ depends on TI_TILER
+ help
+ This option sets the default TILER process security. It can be
+ overridden by the tiler.secure boot argument.
+
+ If process security is enabled (1), the TILER driver uses a separate
+ TILER buffer address space (for mmap purposes) for each process.
+ This means that one process cannot simply map another process's
+ TILER buffer into its memory, even for sharing. However, it can
+ recreate the buffer by knowing the IDs and secret keys for the
+ TILER blocks involved. This is the preferred configuration.
+
+ Disabling security (0) allows sharing buffers simply by sharing the
+ mmap offset and size. However, because buffers can potentially be
+ shared between processes, it delays resource cleanup while any
+ process has an open TILER device.
+
+config TILER_SSPTR_ID
+ int "Use SSPtr for id"
+ range 0 1
+ default 0
+ depends on TI_TILER
+ help
+ This option sets the default behavior for TILER block ids. It can
+ be overridden by the tiler.ssptr_id boot argument.
+
+ If true, the TILER driver uses the system-space (physical) address
+ (SSPtr) of a TILER block as its unique id. This may help sharing
+ TILER blocks between co-processors if using a constant key for each
+ block.
+
+ Note that the SSPtr is unique for each TILER block.
+
+config TILER_SECURE
+ bool "Secure TILER build"
+ default n
+ depends on TI_TILER
+ help
+ This option forces the TILER security features on, bypassing the
+ corresponding module parameters.
+
+ If set, process security is hardwired and the ssptr and offset
+ lookup APIs are removed.
+
+config TILER_EXPOSE_SSPTR
+ bool "Expose SSPtr to userspace"
+ default y
+ depends on TI_TILER
+ help
+ This option sets whether SSPtrs for blocks are exposed
+ during TILIOC_GBLK ioctls (MemMgr_Alloc APIs). In a secure
+ TILER build, this may be the only way for userspace code
+ to learn the system-space addresses of TILER blocks.
+ You can use this flag to check whether userspace relies on
+ having access to the SSPtr.
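
Each help text above names a boot argument; overriding all of them on the kernel command line would look like this sketch (values chosen arbitrarily):

	tiler.grain=256 tiler.align=4096 tiler.cache=16 tiler.secure=1 tiler.ssptr_id=0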
diff --git a/drivers/media/video/tiler/Makefile b/drivers/media/video/tiler/Makefile
index 34bea164b150..b3276440304a 100644
--- a/drivers/media/video/tiler/Makefile
+++ b/drivers/media/video/tiler/Makefile
@@ -1,3 +1,8 @@
-obj-$(CONFIG_TILER_OMAP) += tcm/
-obj-$(CONFIG_TILER_OMAP) += tiler_omap.o
-tiler_omap-objs := tiler.o tiler_pack.o tiler_rot.o
+obj-$(CONFIG_TI_TILER) += tcm/
+
+obj-$(CONFIG_TI_TILER) += tiler.o
+tiler-objs = tiler-geom.o tiler-main.o tiler-iface.o tiler-reserve.o tmm-pat.o
+
+obj-$(CONFIG_TI_TILER) += tiler_dmm.o
+tiler_dmm-objs = dmm.o
+
diff --git a/drivers/media/video/tiler/_tiler.h b/drivers/media/video/tiler/_tiler.h
new file mode 100644
index 000000000000..aeec9f6de6ed
--- /dev/null
+++ b/drivers/media/video/tiler/_tiler.h
@@ -0,0 +1,148 @@
+/*
+ * _tiler.h
+ *
+ * TI TILER driver internal shared definitions.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TILER_H
+#define _TILER_H
+
+#include <linux/kernel.h>
+#include <mach/tiler.h>
+#include "tcm.h"
+
+#define TILER_FORMATS (TILFMT_MAX - TILFMT_MIN + 1)
+
+/* per process (thread group) info */
+struct process_info {
+ struct list_head list; /* other processes */
+ struct list_head groups; /* my groups */
+ struct list_head bufs; /* my registered buffers */
+ pid_t pid; /* really: thread group ID */
+ u32 refs; /* open tiler devices, 0 for processes
+ tracked via kernel APIs */
+ bool kernel; /* tracking kernel objects */
+};
+
+/* per group info (within a process) */
+struct gid_info {
+ struct list_head by_pid; /* other groups */
+ struct list_head areas; /* all areas in this pid/gid */
+ struct list_head reserved; /* areas pre-reserved */
+ struct list_head onedim; /* all 1D areas in this pid/gid */
+ u32 gid; /* group ID */
+ int refs; /* instances directly using this ptr */
+ struct process_info *pi; /* parent */
+};
+
+/* info for an area reserved from a container */
+struct area_info {
+ struct list_head by_gid; /* areas in this pid/gid */
+ struct list_head blocks; /* blocks in this area */
+ u32 nblocks; /* # of blocks in this area */
+
+ struct tcm_area area; /* area details */
+ struct gid_info *gi; /* link to parent, if still alive */
+};
+
+/* info for a block */
+struct mem_info {
+ struct list_head global; /* reserved / global blocks */
+ struct tiler_block_t blk; /* block info */
+ u32 num_pg; /* number of pages in page-list */
+ u32 usr; /* user space address */
+ u32 *pg_ptr; /* list of mapped struct page ptrs */
+ struct tcm_area area;
+ u32 *mem; /* list of alloced phys addresses */
+ int refs; /* number of times referenced */
+ bool alloced; /* still alloced */
+
+ struct list_head by_area; /* blocks in the same area / 1D */
+ void *parent; /* area info for 2D, else group info */
+};
+
+/* tiler geometry information */
+struct tiler_geom {
+ u32 x_shft; /* unused X-bits (as part of bpp) */
+ u32 y_shft; /* unused Y-bits (as part of bpp) */
+ u32 bpp; /* bytes per pixel */
+ u32 slot_w; /* width of each slot (in pixels) */
+ u32 slot_h; /* height of each slot (in pixels) */
+ u32 bpp_m; /* modified bytes per pixel (=1 for page mode) */
+};
+
+/* methods and variables shared between source files */
+struct tiler_ops {
+ /* block operations */
+ s32 (*alloc) (enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 key,
+ u32 gid, struct process_info *pi,
+ struct mem_info **info);
+ s32 (*map) (enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, u32 usr_addr);
+ void (*reserve_nv12) (u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, struct process_info *pi);
+ void (*reserve) (u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid, struct process_info *pi);
+ void (*unreserve) (u32 gid, struct process_info *pi);
+
+ /* block access operations */
+ struct mem_info * (*lock) (u32 key, u32 id, struct gid_info *gi);
+ struct mem_info * (*lock_by_ssptr) (u32 sys_addr);
+ void (*describe) (struct mem_info *i, struct tiler_block_info *blk);
+ void (*unlock_free) (struct mem_info *mi, bool free);
+
+ s32 (*lay_2d) (enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, u16 offs, struct gid_info *gi,
+ struct list_head *pos);
+ s32 (*lay_nv12) (int n, u16 w, u16 w1, u16 h, struct gid_info *gi,
+ u8 *p);
+ /* group operations */
+ struct gid_info * (*get_gi) (struct process_info *pi, u32 gid);
+ void (*release_gi) (struct gid_info *gi);
+ void (*destroy_group) (struct gid_info *pi);
+
+ /* group access operations */
+ void (*add_reserved) (struct list_head *reserved, struct gid_info *gi);
+ void (*release) (struct list_head *reserved);
+
+ /* area operations */
+ s32 (*analize) (enum tiler_fmt fmt, u32 width, u32 height,
+ u16 *x_area, u16 *y_area, u16 *band,
+ u16 *align, u16 *offs, u16 *in_offs);
+
+ /* process operations */
+ void (*cleanup) (void);
+
+ /* geometry operations */
+ void (*xy) (u32 ssptr, u32 *x, u32 *y);
+ u32 (*addr) (enum tiler_fmt fmt, u32 x, u32 y);
+ const struct tiler_geom * (*geom) (enum tiler_fmt fmt);
+
+ /* additional info */
+ const struct file_operations *fops;
+
+ bool nv12_packed; /* whether NV12 is packed into same container */
+ u32 page; /* page size */
+ u32 width; /* container width */
+ u32 height; /* container height */
+};
+
+void tiler_iface_init(struct tiler_ops *tiler);
+void tiler_geom_init(struct tiler_ops *tiler);
+void tiler_reserve_init(struct tiler_ops *tiler);
+
+#endif
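
The three init hooks at the bottom of this header suggest each source file fills in its slice of the shared ops table. A hypothetical sketch of how tiler-main.c might wire them together (names and registration details assumed):

static struct tiler_ops tiler;	/* shared ops table */

static s32 __init tiler_init_sketch(void)
{
	tiler_geom_init(&tiler);	/* xy/addr/geom */
	tiler_reserve_init(&tiler);	/* reserve/lay_2d/lay_nv12 */
	tiler_iface_init(&tiler);	/* fops and ioctl plumbing */
	/* ... register the device using tiler.fops ... */
	return 0;
}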
diff --git a/drivers/media/video/tiler/dmm.c b/drivers/media/video/tiler/dmm.c
new file mode 100644
index 000000000000..5ca5e7d5fa39
--- /dev/null
+++ b/drivers/media/video/tiler/dmm.c
@@ -0,0 +1,231 @@
+/*
+ * dmm.c
+ *
+ * DMM driver support functions for TI OMAP processors.
+ *
+ * Authors: David Sin <davidsin@ti.com>
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/io.h> /* ioremap() */
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include <mach/dmm.h>
+
+#undef __DEBUG__
+
+#define MASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+#define SET_FLD(reg, msb, lsb, val) \
+(((reg) & ~MASK((msb), (lsb))) | (((val) << (lsb)) & MASK((msb), (lsb))))
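
To make the field macros concrete, one worked expansion (editor's example, not part of the patch):

/* MASK(9, 8) == ((1 << 2) - 1) << 8 == 0x300 */
/* SET_FLD(0xFFFFFFFF, 9, 8, 2) == (0xFFFFFFFF & ~0x300) | ((2 << 8) & 0x300)
 *                              == 0xFFFFFCFF | 0x200 == 0xFFFFFEFF */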
+
+#ifdef __DEBUG__
+#define DEBUG(x, y) printk(KERN_NOTICE "%s()::%d:%s=(0x%08x)\n", \
+ __func__, __LINE__, x, (s32)y);
+#else
+#define DEBUG(x, y)
+#endif
+
+static struct platform_driver dmm_driver_ldm = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "dmm",
+ },
+ .probe = NULL,
+ .shutdown = NULL,
+ .remove = NULL,
+};
+
+s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
+{
+ void __iomem *r;
+ u32 v;
+
+ /* Only manual refill supported */
+ if (mode != MANUAL)
+ return -EFAULT;
+
+ /* Check that the DMM_PAT_STATUS register has not reported an error */
+ r = dmm->base + DMM_PAT_STATUS__0;
+ v = __raw_readl(r);
+ if ((v & 0xFC00) != 0) {
+ while (1)
+ printk(KERN_ERR "dmm_pat_refill() error.\n");
+ }
+
+ /* Set "next" register to NULL */
+ r = dmm->base + DMM_PAT_DESCR__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 4, (u32) NULL);
+ __raw_writel(v, r);
+
+ /* Set area to be refilled */
+ r = dmm->base + DMM_PAT_AREA__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 30, 24, pd->area.y1);
+ v = SET_FLD(v, 23, 16, pd->area.x1);
+ v = SET_FLD(v, 14, 8, pd->area.y0);
+ v = SET_FLD(v, 7, 0, pd->area.x0);
+ __raw_writel(v, r);
+ wmb();
+
+#ifdef __DEBUG__
+ printk(KERN_NOTICE "\nx0=(%d),y0=(%d),x1=(%d),y1=(%d)\n",
+ (char)pd->area.x0,
+ (char)pd->area.y0,
+ (char)pd->area.x1,
+ (char)pd->area.y1);
+#endif
+
+ /* First, clear the DMM_PAT_IRQSTATUS register */
+ r = dmm->base + DMM_PAT_IRQSTATUS;
+ __raw_writel(0xFFFFFFFF, r);
+ wmb();
+
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ do {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
+ } while (v != 0x0);
+
+ /* Fill data register */
+ r = dmm->base + DMM_PAT_DATA__0;
+ v = __raw_readl(r);
+
+ /* pd->data must be 16 aligned */
+ BUG_ON(pd->data & 15);
+ v = SET_FLD(v, 31, 4, pd->data >> 4);
+ __raw_writel(v, r);
+ wmb();
+
+ /* Read back PAT_DATA__0 to see if write was successful */
+ do {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_DATA__0", v);
+ } while (v != pd->data);
+
+ r = dmm->base + DMM_PAT_CTRL__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 28, pd->ctrl.ini);
+ v = SET_FLD(v, 16, 16, pd->ctrl.sync);
+ v = SET_FLD(v, 9, 8, pd->ctrl.lut_id);
+ v = SET_FLD(v, 6, 4, pd->ctrl.dir);
+ v = SET_FLD(v, 0, 0, pd->ctrl.start);
+ __raw_writel(v, r);
+ wmb();
+
+ /* Check if PAT_IRQSTATUS_RAW is set after the PAT has been refilled */
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ do {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
+ } while ((v & 0x3) != 0x3);
+
+ /* Again, clear the DMM_PAT_IRQSTATUS register */
+ r = dmm->base + DMM_PAT_IRQSTATUS;
+ __raw_writel(0xFFFFFFFF, r);
+ wmb();
+
+ r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+ do {
+ v = __raw_readl(r);
+ DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
+ } while (v != 0x0);
+
+ /* Again, set "next" register to NULL to clear any PAT STATUS errors */
+ r = dmm->base + DMM_PAT_DESCR__0;
+ v = __raw_readl(r);
+ v = SET_FLD(v, 31, 4, (u32) NULL);
+ __raw_writel(v, r);
+
+ /*
+ * Now, check that the DMM_PAT_STATUS register
+ * has not reported an error before exiting.
+ */
+ r = dmm->base + DMM_PAT_STATUS__0;
+ v = __raw_readl(r);
+ if ((v & 0xFC00) != 0) {
+ while (1)
+ printk(KERN_ERR "dmm_pat_refill() error.\n");
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dmm_pat_refill);
+
+struct dmm *dmm_pat_init(u32 id)
+{
+ u32 base;
+ struct dmm *dmm;
+ switch (id) {
+ case 0:
+ /* only support id 0 for now */
+ base = DMM_BASE;
+ break;
+ default:
+ return NULL;
+ }
+
+ dmm = kmalloc(sizeof(*dmm), GFP_KERNEL);
+ if (!dmm)
+ return NULL;
+
+ dmm->base = ioremap(base, DMM_SIZE);
+ if (!dmm->base) {
+ kfree(dmm);
+ return NULL;
+ }
+
+ __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__0);
+ __raw_writel(0x88888888, dmm->base + DMM_PAT_VIEW__1);
+ __raw_writel(0x80808080, dmm->base + DMM_PAT_VIEW_MAP__0);
+ __raw_writel(0x80000000, dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__0);
+ __raw_writel(0x88888888, dmm->base + DMM_TILER_OR__1);
+
+ return dmm;
+}
+EXPORT_SYMBOL(dmm_pat_init);
+
+/**
+ * Clean up the physical address translator.
+ * @param dmm Device data
+ * @return an error status.
+ */
+void dmm_pat_release(struct dmm *dmm)
+{
+ if (dmm) {
+ iounmap(dmm->base);
+ kfree(dmm);
+ }
+}
+EXPORT_SYMBOL(dmm_pat_release);
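
Putting the three exported entry points together, a sketch modeled on the tmm_pat_map() caller removed above ('area' and 'page_pa' stand for a pat_area and a 16-byte-aligned physical address):

static s32 refill_sketch(struct pat_area area, u32 page_pa)
{
	struct dmm *dmm = dmm_pat_init(0);	/* only id 0 is supported */
	struct pat pat_desc = {0};
	s32 r;

	if (!dmm)
		return -ENOMEM;
	pat_desc.ctrl.start = 1;	/* as in the former tmm_pat_map() */
	pat_desc.area = area;
	pat_desc.next = NULL;
	pat_desc.data = page_pa;	/* must be 16-byte aligned */
	r = dmm_pat_refill(dmm, &pat_desc, MANUAL);
	dmm_pat_release(dmm);
	return r;
}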
+
+static s32 __init dmm_init(void)
+{
+ return platform_driver_register(&dmm_driver_ldm);
+}
+
+static void __exit dmm_exit(void)
+{
+ platform_driver_unregister(&dmm_driver_ldm);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("davidsin@ti.com");
+MODULE_AUTHOR("molnar@ti.com");
+module_init(dmm_init);
+module_exit(dmm_exit);
diff --git a/drivers/media/video/tiler/tcm/tcm.h b/drivers/media/video/tiler/tcm.h
index d205dad32a46..68b0d684dd56 100644
--- a/drivers/media/video/tiler/tcm/tcm.h
+++ b/drivers/media/video/tiler/tcm.h
@@ -2,7 +2,9 @@
* tcm.h
*
* TILER container manager specification and support functions for TI
- * processors.
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
*
* Copyright (C) 2009-2010 Texas Instruments, Inc.
*
@@ -15,19 +17,18 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-#ifndef _TCM_H_
-#define _TCM_H_
-
-#include <linux/init.h>
-#include <linux/module.h>
+#ifndef TCM_H
+#define TCM_H
struct tcm;
+/* point */
struct tcm_pt {
u16 x;
u16 y;
};
+/* 1d or 2d area */
struct tcm_area {
bool is2d; /* whether area is 1d or 2d */
struct tcm *tcm; /* parent */
@@ -49,8 +50,6 @@ struct tcm {
struct tcm_area *area);
s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
s32 (*free) (struct tcm *tcm, struct tcm_area *area);
- s32 (*get_parent)(struct tcm *tcm, struct tcm_pt *pt,
- struct tcm_area *area);
void (*deinit) (struct tcm *tcm);
};
@@ -79,12 +78,12 @@ struct tcm {
* @param width Width of container
* @param height Height of container
* @param attr Container manager specific configuration
- * arguments. Please describe these in
- * your header file.
+ * arguments. Please describe these in
+ * your header file.
*
* @return Pointer to the allocated and initialized container
- * manager. NULL on failure. DO NOT leak any memory on
- * failure!
+ * manager. NULL on failure. DO NOT leak any memory on
+ * failure!
*/
#define TCM_INIT(name, attr_t) \
struct tcm *name(u16 width, u16 height, typeof(attr_t) *attr);
@@ -92,14 +91,12 @@ struct tcm *name(u16 width, u16 height, typeof(attr_t) *attr);
/**
* Deinitialize tiler container manager.
*
- * @author Ravi Ramachandra (3/1/2010)
- *
* @param tcm Pointer to container manager.
*
* @return 0 on success, non-0 error value on error. The call
- * should free as much memory as possible and meaningful
- * even on failure. Some error codes: -ENODEV: invalid
- * manager.
+ * should free as much memory as possible and meaningful
+ * even on failure. Some error codes: -ENODEV: invalid
+ * manager.
*/
static inline void tcm_deinit(struct tcm *tcm)
{
@@ -110,34 +107,36 @@ static inline void tcm_deinit(struct tcm *tcm)
/**
* Reserves a 2D area in the container.
*
- * @author Ravi Ramachandra (3/1/2010)
- *
* @param tcm Pointer to container manager.
* @param height Height(in pages) of area to be reserved.
* @param width Width(in pages) of area to be reserved.
* @param align Alignment requirement for top-left corner of area. Not
- * all values may be supported by the container manager,
- * but it must support 0 (1), 32 and 64.
- * 0 value is equivalent to 1.
+ * all values may be supported by the container manager,
+ * but it must support 0 (1), 32 and 64.
+ * 0 value is equivalent to 1.
* @param area Pointer to where the reserved area should be stored.
*
* @return 0 on success. Non-0 error code on failure. Also,
- * the tcm field of the area will be set to NULL on
- * failure. Some error codes: -ENODEV: invalid manager,
- * -EINVAL: invalid area, -ENOMEM: not enough space for
- * allocation.
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
*/
static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
u16 align, struct tcm_area *area)
{
/* perform rudimentary error checking */
- s32 res = (tcm == NULL ? -ENODEV :
- area == NULL ? -EINVAL :
- (height > tcm->height || width > tcm->width) ? -ENOMEM :
- tcm->reserve_2d(tcm, height, width, align, area));
-
- if (area)
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+		   /* align must be a power of 2 */
+ align & (align - 1)) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
area->tcm = res ? NULL : tcm;
+ }
return res;
}
@@ -145,29 +144,29 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
/**
* Reserves a 1D area in the container.
*
- * @author Ravi Ramachandra (3/1/2010)
- *
* @param tcm Pointer to container manager.
* @param slots Number of (contiguous) slots to reserve.
* @param area Pointer to where the reserved area should be stored.
*
* @return 0 on success. Non-0 error code on failure. Also,
- * the tcm field of the area will be set to NULL on
- * failure. Some error codes: -ENODEV: invalid manager,
- * -EINVAL: invalid area, -ENOMEM: not enough space for
- * allocation.
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
*/
static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
struct tcm_area *area)
{
/* perform rudimentary error checking */
- s32 res = (tcm == NULL ? -ENODEV :
- area == NULL ? -EINVAL :
- slots > (tcm->width * (u32) tcm->height) ? -ENOMEM :
- tcm->reserve_1d(tcm, slots, area));
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
- if (area)
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
area->tcm = res ? NULL : tcm;
+ }
return res;
}
@@ -175,17 +174,15 @@ static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
/**
* Free a previously reserved area from the container.
*
- * @author Ravi Ramachandra (3/1/2010)
- *
* @param area Pointer to area reserved by a prior call to
- * tcm_reserve_1d or tcm_reserve_2d call, whether
- * it was successful or not. (Note: all fields of
- * the structure must match.)
+ *		  tcm_reserve_1d or tcm_reserve_2d, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
*
* @return 0 on success. Non-0 error code on failure. Also, the tcm
- * field of the area is set to NULL on success to avoid subsequent
- * freeing. This call will succeed even if supplying
- * the area from a failed reserved call.
+ * field of the area is set to NULL on success to avoid subsequent
+ *	  freeing. This call will succeed even when supplied
+ *	  the area from a failed reserve call.
*/
static inline s32 tcm_free(struct tcm_area *area)
{
@@ -200,37 +197,6 @@ static inline s32 tcm_free(struct tcm_area *area)
return res;
}
-
-/**
- * Retrieves the parent area (1D or 2D) for a given co-ordinate in the
- * container.
- *
- * @author Ravi Ramachandra (3/1/2010)
- *
- * @param tcm Pointer to container manager.
- * @param pt Pointer to the coordinates of a slot in the container.
- * @param area Pointer to where the reserved area should be stored.
- *
- * @return 0 on success. Non-0 error code on failure. Also,
- * the tcm field of the area will be set to NULL on
- * failure. Some error codes: -ENODEV: invalid manager,
- * -EINVAL: invalid area, -ENOENT: coordinate is not part of any
- * active area.
- */
-static inline s32 tcm_get_parent(struct tcm *tcm, struct tcm_pt *pt,
- struct tcm_area *area)
-{
- s32 res = (tcm == NULL ? -ENODEV :
- area == NULL ? -EINVAL :
- (pt->x >= tcm->width || pt->y >= tcm->height) ? -ENOENT :
- tcm->get_parent(tcm, pt, area));
-
- if (area)
- area->tcm = res ? NULL : tcm;
-
- return res;
-}
-
/*=============================================================================
HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
=============================================================================*/
@@ -242,8 +208,6 @@ static inline s32 tcm_get_parent(struct tcm *tcm, struct tcm_pt *pt,
* fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
* longer a valid area.
*
- * @author Lajos Molnar (3/17/2010)
- *
* @param parent Pointer to a VALID parent area that will get modified
* @param slice Pointer to the slice area that will get modified
*/
@@ -267,16 +231,10 @@ static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
}
}
-/**
- * Verifies if a tcm area is logically valid.
- *
- * @param area Pointer to tcm area
- *
- * @return TRUE if area is logically valid, FALSE otherwise.
- */
+/* Verify if a tcm area is logically valid */
static inline bool tcm_area_is_valid(struct tcm_area *area)
{
- return (area && area->tcm &&
+ return area && area->tcm &&
/* coordinate bounds */
area->p1.x < area->tcm->width &&
area->p1.y < area->tcm->height &&
@@ -288,8 +246,7 @@ static inline bool tcm_area_is_valid(struct tcm_area *area)
area->p1.x + area->p1.y * area->tcm->width) ||
/* 2D coordinate relationship */
(area->is2d &&
- area->p0.x <= area->p1.x))
- );
+ area->p0.x <= area->p1.x));
}
/* see if a coordinate is within an area */
@@ -337,11 +294,11 @@ static inline u16 __tcm_sizeof(struct tcm_area *area)
* syntactically as a for(;;) statement.
*
* @param var Name of a local variable of type 'struct
- * tcm_area *' that will get modified to
- * contain each slice.
+ * tcm_area *' that will get modified to
+ * contain each slice.
* @param area Pointer to the VALID parent area. This
- * structure will not get modified
- * throughout the loop.
+ * structure will not get modified
+ * throughout the loop.
*
*/
#define tcm_for_each_slice(var, area, safe) \
@@ -349,4 +306,4 @@ static inline u16 __tcm_sizeof(struct tcm_area *area)
tcm_slice(&safe, &var); \
var.tcm; tcm_slice(&safe, &var))
-#endif /* _TCM_H_ */
+#endif
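For orientation, a minimal usage sketch of the API above (illustrative only,
not part of the patch; the function name and 256x128 geometry are assumed,
with the container coming from sita_init() declared in tcm/tcm-sita.h):

	#include "tcm.h"
	#include "tcm/tcm-sita.h"

	static int tcm_example(void)
	{
		struct tcm_area area, slice, safe;
		/* NULL attr: sita_init() falls back to its 3:1 divider point */
		struct tcm *tcm = sita_init(256, 128, NULL);

		if (!tcm)
			return -ENOMEM;

		/* reserve a 64x32 2D area, top-left aligned to 32 slots */
		if (!tcm_reserve_2d(tcm, 64, 32, 32, &area)) {
			/* enumerate 2D slices; a 2D area yields one slice */
			tcm_for_each_slice(slice, area, safe)
				; /* operate on slice.p0 .. slice.p1 here */
			tcm_free(&area); /* clears area.tcm on success */
		}

		tcm_deinit(tcm);
		return 0;
	}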
diff --git a/drivers/media/video/tiler/tcm/Makefile b/drivers/media/video/tiler/tcm/Makefile
index f03f3b7c862f..efd62d778478 100644
--- a/drivers/media/video/tiler/tcm/Makefile
+++ b/drivers/media/video/tiler/tcm/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_TILER_OMAP) += tcm_sita.o
+obj-$(CONFIG_TI_TILER) += tcm-sita.o
diff --git a/drivers/media/video/tiler/tcm/_tcm-sita.h b/drivers/media/video/tiler/tcm/_tcm-sita.h
new file mode 100644
index 000000000000..a300b923ef08
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/_tcm-sita.h
@@ -0,0 +1,65 @@
+/*
+ * _tcm-sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "../tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments that are also border segments of the scan field. Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+	u16 neighs;		/* number of edge + busy neighbors */
+};
+
+struct sita_pvt {
+ struct mutex mtx;
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+#endif
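A quick sanity sketch for the LEN() helper above (an assumed example, not
part of the patch): the result is an inclusive slot count and is symmetric
in its arguments, which lets the scanners measure reversed scan fields
uniformly:

	#include <linux/kernel.h>
	#include "_tcm-sita.h"

	static void __maybe_unused len_examples(void)
	{
		BUILD_BUG_ON(LEN(0, 255) != 256); /* a full 256-slot row */
		BUILD_BUG_ON(LEN(255, 0) != 256); /* reversed, same count */
		BUILD_BUG_ON(LEN(7, 7) != 1);     /* a single slot */
	}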
diff --git a/drivers/media/video/tiler/tcm/_tcm_sita.h b/drivers/media/video/tiler/tcm/_tcm_sita.h
deleted file mode 100644
index 75de584c747d..000000000000
--- a/drivers/media/video/tiler/tcm/_tcm_sita.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * _tcm_sita.h
- *
- * SImple Tiler Allocator (SiTA) private structures.
- *
- * Author: Ravi Ramachandra <r.ramachandra@ti.com>
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _TCM_SITA_H_
-#define _TCM_SITA_H_
-
-#include "tcm.h"
-
-#define TL_CORNER 0
-#define TR_CORNER 1
-#define BL_CORNER 3
-#define BR_CORNER 4
-
-/*Provide inclusive length between co-ordinates */
-#define INCL_LEN(high, low) ((high) - (low) + 1)
-#define INCL_LEN_MOD(start, end) ((start) > (end) ? (start) - (end) + 1 : \
- (end) - (start) + 1)
-
-#define BOUNDARY(stat) ((stat)->top_boundary + (stat)->bottom_boundary + \
- (stat)->left_boundary + (stat)->right_boundary)
-#define OCCUPIED(stat) ((stat)->top_occupied + (stat)->bottom_occupied + \
- (stat)->left_occupied + (stat)->right_occupied)
-
-enum Criteria {
- CR_MAX_NEIGHS = 0x01,
- CR_FIRST_FOUND = 0x10,
- CR_BIAS_HORIZONTAL = 0x20,
- CR_BIAS_VERTICAL = 0x40,
- CR_DIAGONAL_BALANCE = 0x80
-};
-
-struct nearness_factor {
- s32 x;
- s32 y;
-};
-
-/*
- * Area info kept
- */
-struct area_spec {
- struct tcm_area area;
- struct list_head list;
-};
-
-/*
- * Everything is a rectangle with four sides and on
- * each side you could have a boundary or another Tile.
- * The tile could be Occupied or Not. These info is stored
- */
-struct neighbour_stats {
- u16 left_boundary;
- u16 left_occupied;
- u16 top_boundary;
- u16 top_occupied;
- u16 right_boundary;
- u16 right_occupied;
- u16 bottom_boundary;
- u16 bottom_occupied;
-};
-
-struct slot {
- u8 busy; /* is slot occupied */
- struct tcm_area parent; /* parent area */
- u32 reserved;
-};
-
-struct sita_pvt {
- u16 width;
- u16 height;
- struct list_head res; /* all allocations */
- struct mutex mtx;
- struct tcm_pt div_pt; /* divider point splitting container */
- struct slot **map; /* container slots */
-};
-
-#endif /* _TCM_SITA_H_ */
diff --git a/drivers/media/video/tiler/tcm/tcm-sita.c b/drivers/media/video/tiler/tcm/tcm-sita.c
new file mode 100644
index 000000000000..71b921308bba
--- /dev/null
+++ b/drivers/media/video/tiler/tcm/tcm-sita.c
@@ -0,0 +1,934 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+
+#include "_tcm-sita.h"
+#include "tcm-sita.h"
+
+#define TCM_ALG_NAME "tcm_sita"
+#include "tcm-utils.h"
+
+#define X_SCAN_LIMITER 1
+#define Y_SCAN_LIMITER 1
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+#ifdef SCAN_BOTTOM_UP
+static s32 CR_R2L_B2T = CR_FIRST_FOUND;
+static s32 CR_L2R_B2T = CR_DIAGONAL_BALANCE;
+#endif
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+#ifdef SCAN_BOTTOM_UP
+static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+#endif
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ mutex_init(&(pvt->mtx));
+
+	/* create the container map */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ mutex_lock(&(pvt->mtx));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ mutex_unlock(&(pvt->mtx));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ mutex_lock(&(pvt->mtx));
+ fill_area(tcm, &area, NULL);
+ mutex_unlock(&(pvt->mtx));
+
+ mutex_destroy(&(pvt->mtx));
+
+	for (i = 0; i < tcm->width; i++)	/* map has width columns */
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ mutex_lock(&(pvt->mtx));
+#ifdef RESTRICT_1D
+ /* scan within predefined 1D boundary */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, pvt->div_pt.y);
+#else
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+#endif
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ mutex_unlock(&(pvt->mtx));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area	pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ mutex_lock(&(pvt->mtx));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ mutex_unlock(&(pvt->mtx));
+ return ret;
+}
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ mutex_lock(&(pvt->mtx));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ mutex_unlock(&(pvt->mtx));
+
+ return 0;
+}
+
+/**
+ * Note: In general, the coordinates of the scan field are relative to the
+ * scan sweep direction. The scan origin (e.g. top-left corner) will always
+ * be the p0 member of the field. Therefore, for a scan from the top-left,
+ * p0.x <= p1.x and p0.y <= p1.y; whereas for a scan from the bottom-right,
+ * p1.x <= p0.x and p1.y <= p0.y.
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_r2l_t2b:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+	start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x + 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+#ifdef SCAN_BOTTOM_UP
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ /* TODO: Should I check scan area?
+ * Might have to take it as input during initialization
+ */
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_r2l_b2t:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(start_y, end_y))
+ return -ENOSPC;
+
+ /* adjust start_x and start_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field bottom-to-top, right-to-left */
+ for (y = start_y; y >= end_y; y--) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_B2T, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x + 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+#endif
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_l2r_t2b:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x - 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+#ifdef SCAN_BOTTOM_UP
+/**
+ * Raster scan horizontally left to right from bottom to top to find a
+ * place for a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ PA(2, "scan_l2r_b2t:", field);
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(start_y, end_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and start_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ start_y = start_y - h + 1;
+
+ P2("ali=%d x=%d..%d y=%d..%d", align, start_x, end_x, start_y, end_y);
+
+ /* scan field bottom-to-top, left-to-right */
+ for (y = start_y; y >= end_y; y--) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ P3("found shoulder: %d,%d", x, y);
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_B2T, &best))
+ goto done;
+#ifdef X_SCAN_LIMITER
+ /* change upper x bound */
+ end_x = x - 1;
+#endif
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ P3("moving to: %d,%d", x, y);
+ }
+ }
+
+#ifdef Y_SCAN_LIMITER
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+#endif
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+#endif
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ PA(2, "scan_r2l_b2t_one_dim:", field);
+
+	/*
+	 * Currently we only support a full-width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ PA(2, "fill 2d area", &a);
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h	left, top, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+ * If first found is enabled then we stop looking
+ * NOTE: For horizontal bias we always give the first found, because our
+ * scan is horizontal-raster-based and the first candidate will always
+ * have the horizontal bias.
+ */
+ bool first = criteria & (CR_FIRST_FOUND | CR_BIAS_HORIZONTAL);
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+	/* see if this area is better than the best so far */
+
+ /* neighbor check */
+ if ((criteria & CR_MAX_NEIGHS) &&
+ me.neighs > best->neighs)
+ goto better;
+
+ /* vertical bias check */
+ if ((criteria & CR_BIAS_VERTICAL) &&
+ /*
+ * NOTE: not checking if lengths are same, because that does not
+ * find new shoulders on the same row after a fit
+ */
+ LEN(me.a.p0.y, field->p0.y) >
+ LEN(best->a.p0.y, field->p0.y))
+ goto better;
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+	     /* neighs are equal here, so compare busy counts next */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+	/*
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+	/* clear any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
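To make the scan-field convention noted in this file concrete (p0 is always
the scan origin), these are the seeds the file itself uses for a full
256x128 container, restated here as a sketch rather than new behavior:

	struct tcm_area field;

	/* scan_l2r_t2b: origin at top-left (p0.x <= p1.x, p0.y <= p1.y) */
	assign(&field, 0, 0, 255, 127);

	/* scan_r2l_t2b: origin at top-right (p1.x <= p0.x, p0.y <= p1.y) */
	assign(&field, 255, 0, 0, 127);

	/* scan_r2l_b2t_one_dim: origin at bottom-right
	   (p1.x <= p0.x, p1.y <= p0.y) */
	assign(&field, 255, 127, 0, 0);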
diff --git a/drivers/media/video/tiler/tcm/tcm_sita.h b/drivers/media/video/tiler/tcm/tcm-sita.h
index fb5f8e89192c..86fa5918b8b7 100644
--- a/drivers/media/video/tiler/tcm/tcm_sita.h
+++ b/drivers/media/video/tiler/tcm/tcm-sita.h
@@ -16,10 +16,10 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-#ifndef TCM_SITA_H_
-#define TCM_SITA_H_
+#ifndef TCM_SITA_H
+#define TCM_SITA_H
-#include "tcm.h"
+#include "../tcm.h"
/**
* Create a SiTA tiler container manager.
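A hypothetical sketch of creating the manager declared here with an explicit
divider point; when attr is NULL or out of range, sita_init() falls back to
the 3:1 split computed in tcm-sita.c:

	#include "tcm-sita.h"

	struct tcm_pt div_pt = { .x = 192, .y = 96 }; /* 3:1 for 256x128 */
	struct tcm *container = sita_init(256, 128, &div_pt);

	if (container)
		tcm_deinit(container); /* tears down map and private data */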
diff --git a/drivers/media/video/tiler/tcm/tcm_utils.h b/drivers/media/video/tiler/tcm/tcm-utils.h
index 7d0ed5c149b8..0d1260af1972 100644
--- a/drivers/media/video/tiler/tcm/tcm_utils.h
+++ b/drivers/media/video/tiler/tcm/tcm-utils.h
@@ -3,6 +3,8 @@
*
* Utility functions for implementing TILER container managers.
*
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
* Copyright (C) 2009-2010 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
@@ -14,19 +16,17 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
-#ifndef _TCM_UTILS_H
-#define _TCM_UTILS_H
-
-#include "tcm.h"
+#ifndef TCM_UTILS_H
+#define TCM_UTILS_H
-#define AREA_FMT "(%03d %03d)-(%03d %03d)"
-#define AREA(area) (area).p0.x, (area).p0.y, (area).p1.x, (area).p1.y
+#include "../tcm.h"
/* TCM_ALG_NAME must be defined to use the debug methods */
#ifdef DEBUG
#define IFDEBUG(x) x
#else
+/* compile-check debug statements even if not DEBUG */
#define IFDEBUG(x) do { if (0) x; } while (0)
#endif
@@ -38,7 +38,8 @@
#define P2(fmt, ...) P(KERN_INFO, fmt, ##__VA_ARGS__)
#define P3(fmt, ...) P(KERN_DEBUG, fmt, ##__VA_ARGS__)
-#define PA(level, msg, p_area) P##level(msg " " AREA_FMT "\n", AREA(*(p_area)))
+#define PA(level, msg, p_area) P##level(msg " (%03d %03d)-(%03d %03d)\n", \
+ (p_area)->p0.x, (p_area)->p0.y, (p_area)->p1.x, (p_area)->p1.y)
/* assign coordinates to area */
static inline
@@ -50,10 +51,4 @@ void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
a->p1.y = y1;
}
-static inline
-void dump_area(struct tcm_area *area)
-{
- printk(KERN_NOTICE AREA_FMT "\n", AREA(*area));
-}
-
#endif
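The debug helpers above follow the pattern already used by tcm-sita.c:
define TCM_ALG_NAME before including this header. A small, hypothetical
sketch (the statements are type-checked but compile to no-ops unless DEBUG
is defined):

	#define TCM_ALG_NAME "tcm_example" /* assumed manager name */
	#include "tcm/tcm-utils.h"

	static void debug_example(struct tcm_area *a)
	{
		assign(a, 0, 0, 63, 31);
		P2("reserved %d slots", 64 * 32); /* KERN_INFO level */
		PA(3, "area:", a); /* KERN_DEBUG dump of (p0)-(p1) */
	}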
diff --git a/drivers/media/video/tiler/tcm/tcm_sita.c b/drivers/media/video/tiler/tcm/tcm_sita.c
deleted file mode 100644
index a2b549abac8f..000000000000
--- a/drivers/media/video/tiler/tcm/tcm_sita.c
+++ /dev/null
@@ -1,1359 +0,0 @@
-/*
- * tcm_sita.c
- *
- * Author: Ravi Ramachandra <r.ramachandra@ti.com>
- *
- * SImple Tiler Allocator (SiTA): 2D and 1D allocation(reservation) algorithm
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- *
- */
-#include <linux/slab.h>
-
-#include "_tcm_sita.h"
-#include "tcm_sita.h"
-
-#define TCM_ALG_NAME "tcm_sita"
-#include "tcm_utils.h"
-
-#define X_SCAN_LIMITER 1
-#define Y_SCAN_LIMITER 1
-
-#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
-
-/* Individual selection criteria for different scan areas */
-static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
-static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
-#ifdef SCAN_BOTTOM_UP
-static s32 CR_R2L_B2T = CR_FIRST_FOUND;
-static s32 CR_L2R_B2T = CR_DIAGONAL_BALANCE;
-#endif
-
-/*********************************************
- * TCM API - Sita Implementation
- *********************************************/
-static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
- struct tcm_area *area);
-static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area
- *area);
-static s32 sita_free(struct tcm *tcm, struct tcm_area *to_be_removed_area);
-static s32 sita_get_parent(struct tcm *tcm, struct tcm_pt *pt,
- struct tcm_area *area);
-static void sita_deinit(struct tcm *tcm);
-
-/*********************************************
- * Main Scanner functions
- *********************************************/
-static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *area);
-
-static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area);
-
-static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area);
-
-#ifdef SCAN_BOTTOM_UP
-static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area);
-
-static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area);
-#endif
-static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_pages,
- struct tcm_area *field, struct tcm_area *area);
-
-/*********************************************
- * Support Infrastructure Methods
- *********************************************/
-static s32 check_fit_r_and_b(struct tcm *tcm, u16 w, u16 h, u16 left_x,
- u16 top_y);
-
-static s32 check_fit_r_one_dim(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *busy_x, u16 *busy_y);
-
-static void select_candidate(struct tcm *tcm, u16 w, u16 h,
- struct list_head *maybes, struct tcm_area *field,
- s32 criteria, struct tcm_area *area);
-
-static void get_nearness_factor(struct tcm_area *field,
- struct tcm_area *candidate, struct nearness_factor *nf);
-
-static s32 get_busy_neigh_stats(struct tcm *tcm, u16 width, u16 height,
- struct tcm_area *top_left_corner,
- struct neighbour_stats *neighbour_stat);
-
-static void fill_1d_area(struct tcm *tcm,
- struct tcm_area *area, struct slot slot);
-
-static void fill_2d_area(struct tcm *tcm,
- struct tcm_area *area, struct slot slot);
-
-static s32 move_left(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *xx, u16 *yy);
-static s32 move_right(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *xx, u16 *yy);
-/*********************************************/
-
-/*********************************************
- * Utility Methods
- *********************************************/
-
-/* TODO: check if element allocation succeeded */
-
-/* insert a given area at the end of a given list */
-static
-struct area_spec *insert_element(struct list_head *head, struct tcm_area *area)
-{
- struct area_spec *elem;
-
- elem = kmalloc(sizeof(*elem), GFP_KERNEL);
- if (elem) {
- elem->area = *area;
- list_add_tail(&elem->list, head);
- }
- return elem;
-}
-
-static
-s32 rem_element_with_match(struct list_head *head,
- struct tcm_area *area, u16 *is2d)
-{
- struct area_spec *elem = NULL;
-
- /*If the area to be removed matchs the list head itself,
- we need to put the next one as list head */
- list_for_each_entry(elem, head, list) {
- if (elem->area.p0.x == area->p0.x
- && elem->area.p0.y == area->p0.y
- && elem->area.p1.x == area->p1.x
- && elem->area.p1.y == area->p1.y) {
-
- *is2d = elem->area.is2d;
- list_del(&elem->list);
-
- kfree(elem);
- return 0;
- }
- }
- return -ENOENT;
-}
-
-static
-void clean_list(struct list_head *head)
-{
- struct area_spec *elem = NULL, *elem_ = NULL;
-
- list_for_each_entry_safe(elem, elem_, head, list) {
- list_del(&elem->list);
- kfree(elem);
- }
-}
-
-#if 0
-static
-void dump_list_entries(struct list_head *head)
-{
- struct area_spec *elem = NULL;
-
- P1("Printing List Entries:\n");
-
- list_for_each_entry(elem, head, list) {
- printk(KERN_NOTICE "%dD:" AREA_FMT "\n", elem->area.type,
- AREA(elem->area));
- }
-
- P1("List Finished\n");
-}
-
-static
-s32 dump_neigh_stats(struct neighbour_stats *neighbour)
-{
- P1("Top Occ:Boundary %d:%d\n", neighbour->top_occupied,
- neighbour->top_boundary);
- P1("Bot Occ:Boundary %d:%d\n", neighbour->bottom_occupied,
- neighbour->bottom_boundary);
- P1("Left Occ:Boundary %d:%d\n", neighbour->left_occupied,
- neighbour->left_boundary);
- P1("Rigt Occ:Boundary %d:%d\n", neighbour->right_occupied,
- neighbour->right_boundary);
- return 0;
-}
-#endif
-
-struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
-{
- struct tcm *tcm = NULL;
- struct sita_pvt *pvt = NULL;
- struct slot init_tile = {0};
- struct tcm_area area = {0};
- s32 i = 0;
-
- if (width == 0 || height == 0)
- goto error;
-
- tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
- pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
- if (!tcm || !pvt)
- goto error;
-
- memset(tcm, 0, sizeof(*tcm));
- memset(pvt, 0, sizeof(*pvt));
-
- /* Updating the pointers to SiTA implementation APIs */
- tcm->height = height;
- tcm->width = width;
- tcm->reserve_2d = sita_reserve_2d;
- tcm->reserve_1d = sita_reserve_1d;
- tcm->get_parent = sita_get_parent;
- tcm->free = sita_free;
- tcm->deinit = sita_deinit;
- tcm->pvt = (void *)pvt;
-
- INIT_LIST_HEAD(&pvt->res);
- pvt->height = height;
- pvt->width = width;
-
- mutex_init(&(pvt->mtx));
-
- /* Creating tam map */
- pvt->map = kmalloc(sizeof(*pvt->map) * pvt->width, GFP_KERNEL);
-
- if (!pvt->map)
- goto error;
-
- for (i = 0; i < pvt->width; i++) {
- pvt->map[i] =
- kmalloc(sizeof(**pvt->map) * pvt->height,
- GFP_KERNEL);
- if (pvt->map[i] == NULL) {
- while (i--)
- kfree(pvt->map[i]);
- kfree(pvt->map);
- goto error;
- }
- }
-
- if (attr && attr->x <= pvt->width && attr->y <= pvt->height) {
- pvt->div_pt.x = attr->x;
- pvt->div_pt.y = attr->y;
-
- } else {
- /* Defaulting to 3:1 ratio on width for 2D area split */
- /* Defaulting to 3:1 ratio on height for 2D and 1D split */
- pvt->div_pt.x = (pvt->width * 3) / 4;
- pvt->div_pt.y = (pvt->height * 3) / 4;
- }
-
- area.p1.x = width - 1;
- area.p1.y = height - 1;
-
- mutex_lock(&(pvt->mtx));
- fill_2d_area(tcm, &area, init_tile);
- mutex_unlock(&(pvt->mtx));
- return tcm;
-
-error:
- kfree(tcm);
- kfree(pvt);
- return NULL;
-}
-EXPORT_SYMBOL(sita_init);
-
-static void sita_deinit(struct tcm *tcm)
-{
- struct slot init_tile = {0};
- struct sita_pvt *pvt = NULL;
- struct tcm_area area = {0};
- s32 i = 0;
-
- pvt = (struct sita_pvt *)tcm->pvt;
- if (pvt) {
- area.p1.x = pvt->width - 1;
- area.p1.y = pvt->height - 1;
-
- mutex_lock(&(pvt->mtx));
- fill_2d_area(tcm, &area, init_tile);
- mutex_unlock(&(pvt->mtx));
-
- mutex_destroy(&(pvt->mtx));
-
- for (i = 0; i < pvt->height; i++) {
- kfree(pvt->map[i]);
- pvt->map[i] = NULL;
- }
- kfree(pvt->map);
- pvt->map = NULL;
- kfree(pvt);
- }
-}
-
-/**
- * @description: Allocate 1d pages if the required number of pages are
- * available in the container
- *
- * @input:num_pages to be allocated
- *
- * @return 0 on success, non-0 error value on failure. On success
- * area contain co-ordinates of start and end Tiles(inclusive)
- */
-static s32 sita_reserve_1d(struct tcm *tcm, u32 num_pages,
- struct tcm_area *area)
-{
- s32 ret = 0;
- struct tcm_area field = {0};
- struct slot slot = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- area->is2d = false;
-
- mutex_lock(&(pvt->mtx));
-#ifdef RESTRICT_1D
- /* scan within predefined 1D boundary */
- assign(&field, pvt->width - 1, pvt->height - 1, 0, pvt->div_pt.y);
-#else
- /* Scanning entire container */
- assign(&field, pvt->width - 1, pvt->height - 1, 0, 0);
-#endif
- ret = scan_r2l_b2t_one_dim(tcm, num_pages,
- &field, area);
- /* There is not much to select, we pretty much give the first one
- which accomodates */
- if (!ret) {
- slot.busy = true;
- slot.parent = *area;
- /* inserting into tiler container */
- fill_1d_area(tcm, area, slot);
- /* updating the list of allocations */
- insert_element(&pvt->res, area);
- }
- mutex_unlock(&(pvt->mtx));
- return ret;
-}
-
-/**
- * @description: Allocate 2d area on availability in the container
- *
- * @input:'w'idth and 'h'eight of the 2d area, 'align'ment specification
- *
- * @return 0 on success, non-0 error value on failure. On success
- * area contain co-ordinates of TL corner Tile and BR corner Tile of
- * the rectangle (inclusive)
- */
-static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
- struct tcm_area *area)
-{
- s32 ret = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- /* we only support 1, 32 and 64 as alignment */
- u16 stride = align <= 1 ? 1 : align <= 32 ? 32 : 64;
- struct slot slot = {0};
-
- area->is2d = true;
-
- /* align must be 2 power */
- if (align & (align - 1) || align > 64)
- return -EINVAL;
-
- mutex_lock(&(pvt->mtx));
- ret = scan_areas_and_find_fit(tcm, w, h, stride, area);
- if (!ret) {
- slot.busy = true;
- slot.parent = *area;
-
- fill_2d_area(tcm, area, slot);
- insert_element(&(pvt->res), area);
- }
- mutex_unlock(&(pvt->mtx));
- return ret;
-}
-
-/**
- * @description: unreserve 2d or 1D allocations if previously allocated
- *
- * @input:'area' specification: for 2D this should contain
- * TL Corner and BR Corner of the 2D area, or for 1D allocation this should
- * contain the start and end Tiles
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the to_be_removed_area is removed from g_allocation_list and the
- * corresponding tiles are marked 'NOT_OCCUPIED'
- *
- */
-static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
-{
- s32 ret = 0;
- struct slot slot = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- u16 is2d;
-
- slot.busy = false;
- mutex_lock(&(pvt->mtx));
- /*First we check if the given Area is aleast valid in our list*/
- ret = rem_element_with_match(&(pvt->res), area, &is2d);
-
- /* If we found a positive match & removed the area details from list
- * then we clear the contents of the associated tiles in the global
- * container*/
- if (!ret) {
- if (is2d)
- fill_2d_area(tcm, area, slot);
- else
- fill_1d_area(tcm, area, slot);
- }
- mutex_unlock(&(pvt->mtx));
- return ret;
-}
-
-/**
- * @description: raster scan right to left from top to bottom; find if there is
- * a free area to fit a given w x h inside the 'scan area'. If there is a free
- * area, then adds to maybes candidates, which later is sent for selection
- * as per pre-defined criteria.
- *
- * @input:'w x h' width and height of the allocation area.
- * 'stride' - 64/32/None for start address alignment
- * 'field' - area in which the scan operation should take place
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' area contains TL and BR corners of the allocated area
- *
- */
-static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area)
-{
- s32 xx = 0, yy = 0;
- s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
- s16 found_x = -1, found_y = -1;
- LIST_HEAD(maybes);
- struct tcm_area candidate = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "scan_r2l_t2b:", field);
-
- start_x = field->p0.x;
- end_x = field->p1.x;
- start_y = field->p0.y;
- end_y = field->p1.y;
-
- /* check scan area co-ordinates */
- if (field->p0.x < field->p1.x ||
- field->p1.y < field->p0.y)
- return -EINVAL;
-
- /* check if allocation would fit in scan area */
- if (w > INCL_LEN(start_x, end_x) || h > INCL_LEN(end_y, start_y))
- return -ENOSPC;
-
- /* adjust start_x and end_y, as allocation would not fit beyond */
- start_x = ALIGN_DOWN(start_x - w + 1, stride); /* - 1 to be inclusive */
- end_y = end_y - h + 1;
-
- /* check if allocation would still fit in scan area */
- if (start_x < end_x)
- return -ENOSPC;
-
- P2("ali=%d x=%d..%d y=%d..%d", stride, start_x, end_x, start_y, end_y);
-
- /*
- * Start scanning: These scans are always inclusive ones so if we are
- * given a start x = 0 is a valid value so if we have a end_x = 255,
- * 255th element is also checked
- */
- for (yy = start_y; yy <= end_y; yy++) {
- for (xx = start_x; xx >= end_x; xx -= stride) {
- if (!pvt->map[xx][yy].busy) {
- if (check_fit_r_and_b(tcm, w, h, xx, yy)) {
- P3("found shoulder: %d,%d", xx, yy);
- found_x = xx;
- found_y = yy;
- /* Insert this candidate, it is just a
- co-ordinate, reusing Area */
- assign(&candidate, xx, yy, 0, 0);
- insert_element(&maybes, &candidate);
-#ifdef X_SCAN_LIMITER
- /* change upper x bound */
- end_x = xx + 1;
-#endif
- break;
- }
- } else {
- /* Optimization required only for Non Aligned,
- Aligned anyways skip by 32/64 tiles at a time */
- if (stride == 1 &&
- pvt->map[xx][yy].parent.is2d) {
- xx = pvt->map[xx][yy].parent.p0.x;
- P3("moving to: %d,%d", xx, yy);
- }
- }
- }
-
- /* if you find a free area shouldering the given scan area on
- then we can break */
-#ifdef Y_SCAN_LIMITER
- if (found_x == start_x)
- break;
-#endif
- }
-
- if (list_empty(&maybes))
- return -ENOSPC;
-
- select_candidate(tcm, w, h, &maybes, field, CR_R2L_T2B, area);
- /* dump_list_entries(maybes); */
- clean_list(&maybes);
- return 0;
-}
-
-#ifdef SCAN_BOTTOM_UP
-/**
- * @description: raster scan right to left from bottom to top; find if there is
- * a free area to fit a given w x h inside the 'scan area'. If there is a free
- * area, then adds to maybes candidates, which later is sent for selection
- * as per pre-defined criteria.
- *
- * @input:'w x h' width and height of the allocation area.
- * 'stride' - 64/32/None for start address alignment
- * 'field' - area in which the scan operation should take place
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' area contains TL and BR corners of the allocated area
- *
- */
-static s32 scan_r2l_b2t(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area)
-{
- /* TODO: Should I check scan area?
- * Might have to take it as input during initialization
- */
- s32 xx = 0, yy = 0;
- s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
- s16 found_x = -1, found_y = -1;
- LIST_HEAD(maybes);
- struct tcm_area candidate = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "scan_r2l_b2t:", field);
-
- start_x = field->p0.x;
- end_x = field->p1.x;
- start_y = field->p0.y;
- end_y = field->p1.y;
-
- /* check scan area co-ordinates */
- if (field->p1.x < field->p0.x ||
- field->p1.y < field->p0.y)
- return -EINVAL;
-
- /* check if allocation would fit in scan area */
- if (w > INCL_LEN(start_x, end_x) || h > INCL_LEN(start_y, end_y))
- return -ENOSPC;
-
- /* adjust start_x and start_y, as allocation would not fit beyond */
- start_x = ALIGN_DOWN(start_x - w + 1, stride); /* + 1 to be inclusive */
- start_y = start_y - h + 1;
-
- /* check if allocation would still fit in scan area */
- if (start_x < end_x)
- return -ENOSPC;
-
- P2("ali=%d x=%d..%d y=%d..%d", stride, start_x, end_x, start_y, end_y);
-
- /*
- * Start scanning: These scans are always inclusive ones so if we are
- * given a start x = 0 is a valid value so if we have a end_x = 255,
- * 255th element is also checked
- */
- for (yy = start_y; yy >= end_y; yy--) {
- for (xx = start_x; xx >= end_x; xx -= stride) {
- if (!pvt->map[xx][yy].busy) {
- if (check_fit_r_and_b(tcm, w, h, xx, yy)) {
- P3("found shoulder: %d,%d", xx, yy);
- found_x = xx;
- found_y = yy;
- /* Insert this candidate, it is just a
- co-ordinate, reusing Area */
- assign(&candidate, xx, yy, 0, 0);
- insert_element(&maybes, &candidate);
-#ifdef X_SCAN_LIMITER
- /* change upper x bound */
- end_x = xx + 1;
-#endif
- break;
- }
- } else {
- /* Optimization required only for Non Aligned,
- Aligned anyways skip by 32/64 tiles at a time */
- if (stride == 1 &&
- pvt->map[xx][yy].parent.is2d) {
- xx = pvt->map[xx][yy].parent.p0.x;
- P3("moving to: %d,%d", xx, yy);
- }
- }
-
- }
-
- /* if you find a free area shouldering the given scan area on
- then we can break */
-#ifdef Y_SCAN_LIMITER
- if (found_x == start_x)
- break;
-#endif
- }
-
- if (list_empty(&maybes))
- return -ENOSPC;
-
- select_candidate(tcm, w, h, &maybes, field, CR_R2L_B2T, area);
- /* dump_list_entries(maybes); */
- clean_list(&maybes);
- return 0;
-}
-#endif
-
-/**
- * @description: raster scan left to right from top to bottom; find if there is
- * a free area to fit a given w x h inside the 'scan area'. If there is a free
- * area, then adds to maybes candidates, which later is sent for selection
- * as per pre-defined criteria.
- *
- * @input:'w x h' width and height of the allocation area.
- * 'stride' - 64/32/None for start address alignment
- * 'field' - area in which the scan operation should take place
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' area contains TL and BR corners of the allocated area
- *
- */
-static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area)
-{
- s32 xx = 0, yy = 0;
- s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
- s16 found_x = -1, found_y = -1;
- LIST_HEAD(maybes);
- struct tcm_area candidate = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "scan_l2r_t2b:", field);
-
- start_x = field->p0.x;
- end_x = field->p1.x;
- start_y = field->p0.y;
- end_y = field->p1.y;
-
- /* check scan area co-ordinates */
- if (field->p1.x < field->p0.x ||
- field->p1.y < field->p0.y)
- return -EINVAL;
-
- /* check if allocation would fit in scan area */
- if (w > INCL_LEN(end_x, start_x) || h > INCL_LEN(end_y, start_y))
- return -ENOSPC;
-
- start_x = ALIGN(start_x, stride);
-
- /* check if allocation would still fit in scan area */
- if (w > INCL_LEN(end_x, start_x))
- return -ENOSPC;
-
- /* adjust end_x and end_y, as allocation would not fit beyond */
- end_x = end_x - w + 1; /* + 1 to be inclusive */
- end_y = end_y - h + 1;
-
- P2("ali=%d x=%d..%d y=%d..%d", stride, start_x, end_x, start_y, end_y);
-
- /*
-	 * Start scanning: these scans are always inclusive, so a start of
-	 * x = 0 is a valid value, and if end_x = 255 the 255th element is
-	 * also checked
- */
- for (yy = start_y; yy <= end_y; yy++) {
- for (xx = start_x; xx <= end_x; xx += stride) {
- /* if NOT occupied */
- if (!pvt->map[xx][yy].busy) {
- if (check_fit_r_and_b(tcm, w, h, xx, yy)) {
- P3("found shoulder: %d,%d", xx, yy);
- found_x = xx;
- found_y = yy;
- /* Insert this candidate, it is just a
- co-ordinate, reusing Area */
- assign(&candidate, xx, yy, 0, 0);
- insert_element(&maybes, &candidate);
-#ifdef X_SCAN_LIMITER
- /* change upper x bound */
- end_x = xx - 1;
-#endif
- break;
- }
- } else {
-				/* optimization is needed only for unaligned
-				scans; aligned scans already skip 32/64 tiles
-				at a time */
- if (stride == 1 &&
- pvt->map[xx][yy].parent.is2d) {
- xx = pvt->map[xx][yy].parent.p1.x;
- P3("moving to: %d,%d", xx, yy);
- }
- }
- }
-		/* if we find a free area shouldering the given scan area,
-		then we can break */
-#ifdef Y_SCAN_LIMITER
- if (found_x == start_x)
- break;
-#endif
- }
-
- if (list_empty(&maybes))
- return -ENOSPC;
-
- select_candidate(tcm, w, h, &maybes, field, CR_L2R_T2B, area);
- /* dump_list_entries(maybes); */
- clean_list(&maybes);
- return 0;
-}
-
-#ifdef SCAN_BOTTOM_UP
-/**
- * @description: raster scan left to right from bottom to top; find if there is
- * a free area to fit a given w x h inside the 'scan area'. If there is a free
- * area, then adds to maybes candidates, which later is sent for selection
- * as per pre-defined criteria.
- *
- * @input:'w x h' width and height of the allocation area.
- * 'stride' - 64/32/None for start address alignment
- * 'field' - area in which the scan operation should take place
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' area contains TL and BR corners of the allocated area
- *
- */
-static s32 scan_l2r_b2t(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *field, struct tcm_area *area)
-{
- s32 xx = 0, yy = 0;
- s16 start_x = -1, end_x = -1, start_y = -1, end_y = -1;
- s16 found_x = -1, found_y = -1;
- LIST_HEAD(maybes);
- struct tcm_area candidate = {0};
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "scan_l2r_b2t:", field);
-
- start_x = field->p0.x;
- end_x = field->p1.x;
- start_y = field->p0.y;
- end_y = field->p1.y;
-
- /* check scan area co-ordinates */
- if (field->p1.x < field->p0.x ||
- field->p0.y < field->p1.y)
- return -EINVAL;
-
- /* check if allocation would fit in scan area */
- if (w > INCL_LEN(end_x, start_x) || h > INCL_LEN(start_y, end_y))
- return -ENOSPC;
-
- start_x = ALIGN(start_x, stride);
-
- /* check if allocation would still fit in scan area */
- if (w > INCL_LEN(end_x, start_x))
- return -ENOSPC;
-
- /* adjust end_x and start_y, as allocation would not fit beyond */
- end_x = end_x - w + 1; /* + 1 to be inclusive */
- start_y = start_y - h + 1;
-
- P2("ali=%d x=%d..%d y=%d..%d", stride, start_x, end_x, start_y, end_y);
-
- /*
-	 * Start scanning: these scans are always inclusive, so a start of
-	 * x = 0 is a valid value, and if end_x = 255 the 255th element is
-	 * also checked
- */
- for (yy = start_y; yy >= end_y; yy--) {
- for (xx = start_x; xx <= end_x; xx += stride) {
- /* if NOT occupied */
- if (!pvt->map[xx][yy].busy) {
- if (check_fit_r_and_b(tcm, w, h, xx, yy)) {
- P3("found shoulder: %d,%d", xx, yy);
- found_x = xx;
- found_y = yy;
- /* Insert this candidate, it is just a
- co-ordinate, reusing Area */
- assign(&candidate, xx, yy, 0, 0);
- insert_element(&maybes, &candidate);
-#ifdef X_SCAN_LIMITER
- /* change upper x bound */
- end_x = xx - 1;
-#endif
- break;
- }
- } else {
-				/* optimization is needed only for unaligned
-				scans; aligned scans already skip 32/64 tiles
-				at a time */
- if (stride == 1 &&
- pvt->map[xx][yy].parent.is2d) {
- xx = pvt->map[xx][yy].parent.p1.x;
- P3("moving to: %d,%d", xx, yy);
- }
- }
- }
-
-		/* if we find a free area shouldering the given scan area,
-		then we can break */
-#ifdef Y_SCAN_LIMITER
- if (found_x == start_x)
- break;
-#endif
- }
-
- if (list_empty(&maybes))
- return -ENOSPC;
-
- select_candidate(tcm, w, h, &maybes, field, CR_L2R_B2T, area);
- /* dump_list_entries(maybes); */
- clean_list(&maybes);
- return 0;
-}
-#endif
-/*
-Note: In general the co-ordinates specified in the scan area are relative to
-the scan sweep direction, i.e. a scan area from the top left corner will have
-p0.x <= p1.x and p0.y <= p1.y, whereas a scan area from the bottom right corner
-will have p1.x <= p0.x and p1.y <= p0.y
-*/
-
-/**
- * @description: raster scan right to left from bottom to top; find if there are
- * continuous free pages (one slot is one page, continuity always from left
- * to right) inside the 'scan area'. If there are enough continuous free
- * pages, then it returns the start and end tile/page co-ordinates inside
- * 'area'
- *
- * @input:'num_pages' required,
- * 'field' - area in which the scan operation should take place
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' area contains start and end slot (inclusive).
- *
- */
-static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_pages,
- struct tcm_area *field, struct tcm_area *area)
-{
- s32 fit = false;
- u16 x, y;
- u16 left_x, left_y, busy_x, busy_y;
- s32 ret = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- /* check scan area co-ordinates */
- if (field->p0.y < field->p1.y)
- return -EINVAL;
-
- PA(2, "scan_r2l_b2t_one_dim:", field);
-
-	/* Note: checking sanity of the scan area
-	 * The reason for this check is that 1D allocations assume that the X
-	 * range spans the entire tiler space, i.e. ALL columns.
-	 * The scan area can only limit the Y, i.e. the number of rows, for a
-	 * 1D allocation. We may also have only 1 row for a 1D allocation,
-	 * i.e. our field p0.y and p1.y may have the same value.
-	 */
-
- /* only support full width 1d scan area */
- if (pvt->width != field->p0.x - field->p1.x + 1)
- return -EINVAL;
-
- /* check if allocation would fit in scan area */
- if (num_pages > pvt->width * INCL_LEN(field->p0.y, field->p1.y))
- return -ENOSPC;
-
- left_x = field->p0.x;
- left_y = field->p0.y;
- while (!ret) {
- x = left_x;
- y = left_y;
-
- if (!pvt->map[x][y].busy) {
- ret = move_left(tcm, x, y, num_pages - 1,
- &left_x, &left_y);
- if (ret)
- break; /* out of space */
-
- P3("moved left %d slots: %d,%d", num_pages - 1,
- left_x, left_y);
- fit = check_fit_r_one_dim(tcm, left_x, left_y,
- num_pages, &busy_x, &busy_y);
- if (fit) {
- assign(area, left_x, left_y,
- busy_x, busy_y);
- break;
- } else {
- /* no fit, continue at the busy slot */
- x = busy_x;
- y = busy_y;
- }
- }
-
- /* now the tile is occupied, skip busy region */
- if (pvt->map[x][y].parent.is2d) {
- busy_x = pvt->map[x][y].parent.p0.x;
- busy_y = y;
- } else {
- busy_x = pvt->map[x][y].parent.p0.x;
- busy_y = pvt->map[x][y].parent.p0.y;
- }
- x = busy_x;
- y = busy_y;
-
- P3("moving left from: %d,%d", x, y);
- ret = move_left(tcm, x, y, 1, &left_x, &left_y);
- }
-
- return fit ? 0 : -ENOSPC;
-}
-
-/**
- * @description: find a fit for a 'w x h' block with the given stride
- * alignment. Scans the preferred side of the container first (aligned
- * blocks on the left, unaligned on the right), then falls back to
- * scanning the entire container.
- *
- * @input:'w x h' width and height of the allocation area.
- * 'stride' - 64/32/None for start address alignment
- *
- * @return 0 on success, non-0 error value on failure. On success
- * the 'area' contains TL and BR corners of the allocated area
- */
-static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 stride,
- struct tcm_area *area)
-{
- s32 ret = 0;
- struct tcm_area field = {0};
- u16 boundary_x = 0, boundary_y = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- s32 need_scan = 2;
-
- if (stride > 1) {
- boundary_x = pvt->div_pt.x - 1;
- boundary_y = pvt->div_pt.y - 1;
-
- /* more intelligence here */
- if (w > pvt->div_pt.x) {
- boundary_x = pvt->width - 1;
- need_scan--;
- }
- if (h > pvt->div_pt.y) {
- boundary_y = pvt->height - 1;
- need_scan--;
- }
-
- assign(&field, 0, 0, boundary_x, boundary_y);
- ret = scan_l2r_t2b(tcm, w, h, stride, &field, area);
- if (ret != 0 && need_scan) {
- /* scan the entire container if nothing found */
- assign(&field, 0, 0, pvt->width - 1, pvt->height - 1);
- ret = scan_l2r_t2b(tcm, w, h, stride, &field, area);
- }
- } else if (stride == 1) {
- boundary_x = pvt->div_pt.x;
- boundary_y = pvt->div_pt.y - 1;
-
- /* more intelligence here */
- if (w > (pvt->width - pvt->div_pt.x)) {
- boundary_x = 0;
- need_scan--;
- }
- if (h > pvt->div_pt.y) {
- boundary_y = pvt->height - 1;
- need_scan--;
- }
-
- assign(&field, pvt->width - 1, 0, boundary_x, boundary_y);
- ret = scan_r2l_t2b(tcm, w, h, stride, &field, area);
-
- if (ret != 0 && need_scan) {
- /* scan the entire container if nothing found */
- assign(&field, pvt->width - 1, 0, 0,
- pvt->height - 1);
- ret = scan_r2l_t2b(tcm, w, h, stride, &field,
- area);
- }
- }
-
- /* 3/30/2010: moved aligned to left, and unaligned to right side. */
-#if 0
- else if (stride == 1) {
- /* use 64-align area so we don't grow down and shrink 1D area */
- if (h > pvt->div_pt.y) {
- need_scan -= 2;
- assign(&field, 0, 0, pvt->width - 1, pvt->height - 1);
- ret = scan_l2r_t2b(tcm, w, h, stride, &field, area);
- } else {
- assign(&field, 0, pvt->div_pt.y - 1, pvt->width - 1, 0);
-			/* scan up in 64 and 32 areas across the whole width */
- ret = scan_l2r_b2t(tcm, w, h, stride, &field, area);
- }
-
- if (ret != 0 && need_scan) {
- assign(&field, 0, 0, pvt->width - 1, pvt->height - 1);
- ret = scan_l2r_t2b(tcm, w, h, stride, &field, area);
- }
- }
-#endif
- return ret;
-}
-
-static s32 check_fit_r_and_b(struct tcm *tcm, u16 w, u16 h, u16 left_x,
- u16 top_y)
-{
- u16 xx = 0, yy = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- for (yy = top_y; yy < top_y + h; yy++) {
- for (xx = left_x; xx < left_x + w; xx++) {
- if (pvt->map[xx][yy].busy)
- return false;
- }
- }
- return true;
-}
-
-static s32 check_fit_r_one_dim(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *busy_x, u16 *busy_y)
-{
- s32 ret = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- s32 i = 0;
- *busy_x = x;
- *busy_y = y;
-
- P2("checking fit for %d pages from %d,%d", num_pages, x, y);
- while (i < num_pages) {
- if (pvt->map[x][y].busy) {
-			/* go to the start of the blocking allocation
-			   to avoid unnecessary checking */
- if (pvt->map[x][y].parent.is2d) {
- *busy_x = pvt->map[x][y].parent.p0.x;
- *busy_y = y;
- } else {
- *busy_x = pvt->map[x][y].parent.p0.x;
- *busy_y = pvt->map[x][y].parent.p0.y;
- }
- /* TODO: Could also move left in case of 2D */
- P2("after busy slot at: %d,%d", *busy_x, *busy_y);
- return false;
- }
-
- i++;
-
- /* break here so busy_x, busy_y will be correct */
- if (i == num_pages)
- break;
-
- ret = move_right(tcm, x, y, 1, busy_x, busy_y);
- if (ret)
- return false;
-
- x = *busy_x;
- y = *busy_y;
- }
-
- return true;
-}
-
-static void fill_2d_area(struct tcm *tcm, struct tcm_area *area,
- struct slot slot)
-{
- s32 x, y;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "fill 2d area", area);
- for (x = area->p0.x; x <= area->p1.x; ++x)
- for (y = area->p0.y; y <= area->p1.y; ++y)
- pvt->map[x][y] = slot;
-}
-
-/* area should be a valid area */
-static void fill_1d_area(struct tcm *tcm, struct tcm_area *area,
- struct slot slot)
-{
- u16 x = 0, y = 0;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- PA(2, "fill 1d area", area);
- x = area->p0.x;
- y = area->p0.y;
-
- while (!(x == area->p1.x && y == area->p1.y)) {
- pvt->map[x++][y] = slot;
- if (x == pvt->width) {
- x = 0;
- y++;
- }
- }
- /* set the last slot */
- pvt->map[x][y] = slot;
-}
-
-static void select_candidate(struct tcm *tcm, u16 w, u16 h,
- struct list_head *maybes,
- struct tcm_area *field, s32 criteria,
- struct tcm_area *area)
-{
- /* bookkeeping the best match and the one evaluated */
- struct area_spec *best = NULL;
- struct nearness_factor best_factor = {0};
- struct neighbour_stats best_stats = {0};
- u16 win_neighs = 0;
-
- /* bookkeeping the current one being evaluated */
- struct area_spec *elem = NULL;
- struct nearness_factor factor = {0};
- struct neighbour_stats stats = {0};
- u16 neighs = 0;
-
- bool better; /* whether current is better */
-
- /* we default to the 1st candidate */
- best = list_first_entry(maybes, struct area_spec, list);
-
-	/* if there is only one candidate then that is the selection */
-
-	/* If first-found is enabled then we simply return the first found
-	 * candidate.
-	 * NOTE: for horizontal bias we also return the first found, because
-	 * our scan is a horizontal raster and the first candidate is always
-	 * the same as the one a horizontal selection would pick.
-	 */
- if (list_is_singular(maybes) ||
- criteria & CR_FIRST_FOUND || criteria & CR_BIAS_HORIZONTAL)
-		/* Note: we could have done this in the previous function, but
-		   we keep the selection logic in one place; here we return
-		   the first candidate
-		 */
- goto done;
-
-	/* calculate stats for the first candidate and make it the best, then
-	   replace it with any candidate that has better credentials w.r.t.
-	   the criteria */
-
- get_busy_neigh_stats(tcm, w, h, &best->area, &best_stats);
- win_neighs = BOUNDARY(&best_stats) +
- OCCUPIED(&best_stats);
- get_nearness_factor(field, &best->area, &best_factor);
-
- list_for_each_entry(elem, maybes->next, list) {
- better = false;
-
- /* calculate required statistics */
- get_busy_neigh_stats(tcm, w, h, &elem->area, &stats);
- get_nearness_factor(field, &elem->area, &factor);
- neighs = BOUNDARY(&stats) + OCCUPIED(&stats);
-
-		/* see if this area is better than the best so far */
-
- /* neighbor check */
- if ((criteria & CR_MAX_NEIGHS) &&
- neighs > win_neighs)
- better = true;
-
- /* vertical bias check */
- if ((criteria & CR_BIAS_VERTICAL) &&
- /*
- * NOTE: not checking if lengths are same, because that does not
- * find new shoulders on the same row after a fit
- */
- INCL_LEN_MOD(elem->area.p0.y, field->p0.y) >
- INCL_LEN_MOD(best->area.p0.y, field->p0.y))
- better = true;
-
- /* diagonal balance check */
- if ((criteria & CR_DIAGONAL_BALANCE) &&
- win_neighs <= neighs &&
- (win_neighs < neighs ||
- /* this implies that neighs and occupied match */
- OCCUPIED(&best_stats) < OCCUPIED(&stats) ||
- (OCCUPIED(&best_stats) == OCCUPIED(&stats) &&
- /* check the nearness factor */
- best_factor.x + best_factor.y > factor.x + factor.y)))
- better = true;
-
- if (better) {
- best = elem;
- best_factor = factor;
- best_stats = stats;
- win_neighs = neighs;
- }
- }
-
-done:
- assign(area, best->area.p0.x, best->area.p0.y,
- best->area.p0.x + w - 1, best->area.p0.y + h - 1);
-}
-
-/* get the nearness factor of an area in a search field */
-static void get_nearness_factor(struct tcm_area *field,
- struct tcm_area *area, struct nearness_factor *nf)
-{
-	/* For the following calculation we need not worry about the +/- sign,
-	   as the relative distances take care of it. Values are multiplied
-	   by 1000 since no floating point arithmetic is used in the kernel */
-
- nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
- (field->p1.x - field->p0.x);
- nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
- (field->p1.y - field->p0.y);
-}
-
-/* Neighbours
- *
- * |<-----T------>|
- * _ _______________ _
- * L | Ar | R
- * _ |______________|_
- * |<-----B------>|
- */
-static s32 get_busy_neigh_stats(struct tcm *tcm, u16 width, u16 height,
- struct tcm_area *top_left_corner,
- struct neighbour_stats *neighbour_stat)
-{
- s16 xx = 0, yy = 0;
- struct tcm_area left_edge;
- struct tcm_area right_edge;
- struct tcm_area top_edge;
- struct tcm_area bottom_edge;
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
-
- if (neighbour_stat == NULL)
- return -EINVAL;
-
- if (width == 0 || height == 0)
- return -EINVAL;
-
-	/* Clearing any existing values */
- memset(neighbour_stat, 0, sizeof(*neighbour_stat));
-
- /* Finding Top Edge */
- assign(&top_edge, top_left_corner->p0.x, top_left_corner->p0.y,
- top_left_corner->p0.x + width - 1, top_left_corner->p0.y);
-
- /* Finding Bottom Edge */
- assign(&bottom_edge, top_left_corner->p0.x,
- top_left_corner->p0.y+height - 1,
- top_left_corner->p0.x + width - 1,
- top_left_corner->p0.y + height - 1);
-
- /* Finding Left Edge */
- assign(&left_edge, top_left_corner->p0.x, top_left_corner->p0.y,
- top_left_corner->p0.x, top_left_corner->p0.y + height - 1);
-
- /* Finding Right Edge */
- assign(&right_edge, top_left_corner->p0.x + width - 1,
- top_left_corner->p0.y,
- top_left_corner->p0.x + width - 1,
- top_left_corner->p0.y + height - 1);
-
- /* dump_area(&top_edge);
- dump_area(&right_edge);
- dump_area(&bottom_edge);
- dump_area(&left_edge);
- */
-
-	/* Parsing through top & bottom edges */
- for (xx = top_edge.p0.x; xx <= top_edge.p1.x; xx++) {
- if (top_edge.p0.y - 1 < 0)
- neighbour_stat->top_boundary++;
- else if (pvt->map[xx][top_edge.p0.y - 1].busy)
- neighbour_stat->top_occupied++;
-
- if (bottom_edge.p0.y + 1 > pvt->height - 1)
- neighbour_stat->bottom_boundary++;
- else if (pvt->map[xx][bottom_edge.p0.y+1].busy)
- neighbour_stat->bottom_occupied++;
- }
-
-	/* Parsing through left and right edges */
- for (yy = left_edge.p0.y; yy <= left_edge.p1.y; ++yy) {
- if (left_edge.p0.x - 1 < 0)
- neighbour_stat->left_boundary++;
- else if (pvt->map[left_edge.p0.x - 1][yy].busy)
- neighbour_stat->left_occupied++;
-
- if (right_edge.p0.x + 1 > pvt->width - 1)
- neighbour_stat->right_boundary++;
- else if (pvt->map[right_edge.p0.x + 1][yy].busy)
- neighbour_stat->right_occupied++;
-
- }
-
- return 0;
-}
-
-/**
- @description: Retrieves the parent area of the page at p0.x, p0.y if
- occupied
- @input:co-ordinates of the page (p0.x, p0.y) whose parent area
- is required
- @return 0 on success, non-0 error value on failure. On success
- parent will contain co-ordinates (TL & BR corner) of the parent
- area
-*/
-static s32 sita_get_parent(struct tcm *tcm, struct tcm_pt *pt,
- struct tcm_area *parent)
-{
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- s32 res = 0;
-
- mutex_lock(&(pvt->mtx));
-
- if (pvt->map[pt->x][pt->y].busy) {
- *parent = pvt->map[pt->x][pt->y].parent;
- } else {
- memset(parent, 0, sizeof(*parent));
- res = -ENOENT;
- }
-
- mutex_unlock(&(pvt->mtx));
-
- return res;
-}
-
-static s32 move_left(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *xx, u16 *yy)
-{
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- u32 pos = x + pvt->width * y;
-
- if (pos < num_pages)
- return -ENOSPC;
-
- pos -= num_pages;
- *xx = pos % pvt->width;
- *yy = pos / pvt->width;
- return 0;
-}
-
-static s32 move_right(struct tcm *tcm, u16 x, u16 y, u32 num_pages,
- u16 *xx, u16 *yy)
-{
- struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
- u32 pos = x + pvt->width * y;
-
- if (num_pages > pvt->width * pvt->height - pos)
- return -ENOSPC;
-
- pos += num_pages;
- *xx = pos % pvt->width;
- *yy = pos / pvt->width;
- return 0;
-}
-
diff --git a/drivers/media/video/tiler/tiler-geom.c b/drivers/media/video/tiler/tiler-geom.c
new file mode 100644
index 000000000000..f95ae5c9ef98
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-geom.c
@@ -0,0 +1,372 @@
+/*
+ * tiler-geom.c
+ *
+ * TILER geometry functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include "_tiler.h"
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
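+/* per-format geometry, indexed by tiler_fmt:
+   8-bit, 16-bit, 32-bit, then page mode */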
+static struct tiler_geom geom[TILER_FORMATS] = {
+ {
+ .x_shft = 0,
+ .y_shft = 0,
+ },
+ {
+ .x_shft = 0,
+ .y_shft = 1,
+ },
+ {
+ .x_shft = 1,
+ .y_shft = 1,
+ },
+ {
+ .x_shft = SLOT_WIDTH_BITS,
+ .y_shft = SLOT_HEIGHT_BITS,
+ },
+};
+
+/* tiler space addressing bitfields */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
+
+#define VIEW_SIZE (1u << (CONT_WIDTH_BITS + CONT_HEIGHT_BITS))
+#define VIEW_MASK (VIEW_SIZE - 1u)
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILER_FMT(x) ((enum tiler_fmt) \
+ ((x >> SHIFT_ACC_MODE) & MASK_ACC_MODE))
+
+#define MASK_VIEW (MASK_X_INVERT | MASK_Y_INVERT | MASK_XY_FLIP)
+
+/* location of the various tiler views in physical address space */
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
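+/* each view spans VIEW_SIZE = 128 MiB, so the four views together cover
+   physical addresses 0x60000000..0x80000000 */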
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
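+
+/*
+ * tsptr bit layout (from the masks above):
+ *
+ *   [31]    XY flip
+ *   [30]    Y invert
+ *   [29]    X invert
+ *   [28:27] access mode (tiler_fmt)
+ *   [26:0]  offset within the view (VIEW_MASK)
+ */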
+
+bool is_tiler_addr(u32 phys)
+{
+ return phys >= TILVIEW_8BIT && phys < TILVIEW_END;
+}
+EXPORT_SYMBOL(is_tiler_addr);
+
+u32 tiler_bpp(const struct tiler_block_t *b)
+{
+ enum tiler_fmt fmt = tiler_fmt(b->phys);
+ BUG_ON(fmt == TILFMT_INVALID);
+
+ return geom[fmt].bpp_m;
+}
+EXPORT_SYMBOL(tiler_bpp);
+
+/* return the stride of a tiler-block in tiler space */
+static inline s32 tiler_stride(u32 tsptr)
+{
+ enum tiler_fmt fmt = TILER_FMT(tsptr);
+
+ if (fmt == TILFMT_PAGE)
+ return 0;
+ else if (tsptr & MASK_XY_FLIP)
+ return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
+ else
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
+
+u32 tiler_pstride(const struct tiler_block_t *b)
+{
+ enum tiler_fmt fmt = tiler_fmt(b->phys);
+ BUG_ON(fmt == TILFMT_INVALID);
+
+ /* return the virtual stride for page mode */
+ if (fmt == TILFMT_PAGE)
+ return tiler_vstride(b);
+
+ return tiler_stride(b->phys & ~MASK_VIEW);
+}
+EXPORT_SYMBOL(tiler_pstride);
+
+enum tiler_fmt tiler_fmt(u32 phys)
+{
+ if (!is_tiler_addr(phys))
+ return TILFMT_INVALID;
+
+ return TILER_FMT(phys);
+}
+EXPORT_SYMBOL(tiler_fmt);
+
+/* returns the tiler geometry information for a format */
+static const struct tiler_geom *get_geom(enum tiler_fmt fmt)
+{
+ if (fmt >= TILFMT_MIN && fmt <= TILFMT_MAX)
+ return geom + fmt;
+ return NULL;
+}
+
+/**
+ * Returns the natural x and y coordinates for a pixel in tiler space address.
+ * That is, the coordinates for the same pixel in the natural (non-rotated,
+ * non-mirrored) view. This allows a tiler pixel to be uniquely identified
+ * in any view orientation.
+ */
+static void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y)
+{
+ u32 x_bits, y_bits, offset;
+ enum tiler_fmt fmt;
+
+ fmt = TILER_FMT(tsptr);
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ offset = (tsptr & VIEW_MASK) >> (geom[fmt].x_shft + geom[fmt].y_shft);
+
+ /* separate coordinate bitfields based on view orientation */
+ if (tsptr & MASK_XY_FLIP) {
+ *x = offset >> y_bits;
+ *y = offset & MASK(y_bits);
+ } else {
+ *x = offset & MASK(x_bits);
+ *y = offset >> x_bits;
+ }
+
+ /* account for mirroring */
+ if (tsptr & MASK_X_INVERT)
+ *x ^= MASK(x_bits);
+ if (tsptr & MASK_Y_INVERT)
+ *y ^= MASK(y_bits);
+}
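+
+/*
+ * Worked example (illustrative): for an 8-bit view with no flip or
+ * mirroring, offset 0xC003 decodes to x = (0xC003 & MASK(14)) = 3 and
+ * y = (0xC003 >> 14) = 3.
+ */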
+
+/* calculate the tiler space address of a pixel in a view orientation */
+static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+	if (x > x_mask || y > y_mask)
+ return 0;
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
+
+void tilview_create(struct tiler_view_t *view, u32 phys, u32 width, u32 height)
+{
+ BUG_ON(!is_tiler_addr(phys));
+
+ view->tsptr = phys & ~MASK_VIEW;
+ view->bpp = geom[TILER_FMT(phys)].bpp_m;
+ view->width = width;
+ view->height = height;
+ view->h_inc = view->bpp;
+ view->v_inc = tiler_stride(view->tsptr);
+}
+EXPORT_SYMBOL(tilview_create);
+
+void tilview_get(struct tiler_view_t *view, struct tiler_block_t *blk)
+{
+ view->tsptr = blk->phys & ~MASK_VIEW;
+ view->bpp = tiler_bpp(blk);
+ view->width = blk->width;
+ view->height = blk->height;
+ view->h_inc = view->bpp;
+ view->v_inc = tiler_stride(view->tsptr);
+}
+EXPORT_SYMBOL(tilview_get);
+
+s32 tilview_crop(struct tiler_view_t *view, u32 left, u32 top, u32 width,
+ u32 height)
+{
+ /* check for valid crop */
+ if (left + width < left || left + width > view->width ||
+ top + height < top || top + height > view->height)
+ return -EINVAL;
+
+ view->tsptr += left * view->h_inc + top * view->v_inc;
+ view->width = width;
+ view->height = height;
+ return 0;
+}
+EXPORT_SYMBOL(tilview_crop);
+
+/* calculate tilerspace address and stride after view orientation change */
+static void reorient(struct tiler_view_t *view, u32 orient)
+{
+ u32 x, y;
+
+ tiler_get_natural_xy(view->tsptr, &x, &y);
+ view->tsptr = tiler_get_address(orient,
+ TILER_FMT(view->tsptr), x, y);
+ view->v_inc = tiler_stride(view->tsptr);
+}
+
+s32 tilview_rotate(struct tiler_view_t *view, s32 rotation)
+{
+ u32 orient;
+
+ if (rotation % 90)
+ return -EINVAL;
+
+ /* normalize rotation to quarters */
+ rotation = (rotation / 90) & 3;
+ if (!rotation)
+ return 0; /* nothing to do */
+
+ /* PAGE mode view cannot be rotated */
+ if (TILER_FMT(view->tsptr) == TILFMT_PAGE)
+ return -EPERM;
+
+ /*
+ * first adjust top-left corner. NOTE: it rotates counter-clockwise:
+ * 0 < 3
+ * v ^
+ * 1 > 2
+ */
+ if (rotation < 3)
+ view->tsptr += (view->height - 1) * view->v_inc;
+ if (rotation > 1)
+ view->tsptr += (view->width - 1) * view->h_inc;
+
+ /* then rotate view itself */
+ orient = view->tsptr & MASK_VIEW;
+
+ /* rotate first 2 quarters */
+ if (rotation & 2) {
+ orient ^= MASK_X_INVERT;
+ orient ^= MASK_Y_INVERT;
+ }
+
+ /* rotate last quarter */
+ if (rotation & 1) {
+ orient ^= (orient & MASK_XY_FLIP) ?
+ MASK_X_INVERT : MASK_Y_INVERT;
+
+ /* swap x & y */
+ orient ^= MASK_XY_FLIP;
+ swap(view->height, view->width);
+ }
+
+ /* finally reorient view */
+ reorient(view, orient);
+ return 0;
+}
+EXPORT_SYMBOL(tilview_rotate);
+
+s32 tilview_flip(struct tiler_view_t *view, bool flip_x, bool flip_y)
+{
+ u32 orient;
+ orient = view->tsptr & MASK_VIEW;
+
+ if (!flip_x && !flip_y)
+ return 0; /* nothing to do */
+
+ /* PAGE mode view cannot be flipped */
+ if (TILER_FMT(view->tsptr) == TILFMT_PAGE)
+ return -EPERM;
+
+ /* adjust top-left corner */
+ if (flip_x)
+ view->tsptr += (view->width - 1) * view->h_inc;
+ if (flip_y)
+ view->tsptr += (view->height - 1) * view->v_inc;
+
+ /* flip view orientation */
+ if (orient & MASK_XY_FLIP)
+ swap(flip_x, flip_y);
+
+ if (flip_x)
+ orient ^= MASK_X_INVERT;
+ if (flip_y)
+ orient ^= MASK_Y_INVERT;
+
+ /* finally reorient view */
+ reorient(view, orient);
+ return 0;
+}
+EXPORT_SYMBOL(tilview_flip);
+
+/* return the alias address for a coordinate */
+static inline u32 alias_address(enum tiler_fmt fmt, u32 x, u32 y)
+{
+ return tiler_get_address(0, fmt, x, y) + TILVIEW_8BIT;
+}
+
+/* get the coordinates for an alias address */
+static inline void alias_xy(u32 ssptr, u32 *x, u32 *y)
+{
+ tiler_get_natural_xy(ssptr & ~MASK_VIEW, x, y);
+}
+
+/* initialize shared geometric data */
+void tiler_geom_init(struct tiler_ops *tiler)
+{
+ struct tiler_geom *g;
+
+ tiler->xy = alias_xy;
+ tiler->addr = alias_address;
+ tiler->geom = get_geom;
+
+ tiler->page = TILER_PAGE;
+ tiler->width = TILER_WIDTH;
+ tiler->height = TILER_HEIGHT;
+
+ /* calculate geometry */
+ for (g = geom; g < geom + TILER_FORMATS; g++) {
+ g->bpp_m = g->bpp = 1 << (g->x_shft + g->y_shft);
+ g->slot_w = 1 << (SLOT_WIDTH_BITS - g->x_shft);
+ g->slot_h = 1 << (SLOT_HEIGHT_BITS - g->y_shft);
+ }
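+
+	/*
+	 * resulting geometry: 8-bit is 1 bpp with 64x64 slots, 16-bit is
+	 * 2 bpp with 64x32 slots, 32-bit is 4 bpp with 32x32 slots, and
+	 * page mode is 4096-"bpp" with 1x1 slots
+	 */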
+
+ /* set bpp_m = 1 for page mode as most applications deal in byte data */
+ geom[TILFMT_PAGE].bpp_m = 1;
+}
diff --git a/drivers/media/video/tiler/tiler-iface.c b/drivers/media/video/tiler/tiler-iface.c
new file mode 100644
index 000000000000..f7a1be7131e8
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-iface.c
@@ -0,0 +1,828 @@
+/*
+ * tiler-iface.c
+ *
+ * TILER driver interface functions for TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/fs.h> /* fops */
+#include <linux/uaccess.h> /* copy_to_user */
+#include <linux/slab.h> /* kmalloc */
+#include <linux/sched.h> /* current */
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <asm/mach/map.h> /* for ioremap_page */
+
+#include "_tiler.h"
+
+static bool security = CONFIG_TILER_SECURITY;
+static bool ssptr_lookup = true;
+static bool offset_lookup = true;
+
+module_param(security, bool, 0644);
+MODULE_PARM_DESC(security,
+ "Separate allocations by different processes into different pages");
+module_param(ssptr_lookup, bool, 0644);
+MODULE_PARM_DESC(ssptr_lookup,
+ "Allow looking up a block by ssptr - This is a security risk");
+module_param(offset_lookup, bool, 0644);
+MODULE_PARM_DESC(offset_lookup,
+ "Allow looking up a buffer by offset - This is a security risk");
+
+static struct mutex mtx;
+static struct list_head procs; /* list of process info structs */
+static struct tiler_ops *ops; /* shared methods and variables */
+static struct blocking_notifier_head notifier; /* notifier for events */
+
+/*
+ * Event notification methods
+ * ==========================================================================
+ */
+
+static s32 tiler_notify_event(int event, void *data)
+{
+ return blocking_notifier_call_chain(&notifier, event, data);
+}
+
+/*
+ * Buffer handling methods
+ * ==========================================================================
+ */
+
+struct __buf_info {
+ struct list_head by_pid; /* list of buffers per pid */
+ struct tiler_buf_info buf_info;
+ struct mem_info *mi[TILER_MAX_NUM_BLOCKS]; /* blocks */
+};
+
+/* check if an offset is used */
+static bool _m_offs_in_use(u32 offs, u32 length, struct process_info *pi)
+{
+ struct __buf_info *_b;
+ /* have mutex */
+ list_for_each_entry(_b, &pi->bufs, by_pid)
+ if (_b->buf_info.offset < offs + length &&
+ _b->buf_info.offset + _b->buf_info.length > offs)
+ return 1;
+ return 0;
+}
+
+/* get an offset */
+static u32 _m_get_offs(struct process_info *pi, u32 length)
+{
+ static u32 offs = 0xda7a;
+
+ /* ensure no-one is using this offset */
+ while ((offs << PAGE_SHIFT) + length < length ||
+ _m_offs_in_use(offs << PAGE_SHIFT, length, pi)) {
+ /* use a pseudo-random generator to get a new offset to try */
+
+		/* Galois LFSR: 20, 17 */
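+		/* (0 - (offs & 1u)) is all-ones when the bit shifted out
+		   is 1, so the tap mask 0x90000 (taps 20 and 17, i.e. bits
+		   19 and 16) is XORed in only on a 1 shift-out */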
+ offs = (offs >> 1) ^ (u32)((0 - (offs & 1u)) & 0x90000);
+ }
+
+ return offs << PAGE_SHIFT;
+}
+
+/* find and lock a block. process_info is optional */
+static struct mem_info *
+_m_lock_block(u32 key, u32 id, struct process_info *pi) {
+ struct gid_info *gi;
+ struct mem_info *mi;
+
+ /* if process_info is given, look there first */
+ if (pi) {
+ /* have mutex */
+
+ /* find block in process list and free it */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ mi = ops->lock(key, id, gi);
+ if (mi)
+ return mi;
+ }
+ }
+
+ /* if not found or no process_info given, find block in global list */
+ return ops->lock(key, id, NULL);
+}
+
+/* register a buffer */
+static s32 _m_register_buf(struct __buf_info *_b, struct process_info *pi)
+{
+ struct mem_info *mi;
+ struct tiler_buf_info *b = &_b->buf_info;
+ u32 i, num = b->num_blocks, offs;
+
+ /* check validity */
+ if (num > TILER_MAX_NUM_BLOCKS || num == 0)
+ return -EINVAL;
+
+ /* find each block */
+ b->length = 0;
+ for (i = 0; i < num; i++) {
+ mi = _m_lock_block(b->blocks[i].key, b->blocks[i].id, pi);
+ if (!mi) {
+ /* unlock any blocks already found */
+ while (i--)
+ ops->unlock_free(_b->mi[i], false);
+ return -EACCES;
+ }
+ _b->mi[i] = mi;
+
+ /* we don't keep track of ptr and 1D stride so clear them */
+ b->blocks[i].ptr = NULL;
+ b->blocks[i].stride = 0;
+
+ ops->describe(mi, b->blocks + i);
+ b->length += tiler_size(&mi->blk);
+ }
+
+ /* if found all, register buffer */
+ offs = _b->mi[0]->blk.phys & ~PAGE_MASK;
+ b->offset = _m_get_offs(pi, b->length) + offs;
+ b->length -= offs;
+
+ /* have mutex */
+ list_add(&_b->by_pid, &pi->bufs);
+
+ return 0;
+}
+
+/* unregister a buffer */
+static void _m_unregister_buf(struct __buf_info *_b)
+{
+ u32 i;
+
+ /* unregister */
+ list_del(&_b->by_pid);
+
+ /* no longer using the blocks */
+ for (i = 0; i < _b->buf_info.num_blocks; i++)
+ ops->unlock_free(_b->mi[i], false);
+
+ kfree(_b);
+}
+
+/*
+ * process_info handling methods
+ * ==========================================================================
+ */
+
+/* get process info, and increment refs for device tracking */
+static struct process_info *__get_pi(pid_t pid, bool kernel)
+{
+ struct process_info *pi;
+
+ /*
+	 * treat all user processes as the same; kernel processes are still
+	 * tracked separately so as not to free kernel-allocated areas when a
+	 * user process closes the tiler driver
+ */
+ if (!security)
+ pid = 0;
+
+ /* find process context */
+ mutex_lock(&mtx);
+ list_for_each_entry(pi, &procs, list) {
+ if (pi->pid == pid && pi->kernel == kernel)
+ goto done;
+ }
+
+ /* create process context */
+ pi = kmalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi)
+ goto done;
+ memset(pi, 0, sizeof(*pi));
+
+ pi->pid = pid;
+ pi->kernel = kernel;
+ INIT_LIST_HEAD(&pi->groups);
+ INIT_LIST_HEAD(&pi->bufs);
+ list_add(&pi->list, &procs);
+done:
+ /* increment reference count */
+ if (pi && !kernel)
+ pi->refs++;
+ mutex_unlock(&mtx);
+ return pi;
+}
+
+/**
+ * Free all info kept by a process: all registered buffers, allocated blocks,
+ * and unreferenced blocks. Any blocks/areas still referenced will move to the
+ * orphaned lists to avoid issues if a new process is created with the same pid.
+ */
+static void _m_free_process_info(struct process_info *pi)
+{
+ struct gid_info *gi, *gi_;
+ struct __buf_info *_b = NULL, *_b_ = NULL;
+
+ /* have mutex */
+
+ if (!list_empty(&pi->bufs))
+ tiler_notify_event(TILER_DEVICE_CLOSE, NULL);
+
+ /* unregister all buffers */
+ list_for_each_entry_safe(_b, _b_, &pi->bufs, by_pid)
+ _m_unregister_buf(_b);
+
+ BUG_ON(!list_empty(&pi->bufs));
+
+ /* free all allocated blocks, and remove unreferenced ones */
+ list_for_each_entry_safe(gi, gi_, &pi->groups, by_pid)
+ ops->destroy_group(gi);
+
+ BUG_ON(!list_empty(&pi->groups));
+ list_del(&pi->list);
+ kfree(pi);
+}
+
+/* Free all info kept by all processes. Called on cleanup. */
+static void destroy_processes(void)
+{
+ struct process_info *pi, *pi_;
+
+ mutex_lock(&mtx);
+
+ list_for_each_entry_safe(pi, pi_, &procs, list)
+ _m_free_process_info(pi);
+ BUG_ON(!list_empty(&procs));
+
+ mutex_unlock(&mtx);
+}
+
+/*
+ * File operations (mmap, ioctl, open, close)
+ * ==========================================================================
+ */
+
+/* mmap tiler buffer into user's virtual space */
+static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct __buf_info *_b;
+ struct tiler_buf_info *b = NULL;
+ u32 i, map_offs, map_size, blk_offs, blk_size, mapped_size;
+ struct process_info *pi = filp->private_data;
+ u32 offs = vma->vm_pgoff << PAGE_SHIFT;
+ u32 size = vma->vm_end - vma->vm_start;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+ /* find tiler buffer to mmap */
+ mutex_lock(&mtx);
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+		/* we support partial mmapping of a whole tiler buffer */
+ if (offs >= (_b->buf_info.offset & PAGE_MASK) &&
+ offs + size <= PAGE_ALIGN(_b->buf_info.offset +
+ _b->buf_info.length)) {
+ b = &_b->buf_info;
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+
+	/* we use b to detect if we found the buffer */
+ if (!b)
+ return -ENXIO;
+
+ /* mmap relevant blocks */
+ blk_offs = _b->buf_info.offset;
+
+ /* start at the beginning of the region */
+ mapped_size = 0;
+ for (i = 0; i < b->num_blocks; i++, blk_offs += blk_size) {
+ blk_size = tiler_size(&_b->mi[i]->blk);
+ /* see if tiler block is inside the requested region */
+ if (offs >= blk_offs + blk_size || offs + size < blk_offs)
+ continue;
+ /* get the offset and map size for this particular block */
+ map_offs = max(offs, blk_offs) - blk_offs;
+ map_size = min(size - mapped_size, blk_size);
+
+ /* mmap block */
+ if (tiler_mmap_blk(&_b->mi[i]->blk, map_offs, map_size, vma,
+ mapped_size))
+ return -EAGAIN;
+
+ /* update mmap region pointer */
+ mapped_size += map_size;
+ }
+ return 0;
+}
+
+/* ioctl handler */
+static s32 tiler_ioctl(struct inode *ip, struct file *filp, u32 cmd,
+ unsigned long arg)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+ s32 r;
+ void __user *data = (void __user *)arg;
+ struct process_info *pi = filp->private_data;
+ struct __buf_info *_b;
+ struct tiler_buf_info buf_info = {0};
+ struct tiler_block_info block_info = {0};
+ struct mem_info *mi;
+
+ switch (cmd) {
+ /* allocate block */
+ case TILIOC_GBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ switch (block_info.fmt) {
+ case TILFMT_PAGE:
+ r = ops->alloc(block_info.fmt, block_info.dim.len, 1,
+ block_info.align, block_info.offs,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ break;
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ r = ops->alloc(block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align, block_info.offs,
+ block_info.key, block_info.group_id,
+ pi, &mi);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (r)
+ return r;
+
+ /* fill out block info */
+ if (mi) {
+ block_info.ptr = NULL;
+ ops->describe(mi, &block_info);
+ }
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+ /* free/unmap block */
+ case TILIOC_FBLK:
+ case TILIOC_UMBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ /* search current process first, then all processes */
+ mutex_lock(&mtx);
+ mi = _m_lock_block(block_info.key, block_info.id, pi);
+ mutex_unlock(&mtx);
+ if (mi)
+ ops->unlock_free(mi, true);
+
+ /* free always succeeds */
+ break;
+ /* get physical address */
+ case TILIOC_GSSP:
+ pgd = pgd_offset(current->mm, arg);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, arg);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, arg);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return (pte & PAGE_MASK) |
+ (~PAGE_MASK & arg);
+ }
+ }
+ }
+ /* va not in page table, return NULL */
+ return (s32) NULL;
+ break;
+ /* map block */
+ case TILIOC_MBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (!block_info.ptr)
+ return -EFAULT;
+
+ r = ops->map(block_info.fmt, block_info.dim.len, 1,
+ block_info.key, block_info.group_id, pi,
+ &mi, (u32)block_info.ptr);
+ if (r)
+ return r;
+
+ /* fill out block info */
+ if (mi)
+ ops->describe(mi, &block_info);
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+#ifndef CONFIG_TILER_SECURE
+ /* query buffer information by offset */
+ case TILIOC_QBUF:
+ if (!offset_lookup)
+ return -EPERM;
+
+ if (copy_from_user(&buf_info, data, sizeof(buf_info)))
+ return -EFAULT;
+
+ /* find buffer */
+ mutex_lock(&mtx);
+ r = -ENOENT;
+ /* buffer registration is per process */
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+ memcpy(&buf_info, &_b->buf_info,
+ sizeof(buf_info));
+ r = 0;
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+
+ if (r)
+ return r;
+
+ if (copy_to_user(data, &_b->buf_info, sizeof(_b->buf_info)))
+ return -EFAULT;
+ break;
+#endif
+ /* register buffer */
+ case TILIOC_RBUF:
+ /* save buffer information */
+ _b = kmalloc(sizeof(*_b), GFP_KERNEL);
+ if (!_b)
+ return -ENOMEM;
+ memset(_b, 0, sizeof(*_b));
+
+ if (copy_from_user(&_b->buf_info, data, sizeof(_b->buf_info))) {
+ kfree(_b);
+ return -EFAULT;
+ }
+
+ mutex_lock(&mtx);
+ r = _m_register_buf(_b, pi);
+ mutex_unlock(&mtx);
+
+ if (r) {
+ kfree(_b);
+ return -EACCES;
+ }
+
+ /* undo registration on failure */
+ if (copy_to_user(data, &_b->buf_info, sizeof(_b->buf_info))) {
+ mutex_lock(&mtx);
+ _m_unregister_buf(_b);
+ mutex_unlock(&mtx);
+ return -EFAULT;
+ }
+ break;
+ /* unregister a buffer */
+ case TILIOC_URBUF:
+ if (copy_from_user(&buf_info, data, sizeof(buf_info)))
+ return -EFAULT;
+
+ /* find buffer */
+ r = -EFAULT;
+ mutex_lock(&mtx);
+ /* buffer registration is per process */
+ list_for_each_entry(_b, &pi->bufs, by_pid) {
+ if (buf_info.offset == _b->buf_info.offset) {
+				/* retrieve buffer length before the buffer
+				   struct is freed by _m_unregister_buf() */
+				buf_info.length = _b->buf_info.length;
+				_m_unregister_buf(_b);
+ r = 0;
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+
+ if (r)
+ return r;
+
+ if (copy_to_user(data, &buf_info, sizeof(buf_info)))
+ return -EFAULT;
+ break;
+	/* pre-reserve blocks */
+ case TILIOC_PRBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (block_info.fmt == TILFMT_8AND16)
+ ops->reserve_nv12(block_info.key,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align,
+ block_info.offs,
+ block_info.group_id, pi);
+ else
+ ops->reserve(block_info.key,
+ block_info.fmt,
+ block_info.dim.area.width,
+ block_info.dim.area.height,
+ block_info.align,
+ block_info.offs,
+ block_info.group_id, pi);
+ break;
+ /* unreserve blocks */
+ case TILIOC_URBLK:
+ ops->unreserve(arg, pi);
+ break;
+ /* query a tiler block */
+ case TILIOC_QBLK:
+ if (copy_from_user(&block_info, data, sizeof(block_info)))
+ return -EFAULT;
+
+ if (block_info.id) {
+ /* look up by id if specified */
+ mutex_lock(&mtx);
+ mi = _m_lock_block(block_info.key, block_info.id, pi);
+ mutex_unlock(&mtx);
+ } else
+#ifndef CONFIG_TILER_SECURE
+ if (ssptr_lookup) {
+ /* otherwise, look up by ssptr if allowed */
+ mi = ops->lock_by_ssptr(block_info.ssptr);
+ } else
+#endif
+ return -EPERM;
+
+ if (!mi)
+ return -EFAULT;
+
+ /* we don't keep track of ptr and 1D stride so clear them */
+ block_info.ptr = NULL;
+ block_info.stride = 0;
+
+ ops->describe(mi, &block_info);
+ ops->unlock_free(mi, false);
+
+ if (copy_to_user(data, &block_info, sizeof(block_info)))
+ return -EFAULT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/* open tiler driver */
+static s32 tiler_open(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = __get_pi(current->tgid, false);
+ if (!pi)
+ return -ENOMEM;
+
+ filp->private_data = pi;
+ return 0;
+}
+
+/* close tiler driver */
+static s32 tiler_release(struct inode *ip, struct file *filp)
+{
+ struct process_info *pi = filp->private_data;
+
+ mutex_lock(&mtx);
+ /* free resources if last device in this process */
+ if (0 == --pi->refs)
+ _m_free_process_info(pi);
+
+ mutex_unlock(&mtx);
+
+ return 0;
+}
+
+/* tiler driver file operations */
+static const struct file_operations tiler_fops = {
+ .open = tiler_open,
+ .ioctl = tiler_ioctl,
+ .release = tiler_release,
+ .mmap = tiler_mmap,
+};
+
+/* initialize tiler interface */
+void tiler_iface_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+ ops->cleanup = destroy_processes;
+ ops->fops = &tiler_fops;
+
+#ifdef CONFIG_TILER_SECURE
+ security = true;
+ offset_lookup = ssptr_lookup = false;
+#endif
+
+ mutex_init(&mtx);
+ INIT_LIST_HEAD(&procs);
+ BLOCKING_INIT_NOTIFIER_HEAD(&notifier);
+}
+
+/*
+ * Kernel APIs
+ * ==========================================================================
+ */
+
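+/*
+ * Illustrative usage sketch for the APIs below (values are hypothetical):
+ *
+ *	struct tiler_block_t blk = {0};
+ *
+ *	blk.width = 640;
+ *	blk.height = 480;
+ *	if (tiler_alloc(&blk, TILFMT_16BIT, 0, 0) == 0) {
+ *		... use blk.phys and tiler_pstride(&blk) ...
+ *		tiler_free(&blk);
+ *	}
+ */
+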
+s32 tiler_reg_notifier(struct notifier_block *nb)
+{
+ if (!nb)
+ return -EINVAL;
+ return blocking_notifier_chain_register(&notifier, nb);
+}
+EXPORT_SYMBOL(tiler_reg_notifier);
+
+s32 tiler_unreg_notifier(struct notifier_block *nb)
+{
+ if (!nb)
+ return -EINVAL;
+ return blocking_notifier_chain_unregister(&notifier, nb);
+}
+EXPORT_SYMBOL(tiler_unreg_notifier);
+
+void tiler_reservex(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ ops->reserve(n, fmt, width, height, align, offs, gid, pi);
+}
+EXPORT_SYMBOL(tiler_reservex);
+
+void tiler_reserve(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs)
+{
+ tiler_reservex(n, fmt, width, height, align, offs, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve);
+
+void tiler_reservex_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, pid_t pid)
+{
+ struct process_info *pi = __get_pi(pid, true);
+
+ if (pi)
+ ops->reserve_nv12(n, width, height, align, offs, gid, pi);
+}
+EXPORT_SYMBOL(tiler_reservex_nv12);
+
+void tiler_reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs)
+{
+ tiler_reservex_nv12(n, width, height, align, offs, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_reserve_nv12);
+
+s32 tiler_allocx(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 align, u32 offs, u32 gid, pid_t pid)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ BUG_ON(!blk || blk->phys);
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = ops->alloc(fmt, blk->width, blk->height, align, offs, blk->key,
+ gid, pi, &mi);
+ if (mi) {
+ blk->phys = mi->blk.phys;
+ blk->id = mi->blk.id;
+ }
+ return res;
+}
+EXPORT_SYMBOL(tiler_allocx);
+
+s32 tiler_alloc(struct tiler_block_t *blk, enum tiler_fmt fmt,
+ u32 align, u32 offs)
+{
+ return tiler_allocx(blk, fmt, align, offs, 0, current->tgid);
+}
+EXPORT_SYMBOL(tiler_alloc);
+
+s32 tiler_mapx(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 gid,
+ pid_t pid, u32 usr_addr)
+{
+ struct mem_info *mi;
+ struct process_info *pi;
+ s32 res;
+
+ BUG_ON(!blk || blk->phys);
+
+ pi = __get_pi(pid, true);
+ if (!pi)
+ return -ENOMEM;
+
+ res = ops->map(fmt, blk->width, blk->height, blk->key, gid, pi, &mi,
+ usr_addr);
+ if (mi) {
+ blk->phys = mi->blk.phys;
+ blk->id = mi->blk.id;
+ }
+ return res;
+
+}
+EXPORT_SYMBOL(tiler_mapx);
+
+s32 tiler_map(struct tiler_block_t *blk, enum tiler_fmt fmt, u32 usr_addr)
+{
+ return tiler_mapx(blk, fmt, 0, current->tgid, usr_addr);
+}
+EXPORT_SYMBOL(tiler_map);
+
+s32 tiler_mmap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ struct vm_area_struct *vma, u32 voffs)
+{
+ u32 v, p, len;
+
+ /* mapping must fit into vma */
+ BUG_ON(vma->vm_start > vma->vm_start + voffs ||
+ vma->vm_start + voffs > vma->vm_start + voffs + size ||
+ vma->vm_start + voffs + size > vma->vm_end);
+
+ /* mapping must fit into block */
+ BUG_ON(offs > offs + size || offs + size > tiler_size(blk));
+
+ v = tiler_vstride(blk);
+ p = tiler_pstride(blk);
+
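+	/*
+	 * walk the block one (partial) row at a time: map up to v bytes,
+	 * then advance the physical offset by an extra p - v bytes to skip
+	 * the tiler-space padding between rows
+	 */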
+ /* remap block portion */
+ len = v - (offs % v); /* initial area to map */
+ while (size) {
+		/* restrict to the size that still needs mapping */
+ if (len > size)
+ len = size;
+
+ vma->vm_pgoff = (blk->phys + offs) >> PAGE_SHIFT;
+ if (remap_pfn_range(vma, vma->vm_start + voffs, vma->vm_pgoff,
+ len, vma->vm_page_prot))
+ return -EAGAIN;
+ voffs += len;
+ offs += len + p - v;
+ size -= len;
+ len = v; /* subsequent area to map */
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tiler_mmap_blk);
+
+s32 tiler_ioremap_blk(struct tiler_block_t *blk, u32 offs, u32 size,
+ u32 addr, u32 mtype)
+{
+ u32 v, p;
+ u32 len; /* area to map */
+ const struct mem_type *type = get_mem_type(mtype);
+
+ /* mapping must fit into address space */
+ BUG_ON(addr > addr + size);
+
+ /* mapping must fit into block */
+ BUG_ON(offs > offs + size || offs + size > tiler_size(blk));
+
+ v = tiler_vstride(blk);
+ p = tiler_pstride(blk);
+
+ /* move offset and address to end */
+ offs += blk->phys + size;
+ addr += size;
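+	/*
+	 * the loop below counts size down to zero: each page is mapped at
+	 * virtual (addr - size) from physical (offs - size), one row of v
+	 * bytes at a time, skipping p - v bytes of padding between rows
+	 */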
+
+ len = v - (offs % v); /* initial area to map */
+ while (size) {
+ while (len && size) {
+ if (ioremap_page(addr - size, offs - size, type))
+ return -EAGAIN;
+ len -= PAGE_SIZE;
+ size -= PAGE_SIZE;
+ }
+
+ offs += p - v;
+ len = v; /* subsequent area to map */
+ }
+ return 0;
+}
+EXPORT_SYMBOL(tiler_ioremap_blk);
+
+void tiler_free(struct tiler_block_t *blk)
+{
+ /* find block */
+ struct mem_info *mi = ops->lock(blk->key, blk->id, NULL);
+ if (mi)
+ ops->unlock_free(mi, true);
+ blk->phys = blk->id = 0;
+}
+EXPORT_SYMBOL(tiler_free);
diff --git a/drivers/media/video/tiler/tiler-main.c b/drivers/media/video/tiler/tiler-main.c
new file mode 100644
index 000000000000..19e838f9ec3e
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-main.c
@@ -0,0 +1,1269 @@
+/*
+ * tiler-main.c
+ *
+ * TILER driver main support functions for TI TILER hardware block.
+ *
+ * Authors: Lajos Molnar <molnar@ti.com>
+ * David Sin <davidsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/cdev.h> /* struct cdev */
+#include <linux/kdev_t.h> /* MKDEV() */
+#include <linux/fs.h> /* register_chrdev_region() */
+#include <linux/device.h> /* struct class */
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/err.h> /* IS_ERR() */
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/dma-mapping.h> /* dma_alloc_coherent */
+#include <linux/pagemap.h> /* page_cache_release() */
+#include <linux/slab.h>
+
+#include <mach/dmm.h>
+#include "tmm.h"
+#include "_tiler.h"
+#include "tcm/tcm-sita.h" /* TCM algorithm */
+
+static bool ssptr_id = CONFIG_TILER_SSPTR_ID;
+static uint default_align = CONFIG_TILER_ALIGNMENT;
+static uint granularity = CONFIG_TILER_GRANULARITY;
+
+/*
+ * We can only change ssptr_id if there are no blocks allocated, so that
+ * pseudo-random ids and ssptrs do not potentially clash. For now make it
+ * read-only.
+ */
+module_param(ssptr_id, bool, 0444);
+MODULE_PARM_DESC(ssptr_id, "Use ssptr as block ID");
+module_param_named(align, default_align, uint, 0644);
+MODULE_PARM_DESC(align, "Default block ssptr alignment");
+module_param_named(grain, granularity, uint, 0644);
+MODULE_PARM_DESC(grain, "Granularity (bytes)");
+
+struct tiler_dev {
+ struct cdev cdev;
+};
+
+struct platform_driver tiler_driver_ldm = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "tiler",
+ },
+ .probe = NULL,
+ .shutdown = NULL,
+ .remove = NULL,
+};
+
+static struct tiler_ops tiler; /* shared methods and variables */
+
+static struct list_head blocks; /* all tiler blocks */
+static struct list_head orphan_areas; /* orphaned 2D areas */
+static struct list_head orphan_onedim; /* orphaned 1D areas */
+
+static s32 tiler_major;
+static s32 tiler_minor;
+static struct tiler_dev *tiler_device;
+static struct class *tilerdev_class;
+static struct mutex mtx;
+static struct tcm *tcm[TILER_FORMATS];
+static struct tmm *tmm[TILER_FORMATS];
+static u32 *dmac_va;
+static dma_addr_t dmac_pa;
+
+/*
+ * TMM connectors
+ * ==========================================================================
+ */
+/* wrapper around tmm_map */
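+/* copies each slice's page addresses into the DMA-coherent buffer and
+   programs the PAT with one tmm_map() call per slice */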
+static s32 refill_pat(struct tmm *tmm, struct tcm_area *area, u32 *ptr)
+{
+ s32 res = 0;
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ memcpy(dmac_va, ptr, sizeof(*ptr) * tcm_sizeof(slice));
+ ptr += tcm_sizeof(slice);
+
+ if (tmm_map(tmm, p_area, dmac_pa)) {
+ res = -EFAULT;
+ break;
+ }
+ }
+
+ return res;
+}
+
+/* wrapper around tmm_clear */
+static void clear_pat(struct tmm *tmm, struct tcm_area *area)
+{
+ struct pat_area p_area = {0};
+ struct tcm_area slice, area_s;
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ p_area.x0 = slice.p0.x;
+ p_area.y0 = slice.p0.y;
+ p_area.x1 = slice.p1.x;
+ p_area.y1 = slice.p1.y;
+
+ tmm_clear(tmm, p_area);
+ }
+}
+
+/*
+ * ID handling methods
+ * ==========================================================================
+ */
+
+/* check if an id is used */
+static bool _m_id_in_use(u32 id)
+{
+ struct mem_info *mi;
+ list_for_each_entry(mi, &blocks, global)
+ if (mi->blk.id == id)
+ return 1;
+ return 0;
+}
+
+/* get an id */
+static u32 _m_get_id(void)
+{
+ static u32 id = 0x2d7ae;
+
+	/* ensure no one is using this id */
+ while (_m_id_in_use(id)) {
+ /* generate a new pseudo-random ID */
+
+		/* Galois LFSR: 32, 22, 2, 1 */
+ id = (id >> 1) ^ (u32)((0 - (id & 1u)) & 0x80200003u);
+ }
+
+ return id;
+}
+
+/*
+ * gid_info handling methods
+ * ==========================================================================
+ */
+
+/* get or create new gid_info object */
+static struct gid_info *_m_get_gi(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+
+ /* have mutex */
+
+	/* see if the group already exists */
+ list_for_each_entry(gi, &pi->groups, by_pid) {
+ if (gi->gid == gid)
+ goto done;
+ }
+
+ /* create new group */
+ gi = kmalloc(sizeof(*gi), GFP_KERNEL);
+ if (!gi)
+ return gi;
+
+ memset(gi, 0, sizeof(*gi));
+ INIT_LIST_HEAD(&gi->areas);
+ INIT_LIST_HEAD(&gi->onedim);
+ INIT_LIST_HEAD(&gi->reserved);
+ gi->pi = pi;
+ gi->gid = gid;
+ list_add(&gi->by_pid, &pi->groups);
+done:
+ /*
+ * Once area is allocated, the group info's ref count will be
+ * decremented as the reference is no longer needed.
+ */
+ gi->refs++;
+ return gi;
+}
+
+/* free gid_info object if empty */
+static void _m_try_free_group(struct gid_info *gi)
+{
+ /* have mutex */
+ if (gi && list_empty(&gi->areas) && list_empty(&gi->onedim) &&
+	    /* also ensure no one is still using this group */
+ !gi->refs) {
+ BUG_ON(!list_empty(&gi->reserved));
+ list_del(&gi->by_pid);
+
+ /* if group is tracking kernel objects, we may free even
+ the process info */
+ if (gi->pi->kernel && list_empty(&gi->pi->groups)) {
+ list_del(&gi->pi->list);
+ kfree(gi->pi);
+ }
+
+ kfree(gi);
+ }
+}
+
+/* --- external versions --- */
+
+static struct gid_info *get_gi(struct process_info *pi, u32 gid)
+{
+ struct gid_info *gi;
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+ return gi;
+}
+
+static void release_gi(struct gid_info *gi)
+{
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
+}
+
+/*
+ * Area handling methods
+ * ==========================================================================
+ */
+
+/* allocate a reserved area of given size and alignment, and link it to gi */
+/* leaves mutex locked to be able to add block to area */
+static struct area_info *area_new_m(u16 width, u16 height, u16 align,
+ struct tcm *tcm, struct gid_info *gi)
+{
+ struct area_info *ai = kmalloc(sizeof(*ai), GFP_KERNEL);
+ if (!ai)
+ return NULL;
+
+ /* set up empty area info */
+ memset(ai, 0x0, sizeof(*ai));
+ INIT_LIST_HEAD(&ai->blocks);
+
+ /* reserve an allocation area */
+ if (tcm_reserve_2d(tcm, width, height, align, &ai->area)) {
+ kfree(ai);
+ return NULL;
+ }
+
+ ai->gi = gi;
+ mutex_lock(&mtx);
+ list_add_tail(&ai->by_gid, &gi->areas);
+ return ai;
+}
+
+/* (must have mutex) free an area */
+static inline void _m_area_free(struct area_info *ai)
+{
+ if (ai) {
+ list_del(&ai->by_gid);
+ kfree(ai);
+ }
+}
+
+static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
+ u16 *x_area, u16 *y_area, u16 *band,
+ u16 *align, u16 *offs, u16 *in_offs)
+{
+ /* input: width, height is in pixels, align, offs in bytes */
+ /* output: x_area, y_area, band, align, offs in slots */
+
+ /* slot width, height, and row size */
+ u32 slot_row, min_align;
+ const struct tiler_geom *g;
+
+ /* width and height must be positive */
+ if (!width || !height)
+ return -EINVAL;
+
+	/* align must be a power of 2 */
+ if (*align & (*align - 1))
+ return -EINVAL;
+
+ if (fmt == TILFMT_PAGE) {
+		/* adjust size to accommodate offset, only do page alignment */
+ *align = PAGE_SIZE;
+ *in_offs = *offs & ~PAGE_MASK;
+ width += *in_offs;
+
+ /* for 1D area keep the height (1), width is in tiler slots */
+ *x_area = DIV_ROUND_UP(width, tiler.page);
+ *y_area = *band = 1;
+
+ if (*x_area * *y_area > tiler.width * tiler.height)
+ return -ENOMEM;
+ return 0;
+ }
+
+ /* format must be valid */
+ g = tiler.geom(fmt);
+ if (!g)
+ return -EINVAL;
+
+ /* get the # of bytes per row in 1 slot */
+ slot_row = g->slot_w * g->bpp;
+
+	/* how many slots can be accessed via one physical page */
+ *band = PAGE_SIZE / slot_row;
+
+ /* minimum alignment is at least 1 slot. Use default if needed */
+ min_align = max(slot_row, granularity);
+ *align = ALIGN(*align ? : default_align, min_align);
+
+	/* align must still be a power of 2 (in case default_align is wrong) */
+ if (*align & (*align - 1))
+ return -EAGAIN;
+
+ /* offset must be multiple of bpp */
+ if (*offs & (g->bpp - 1) || *offs >= *align)
+ return -EINVAL;
+
+ /* round down the offset to the nearest slot size, and increase width
+ to allow space for having the correct offset */
+ width += (*offs & (min_align - 1)) / g->bpp;
+ if (in_offs)
+ *in_offs = *offs & (min_align - 1);
+ *offs &= ~(min_align - 1);
+
+ /* expand width to block size */
+ width = ALIGN(width, min_align / g->bpp);
+
+ /* adjust to slots */
+ *x_area = DIV_ROUND_UP(width, g->slot_w);
+ *y_area = DIV_ROUND_UP(height, g->slot_h);
+ *align /= slot_row;
+ *offs /= slot_row;
+
+ if (*x_area > tiler.width || *y_area > tiler.height)
+ return -ENOMEM;
+ return 0;
+}
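+
+/*
+ * Worked example (illustrative; assumes the OMAP4 geometry of 64x32-pixel
+ * 8-bit slots at 1 byte/pixel, a 4096-byte PAGE_SIZE, and module parameters
+ * granularity = 128 and default_align = PAGE_SIZE):
+ *
+ *	__analize_area(TILFMT_8BIT, 1920, 1080, ...) with align = offs = 0
+ *
+ *	slot_row  = 64 * 1                  = 64 bytes
+ *	band      = 4096 / 64               = 64 slots
+ *	min_align = max(64, 128)            = 128
+ *	align     = ALIGN(4096, 128)        = 4096 bytes -> 64 slots
+ *	x_area    = DIV_ROUND_UP(1920, 64)  = 30 slots
+ *	y_area    = DIV_ROUND_UP(1080, 32)  = 34 slots
+ */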
+
+/**
+ * Find a place where a 2D block would fit into a 2D area of the
+ * same height.
+ *
+ * @author a0194118 (3/19/2010)
+ *
+ * @param w Width of the block.
+ * @param align Alignment of the block.
+ * @param offs Offset of the block (within alignment)
+ * @param ai Pointer to area info
+ * @param before Pointer to the variable where the next block
+ *               will be stored. The block should be inserted
+ *               before this block.
+ *
+ * @return the end coordinate (x1 + 1) where a block would fit,
+ * or 0 if it does not fit.
+ *
+ * (must have mutex)
+ */
+static u16 _m_blk_find_fit(u16 w, u16 align, u16 offs,
+ struct area_info *ai, struct list_head **before)
+{
+ int x = ai->area.p0.x + w + offs;
+ struct mem_info *mi;
+
+ /* area blocks are sorted by x */
+ list_for_each_entry(mi, &ai->blocks, by_area) {
+ /* check if buffer would fit before this area */
+ if (x <= mi->area.p0.x) {
+ *before = &mi->by_area;
+ return x;
+ }
+ x = ALIGN(mi->area.p1.x + 1 - offs, align) + w + offs;
+ }
+ *before = &ai->blocks;
+
+ /* check if buffer would fit after last area */
+ return (x <= ai->area.p1.x + 1) ? x : 0;
+}
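+
+/*
+ * Example (illustrative): for an area spanning x = 0..63 that already holds
+ * a block at x = 10..19, a request with w = 8, align = 4, offs = 2 starts
+ * with x = 0 + 8 + 2 = 10; since 10 <= 10 (the existing block's p0.x), the
+ * new block fits before it, occupying x = 2..9 and keeping offs within align.
+ */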
+
+/* (must have mutex) adds a block to an area with certain x coordinates */
+static inline
+struct mem_info *_m_add2area(struct mem_info *mi, struct area_info *ai,
+ u16 x0, u16 w, struct list_head *before)
+{
+ mi->parent = ai;
+ mi->area = ai->area;
+ mi->area.p0.x = x0;
+ mi->area.p1.x = x0 + w - 1;
+ list_add_tail(&mi->by_area, before);
+ ai->nblocks++;
+ return mi;
+}
+
+static struct mem_info *get_2d_area(u16 w, u16 h, u16 align, u16 offs, u16 band,
+ struct gid_info *gi, struct tcm *tcm)
+{
+ struct area_info *ai = NULL;
+ struct mem_info *mi = NULL;
+ struct list_head *before = NULL;
+ u16 x = 0; /* this holds the end of a potential area */
+
+ /* allocate map info */
+
+ /* see if there is available prereserved space */
+ mutex_lock(&mtx);
+ list_for_each_entry(mi, &gi->reserved, global) {
+ if (mi->area.tcm == tcm &&
+ tcm_aheight(mi->area) == h &&
+ tcm_awidth(mi->area) == w &&
+ (mi->area.p0.x & (align - 1)) == offs) {
+ /* this area is already set up */
+
+ /* remove from reserved list */
+ list_del(&mi->global);
+ goto done;
+ }
+ }
+ mutex_unlock(&mtx);
+
+ /* if not, reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ return mi;
+ memset(mi, 0, sizeof(*mi));
+
+ /* see if allocation fits in one of the existing areas */
+ /* this sets x, ai and before */
+ mutex_lock(&mtx);
+ list_for_each_entry(ai, &gi->areas, by_gid) {
+ if (ai->area.tcm == tcm &&
+ tcm_aheight(ai->area) == h) {
+ x = _m_blk_find_fit(w, align, offs, ai, &before);
+ if (x) {
+ _m_add2area(mi, ai, x - w, w, before);
+ goto done;
+ }
+ }
+ }
+ mutex_unlock(&mtx);
+
+ /* if no area fit, reserve a new one */
+ ai = area_new_m(ALIGN(w + offs, max(band, align)), h,
+ max(band, align), tcm, gi);
+ if (ai) {
+ _m_add2area(mi, ai, ai->area.p0.x + offs, w, &ai->blocks);
+ } else {
+ /* clean up */
+ kfree(mi);
+ return NULL;
+ }
+
+done:
+ mutex_unlock(&mtx);
+ return mi;
+}
+
+/* layout reserved 2d blocks in a larger area */
+/* NOTE: band, w, h, a(lign), o(ffs) are in slots */
+static s32 lay_2d(enum tiler_fmt fmt, u16 n, u16 w, u16 h, u16 band,
+ u16 align, u16 offs, struct gid_info *gi,
+ struct list_head *pos)
+{
+ u16 x, x0, e = ALIGN(w, align), w_res = (n - 1) * e + w;
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
+
+	printk(KERN_INFO "packing %u buffers of width %u into width %u\n",
+ n, w, w_res);
+
+	/* reserve an area; dimensions, band, offs and align are in slots */
+ ai = area_new_m(ALIGN(w_res + offs, max(band, align)), h,
+ max(band, align), tcm[fmt], gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (n = 0, x = offs; x < w_res; x += e, n++) {
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+ x0 = ai->area.p0.x + x;
+ _m_add2area(mi, ai, x0, w, &ai->blocks);
+ list_add(&mi->global, pos);
+ }
+
+ mutex_unlock(&mtx);
+ return n;
+}
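+
+/*
+ * Example (illustrative, assuming band = 64): lay_2d with n = 3, w = 10,
+ * align = 16, offs = 0 computes e = ALIGN(10, 16) = 16 and
+ * w_res = 2 * 16 + 10 = 42, reserves an area of ALIGN(42, 64) = 64 slots,
+ * and places the blocks at x-offsets 0, 16 and 32 within it.
+ */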
+
+/* layout reserved nv12 blocks in a larger area */
+/* NOTE: area w(idth), w1 (8-bit block width), h(eight) are in slots */
+/* p is a pointer to a packing description, which is a list of offsets in
+ the area for consecutive 8-bit and 16-bit blocks */
+static s32 lay_nv12(int n, u16 w, u16 w1, u16 h, struct gid_info *gi, u8 *p)
+{
+ u16 wh = (w1 + 1) >> 1, width, x0;
+ int m;
+ int a = PAGE_SIZE / tiler.geom(TILFMT_8BIT)->slot_w;
+
+ struct mem_info *mi = NULL;
+ struct area_info *ai = NULL;
+ struct list_head *pos;
+
+ /* reserve area */
+	ai = area_new_m(w, h, a, tcm[TILFMT_8BIT], gi);
+ if (!ai)
+ return -ENOMEM;
+
+ /* lay out blocks in the reserved area */
+ for (m = 0; m < 2 * n; m++) {
+ width = (m & 1) ? wh : w1;
+ x0 = ai->area.p0.x + *p++;
+
+ /* get insertion head */
+ list_for_each(pos, &ai->blocks) {
+ mi = list_entry(pos, struct mem_info, by_area);
+ if (mi->area.p0.x > x0)
+ break;
+ }
+
+ /* reserve a block struct */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ break;
+
+ memset(mi, 0, sizeof(*mi));
+
+ _m_add2area(mi, ai, x0, width, pos);
+ list_add(&mi->global, &gi->reserved);
+ }
+
+ mutex_unlock(&mtx);
+ return n;
+}
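+
+/*
+ * Note on the packing list (illustrative): p holds slot x-offsets in
+ * (8-bit, 16-bit) pairs, one pair per buffer. E.g. p = { 2, 33, ... } places
+ * the first 8-bit block at slot 2 and its 16-bit mate at slot 33 of the same
+ * reserved area, matching the even/odd width selection in the loop above.
+ */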
+
+/* (must have mutex) free block and any freed areas */
+static s32 _m_free(struct mem_info *mi)
+{
+ struct area_info *ai = NULL;
+ struct page *page = NULL;
+ s32 res = 0;
+ u32 i;
+
+ /* release memory */
+ if (mi->pg_ptr) {
+ for (i = 0; i < mi->num_pg; i++) {
+ page = (struct page *)mi->pg_ptr[i];
+ if (page) {
+ if (!PageReserved(page))
+ SetPageDirty(page);
+ page_cache_release(page);
+ }
+ }
+ kfree(mi->pg_ptr);
+ } else if (mi->mem) {
+ tmm_free(tmm[tiler_fmt(mi->blk.phys)], mi->mem);
+ }
+ clear_pat(tmm[tiler_fmt(mi->blk.phys)], &mi->area);
+
+ /* safe deletion as list may not have been assigned */
+ if (mi->global.next)
+ list_del(&mi->global);
+ if (mi->by_area.next)
+ list_del(&mi->by_area);
+
+ /* remove block from area first if 2D */
+ if (mi->area.is2d) {
+ ai = mi->parent;
+
+ /* check to see if area needs removing also */
+ if (ai && !--ai->nblocks) {
+ res = tcm_free(&ai->area);
+ list_del(&ai->by_gid);
+ /* try to remove parent if it became empty */
+ _m_try_free_group(ai->gi);
+ kfree(ai);
+ ai = NULL;
+ }
+ } else {
+ /* remove 1D area */
+ res = tcm_free(&mi->area);
+ /* try to remove parent if it became empty */
+ _m_try_free_group(mi->parent);
+ }
+
+ kfree(mi);
+ return res;
+}
+
+/* (must have mutex) returns true if block was freed */
+static bool _m_chk_ref(struct mem_info *mi)
+{
+ /* check references */
+	if (mi->refs)
+		return false;
+
+	if (_m_free(mi))
+		printk(KERN_ERR "error while removing tiler block\n");
+
+	return true;
+}
+
+/* (must have mutex) */
+static inline bool _m_dec_ref(struct mem_info *mi)
+{
+ if (mi->refs-- <= 1)
+ return _m_chk_ref(mi);
+
+	return false;
+}
+
+/* (must have mutex) */
+static inline void _m_inc_ref(struct mem_info *mi)
+{
+ mi->refs++;
+}
+
+/* (must have mutex) returns true if block was freed */
+static inline bool _m_try_free(struct mem_info *mi)
+{
+ if (mi->alloced) {
+ mi->refs--;
+ mi->alloced = false;
+ }
+ return _m_chk_ref(mi);
+}
+
+/* --- external methods --- */
+
+/* find a block by key/id and lock it */
+static struct mem_info *
+find_n_lock(u32 key, u32 id, struct gid_info *gi) {
+ struct area_info *ai = NULL;
+ struct mem_info *mi = NULL;
+
+ mutex_lock(&mtx);
+
+ /* if group is not given, look globally */
+ if (!gi) {
+ list_for_each_entry(mi, &blocks, global) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ } else {
+		/* if id is a ssptr, we know whether the block is 1D or 2D from
+		   the address, so we optimize the lookup */
+ if (!ssptr_id ||
+ tiler_fmt(id) == TILFMT_PAGE) {
+ list_for_each_entry(mi, &gi->onedim, by_area) {
+ if (mi->blk.key == key && mi->blk.id == id)
+ goto done;
+ }
+ }
+
+ if (!ssptr_id ||
+ tiler_fmt(id) != TILFMT_PAGE) {
+ list_for_each_entry(ai, &gi->areas, by_gid) {
+ list_for_each_entry(mi, &ai->blocks, by_area) {
+ if (mi->blk.key == key &&
+ mi->blk.id == id)
+ goto done;
+ }
+ }
+ }
+ }
+
+ mi = NULL;
+done:
+ /* lock block by increasing its ref count */
+ if (mi)
+ mi->refs++;
+
+ mutex_unlock(&mtx);
+
+ return mi;
+}
+
+/* unlock a block, and optionally free it */
+static void unlock_n_free(struct mem_info *mi, bool free)
+{
+ mutex_lock(&mtx);
+
+ _m_dec_ref(mi);
+ if (free)
+ _m_try_free(mi);
+
+ mutex_unlock(&mtx);
+}
+
+/**
+ * Free all blocks in a group:
+ *
+ * both allocated blocks and unreferenced blocks. Any blocks/areas still
+ * referenced will move to the orphaned lists to avoid issues if a new process
+ * is created with the same pid.
+ *
+ * (acquires mutex)
+ */
+static void destroy_group(struct gid_info *gi)
+{
+ struct area_info *ai, *ai_;
+ struct mem_info *mi, *mi_;
+ bool ai_autofreed, need2free;
+
+ mutex_lock(&mtx);
+
+ /* free all allocated blocks, and remove unreferenced ones */
+
+	/*
+	 * Group info structs are freed automatically when they become empty
+	 * on an _m_try_free. However, if the group info is already empty, we
+	 * need to remove it manually.
+	 */
+ need2free = list_empty(&gi->areas) && list_empty(&gi->onedim);
+ list_for_each_entry_safe(ai, ai_, &gi->areas, by_gid) {
+ ai_autofreed = true;
+ list_for_each_entry_safe(mi, mi_, &ai->blocks, by_area)
+ ai_autofreed &= _m_try_free(mi);
+
+ /* save orphaned areas for later removal */
+ if (!ai_autofreed) {
+ need2free = true;
+ ai->gi = NULL;
+ list_move(&ai->by_gid, &orphan_areas);
+ }
+ }
+
+ list_for_each_entry_safe(mi, mi_, &gi->onedim, by_area) {
+ if (!_m_try_free(mi)) {
+ need2free = true;
+ /* save orphaned 1D blocks */
+ mi->parent = NULL;
+ list_move(&mi->by_area, &orphan_onedim);
+ }
+ }
+
+	/* if the group is still alive, the reserved list should have been
+	   emptied, as there should be no references on those blocks */
+ if (need2free) {
+ BUG_ON(!list_empty(&gi->onedim));
+ BUG_ON(!list_empty(&gi->areas));
+ _m_try_free_group(gi);
+ }
+
+ mutex_unlock(&mtx);
+}
+
+/* release (reserved) blocks */
+static void release_blocks(struct list_head *reserved)
+{
+ struct mem_info *mi, *mi_;
+
+ mutex_lock(&mtx);
+
+ /* find block in global list and free it */
+ list_for_each_entry_safe(mi, mi_, reserved, global) {
+ BUG_ON(mi->refs || mi->alloced);
+ _m_free(mi);
+ }
+ mutex_unlock(&mtx);
+}
+
+/* add reserved blocks to a group */
+static void add_reserved_blocks(struct list_head *reserved, struct gid_info *gi)
+{
+ mutex_lock(&mtx);
+ list_splice_init(reserved, &gi->reserved);
+ mutex_unlock(&mtx);
+}
+
+/* find a block by ssptr */
+static struct mem_info *find_block_by_ssptr(u32 sys_addr)
+{
+ struct mem_info *i;
+ struct tcm_pt pt;
+ u32 x, y;
+ enum tiler_fmt fmt;
+ const struct tiler_geom *g;
+
+ fmt = tiler_fmt(sys_addr);
+ if (fmt == TILFMT_INVALID)
+ return NULL;
+
+ g = tiler.geom(fmt);
+
+ /* convert x & y pixel coordinates to slot coordinates */
+ tiler.xy(sys_addr, &x, &y);
+ pt.x = x / g->slot_w;
+ pt.y = y / g->slot_h;
+
+ mutex_lock(&mtx);
+ list_for_each_entry(i, &blocks, global) {
+ if (tiler_fmt(i->blk.phys) == tiler_fmt(sys_addr) &&
+ tcm_is_in(pt, i->area)) {
+ i->refs++;
+ goto found;
+ }
+ }
+ i = NULL;
+
+found:
+ mutex_unlock(&mtx);
+ return i;
+}
+
+/* fill out block info from a mem_info */
+static void fill_block_info(struct mem_info *i, struct tiler_block_info *blk)
+{
+ blk->fmt = tiler_fmt(i->blk.phys);
+#ifdef CONFIG_TILER_EXPOSE_SSPTR
+ blk->ssptr = i->blk.phys;
+#endif
+ if (blk->fmt == TILFMT_PAGE) {
+ blk->dim.len = i->blk.width;
+ blk->group_id = ((struct gid_info *) i->parent)->gid;
+ } else {
+ blk->stride = tiler_vstride(&i->blk);
+ blk->dim.area.width = i->blk.width;
+ blk->dim.area.height = i->blk.height;
+ blk->group_id = ((struct area_info *) i->parent)->gi->gid;
+ }
+ blk->id = i->blk.id;
+ blk->key = i->blk.key;
+ blk->offs = i->blk.phys & ~PAGE_MASK;
+ blk->align = PAGE_SIZE;
+}
+
+/*
+ * Block operations
+ * ==========================================================================
+ */
+
+static struct mem_info *__get_area(enum tiler_fmt fmt, u32 width, u32 height,
+ u16 align, u16 offs, struct gid_info *gi)
+{
+ u16 x, y, band, in_offs = 0;
+ struct mem_info *mi = NULL;
+ const struct tiler_geom *g = tiler.geom(fmt);
+
+ /* calculate dimensions, band, offs and alignment in slots */
+ if (__analize_area(fmt, width, height, &x, &y, &band, &align, &offs,
+ &in_offs))
+ return NULL;
+
+ if (fmt == TILFMT_PAGE) {
+ /* 1D areas don't pack */
+ mi = kmalloc(sizeof(*mi), GFP_KERNEL);
+ if (!mi)
+ return NULL;
+ memset(mi, 0x0, sizeof(*mi));
+
+ if (tcm_reserve_1d(tcm[fmt], x * y, &mi->area)) {
+ kfree(mi);
+ return NULL;
+ }
+
+ mutex_lock(&mtx);
+ mi->parent = gi;
+ list_add(&mi->by_area, &gi->onedim);
+ } else {
+ mi = get_2d_area(x, y, align, offs, band, gi, tcm[fmt]);
+ if (!mi)
+ return NULL;
+
+ mutex_lock(&mtx);
+ }
+
+ list_add(&mi->global, &blocks);
+ mi->alloced = true;
+ mi->refs++;
+ gi->refs--;
+ mutex_unlock(&mtx);
+
+ mi->blk.phys = tiler.addr(fmt,
+ mi->area.p0.x * g->slot_w, mi->area.p0.y * g->slot_h)
+ + in_offs;
+ return mi;
+}
+
+static s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info)
+{
+ struct mem_info *mi = NULL;
+ struct gid_info *gi = NULL;
+
+ *info = NULL;
+
+ /* only support up to page alignment */
+ if (align > PAGE_SIZE || offs >= (align ? : default_align) || !pi)
+ return -EINVAL;
+
+ /* get group context */
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+
+ if (!gi)
+ return -ENOMEM;
+
+ /* reserve area in tiler container */
+ mi = __get_area(fmt, width, height, align, offs, gi);
+ if (!mi) {
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
+ return -ENOMEM;
+ }
+
+ mi->blk.width = width;
+ mi->blk.height = height;
+ mi->blk.key = key;
+ if (ssptr_id) {
+ mi->blk.id = mi->blk.phys;
+ } else {
+ mutex_lock(&mtx);
+ mi->blk.id = _m_get_id();
+ mutex_unlock(&mtx);
+ }
+
+ /* allocate and map if mapping is supported */
+ if (tmm_can_map(tmm[fmt])) {
+ mi->num_pg = tcm_sizeof(mi->area);
+
+ mi->mem = tmm_get(tmm[fmt], mi->num_pg);
+ if (!mi->mem)
+ goto cleanup;
+
+		/* Ensure the data reaches main memory before PAT refill */
+ wmb();
+
+ /* program PAT */
+ if (refill_pat(tmm[fmt], &mi->area, mi->mem))
+ goto cleanup;
+ }
+ *info = mi;
+ return 0;
+
+cleanup:
+ mutex_lock(&mtx);
+ _m_free(mi);
+ mutex_unlock(&mtx);
+	return -ENOMEM;
+}
+
+static s32 map_block(enum tiler_fmt fmt, u32 width, u32 height,
+ u32 key, u32 gid, struct process_info *pi,
+ struct mem_info **info, u32 usr_addr)
+{
+ u32 i = 0, tmp = -1, *mem = NULL;
+ u8 write = 0;
+ s32 res = -ENOMEM;
+ struct mem_info *mi = NULL;
+ struct page *page = NULL;
+ struct task_struct *curr_task = current;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma = NULL;
+ struct gid_info *gi = NULL;
+
+ *info = NULL;
+
+ /* we only support mapping a user buffer in page mode */
+ if (fmt != TILFMT_PAGE)
+ return -EPERM;
+
+ /* check if mapping is supported by tmm */
+ if (!tmm_can_map(tmm[fmt]))
+ return -EPERM;
+
+ /* get group context */
+ mutex_lock(&mtx);
+ gi = _m_get_gi(pi, gid);
+ mutex_unlock(&mtx);
+
+ if (!gi)
+ return -ENOMEM;
+
+ /* reserve area in tiler container */
+ mi = __get_area(fmt, width, height, 0, 0, gi);
+ if (!mi) {
+ mutex_lock(&mtx);
+ gi->refs--;
+ _m_try_free_group(gi);
+ mutex_unlock(&mtx);
+ return -ENOMEM;
+ }
+
+ mi->blk.width = width;
+ mi->blk.height = height;
+ mi->blk.key = key;
+ if (ssptr_id) {
+ mi->blk.id = mi->blk.phys;
+ } else {
+ mutex_lock(&mtx);
+ mi->blk.id = _m_get_id();
+ mutex_unlock(&mtx);
+ }
+
+ mi->usr = usr_addr;
+
+ /* allocate pages */
+ mi->num_pg = tcm_sizeof(mi->area);
+
+ mem = kmalloc(mi->num_pg * sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ goto done;
+ memset(mem, 0x0, sizeof(*mem) * mi->num_pg);
+
+ mi->pg_ptr = kmalloc(mi->num_pg * sizeof(*mi->pg_ptr), GFP_KERNEL);
+ if (!mi->pg_ptr)
+ goto done;
+ memset(mi->pg_ptr, 0x0, sizeof(*mi->pg_ptr) * mi->num_pg);
+
+ /*
+ * Important Note: usr_addr is mapped from user
+ * application process to current process - it must lie
+ * completely within the current virtual memory address
+ * space in order to be of use to us here.
+ */
+ down_read(&mm->mmap_sem);
+ vma = find_vma(mm, mi->usr);
+ res = -EFAULT;
+
+ /*
+ * It is observed that under some circumstances, the user
+ * buffer is spread across several vmas, so loop through
+ * and check if the entire user buffer is covered.
+ */
+ while ((vma) && (mi->usr + width > vma->vm_end)) {
+ /* jump to the next VMA region */
+ vma = find_vma(mm, vma->vm_end + 1);
+ }
+ if (!vma) {
+ printk(KERN_ERR "Failed to get the vma region for "
+ "user buffer.\n");
+ goto fault;
+ }
+
+ if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+ write = 1;
+
+ tmp = mi->usr;
+ for (i = 0; i < mi->num_pg; i++) {
+ if (get_user_pages(curr_task, mm, tmp, 1, write, 1, &page,
+ NULL)) {
+ if (page_count(page) < 1) {
+				printk(KERN_ERR "Bad page count from "
+					"get_user_pages()\n");
+ }
+ mi->pg_ptr[i] = (u32)page;
+ mem[i] = page_to_phys(page);
+ tmp += PAGE_SIZE;
+ } else {
+ printk(KERN_ERR "get_user_pages() failed\n");
+ goto fault;
+ }
+ }
+ up_read(&mm->mmap_sem);
+
+	/* Ensure the data reaches main memory before PAT refill */
+ wmb();
+
+ if (refill_pat(tmm[fmt], &mi->area, mem))
+ goto fault;
+
+ res = 0;
+ *info = mi;
+ goto done;
+fault:
+ up_read(&mm->mmap_sem);
+done:
+ if (res) {
+ mutex_lock(&mtx);
+ _m_free(mi);
+ mutex_unlock(&mtx);
+ }
+ kfree(mem);
+ return res;
+}
+
+/*
+ * Driver code
+ * ==========================================================================
+ */
+
+static s32 __init tiler_init(void)
+{
+ dev_t dev = 0;
+ s32 r = -1;
+ struct device *device = NULL;
+ struct tcm_pt div_pt;
+ struct tcm *sita = NULL;
+ struct tmm *tmm_pat = NULL;
+
+ tiler.alloc = alloc_block;
+ tiler.map = map_block;
+ tiler.lock = find_n_lock;
+ tiler.unlock_free = unlock_n_free;
+ tiler.lay_2d = lay_2d;
+ tiler.lay_nv12 = lay_nv12;
+ tiler.destroy_group = destroy_group;
+ tiler.lock_by_ssptr = find_block_by_ssptr;
+ tiler.describe = fill_block_info;
+ tiler.get_gi = get_gi;
+ tiler.release_gi = release_gi;
+ tiler.release = release_blocks;
+ tiler.add_reserved = add_reserved_blocks;
+ tiler.analize = __analize_area;
+ tiler_geom_init(&tiler);
+ tiler_reserve_init(&tiler);
+ tiler_iface_init(&tiler);
+
+ /* check module parameters for correctness */
+ if (default_align > PAGE_SIZE ||
+ default_align & (default_align - 1) ||
+ granularity < 1 || granularity > PAGE_SIZE ||
+ granularity & (granularity - 1))
+ return -EINVAL;
+
+ /*
+	 * Array of physical page addresses for PAT programming, which must
+	 * start at a 16-byte-aligned physical address.
+ */
+ dmac_va = dma_alloc_coherent(NULL, tiler.width * tiler.height *
+ sizeof(*dmac_va), &dmac_pa, GFP_ATOMIC);
+ if (!dmac_va)
+ return -ENOMEM;
+
+ /* Allocate tiler container manager (we share 1 on OMAP4) */
+ div_pt.x = tiler.width; /* hardcoded default */
+ div_pt.y = (3 * tiler.height) / 4;
+ sita = sita_init(tiler.width, tiler.height, (void *)&div_pt);
+
+ tcm[TILFMT_8BIT] = sita;
+ tcm[TILFMT_16BIT] = sita;
+ tcm[TILFMT_32BIT] = sita;
+ tcm[TILFMT_PAGE] = sita;
+
+	/* Allocate tiler memory manager (must have 1 unique TMM per TCM) */
+ tmm_pat = tmm_pat_init(0);
+ tmm[TILFMT_8BIT] = tmm_pat;
+ tmm[TILFMT_16BIT] = tmm_pat;
+ tmm[TILFMT_32BIT] = tmm_pat;
+ tmm[TILFMT_PAGE] = tmm_pat;
+
+ tiler.nv12_packed = tcm[TILFMT_8BIT] == tcm[TILFMT_16BIT];
+
+ tiler_device = kmalloc(sizeof(*tiler_device), GFP_KERNEL);
+ if (!tiler_device || !sita || !tmm_pat) {
+ r = -ENOMEM;
+ goto error;
+ }
+
+ memset(tiler_device, 0x0, sizeof(*tiler_device));
+ if (tiler_major) {
+ dev = MKDEV(tiler_major, tiler_minor);
+ r = register_chrdev_region(dev, 1, "tiler");
+ } else {
+ r = alloc_chrdev_region(&dev, tiler_minor, 1, "tiler");
+ tiler_major = MAJOR(dev);
+ }
+
+ cdev_init(&tiler_device->cdev, tiler.fops);
+ tiler_device->cdev.owner = THIS_MODULE;
+ tiler_device->cdev.ops = tiler.fops;
+
+ r = cdev_add(&tiler_device->cdev, dev, 1);
+ if (r)
+		printk(KERN_ERR "cdev_add() failed\n");
+
+ tilerdev_class = class_create(THIS_MODULE, "tiler");
+
+ if (IS_ERR(tilerdev_class)) {
+		printk(KERN_ERR "class_create() failed\n");
+ goto error;
+ }
+
+ device = device_create(tilerdev_class, NULL, dev, NULL, "tiler");
+	if (IS_ERR(device))
+		printk(KERN_ERR "device_create() failed\n");
+
+ r = platform_driver_register(&tiler_driver_ldm);
+
+ mutex_init(&mtx);
+ INIT_LIST_HEAD(&blocks);
+ INIT_LIST_HEAD(&orphan_areas);
+ INIT_LIST_HEAD(&orphan_onedim);
+
+error:
+ /* TODO: error handling for device registration */
+ if (r) {
+ kfree(tiler_device);
+ tcm_deinit(sita);
+ tmm_deinit(tmm_pat);
+ dma_free_coherent(NULL, tiler.width * tiler.height *
+ sizeof(*dmac_va), dmac_va, dmac_pa);
+ }
+
+ return r;
+}
+
+static void __exit tiler_exit(void)
+{
+ int i, j;
+
+ mutex_lock(&mtx);
+
+ /* free all process data */
+ tiler.cleanup();
+
+ /* all lists should have cleared */
+ BUG_ON(!list_empty(&blocks));
+ BUG_ON(!list_empty(&orphan_onedim));
+ BUG_ON(!list_empty(&orphan_areas));
+
+ mutex_unlock(&mtx);
+
+ dma_free_coherent(NULL, tiler.width * tiler.height * sizeof(*dmac_va),
+ dmac_va, dmac_pa);
+
+ /* close containers only once */
+ for (i = TILFMT_MIN; i <= TILFMT_MAX; i++) {
+ /* remove identical containers (tmm is unique per tcm) */
+ for (j = i + 1; j <= TILFMT_MAX; j++)
+ if (tcm[i] == tcm[j]) {
+ tcm[j] = NULL;
+ tmm[j] = NULL;
+ }
+
+ tcm_deinit(tcm[i]);
+ tmm_deinit(tmm[i]);
+ }
+
+ mutex_destroy(&mtx);
+ platform_driver_unregister(&tiler_driver_ldm);
+ cdev_del(&tiler_device->cdev);
+ kfree(tiler_device);
+ device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
+ class_destroy(tilerdev_class);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Lajos Molnar <molnar@ti.com>");
+MODULE_AUTHOR("David Sin <davidsin@ti.com>");
+module_init(tiler_init);
+module_exit(tiler_exit);
diff --git a/drivers/media/video/tiler/tiler-reserve.c b/drivers/media/video/tiler/tiler-reserve.c
new file mode 100644
index 000000000000..6715d3ddd6af
--- /dev/null
+++ b/drivers/media/video/tiler/tiler-reserve.c
@@ -0,0 +1,550 @@
+/*
+ * tiler-reserve.c
+ *
+ * TILER driver area reservation functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include "_tiler.h"
+
+static struct tiler_ops *ops; /* shared methods and variables */
+static int band_8; /* size of 8-bit band in slots */
+static int band_16; /* size of 16-bit band in slots */
+
+/**
+ * Calculate the maximum number of buffers that can be packed next to each
+ * other,
+ * and the area they occupy. This method is used for both 2D and NV12 packing.
+ *
+ * @author a0194118 (7/16/2010)
+ *
+ * @param o	desired offset
+ * @param a	desired alignment
+ * @param b	band width (each block must occupy the same number of bands)
+ * @param w	width of one block (>0)
+ * @param n pointer to the desired number of blocks to pack. It will be
+ * updated with the maximum number of blocks that can be packed.
+ * @param _area pointer to store total area needed
+ *
+ * @return packing efficiency (0-1024)
+ */
+static u32 tiler_best2pack(u16 o, u16 a, u16 b, u16 w, u16 *n, u16 *_area)
+{
+ u16 m = 0, max_n = *n; /* m is mostly n - 1 */
+ u16 e = ALIGN(w, a); /* effective width of one block */
+ u32 eff, best_eff = 0; /* best values */
+ u16 stride = ALIGN(o + w, b); /* block stride */
+ u16 area = stride; /* area needed (for m + 1 blocks) */
+
+ /* NOTE: block #m+1 occupies the range (o + m * e, o + m * e + w) */
+
+ /* see how many blocks we can pack */
+ while (m < max_n &&
+ /* blocks must fit in tiler container */
+ o + m * e + w <= ops->width &&
+ /* block stride must be correct */
+ stride == ALIGN(area - o - m * e, b)) {
+
+ m++;
+ eff = m * w * 1024 / area;
+ if (eff > best_eff) {
+ /* store packing for best efficiency & smallest area */
+ best_eff = eff;
+ *n = m;
+ if (_area)
+ *_area = area;
+ }
+ /* update area */
+ area = ALIGN(o + m * e + w, b);
+ }
+
+ return best_eff;
+}
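+
+/*
+ * Worked example (illustrative; assumes the container is wide enough):
+ * o = 0, a = 16, b = 64, w = 12, *n = 8. Each block occupies
+ * e = ALIGN(12, 16) = 16 slots. Four blocks fit into a single 64-slot band
+ * (eff = 4 * 12 * 1024 / 64 = 768); eight blocks need 128 slots for the
+ * same 768 efficiency, so the smaller packing is kept and the call returns
+ * 768 with *n = 4 and *_area = 64.
+ */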
+
+/*
+ * NV12 Reservation Functions
+ *
+ * TILER is designed so that a (w * h) * 8bit area is twice as wide as a
+ * (w/2 * h/2) * 16bit area. Since having pairs of such 8-bit and 16-bit
+ * blocks is a common usecase for TILER, we optimize packing these into a
+ * TILER area.
+ *
+ * During reservation we want to find the most effective packing (most used
+ * slots in the smallest overall area).
+ *
+ * We have two algorithms for packing nv12 blocks: either pack 8- and 16-bit
+ * blocks into separate container areas, or pack them together into same area.
+ */
+
+/**
+ * Calculate effectiveness of packing. We weight total area much higher than
+ * packing efficiency to get the smallest overall container use.
+ *
+ * @param w width of one (8-bit) block
+ * @param n buffers in a packing
+ * @param area width of packing area
+ * @param n_total total number of buffers to be packed
+ * @return effectiveness, the higher the better
+ */
+static inline u32 nv12_eff(u16 w, u16 n, u16 area, u16 n_total)
+{
+ return 0x10000000 -
+ /* weigh against total area needed (for all buffers) */
+ /* 64-slots = -2048 */
+ DIV_ROUND_UP(n_total, n) * area * 32 +
+ /* packing efficiency (0 - 1024) */
+ 1024 * n * ((w * 3 + 1) >> 1) / area;
+}
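+
+/*
+ * Example (illustrative): w = 4, n = 9, area = 64, n_total = 9 gives an
+ * area penalty of DIV_ROUND_UP(9, 9) * 64 * 32 = 2048 and a packing term of
+ * 1024 * 9 * ((4 * 3 + 1) >> 1) / 64 = 864, i.e. 0x10000000 - 2048 + 864.
+ * A packing needing twice the total area loses another 2048, more than the
+ * at most ~1024 it could ever regain from the efficiency term.
+ */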
+
+/**
+ * Fallback nv12 packing algorithm: pack 8 and 16 bit block into separate
+ * areas.
+ *
+ * @author a0194118 (7/16/2010)
+ *
+ * @param o desired offset (<a)
+ * @param a desired alignment (>=2)
+ * @param w block width (>0)
+ * @param n number of blocks desired
+ * @param area pointer to store total area needed
+ *
+ * @return number of blocks that can be allocated
+ */
+static u16 nv12_separate(u16 o, u16 a, u16 w, u16 n, u16 *area)
+{
+ tiler_best2pack(o, a, band_8, w, &n, area);
+ tiler_best2pack(o >> 1, a >> 1, band_16, (w + 1) >> 1, &n, area);
+ *area *= 3;
+ return n;
+}
+
+/*
+ * Specialized NV12 Reservation Algorithms
+ *
+ * We use 4 packing methods that pack nv12 blocks into the same area. Together
+ * these 4 methods give the optimal result for most possible input parameters.
+ *
+ * For now we pack into a 64-slot area, so that we don't have to worry about
+ * stride issues (all blocks get 4K stride). For some of the algorithms this
+ * could be true even if the area was 128.
+ */
+
+/**
+ * Packing types are marked using a letter sequence, capital letters denoting
+ * 8-bit blocks, lower case letters denoting corresponding 16-bit blocks.
+ *
+ * All methods have the following parameters. They also define the maximum
+ * number of coordinates that could potentially be packed.
+ *
+ * @param o, a, w, n offset, alignment, width, # of blocks as usual
+ * @param area pointer to store area needed for packing
+ * @param p pointer to store packing coordinates
+ * @return number of blocks that can be packed
+ */
+
+/* Method A: progressive packing: AAAAaaaaBBbbCc into 64-slot area */
+#define MAX_A 21
+static int nv12_A(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 x = o, u, l, m = 0;
+ *area = band_8;
+
+ while (x + w < *area && m < n) {
+ /* current 8bit upper bound (a) is next 8bit lower bound (B) */
+ l = u = (*area + x) >> 1;
+
+ /* pack until upper bound */
+ while (x + w <= u && m < n) {
+ /* save packing */
+ BUG_ON(m + 1 >= MAX_A);
+ *p++ = x;
+ *p++ = l;
+ l = (*area + x + w + 1) >> 1;
+ x = ALIGN(x + w - o, a) + o;
+ m++;
+ }
+ x = ALIGN(l - o, a) + o; /* set new lower bound */
+ }
+ return m;
+}
+
+/* Method -A: regressive packing: cCbbBBaaaaAAAA into 64-slot area */
+static int nv12_revA(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 m;
+
+ /* this is a mirrored packing of method A */
+ n = nv12_A((a - (o + w) % a) % a, a, w, n, area, p);
+
+ /* reverse packing */
+ for (m = 0; m < n; m++) {
+ *p = *area - *p - w;
+ p++;
+ *p = *area - *p - ((w + 1) >> 1);
+ p++;
+ }
+ return n;
+}
+
+/* Method B: simple layout: aAbcBdeCfgDhEFGH */
+#define MAX_B 8
+static int nv12_B(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 e = (o + w) % a; /* end offset */
+ u16 o1 = (o >> 1) % a; /* half offset */
+ u16 e1 = ((o + w + 1) >> 1) % a; /* half end offset */
+ u16 o2 = o1 + (a >> 2); /* 2nd half offset */
+ u16 e2 = e1 + (a >> 2); /* 2nd half end offset */
+ u16 m = 0;
+ *area = band_8;
+
+ /* ensure 16-bit blocks don't overlap 8-bit blocks */
+
+ /* width cannot wrap around alignment, half block must be before block,
+ 2nd half can be before or after */
+ if (w < a && o < e && e1 <= o && (e2 <= o || o2 >= e))
+ while (o + w <= *area && m < n) {
+ BUG_ON(m + 1 >= MAX_B);
+ *p++ = o;
+ *p++ = o >> 1;
+ m++;
+ o += a;
+ }
+ return m;
+}
+
+/* Method C: butterfly layout: AAbbaaBB */
+#define MAX_C 20
+static int nv12_C(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ int m = 0;
+ u16 o2, e = ALIGN(w, a), i = 0, j = 0;
+ *area = band_8;
+ o2 = *area - (a - (o + w) % a) % a; /* end of last possible block */
+
+ m = (min(o2 - 2 * o, 2 * o2 - o - *area) / 3 - w) / e + 1;
+ for (i = j = 0; i < m && j < n; i++, j++) {
+ BUG_ON(j + 1 >= MAX_C);
+ *p++ = o + i * e;
+ *p++ = (o + i * e + *area) >> 1;
+ if (++j < n) {
+ *p++ = o2 - i * e - w;
+ *p++ = (o2 - i * e - w) >> 1;
+ }
+ }
+ return j;
+}
+
+/* Method D: for large allocation: aA or Aa */
+#define MAX_D 1
+static int nv12_D(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *p)
+{
+ u16 o1, w1 = (w + 1) >> 1, d;
+ *area = ALIGN(o + w, band_8);
+
+ for (d = 0; n > 0 && d + o + w <= *area; d += a) {
+ /* try to fit 16-bit before 8-bit */
+ o1 = ((o + d) % band_8) >> 1;
+ if (o1 + w1 <= o + d) {
+ *p++ = o + d;
+ *p++ = o1;
+ return 1;
+ }
+
+ /* try to fit 16-bit after 8-bit */
+ o1 += ALIGN(d + o + w - o1, band_16);
+ if (o1 + w1 <= *area) {
+ *p++ = o;
+ *p++ = o1;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/**
+ * Umbrella nv12 packing method. This selects the best packings from the above
+ * methods. It also contains hardcoded packings for parameter combinations
+ * that have more efficient packings. This method is guaranteed to provide
+ * the optimal packing if 2 <= a <= 64, w <= 64, and n is large.
+ */
+#define MAX_ANY 21 /* must be MAX(method-MAX-s, hardcoded n-s) */
+static u16 nv12_together(u16 o, u16 a, u16 w, u16 n, u16 *area, u8 *packing)
+{
+ u16 n_best, a_best, n2, a_, o_, w_;
+
+ /* algo results (packings) */
+ u8 pack_A[MAX_A * 2], pack_rA[MAX_A * 2];
+ u8 pack_B[MAX_B * 2], pack_C[MAX_C * 2];
+ u8 pack_D[MAX_D * 2];
+
+ /*
+ * Hardcoded packings. They are sorted by increasing area, and then by
+ * decreasing n. We may not get the best efficiency if fewer than n
+ * blocks are needed, as packings are not necessarily sorted in
+ * increasing order. However, for those n-s one of the other 4 methods
+ * may return the optimal packing.
+ */
+ u8 packings[] = {
+ /* n=9, o=2, w=4, a=4, area=64 */
+ 9, 2, 4, 4, 64,
+ /* 8-bit, 16-bit block coordinate pairs */
+ 2, 33, 6, 35, 10, 37, 14, 39, 18, 41,
+ 46, 23, 50, 25, 54, 27, 58, 29,
+ /* o=0, w=12, a=4, n=3 */
+ 3, 0, 12, 4, 64,
+ 0, 32, 12, 38, 48, 24,
+ /* end */
+ 0
+ }, *p = packings, *p_best = NULL, *p_end;
+ p_end = packings + sizeof(packings) - 1;
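+
+	/*
+	 * Record layout (for reference): { n, o, w, a, area, then n pairs of
+	 * (8-bit x, 16-bit x) }, terminated by a single 0. The first record
+	 * above packs n = 9 buffers of w = 4 at o = 2, a = 4 into a 64-slot
+	 * area; its first pair places an 8-bit block at slot 2 and the
+	 * corresponding 16-bit block at slot 33.
+	 */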
+
+ /* see which method gives the best packing */
+
+ /* start with smallest area algorithms A, B & C, stop if we can
+ pack all buffers */
+ n_best = nv12_A(o, a, w, n, area, pack_A);
+ p_best = pack_A;
+ if (n_best < n) {
+ n2 = nv12_revA(o, a, w, n, &a_best, pack_rA);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_rA;
+ *area = a_best;
+ }
+ }
+ if (n_best < n) {
+ n2 = nv12_B(o, a, w, n, &a_best, pack_B);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_B;
+ *area = a_best;
+ }
+ }
+ if (n_best < n) {
+ n2 = nv12_C(o, a, w, n, &a_best, pack_C);
+ if (n2 > n_best) {
+ n_best = n2;
+ p_best = pack_C;
+ *area = a_best;
+ }
+ }
+
+ /* traverse any special packings */
+ while (*p) {
+ n2 = *p++;
+ o_ = *p++;
+ w_ = *p++;
+ a_ = *p++;
+ /* stop if we already have a better packing */
+ if (n2 < n_best)
+ break;
+
+ /* check if this packing is satisfactory */
+ if (a_ >= a && o + w + ALIGN(o_ - o, a) <= o_ + w_) {
+ *area = *p++;
+ n_best = min(n2, n);
+ p_best = p;
+ break;
+ }
+
+ /* skip to next packing */
+ p += 1 + n2 * 2;
+ }
+
+ /*
+ * If so far unsuccessful, check whether 8 and 16 bit blocks can be
+ * co-packed. This will actually be done in the end by the normal
+ * allocation, but we need to reserve a big-enough area.
+ */
+ if (!n_best) {
+ n_best = nv12_D(o, a, w, n, area, pack_D);
+ p_best = NULL;
+ }
+
+ /* store best packing */
+ if (p_best && n_best) {
+ BUG_ON(n_best > MAX_ANY);
+ memcpy(packing, p_best, n_best * 2 * sizeof(*pack_A));
+ }
+
+ return n_best;
+}
+
+/* reserve nv12 blocks */
+static void reserve_nv12(u32 n, u32 width, u32 height, u32 align, u32 offs,
+ u32 gid, struct process_info *pi)
+{
+ u16 w, h, band, a = align, o = offs;
+ struct gid_info *gi;
+ int res = 0, res2, i;
+ u16 n_t, n_s, area_t, area_s;
+ u8 packing[2 * MAX_ANY];
+ struct list_head reserved = LIST_HEAD_INIT(reserved);
+
+ /* adjust alignment to the largest slot width (128 bytes) */
+ a = max_t(u16, PAGE_SIZE / min(band_8, band_16), a);
+
+	/* check that input parameters are correct and supported */
+ if (!width || !height || !n ||
+ offs >= align || offs & 1 ||
+ align >= PAGE_SIZE ||
+ n > ops->width * ops->height / 2)
+ return;
+
+ /* calculate dimensions, band, offs and alignment in slots */
+ if (ops->analize(TILFMT_8BIT, width, height, &w, &h, &band, &a, &o,
+ NULL))
+ return;
+
+ /* get group context */
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ /* reserve in groups until failed or all is reserved */
+ for (i = 0; i < n && res >= 0; i += res) {
+ /* check packing separately vs together */
+ n_s = nv12_separate(o, a, w, n - i, &area_s);
+ if (ops->nv12_packed)
+ n_t = nv12_together(o, a, w, n - i, &area_t, packing);
+ else
+ n_t = 0;
+
+ /* pack based on better efficiency */
+ res = -1;
+ if (!ops->nv12_packed ||
+ nv12_eff(w, n_s, area_s, n - i) >
+ nv12_eff(w, n_t, area_t, n - i)) {
+
+ /*
+ * Reserve blocks separately into a temporary list, so
+ * that we can free them if unsuccessful. We need to be
+ * able to reserve both 8- and 16-bit blocks as the
+ * offsets of them must match.
+ */
+ res = ops->lay_2d(TILFMT_8BIT, n_s, w, h, band_8, a, o,
+ gi, &reserved);
+ res2 = ops->lay_2d(TILFMT_16BIT, n_s, (w + 1) >> 1, h,
+ band_16, a >> 1, o >> 1, gi, &reserved);
+
+ if (res2 < 0 || res < 0 || res != res2) {
+ /* clean up */
+ ops->release(&reserved);
+ res = -1;
+ } else {
+ /* add list to reserved */
+ ops->add_reserved(&reserved, gi);
+ }
+ }
+
+ /* if separate packing failed, still try to pack together */
+ if (res < 0 && ops->nv12_packed && n_t) {
+ /* pack together */
+ res = ops->lay_nv12(n_t, area_t, w, h, gi, packing);
+ }
+ }
+
+ ops->release_gi(gi);
+}
+
+/**
+ * We also optimize packing regular 2D areas as the auto-packing may result in
+ * sub-optimal efficiency. This is most pronounced if the area is wider than
+ * half a PAGE_SIZE (e.g. 2048 in 8-bit mode, or 1024 in 16-bit mode).
+ */
+
+/* reserve 2d blocks */
+static void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
+ u32 align, u32 offs, u32 gid,
+ struct process_info *pi)
+{
+ u32 bpt, res = 0, i;
+ u16 o = offs, a = align, band, w, h, n_try;
+ struct gid_info *gi;
+ const struct tiler_geom *g;
+
+	/* check that input parameters are correct and supported */
+ if (!width || !height || !n ||
+ align > PAGE_SIZE || offs >= align ||
+ fmt < TILFMT_8BIT || fmt > TILFMT_32BIT)
+ return;
+
+ /* tiler slot in bytes */
+ g = ops->geom(fmt);
+ bpt = g->slot_w * g->bpp;
+
+ /*
+ * For blocks narrower than half PAGE_SIZE the default allocation is
+ * sufficient. Also check for basic area info.
+ */
+ if (width * g->bpp * 2 <= PAGE_SIZE ||
+ ops->analize(fmt, width, height, &w, &h, &band, &a, &o, NULL))
+ return;
+
+ /* get group id */
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ /* reserve in groups until failed or all is reserved */
+ for (i = 0; i < n && res >= 0; i += res + 1) {
+ /* blocks to allocate in one area */
+ n_try = min(n - i, ops->width);
+		tiler_best2pack(o, a, band, w, &n_try, NULL);
+
+ res = -1;
+ while (n_try > 1) {
+ /* adjust res so we fail on 0 return value */
+ res = ops->lay_2d(fmt, n_try, w, h, band, a, o,
+ gi, &gi->reserved) - 1;
+ if (res >= 0)
+ break;
+
+ /* reduce n if failed to allocate area */
+ n_try--;
+ }
+ }
+ /* keep reserved blocks even if failed to reserve all */
+
+ ops->release_gi(gi);
+}
+
+/* unreserve blocks for a group id */
+static void unreserve_blocks(u32 gid, struct process_info *pi)
+{
+ struct gid_info *gi;
+
+ gi = ops->get_gi(pi, gid);
+ if (!gi)
+ return;
+
+ ops->release(&gi->reserved);
+
+ ops->release_gi(gi);
+}
+
+/* initialize shared method pointers and global static variables */
+void tiler_reserve_init(struct tiler_ops *tiler)
+{
+ ops = tiler;
+
+ ops->reserve_nv12 = reserve_nv12;
+ ops->reserve = reserve_blocks;
+ ops->unreserve = unreserve_blocks;
+
+ band_8 = PAGE_SIZE / ops->geom(TILFMT_8BIT)->slot_w
+ / ops->geom(TILFMT_8BIT)->bpp;
+ band_16 = PAGE_SIZE / ops->geom(TILFMT_16BIT)->slot_w
+ / ops->geom(TILFMT_16BIT)->bpp;
+}
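+
+/*
+ * Example (illustrative; assumes OMAP4 slot geometry and 4 KiB pages):
+ * 8-bit slots are 64 pixels wide at 1 byte/pixel and 16-bit slots are
+ * 32 pixels wide at 2 bytes/pixel, so
+ *
+ *	band_8  = 4096 / 64 / 1 = 64 slots
+ *	band_16 = 4096 / 32 / 2 = 64 slots
+ *
+ * which is why the NV12 algorithms above pack into 64-slot areas.
+ */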
diff --git a/drivers/media/video/tiler/tiler.c b/drivers/media/video/tiler/tiler.c
deleted file mode 100644
index 1c117eb04b37..000000000000
--- a/drivers/media/video/tiler/tiler.c
+++ /dev/null
@@ -1,1603 +0,0 @@
-/*
- * tiler.c
- *
- * TILER driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/cdev.h> /* struct cdev */
-#include <linux/kdev_t.h> /* MKDEV() */
-#include <linux/fs.h> /* register_chrdev_region() */
-#include <linux/device.h> /* struct class */
-#include <linux/platform_device.h> /* platform_device() */
-#include <linux/err.h> /* IS_ERR() */
-#include <linux/uaccess.h> /* copy_to_user */
-#include <linux/mm.h>
-#include <linux/mm_types.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/mutex.h>
-#include <linux/dma-mapping.h>
-#include <linux/pagemap.h> /* page_cache_release() */
-#include <linux/slab.h>
-
-#include <mach/tiler.h>
-#include <mach/dmm.h>
-#include "../dmm/tmm.h"
-#include "tiler_def.h"
-#include "tcm/tcm_sita.h" /* Algo Specific header */
-
-#include <linux/syscalls.h>
-
-struct tiler_dev {
- struct cdev cdev;
-
- struct blocking_notifier_head notifier;
-};
-
-static struct platform_driver tiler_driver_ldm = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "tiler",
- },
- .probe = NULL,
- .shutdown = NULL,
- .remove = NULL,
-};
-
-/* per process (thread group) info */
-struct process_info {
- struct list_head list; /* other processes */
- struct list_head groups; /* my groups */
- struct list_head bufs; /* my registered buffers */
- pid_t pid; /* really: thread group ID */
- u32 refs; /* open tiler devices, 0 for processes
- tracked via kernel APIs */
- bool kernel; /* tracking kernel objects */
-};
-
-/* per group info (within a process) */
-struct gid_info {
- struct list_head by_pid; /* other groups */
- struct list_head areas; /* all areas in this pid/gid */
- struct list_head reserved; /* areas pre-reserved */
- struct list_head onedim; /* all 1D areas in this pid/gid */
- u32 gid; /* group ID */
- struct process_info *pi; /* parent */
-};
-
-static struct list_head blocks;
-static struct list_head procs;
-static struct list_head orphan_areas;
-static struct list_head orphan_onedim;
-
-struct area_info {
- struct list_head by_gid; /* areas in this pid/gid */
- struct list_head blocks; /* blocks in this area */
- u32 nblocks; /* # of blocks in this area */
-
- struct tcm_area area; /* area details */
- struct gid_info *gi; /* link to parent, if still alive */
-};
-
-struct mem_info {
- struct list_head global; /* reserved / global blocks */
- u32 sys_addr; /* system space (L3) tiler addr */
- u32 num_pg; /* number of pages in page-list */
- u32 usr; /* user space address */
- u32 *pg_ptr; /* list of mapped struct page pointers */
- struct tcm_area area;
- u32 *mem; /* list of alloced phys addresses */
- u32 refs; /* number of times referenced */
- bool alloced; /* still alloced */
-
- struct list_head by_area; /* blocks in the same area / 1D */
- void *parent; /* area info for 2D, else group info */
-};
-
-struct __buf_info {
- struct list_head by_pid; /* list of buffers per pid */
- struct tiler_buf_info buf_info;
- struct mem_info *mi[TILER_MAX_NUM_BLOCKS]; /* blocks */
-};
-
-#define TILER_FORMATS 4
-
-static s32 tiler_major;
-static s32 tiler_minor;
-static struct tiler_dev *tiler_device;
-static struct class *tilerdev_class;
-static u32 id;
-static struct mutex mtx;
-static struct tcm *tcm[TILER_FORMATS];
-static struct tmm *tmm[TILER_FORMATS];
-static u32 *dmac_va;
-static dma_addr_t dmac_pa;
-
-#define TCM(fmt) tcm[(fmt) - TILFMT_8BIT]
-#define TCM_SS(ssptr) TCM(TILER_GET_ACC_MODE(ssptr))
-#define TCM_SET(fmt, i) tcm[(fmt) - TILFMT_8BIT] = i
-#define TMM(fmt) tmm[(fmt) - TILFMT_8BIT]
-#define TMM_SS(ssptr) TMM(TILER_GET_ACC_MODE(ssptr))
-#define TMM_SET(fmt, i) tmm[(fmt) - TILFMT_8BIT] = i
-
-/* get process info, and increment refs for device tracking */
-static struct process_info *__get_pi(pid_t pid, bool kernel)
-{
- struct process_info *pi;
-
- /* find process context */
- mutex_lock(&mtx);
- list_for_each_entry(pi, &procs, list) {
- if (pi->pid == pid && pi->kernel == kernel)
- goto done;
- }
-
- /* create process context */
- pi = kmalloc(sizeof(*pi), GFP_KERNEL);
- if (!pi)
- goto done;
-
- memset(pi, 0, sizeof(*pi));
- pi->pid = pid;
- pi->kernel = kernel;
- INIT_LIST_HEAD(&pi->groups);
- INIT_LIST_HEAD(&pi->bufs);
- list_add(&pi->list, &procs);
-done:
- if (pi && !kernel)
- pi->refs++;
- mutex_unlock(&mtx);
- return pi;
-}
-
-/* allocate an reserved area of size, alignment and link it to gi */
-static struct area_info *area_new(u16 width, u16 height, u16 align,
- struct tcm *tcm, struct gid_info *gi)
-{
- struct area_info *ai = kmalloc(sizeof(*ai), GFP_KERNEL);
- if (!ai)
- return NULL;
-
- /* set up empty area info */
- memset(ai, 0x0, sizeof(*ai));
- INIT_LIST_HEAD(&ai->blocks);
-
- /* reserve an allocation area */
- if (tcm_reserve_2d(tcm, width, height, align, &ai->area)) {
- kfree(ai);
- return NULL;
- }
-
- ai->gi = gi;
- mutex_lock(&mtx);
- list_add_tail(&ai->by_gid, &gi->areas);
- mutex_unlock(&mtx);
- return ai;
-}
-
-/* (must have mutex) free an area and return NULL */
-static inline void _m_area_free(struct area_info *ai)
-{
- if (ai) {
- list_del(&ai->by_gid);
- kfree(ai);
- }
-}
-
-static s32 __analize_area(enum tiler_fmt fmt, u32 width, u32 height,
- u16 *x_area, u16 *y_area, u16 *band,
- u16 *align, u16 *offs)
-{
- /* input: width, height is in pixels, align, offs in bytes */
- /* output: x_area, y_area, band, align, offs in slots */
-
- /* slot width, height, and row size */
- u32 slot_w, slot_h, slot_row, bpp;
-
- /* align must be 2 power */
- if (*align & (*align - 1))
- return -1;
-
- switch (fmt) {
- case TILFMT_8BIT:
- slot_w = DMM_PAGE_DIMM_X_MODE_8;
- slot_h = DMM_PAGE_DIMM_Y_MODE_8;
- break;
- case TILFMT_16BIT:
- slot_w = DMM_PAGE_DIMM_X_MODE_16;
- slot_h = DMM_PAGE_DIMM_Y_MODE_16;
- break;
- case TILFMT_32BIT:
- slot_w = DMM_PAGE_DIMM_X_MODE_32;
- slot_h = DMM_PAGE_DIMM_Y_MODE_32;
- break;
- case TILFMT_PAGE:
- /* adjust size to accomodate offset, only do page alignment */
- *align = PAGE_SIZE;
- width += *offs & (PAGE_SIZE - 1);
-
- /* for 1D area keep the height (1), width is in tiler slots */
- *x_area = DIV_ROUND_UP(width, TILER_PAGE);
- *y_area = *band = 1;
-
- if (*x_area * *y_area > TILER_WIDTH * TILER_HEIGHT)
- return -1;
- return 0;
- default:
- return -EINVAL;
- }
-
- /* get the # of bytes per row in 1 slot */
- bpp = tilfmt_bpp(fmt);
- slot_row = slot_w * bpp;
-
- /* how many slots are can be accessed via one physical page */
- *band = PAGE_SIZE / slot_row;
-
- /* minimum alignment is 1 slot, default alignment is page size */
- *align = ALIGN(*align ? : PAGE_SIZE, slot_row);
-
- /* offset must be multiple of bpp */
- if (*offs & (bpp - 1))
- return -EINVAL;
-
- /* round down the offset to the nearest slot size, and increase width
- to allow space for having the correct offset */
- width += (*offs & (*align - 1)) / bpp;
- *offs &= ~(*align - 1);
-
- /* adjust to slots */
- *x_area = DIV_ROUND_UP(width, slot_w);
- *y_area = DIV_ROUND_UP(height, slot_h);
- *align /= slot_row;
- *offs /= slot_row;
-
- if (*x_area > TILER_WIDTH || *y_area > TILER_HEIGHT)
- return -1;
- return 0x0;
-}
-
-/**
- * Find a place where a 2D block would fit into a 2D area of the
- * same height.
- *
- * @author a0194118 (3/19/2010)
- *
- * @param w Width of the block.
- * @param align Alignment of the block.
- * @param offs Offset of the block (within alignment)
- * @param ai Pointer to area info
- * @param next Pointer to the variable where the next block
- * will be stored. The block should be inserted
- * before this block.
- *
- * @return the end coordinate (x1 + 1) where a block would fit,
- * or 0 if it does not fit.
- *
- * (must have mutex)
- */
-static u16 _m_blk_find_fit(u16 w, u16 align, u16 offs,
- struct area_info *ai, struct list_head **before)
-{
- int x = ai->area.p0.x + w + offs;
- struct mem_info *mi;
-
- /* area blocks are sorted by x */
- list_for_each_entry(mi, &ai->blocks, by_area) {
- /* check if buffer would fit before this area */
- if (x <= mi->area.p0.x) {
- *before = &mi->by_area;
- return x;
- }
- x = ALIGN(mi->area.p1.x + 1 - offs, align) + w + offs;
- }
- *before = &ai->blocks;
-
- /* check if buffer would fit after last area */
- return (x <= ai->area.p1.x + 1) ? x : 0;
-}
-
-/* (must have mutex) adds a block to an area with certain x coordinates */
-static inline
-struct mem_info *_m_add2area(struct mem_info *mi, struct area_info *ai,
- u16 x0, u16 x1, struct list_head *before)
-{
- mi->parent = ai;
- mi->area = ai->area;
- mi->area.p0.x = x0;
- mi->area.p1.x = x1;
- list_add_tail(&mi->by_area, before);
- ai->nblocks++;
- return mi;
-}
-
-static struct mem_info *get_2d_area(u16 w, u16 h, u16 align, u16 offs, u16 band,
- struct gid_info *gi, struct tcm *tcm) {
- struct area_info *ai = NULL;
- struct mem_info *mi = NULL;
- struct list_head *before = NULL;
- u16 x = 0; /* this holds the end of a potential area */
-
- /* allocate map info */
-
- /* see if there is available prereserved space */
- mutex_lock(&mtx);
- list_for_each_entry(mi, &gi->reserved, global) {
- if (mi->area.tcm == tcm &&
- tcm_aheight(mi->area) == h &&
- tcm_awidth(mi->area) == w &&
- (mi->area.p0.x & (align - 1)) == offs) {
- /* this area is already set up */
-
- /* remove from reserved list */
- list_del(&mi->global);
- goto done;
- }
- }
- mutex_unlock(&mtx);
-
- /* if not, reserve a block struct */
- mi = kmalloc(sizeof(*mi), GFP_KERNEL);
- if (!mi)
- return mi;
- memset(mi, 0, sizeof(*mi));
-
- /* see if allocation fits in one of the existing areas */
- /* this sets x, ai and before */
- mutex_lock(&mtx);
- list_for_each_entry(ai, &gi->areas, by_gid) {
- if (ai->area.tcm == tcm &&
- tcm_aheight(ai->area) == h) {
- x = _m_blk_find_fit(w, align, offs, ai, &before);
- if (x) {
- _m_add2area(mi, ai, x - w, x - 1, before);
- goto done;
- }
- }
- }
- mutex_unlock(&mtx);
-
- /* if no area fit, reserve a new one */
- ai = area_new(ALIGN(w + offs, max(band, align)), h,
- max(band, align), tcm, gi);
- if (ai) {
- mutex_lock(&mtx);
- _m_add2area(mi, ai, ai->area.p0.x + offs,
- ai->area.p0.x + offs + w - 1,
- &ai->blocks);
- } else {
- /* clean up */
- kfree(mi);
- return NULL;
- }
-
-done:
- mutex_unlock(&mtx);
- return mi;
-}
-
-/* (must have mutex) */
-static void _m_try_free_group(struct gid_info *gi)
-{
- if (gi && list_empty(&gi->areas) && list_empty(&gi->onedim)) {
- WARN_ON(!list_empty(&gi->reserved));
- list_del(&gi->by_pid);
-
- /* if group is tracking kernel objects, we may free even
- the process info */
- if (gi->pi->kernel && list_empty(&gi->pi->groups)) {
- list_del(&gi->pi->list);
- kfree(gi->pi);
- }
-
- kfree(gi);
- }
-}
-
-static void clear_pat(struct tmm *tmm, struct tcm_area *area)
-{
- struct pat_area p_area = {0};
- struct tcm_area slice, area_s;
-
- tcm_for_each_slice(slice, *area, area_s) {
- p_area.x0 = slice.p0.x;
- p_area.y0 = slice.p0.y;
- p_area.x1 = slice.p1.x;
- p_area.y1 = slice.p1.y;
-
- tmm_clear(tmm, p_area);
- }
-}
-
-/* (must have mutex) free block and any freed areas */
-static s32 _m_free(struct mem_info *mi)
-{
- struct area_info *ai = NULL;
- struct page *page = NULL;
- s32 res = 0;
- u32 i;
-
- /* release memory */
- if (mi->pg_ptr) {
- for (i = 0; i < mi->num_pg; i++) {
- page = (struct page *)mi->pg_ptr[i];
- if (page) {
- if (!PageReserved(page))
- SetPageDirty(page);
- page_cache_release(page);
- }
- }
- kfree(mi->pg_ptr);
- } else if (mi->mem) {
- tmm_free(TMM_SS(mi->sys_addr), mi->mem);
- }
-
- /* safe deletion as list may not have been assigned */
- if (mi->global.next)
- list_del(&mi->global);
- if (mi->by_area.next)
- list_del(&mi->by_area);
-
- /* remove block from area first if 2D */
- if (mi->area.is2d) {
- ai = mi->parent;
-
- /* check to see if area needs removing also */
- if (ai && !--ai->nblocks) {
- clear_pat(TMM_SS(mi->sys_addr), &ai->area);
- res = tcm_free(&ai->area);
- list_del(&ai->by_gid);
- /* try to remove parent if it became empty */
- _m_try_free_group(ai->gi);
- kfree(ai);
- ai = NULL;
- }
- } else {
- /* remove 1D area */
- clear_pat(TMM_SS(mi->sys_addr), &mi->area);
- res = tcm_free(&mi->area);
- /* try to remove parent if it became empty */
- _m_try_free_group(mi->parent);
- }
-
- kfree(mi);
- return res;
-}
-
-/* (must have mutex) returns true if block was freed */
-static bool _m_chk_ref(struct mem_info *mi)
-{
- /* check references */
- if (mi->refs)
- return 0;
-
- if (_m_free(mi))
- printk(KERN_ERR "error while removing tiler block\n");
-
- return 1;
-}
-
-/* (must have mutex) */
-static inline s32 _m_dec_ref(struct mem_info *mi)
-{
- if (mi->refs-- <= 1)
- return _m_chk_ref(mi);
-
- return 0;
-}
-
-/* (must have mutex) */
-static inline void _m_inc_ref(struct mem_info *mi)
-{
- mi->refs++;
-}
-
-/* (must have mutex) returns true if block was freed */
-static inline bool _m_try_free(struct mem_info *mi)
-{
- if (mi->alloced) {
- mi->refs--;
- mi->alloced = false;
- }
- return _m_chk_ref(mi);
-}
-
-static s32 register_buf(struct __buf_info *_b, struct process_info *pi)
-{
- struct mem_info *mi = NULL;
- struct tiler_buf_info *b = &_b->buf_info;
- u32 i, num = b->num_blocks, remain = num;
-
- /* check validity */
- if (num > TILER_MAX_NUM_BLOCKS)
- return -EINVAL;
-
- mutex_lock(&mtx);
-
- /* find each block */
- list_for_each_entry(mi, &blocks, global) {
- for (i = 0; i < num; i++) {
- if (!_b->mi[i] && mi->sys_addr == b->blocks[i].ssptr) {
- _b->mi[i] = mi;
-
- /* quit if found all*/
- if (!--remain)
- break;
-
- }
- }
- }
-
- /* if found all, register buffer */
- if (!remain) {
- b->offset = id;
- id += 0x1000;
-
- list_add(&_b->by_pid, &pi->bufs);
-
- /* using each block */
- for (i = 0; i < num; i++)
- _m_inc_ref(_b->mi[i]);
- }
-
- mutex_unlock(&mtx);
-
- return remain ? -EACCES : 0;
-}
-
-/* must have mutex */
-static void _m_unregister_buf(struct __buf_info *_b)
-{
- u32 i;
-
- /* unregister */
- list_del(&_b->by_pid);
-
- /* no longer using the blocks */
- for (i = 0; i < _b->buf_info.num_blocks; i++)
- _m_dec_ref(_b->mi[i]);
-
- kfree(_b);
-}
-
-static int tiler_notify_event(int event, void *data)
-{
- return blocking_notifier_call_chain(&tiler_device->notifier,
- event, data);
-}
-
-/**
- * Free all info kept by a process:
- *
- * all registered buffers, allocated blocks, and unreferenced
- * blocks. Any blocks/areas still referenced will move to the
- * orphaned lists to avoid issues if a new process is created
- * with the same pid.
- *
- * (must have mutex)
- */
-static void _m_free_process_info(struct process_info *pi)
-{
- struct area_info *ai, *ai_;
- struct mem_info *mi, *mi_;
- struct gid_info *gi, *gi_;
- struct __buf_info *_b = NULL, *_b_ = NULL;
- bool ai_autofreed, need2free;
-
- if (!list_empty(&pi->bufs))
- tiler_notify_event(TILER_DEVICE_CLOSE, NULL);
-
- /* unregister all buffers */
- list_for_each_entry_safe(_b, _b_, &pi->bufs, by_pid)
- _m_unregister_buf(_b);
-
- WARN_ON(!list_empty(&pi->bufs));
-
- /* free all allocated blocks, and remove unreferenced ones */
- list_for_each_entry_safe(gi, gi_, &pi->groups, by_pid) {
-
- /*
- * Group info structs when they become empty on an _m_try_free.
- * However, if the group info is already empty, we need to
- * remove it manually
- */
- need2free = list_empty(&gi->areas) && list_empty(&gi->onedim);
- list_for_each_entry_safe(ai, ai_, &gi->areas, by_gid) {
- ai_autofreed = true;
- list_for_each_entry_safe(mi, mi_, &ai->blocks, by_area)
- ai_autofreed &= _m_try_free(mi);
-
- /* save orphaned areas for later removal */
- if (!ai_autofreed) {
- need2free = true;
- ai->gi = NULL;
- list_move(&ai->by_gid, &orphan_areas);
- }
- }
-
- list_for_each_entry_safe(mi, mi_, &gi->onedim, by_area) {
- if (!_m_try_free(mi)) {
- need2free = true;
- /* save orphaned 1D blocks */
- mi->parent = NULL;
- list_move(&mi->by_area, &orphan_onedim);
- }
- }
-
- /* if group is still alive reserved list should have been
- emptied as there should be no reference on those blocks */
- if (need2free) {
- WARN_ON(!list_empty(&gi->onedim));
- WARN_ON(!list_empty(&gi->areas));
- _m_try_free_group(gi);
- }
- }
-
- WARN_ON(!list_empty(&pi->groups));
- list_del(&pi->list);
- kfree(pi);
-}
-
-static s32 get_area(u32 sys_addr, struct tcm_pt *pt)
-{
- enum tiler_fmt fmt;
-
- sys_addr &= TILER_ALIAS_VIEW_CLEAR;
- fmt = TILER_GET_ACC_MODE(sys_addr);
-
- switch (fmt) {
- case TILFMT_8BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_8(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_8(sys_addr);
- break;
- case TILFMT_16BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_16(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_16(sys_addr);
- break;
- case TILFMT_32BIT:
- pt->x = DMM_HOR_X_PAGE_COOR_GET_32(sys_addr);
- pt->y = DMM_HOR_Y_PAGE_COOR_GET_32(sys_addr);
- break;
- case TILFMT_PAGE:
- pt->x = (sys_addr & 0x7FFFFFF) >> 12;
- pt->y = pt->x / TILER_WIDTH;
- pt->x &= (TILER_WIDTH - 1);
- break;
- default:
- return -EFAULT;
- }
- return 0x0;
-}
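A worked decode of the TILFMT_PAGE branch above, for illustration only (TILER_WIDTH of 256 and 4 KiB pages assumed):

	/*
	 * Example: sys_addr = 0x0130a000 with the alias bits already cleared.
	 *   page index = (0x0130a000 & 0x7ffffff) >> 12 = 0x130a = 4874
	 *   pt->y      = 4874 / 256 = 19
	 *   pt->x      = 4874 & 255 = 10
	 * so the 1D block starts at slot (10, 19) of the page-mode view.
	 */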
-
-static u32 __get_alias_addr(enum tiler_fmt fmt, u16 x, u16 y)
-{
- u32 acc_mode = -1;
- u32 x_shft = -1, y_shft = -1;
-
- switch (fmt) {
- case TILFMT_8BIT:
- acc_mode = 0; x_shft = 6; y_shft = 20;
- break;
- case TILFMT_16BIT:
- acc_mode = 1; x_shft = 7; y_shft = 20;
- break;
- case TILFMT_32BIT:
- acc_mode = 2; x_shft = 7; y_shft = 20;
- break;
- case TILFMT_PAGE:
- acc_mode = 3; y_shft = 8;
- break;
-	default:
-		return 0;
- }
-
- if (fmt == TILFMT_PAGE)
- return (u32)TIL_ALIAS_ADDR((x | y << y_shft) << 12, acc_mode);
- else
- return (u32)TIL_ALIAS_ADDR(x << x_shft | y << y_shft, acc_mode);
-}
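A numeric sketch of the packing above (the TILER_ALIAS_BASE value of 0x60000000 from tiler_def.h is assumed): an 8-bit block whose first slot is (x = 3, y = 5) uses acc_mode = 0, x_shft = 6 and y_shft = 20, giving

	/* 0x60000000 | (3 << 6) | (5 << 20) | (0 << 27) == 0x605000c0 */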
-
-/* must have mutex */
-static struct gid_info *_m_get_gi(struct process_info *pi, u32 gid)
-{
- struct gid_info *gi;
-
-	/* see if the group already exists */
- list_for_each_entry(gi, &pi->groups, by_pid) {
- if (gi->gid == gid)
- return gi;
- }
-
- /* create new group */
- gi = kmalloc(sizeof(*gi), GFP_KERNEL);
- if (!gi)
- return gi;
-
- memset(gi, 0, sizeof(*gi));
- INIT_LIST_HEAD(&gi->areas);
- INIT_LIST_HEAD(&gi->onedim);
- INIT_LIST_HEAD(&gi->reserved);
- gi->pi = pi;
- gi->gid = gid;
- list_add(&gi->by_pid, &pi->groups);
- return gi;
-}
-
-static struct mem_info *__get_area(enum tiler_fmt fmt, u32 width, u32 height,
- u16 align, u16 offs, struct gid_info *gi)
-{
- u16 x, y, band;
- struct mem_info *mi = NULL;
-
- /* calculate dimensions, band, offs and alignment in slots */
- if (__analize_area(fmt, width, height, &x, &y, &band, &align, &offs))
- return NULL;
-
- if (fmt == TILFMT_PAGE) {
- /* 1D areas don't pack */
- mi = kmalloc(sizeof(*mi), GFP_KERNEL);
- if (!mi)
- return NULL;
- memset(mi, 0x0, sizeof(*mi));
-
- if (tcm_reserve_1d(TCM(fmt), x * y, &mi->area)) {
- kfree(mi);
- return NULL;
- }
-
- mutex_lock(&mtx);
- mi->parent = gi;
- list_add(&mi->by_area, &gi->onedim);
- } else {
- mi = get_2d_area(x, y, align, offs, band, gi, TCM(fmt));
- if (!mi)
- return NULL;
-
- mutex_lock(&mtx);
- }
-
- list_add(&mi->global, &blocks);
- mi->alloced = true;
- mi->refs++;
- mutex_unlock(&mtx);
-
- mi->sys_addr = __get_alias_addr(fmt, mi->area.p0.x, mi->area.p0.y);
- return mi;
-}
-
-static s32 tiler_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct __buf_info *_b = NULL;
- struct tiler_buf_info *b = NULL;
- s32 i = 0, j = 0, k = 0, m = 0, p = 0, bpp = 1;
- struct list_head *pos = NULL;
- struct process_info *pi = filp->private_data;
-
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-
- mutex_lock(&mtx);
- list_for_each(pos, &pi->bufs) {
- _b = list_entry(pos, struct __buf_info, by_pid);
- if ((vma->vm_pgoff << PAGE_SHIFT) == _b->buf_info.offset)
- break;
- }
- mutex_unlock(&mtx);
- if (!_b)
- return -ENXIO;
-
- b = &_b->buf_info;
-
- for (i = 0; i < b->num_blocks; i++) {
- if (b->blocks[i].fmt >= TILFMT_8BIT &&
- b->blocks[i].fmt <= TILFMT_32BIT) {
- /* get line width */
- bpp = (b->blocks[i].fmt == TILFMT_8BIT ? 1 :
- b->blocks[i].fmt == TILFMT_16BIT ? 2 : 4);
- p = PAGE_ALIGN(b->blocks[i].dim.area.width * bpp);
-
- for (j = 0; j < b->blocks[i].dim.area.height; j++) {
- /* map each page of the line */
- vma->vm_pgoff =
- (b->blocks[i].ssptr + m) >> PAGE_SHIFT;
- if (remap_pfn_range(vma, vma->vm_start + k,
- (b->blocks[i].ssptr + m) >> PAGE_SHIFT,
- p, vma->vm_page_prot))
- return -EAGAIN;
- k += p;
- if (b->blocks[i].fmt == TILFMT_8BIT)
- m += 64*TILER_WIDTH;
- else
- m += 2*64*TILER_WIDTH;
- }
- m = 0;
- } else if (b->blocks[i].fmt == TILFMT_PAGE) {
- vma->vm_pgoff = (b->blocks[i].ssptr) >> PAGE_SHIFT;
- p = PAGE_ALIGN(b->blocks[i].dim.len);
- if (remap_pfn_range(vma, vma->vm_start + k,
- (b->blocks[i].ssptr) >> PAGE_SHIFT, p,
- vma->vm_page_prot))
-				return -EAGAIN;
- k += p;
- }
- }
- return 0;
-}
-
-static s32 refill_pat(struct tmm *tmm, struct tcm_area *area, u32 *ptr)
-{
- s32 res = 0;
- struct pat_area p_area = {0};
- struct tcm_area slice, area_s;
-
- tcm_for_each_slice(slice, *area, area_s) {
- p_area.x0 = slice.p0.x;
- p_area.y0 = slice.p0.y;
- p_area.x1 = slice.p1.x;
- p_area.y1 = slice.p1.y;
-
- memcpy(dmac_va, ptr, sizeof(*ptr) * tcm_sizeof(slice));
- ptr += tcm_sizeof(slice);
-
- if (tmm_map(tmm, p_area, dmac_pa)) {
- res = -EFAULT;
- break;
- }
- }
-
- return res;
-}
-
-static s32 map_block(enum tiler_fmt fmt, u32 width, u32 height, u32 gid,
- struct process_info *pi, u32 *sys_addr, u32 usr_addr)
-{
- u32 i = 0, tmp = -1, *mem = NULL;
- u8 write = 0;
- s32 res = -ENOMEM;
- struct mem_info *mi = NULL;
- struct page *page = NULL;
- struct task_struct *curr_task = current;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma = NULL;
- struct gid_info *gi = NULL;
-
- /* we only support mapping a user buffer in page mode */
- if (fmt != TILFMT_PAGE)
- return -EPERM;
-
- /* check if mapping is supported by tmm */
- if (!tmm_can_map(TMM(fmt)))
- return -EPERM;
-
- /* get group context */
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- mutex_unlock(&mtx);
-
- if (!gi)
- return -ENOMEM;
-
- /* reserve area in tiler container */
- mi = __get_area(fmt, width, height, 0, 0, gi);
- if (!mi) {
- mutex_lock(&mtx);
- _m_try_free_group(gi);
- mutex_unlock(&mtx);
- return -ENOMEM;
- }
-
- *sys_addr = mi->sys_addr;
- mi->usr = usr_addr;
-
- /* allocate pages */
- mi->num_pg = tcm_sizeof(mi->area);
-
- mem = kmalloc(mi->num_pg * sizeof(*mem), GFP_KERNEL);
- if (!mem)
- goto done;
- memset(mem, 0x0, sizeof(*mem) * mi->num_pg);
-
- mi->pg_ptr = kmalloc(mi->num_pg * sizeof(*mi->pg_ptr), GFP_KERNEL);
- if (!mi->pg_ptr)
- goto done;
- memset(mi->pg_ptr, 0x0, sizeof(*mi->pg_ptr) * mi->num_pg);
-
- /*
- * Important Note: usr_addr is mapped from user
- * application process to current process - it must lie
- * completely within the current virtual memory address
- * space in order to be of use to us here.
- */
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, mi->usr);
- res = -EFAULT;
-
- /*
- * It is observed that under some circumstances, the user
- * buffer is spread across several vmas, so loop through
- * and check if the entire user buffer is covered.
- */
- while ((vma) && (mi->usr + width > vma->vm_end)) {
- /* jump to the next VMA region */
- vma = find_vma(mm, vma->vm_end + 1);
- }
- if (!vma) {
- printk(KERN_ERR "Failed to get the vma region for "
- "user buffer.\n");
- goto fault;
- }
-
- if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
- write = 1;
-
- tmp = mi->usr;
- for (i = 0; i < mi->num_pg; i++) {
- if (get_user_pages(curr_task, mm, tmp, 1, write, 1, &page,
- NULL)) {
- if (page_count(page) < 1) {
-				printk(KERN_ERR "Bad page count from "
-					"get_user_pages()\n");
- }
- mi->pg_ptr[i] = (u32)page;
- mem[i] = page_to_phys(page);
- tmp += PAGE_SIZE;
- } else {
- printk(KERN_ERR "get_user_pages() failed\n");
- goto fault;
- }
- }
- up_read(&mm->mmap_sem);
-
-	/* Ensure the data reaches main memory before PAT refill */
- wmb();
-
- if (refill_pat(TMM(fmt), &mi->area, mem))
- goto fault;
-
- res = 0;
- goto done;
-fault:
- up_read(&mm->mmap_sem);
-done:
- if (res) {
- mutex_lock(&mtx);
- _m_free(mi);
- mutex_unlock(&mtx);
- }
- kfree(mem);
- return res;
-}
-
-s32 tiler_mapx(enum tiler_fmt fmt, u32 width, u32 height, u32 gid,
- pid_t pid, u32 *sys_addr, u32 usr_addr)
-{
- return map_block(fmt, width, height, gid, __get_pi(pid, true),
- sys_addr, usr_addr);
-}
-EXPORT_SYMBOL(tiler_mapx);
-
-s32 tiler_map(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr,
- u32 usr_addr)
-{
- return tiler_mapx(fmt, width, height, 0, current->tgid, sys_addr,
- usr_addr);
-}
-EXPORT_SYMBOL(tiler_map);
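A minimal caller sketch for the exported pair above; the buffer, its length and the error handling are illustrative, not taken from this patch:

	/* sketch: pin a user buffer into the 1D (page-mode) container */
	u32 ssptr;
	s32 err = tiler_map(TILFMT_PAGE, len, 1, &ssptr, (u32)usr_buf);
	if (err)
		return err;
	/* ... hand ssptr to the hardware ... */
	tiler_free(ssptr);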
-
-static s32 free_block(u32 sys_addr, struct process_info *pi)
-{
- struct gid_info *gi = NULL;
- struct area_info *ai = NULL;
- struct mem_info *mi = NULL;
- s32 res = -ENOENT;
-
- mutex_lock(&mtx);
-
- /* find block in process list and free it */
- list_for_each_entry(gi, &pi->groups, by_pid) {
- /* currently we know if block is 1D or 2D by the address */
- if (TILER_GET_ACC_MODE(sys_addr) == TILFMT_PAGE) {
- list_for_each_entry(mi, &gi->onedim, by_area) {
- if (mi->sys_addr == sys_addr) {
- _m_try_free(mi);
- res = 0;
- goto done;
- }
- }
- } else {
- list_for_each_entry(ai, &gi->areas, by_gid) {
- list_for_each_entry(mi, &ai->blocks, by_area) {
- if (mi->sys_addr == sys_addr) {
- _m_try_free(mi);
- res = 0;
- goto done;
- }
- }
- }
- }
- }
-
-done:
- mutex_unlock(&mtx);
-
- /* for debugging, we can set the PAT entries to DMM_LISA_MAP__0 */
- return res;
-}
-
-s32 tiler_free(u32 sys_addr)
-{
- struct mem_info *mi;
- s32 res = -ENOENT;
-
- mutex_lock(&mtx);
-
- /* find block in global list and free it */
- list_for_each_entry(mi, &blocks, global) {
- if (mi->sys_addr == sys_addr) {
- _m_try_free(mi);
- res = 0;
- break;
- }
- }
- mutex_unlock(&mtx);
-
- /* for debugging, we can set the PAT entries to DMM_LISA_MAP__0 */
- return res;
-}
-EXPORT_SYMBOL(tiler_free);
-
-/* :TODO: Currently we do not track enough information from alloc to get back
- the actual width and height of the container, so we must make a guess. We
- do not even have enough information to get the virtual stride of the buffer,
- which is the real reason for this ioctl */
-static s32 find_block(u32 sys_addr, struct tiler_block_info *blk)
-{
- struct mem_info *i;
- struct tcm_pt pt;
-
- if (get_area(sys_addr, &pt))
- return -EFAULT;
-
- list_for_each_entry(i, &blocks, global) {
- if (tcm_is_in(pt, i->area))
- goto found;
- }
-
- blk->fmt = TILFMT_INVALID;
- blk->dim.len = blk->stride = blk->ssptr = 0;
- return -EFAULT;
-
-found:
- blk->ptr = NULL;
- blk->fmt = TILER_GET_ACC_MODE(sys_addr);
- blk->ssptr = __get_alias_addr(blk->fmt, i->area.p0.x, i->area.p0.y);
-
- if (blk->fmt == TILFMT_PAGE) {
- blk->dim.len = tcm_sizeof(i->area) * TILER_PAGE;
- blk->stride = 0;
- } else {
- blk->stride = blk->dim.area.width =
- tcm_awidth(i->area) * TILER_BLOCK_WIDTH;
- blk->dim.area.height = tcm_aheight(i->area)
- * TILER_BLOCK_HEIGHT;
- if (blk->fmt != TILFMT_8BIT) {
- blk->stride <<= 1;
- blk->dim.area.height >>= 1;
- if (blk->fmt == TILFMT_32BIT)
- blk->dim.area.width >>= 1;
- }
- blk->stride = PAGE_ALIGN(blk->stride);
- }
- return 0;
-}
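A worked instance of the geometry above, with assumed dimensions: a TILFMT_16BIT block spanning 2x1 container slots yields

	/*
	 *   stride = 2 * TILER_BLOCK_WIDTH  = 128 pixels
	 *   height = 1 * TILER_BLOCK_HEIGHT = 64 rows
	 *   16-bit: stride <<= 1 -> 256 bytes, height >>= 1 -> 32 rows
	 *   stride = PAGE_ALIGN(256) = 4096 bytes
	 */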
-
-static s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, struct process_info *pi,
- u32 *sys_addr);
-
-static s32 tiler_ioctl(struct inode *ip, struct file *filp, u32 cmd,
- unsigned long arg)
-{
- pgd_t *pgd = NULL;
- pmd_t *pmd = NULL;
- pte_t *ptep = NULL, pte = 0x0;
- s32 r = -1;
- u32 til_addr = 0x0;
- struct process_info *pi = filp->private_data;
-
- struct __buf_info *_b = NULL;
- struct tiler_buf_info buf_info = {0};
- struct tiler_block_info block_info = {0};
-
- switch (cmd) {
- case TILIOC_GBUF:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- switch (block_info.fmt) {
- case TILFMT_PAGE:
- r = alloc_block(block_info.fmt, block_info.dim.len, 1,
- 0, 0, 0, pi, &til_addr);
- if (r)
- return r;
- break;
- case TILFMT_8BIT:
- case TILFMT_16BIT:
- case TILFMT_32BIT:
- r = alloc_block(block_info.fmt,
- block_info.dim.area.width,
- block_info.dim.area.height,
- 0, 0, 0, pi, &til_addr);
- if (r)
- return r;
- break;
- default:
- return -EINVAL;
- }
-
- block_info.ssptr = til_addr;
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
- case TILIOC_FBUF:
- case TILIOC_UMBUF:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- /* search current process first, then all processes */
- free_block(block_info.ssptr, pi) ?
- tiler_free(block_info.ssptr) : 0;
-
- /* free always succeeds */
- break;
-
- case TILIOC_GSSP:
- pgd = pgd_offset(current->mm, arg);
- if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
- pmd = pmd_offset(pgd, arg);
- if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
- ptep = pte_offset_map(pmd, arg);
- if (ptep) {
- pte = *ptep;
- if (pte_present(pte))
- return (pte & PAGE_MASK) |
- (~PAGE_MASK & arg);
- }
- }
- }
- /* va not in page table */
- return 0x0;
- break;
- case TILIOC_MBUF:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- if (!block_info.ptr)
- return -EFAULT;
-
- if (map_block(block_info.fmt, block_info.dim.len, 1, 0, pi,
- &block_info.ssptr, (u32)block_info.ptr))
- return -ENOMEM;
-
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
- case TILIOC_QBUF:
- if (copy_from_user(&buf_info, (void __user *)arg,
- sizeof(buf_info)))
- return -EFAULT;
-
- mutex_lock(&mtx);
- list_for_each_entry(_b, &pi->bufs, by_pid) {
- if (buf_info.offset == _b->buf_info.offset) {
- if (copy_to_user((void __user *)arg,
- &_b->buf_info,
- sizeof(_b->buf_info))) {
- mutex_unlock(&mtx);
- return -EFAULT;
- } else {
- mutex_unlock(&mtx);
- return 0;
- }
- }
- }
- mutex_unlock(&mtx);
- return -EFAULT;
- break;
- case TILIOC_RBUF:
- _b = kmalloc(sizeof(*_b), GFP_KERNEL);
- if (!_b)
- return -ENOMEM;
-
- memset(_b, 0x0, sizeof(*_b));
-
- if (copy_from_user(&_b->buf_info, (void __user *)arg,
- sizeof(_b->buf_info))) {
-			kfree(_b);
-			return -EFAULT;
- }
-
- r = register_buf(_b, pi);
- if (r) {
-			kfree(_b);
-			return -EACCES;
- }
-
- if (copy_to_user((void __user *)arg, &_b->buf_info,
- sizeof(_b->buf_info))) {
- _m_unregister_buf(_b);
- return -EFAULT;
- }
- break;
- case TILIOC_URBUF:
- if (copy_from_user(&buf_info, (void __user *)arg,
- sizeof(buf_info)))
- return -EFAULT;
-
- mutex_lock(&mtx);
- /* buffer registration is per process */
- list_for_each_entry(_b, &pi->bufs, by_pid) {
- if (buf_info.offset == _b->buf_info.offset) {
- _m_unregister_buf(_b);
- mutex_unlock(&mtx);
- return 0;
- }
- }
- mutex_unlock(&mtx);
- return -EFAULT;
- break;
- case TILIOC_QUERY_BLK:
- if (copy_from_user(&block_info, (void __user *)arg,
- sizeof(block_info)))
- return -EFAULT;
-
- if (find_block(block_info.ssptr, &block_info))
- return -EFAULT;
-
- if (copy_to_user((void __user *)arg, &block_info,
- sizeof(block_info)))
- return -EFAULT;
- break;
- default:
- return -EINVAL;
- }
- return 0x0;
-}
-
-static s32 alloc_block(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, struct process_info *pi,
- u32 *sys_addr)
-{
- struct mem_info *mi = NULL;
- struct gid_info *gi = NULL;
-
- /* only support up to page alignment */
- if (align > PAGE_SIZE || offs > align || !pi)
- return -EINVAL;
-
- /* get group context */
- mutex_lock(&mtx);
- gi = _m_get_gi(pi, gid);
- mutex_unlock(&mtx);
-
- if (!gi)
- return -ENOMEM;
-
- /* reserve area in tiler container */
- mi = __get_area(fmt, width, height, align, offs, gi);
- if (!mi) {
- mutex_lock(&mtx);
- _m_try_free_group(gi);
- mutex_unlock(&mtx);
- return -ENOMEM;
- }
-
- *sys_addr = mi->sys_addr;
-
- /* allocate and map if mapping is supported */
- if (tmm_can_map(TMM(fmt))) {
- mi->num_pg = tcm_sizeof(mi->area);
-
- mi->mem = tmm_get(TMM(fmt), mi->num_pg);
- if (!mi->mem)
- goto cleanup;
-
-		/* Ensure the data reaches main memory before PAT refill */
- wmb();
-
- /* program PAT */
- if (refill_pat(TMM(fmt), &mi->area, mi->mem))
- goto cleanup;
- }
- return 0;
-
-cleanup:
- mutex_lock(&mtx);
- _m_free(mi);
- mutex_unlock(&mtx);
- return -ENOMEM;
-
-}
-
-s32 tiler_allocx(enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, pid_t pid, u32 *sys_addr)
-{
- return alloc_block(fmt, width, height, align, offs, gid,
- __get_pi(pid, true), sys_addr);
-}
-EXPORT_SYMBOL(tiler_allocx);
-
-s32 tiler_alloc(enum tiler_fmt fmt, u32 width, u32 height, u32 *sys_addr)
-{
- return tiler_allocx(fmt, width, height, 0, 0,
- 0, current->tgid, sys_addr);
-}
-EXPORT_SYMBOL(tiler_alloc);
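A usage sketch for the exported allocator; the dimensions are illustrative:

	/* allocate a 640x480 16-bit 2D block, then release it */
	u32 ssptr;
	if (tiler_alloc(TILFMT_16BIT, 640, 480, &ssptr))
		return -ENOMEM;
	/* ssptr is the system-space address of the block's first pixel */
	tiler_free(ssptr);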
-
-
-static void reserve_nv12_blocks(u32 n, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, pid_t pid)
-{
-}
-
-static void reserve_blocks(u32 n, enum tiler_fmt fmt, u32 width, u32 height,
- u32 align, u32 offs, u32 gid, pid_t pid)
-{
-}
-
-/* reserve area for n identical buffers */
-s32 tiler_reservex(u32 n, struct tiler_buf_info *b, pid_t pid)
-{
- u32 i;
-
- if (b->num_blocks > TILER_MAX_NUM_BLOCKS)
- return -EINVAL;
-
- for (i = 0; i < b->num_blocks; i++) {
- /* check for NV12 reservations */
- if (i + 1 < b->num_blocks &&
- b->blocks[i].fmt == TILFMT_8BIT &&
- b->blocks[i + 1].fmt == TILFMT_16BIT &&
- b->blocks[i].dim.area.height ==
- b->blocks[i + 1].dim.area.height &&
- b->blocks[i].dim.area.width ==
- b->blocks[i + 1].dim.area.width) {
- reserve_nv12_blocks(n,
- b->blocks[i].dim.area.width,
- b->blocks[i].dim.area.height,
- 0, /* align */
- 0, /* offs */
- 0, /* gid */
- pid);
- i++;
- } else if (b->blocks[i].fmt >= TILFMT_8BIT &&
- b->blocks[i].fmt <= TILFMT_32BIT) {
- /* other 2D reservations */
- reserve_blocks(n,
- b->blocks[i].fmt,
- b->blocks[i].dim.area.width,
- b->blocks[i].dim.area.height,
- 0, /* align */
- 0, /* offs */
- 0, /* gid */
- pid);
- } else {
- return -EINVAL;
- }
- }
- return 0;
-}
-EXPORT_SYMBOL(tiler_reservex);
-
-s32 tiler_reserve(u32 n, struct tiler_buf_info *b)
-{
- return tiler_reservex(n, b, current->tgid);
-}
-EXPORT_SYMBOL(tiler_reserve);
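A hedged sketch of a request that would take the NV12 path above; note that the pairing check requires the 8-bit and 16-bit blocks to report identical width and height:

	struct tiler_buf_info b = { .num_blocks = 2 };
	b.blocks[0].fmt = TILFMT_8BIT;		/* Y plane */
	b.blocks[0].dim.area.width = 1280;
	b.blocks[0].dim.area.height = 720;
	b.blocks[1].fmt = TILFMT_16BIT;		/* interleaved UV plane */
	b.blocks[1].dim.area.width = 1280;
	b.blocks[1].dim.area.height = 720;
	tiler_reserve(4, &b);			/* room for 4 such buffers */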
-
-int tiler_reg_notifier(struct notifier_block *nb)
-{
- if (!nb)
- return -EINVAL;
- return blocking_notifier_chain_register(&tiler_device->notifier, nb);
-}
-EXPORT_SYMBOL(tiler_reg_notifier);
-
-int tiler_unreg_notifier(struct notifier_block *nb)
-{
- if (!nb)
- return -EINVAL;
- return blocking_notifier_chain_unregister(&tiler_device->notifier, nb);
-}
-EXPORT_SYMBOL(tiler_unreg_notifier);
-
-static void __exit tiler_exit(void)
-{
- struct process_info *pi = NULL, *pi_ = NULL;
- int i, j;
-
- mutex_lock(&mtx);
-
- /* free all process data */
- list_for_each_entry_safe(pi, pi_, &procs, list)
- _m_free_process_info(pi);
-
- /* all lists should have cleared */
- WARN_ON(!list_empty(&blocks));
- WARN_ON(!list_empty(&procs));
- WARN_ON(!list_empty(&orphan_onedim));
- WARN_ON(!list_empty(&orphan_areas));
-
- mutex_unlock(&mtx);
-
- dma_free_coherent(NULL, TILER_WIDTH * TILER_HEIGHT * sizeof(*dmac_va),
- dmac_va, dmac_pa);
-
- /* close containers only once */
- for (i = TILFMT_8BIT; i <= TILFMT_MAX; i++) {
- /* remove identical containers (tmm is unique per tcm) */
- for (j = i + 1; j <= TILFMT_MAX; j++)
- if (TCM(i) == TCM(j)) {
- TCM_SET(j, NULL);
- TMM_SET(j, NULL);
- }
-
- tcm_deinit(TCM(i));
- tmm_deinit(TMM(i));
- }
-
- mutex_destroy(&mtx);
- platform_driver_unregister(&tiler_driver_ldm);
- cdev_del(&tiler_device->cdev);
- kfree(tiler_device);
- device_destroy(tilerdev_class, MKDEV(tiler_major, tiler_minor));
- class_destroy(tilerdev_class);
-}
-
-static s32 tiler_open(struct inode *ip, struct file *filp)
-{
- struct process_info *pi = __get_pi(current->tgid, false);
-
- if (!pi)
- return -ENOMEM;
-
- filp->private_data = pi;
- return 0x0;
-}
-
-static s32 tiler_release(struct inode *ip, struct file *filp)
-{
- struct process_info *pi = filp->private_data;
-
- mutex_lock(&mtx);
- /* free resources if last device in this process */
- if (0 == --pi->refs)
- _m_free_process_info(pi);
-
- mutex_unlock(&mtx);
-
- return 0x0;
-}
-
-static const struct file_operations tiler_fops = {
- .open = tiler_open,
- .ioctl = tiler_ioctl,
- .release = tiler_release,
- .mmap = tiler_mmap,
-};
-
-static s32 __init tiler_init(void)
-{
- dev_t dev = 0;
- s32 r = -1;
- struct device *device = NULL;
- struct tcm_pt div_pt;
- struct tcm *sita = NULL;
- struct tmm *tmm_pat = NULL;
-
- if (!cpu_is_omap44xx())
- return 0;
-
-	/*
-	 * Array of physical page addresses for PAT programming; the PAT
-	 * requires a 16-byte aligned physical address
-	 */
- dmac_va = dma_alloc_coherent(NULL, TILER_WIDTH * TILER_HEIGHT *
- sizeof(*dmac_va), &dmac_pa, GFP_ATOMIC);
- if (!dmac_va)
- return -ENOMEM;
-
- /* Allocate tiler container manager (we share 1 on OMAP4) */
- div_pt.x = TILER_WIDTH; /* hardcoded default */
- div_pt.y = (3 * TILER_HEIGHT) / 4;
- sita = sita_init(TILER_WIDTH, TILER_HEIGHT, (void *)&div_pt);
-
- TCM_SET(TILFMT_8BIT, sita);
- TCM_SET(TILFMT_16BIT, sita);
- TCM_SET(TILFMT_32BIT, sita);
- TCM_SET(TILFMT_PAGE, sita);
-
-	/* Allocate tiler memory manager (must have 1 unique TMM per TCM) */
- tmm_pat = tmm_pat_init(0);
- TMM_SET(TILFMT_8BIT, tmm_pat);
- TMM_SET(TILFMT_16BIT, tmm_pat);
- TMM_SET(TILFMT_32BIT, tmm_pat);
- TMM_SET(TILFMT_PAGE, tmm_pat);
-
- tiler_device = kmalloc(sizeof(*tiler_device), GFP_KERNEL);
- if (!tiler_device || !sita || !tmm_pat) {
- r = -ENOMEM;
- goto error;
- }
-
- memset(tiler_device, 0x0, sizeof(*tiler_device));
- if (tiler_major) {
- dev = MKDEV(tiler_major, tiler_minor);
- r = register_chrdev_region(dev, 1, "tiler");
- } else {
- r = alloc_chrdev_region(&dev, tiler_minor, 1, "tiler");
- tiler_major = MAJOR(dev);
- }
-
- cdev_init(&tiler_device->cdev, &tiler_fops);
- tiler_device->cdev.owner = THIS_MODULE;
- tiler_device->cdev.ops = &tiler_fops;
-
- r = cdev_add(&tiler_device->cdev, dev, 1);
- if (r)
-		printk(KERN_ERR "cdev_add(): failed\n");
-
- tilerdev_class = class_create(THIS_MODULE, "tiler");
-
- if (IS_ERR(tilerdev_class)) {
-		printk(KERN_ERR "class_create(): failed\n");
- goto error;
- }
-
- device = device_create(tilerdev_class, NULL, dev, NULL, "tiler");
-	if (IS_ERR(device))
-		printk(KERN_ERR "device_create() failed\n");
-
- r = platform_driver_register(&tiler_driver_ldm);
-
- mutex_init(&mtx);
- INIT_LIST_HEAD(&blocks);
- INIT_LIST_HEAD(&procs);
- INIT_LIST_HEAD(&orphan_areas);
- INIT_LIST_HEAD(&orphan_onedim);
- BLOCKING_INIT_NOTIFIER_HEAD(&tiler_device->notifier);
- id = 0xda7a000;
-
-error:
- /* TODO: error handling for device registration */
- if (r) {
- kfree(tiler_device);
- tcm_deinit(sita);
- tmm_deinit(tmm_pat);
- dma_free_coherent(NULL, TILER_WIDTH * TILER_HEIGHT *
- sizeof(*dmac_va), dmac_va, dmac_pa);
- }
-
- return r;
-}
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("David Sin <davidsin@ti.com>");
-MODULE_AUTHOR("Lajos Molnar <molnar@ti.com>");
-module_init(tiler_init);
-module_exit(tiler_exit);
diff --git a/drivers/media/video/tiler/tiler_def.h b/drivers/media/video/tiler/tiler_def.h
deleted file mode 100644
index d92bfde8e452..000000000000
--- a/drivers/media/video/tiler/tiler_def.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * tiler_def.h
- *
- * TILER driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef TILER_DEF_H
-#define TILER_DEF_H
-
-#define ROUND_UP_2P(a, b) (((a) + (b) - 1) & ~((b) - 1))
-#define DIVIDE_UP(a, b) (((a) + (b) - 1) / (b))
-#define ROUND_UP(a, b) (DIVIDE_UP(a, b) * (b))
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
-
-#define TILER_ACC_MODE_SHIFT (27)
-#define TILER_ACC_MODE_MASK (3)
-#define TILER_GET_ACC_MODE(x) ((enum tiler_fmt) (1 + \
-(((u32)x & (TILER_ACC_MODE_MASK<<TILER_ACC_MODE_SHIFT))>>TILER_ACC_MODE_SHIFT)))
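A worked decode, assuming TILFMT_8BIT has the value 1 (the "1 +" in the macro maps access mode 0 to the 8-bit format):

	/*
	 * For x = 0x68000000: (x >> 27) & 3 == 1, so
	 * TILER_GET_ACC_MODE(x) == 1 + 1 == TILFMT_16BIT.
	 */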
-
-#define TILER_ALIAS_BASE (0x60000000)
-#define TILER_ACC_MODE_SHIFT (27)
-#define DMM_ACC_MODE_SHIFT (27)
-
-#define TIL_ALIAS_ADDR(x, access_mode)\
-((void *)(TILER_ALIAS_BASE | (u32)x | (access_mode << TILER_ACC_MODE_SHIFT)))
-
-#define TIL_ADDR(x, r, yi, xi, a)\
-((void *)((u32)x | (r << DMM_ROTATION_SHIFT) |\
-(yi << DMM_Y_INVERT_SHIFT) | (xi << DMM_X_INVERT_SHIFT) |\
-(a << DMM_ACC_MODE_SHIFT)))
-
-#define TILER_ALIAS_VIEW_CLEAR (~0xE0000000)
-
-#define DMM_X_INVERT_SHIFT (29)
-#define DMM_GET_X_INVERTED(x) ((((u32)x & (1<<DMM_X_INVERT_SHIFT)) > 0) ? 1 : 0)
-#define DMM_Y_INVERT_SHIFT (30)
-#define DMM_GET_Y_INVERTED(x) ((((u32)x & (1<<DMM_Y_INVERT_SHIFT)) > 0) ? 1 : 0)
-
-#define DMM_ROTATION_SHIFT (31)
-#define DMM_GET_ROTATED(x)\
-((((u32)x & ((u32)1<<DMM_ROTATION_SHIFT)) > 0) ? 1 : 0)
-
-#define DMM_ALIAS_VIEW_CLEAR (~0xE0000000)
-
-#define DMM_TILE_DIMM_X_MODE_8 (32)
-#define DMM_TILE_DIMM_Y_MODE_8 (32)
-
-#define DMM_TILE_DIMM_X_MODE_16 (32)
-#define DMM_TILE_DIMM_Y_MODE_16 (16)
-
-#define DMM_TILE_DIMM_X_MODE_32 (16)
-#define DMM_TILE_DIMM_Y_MODE_32 (16)
-
-#define DMM_PAGE_DIMM_X_MODE_8 (DMM_TILE_DIMM_X_MODE_8*2)
-#define DMM_PAGE_DIMM_Y_MODE_8 (DMM_TILE_DIMM_Y_MODE_8*2)
-
-#define DMM_PAGE_DIMM_X_MODE_16 (DMM_TILE_DIMM_X_MODE_16*2)
-#define DMM_PAGE_DIMM_Y_MODE_16 (DMM_TILE_DIMM_Y_MODE_16*2)
-
-#define DMM_PAGE_DIMM_X_MODE_32 (DMM_TILE_DIMM_X_MODE_32*2)
-#define DMM_PAGE_DIMM_Y_MODE_32 (DMM_TILE_DIMM_Y_MODE_32*2)
-
-#define DMM_HOR_X_ADDRSHIFT_8 (0)
-#define DMM_HOR_X_ADDRMASK_8 (0x3FFF)
-#define DMM_HOR_X_COOR_GET_8(x)\
- (((unsigned long)x >> DMM_HOR_X_ADDRSHIFT_8) & DMM_HOR_X_ADDRMASK_8)
-#define DMM_HOR_X_PAGE_COOR_GET_8(x)\
- (DMM_HOR_X_COOR_GET_8(x)/DMM_PAGE_DIMM_X_MODE_8)
-
-#define DMM_HOR_Y_ADDRSHIFT_8 (14)
-#define DMM_HOR_Y_ADDRMASK_8 (0x1FFF)
-#define DMM_HOR_Y_COOR_GET_8(x)\
- (((unsigned long)x >> DMM_HOR_Y_ADDRSHIFT_8) & DMM_HOR_Y_ADDRMASK_8)
-#define DMM_HOR_Y_PAGE_COOR_GET_8(x)\
- (DMM_HOR_Y_COOR_GET_8(x)/DMM_PAGE_DIMM_Y_MODE_8)
-
-#define DMM_HOR_X_ADDRSHIFT_16 (1)
-#define DMM_HOR_X_ADDRMASK_16 (0x7FFE)
-#define DMM_HOR_X_COOR_GET_16(x) (((unsigned long)x >> \
- DMM_HOR_X_ADDRSHIFT_16) & DMM_HOR_X_ADDRMASK_16)
-#define DMM_HOR_X_PAGE_COOR_GET_16(x) (DMM_HOR_X_COOR_GET_16(x) / \
- DMM_PAGE_DIMM_X_MODE_16)
-
-#define DMM_HOR_Y_ADDRSHIFT_16 (15)
-#define DMM_HOR_Y_ADDRMASK_16 (0xFFF)
-#define DMM_HOR_Y_COOR_GET_16(x) (((unsigned long)x >> \
- DMM_HOR_Y_ADDRSHIFT_16) & DMM_HOR_Y_ADDRMASK_16)
-#define DMM_HOR_Y_PAGE_COOR_GET_16(x) (DMM_HOR_Y_COOR_GET_16(x) / \
- DMM_PAGE_DIMM_Y_MODE_16)
-
-#define DMM_HOR_X_ADDRSHIFT_32 (2)
-#define DMM_HOR_X_ADDRMASK_32 (0x7FFC)
-#define DMM_HOR_X_COOR_GET_32(x) (((unsigned long)x >> \
- DMM_HOR_X_ADDRSHIFT_32) & DMM_HOR_X_ADDRMASK_32)
-#define DMM_HOR_X_PAGE_COOR_GET_32(x) (DMM_HOR_X_COOR_GET_32(x) / \
- DMM_PAGE_DIMM_X_MODE_32)
-
-#define DMM_HOR_Y_ADDRSHIFT_32 (15)
-#define DMM_HOR_Y_ADDRMASK_32 (0xFFF)
-#define DMM_HOR_Y_COOR_GET_32(x) (((unsigned long)x >> \
- DMM_HOR_Y_ADDRSHIFT_32) & DMM_HOR_Y_ADDRMASK_32)
-#define DMM_HOR_Y_PAGE_COOR_GET_32(x) (DMM_HOR_Y_COOR_GET_32(x) / \
- DMM_PAGE_DIMM_Y_MODE_32)
-
-#define DMM_VER_X_ADDRSHIFT_8 (14)
-#define DMM_VER_X_ADDRMASK_8 (0x1FFF)
-#define DMM_VER_X_COOR_GET_8(x)\
- (((unsigned long)x >> DMM_VER_X_ADDRSHIFT_8) & DMM_VER_X_ADDRMASK_8)
-#define DMM_VER_X_PAGE_COOR_GET_8(x)\
- (DMM_VER_X_COOR_GET_8(x)/DMM_PAGE_DIMM_X_MODE_8)
-
-#define DMM_VER_Y_ADDRSHIFT_8 (0)
-#define DMM_VER_Y_ADDRMASK_8 (0x3FFF)
-#define DMM_VER_Y_COOR_GET_8(x)\
- (((unsigned long)x >> DMM_VER_Y_ADDRSHIFT_8) & DMM_VER_Y_ADDRMASK_8)
-#define DMM_VER_Y_PAGE_COOR_GET_8(x)\
- (DMM_VER_Y_COOR_GET_8(x)/DMM_PAGE_DIMM_Y_MODE_8)
-
-#define DMM_VER_X_ADDRSHIFT_16 (14)
-#define DMM_VER_X_ADDRMASK_16 (0x1FFF)
-#define DMM_VER_X_COOR_GET_16(x) (((unsigned long)x >> \
- DMM_VER_X_ADDRSHIFT_16) & DMM_VER_X_ADDRMASK_16)
-#define DMM_VER_X_PAGE_COOR_GET_16(x) (DMM_VER_X_COOR_GET_16(x) / \
- DMM_PAGE_DIMM_X_MODE_16)
-
-#define DMM_VER_Y_ADDRSHIFT_16 (0)
-#define DMM_VER_Y_ADDRMASK_16 (0x3FFF)
-#define DMM_VER_Y_COOR_GET_16(x) (((unsigned long)x >> \
- DMM_VER_Y_ADDRSHIFT_16) & DMM_VER_Y_ADDRMASK_16)
-#define DMM_VER_Y_PAGE_COOR_GET_16(x) (DMM_VER_Y_COOR_GET_16(x) / \
- DMM_PAGE_DIMM_Y_MODE_16)
-
-#define DMM_VER_X_ADDRSHIFT_32 (15)
-#define DMM_VER_X_ADDRMASK_32 (0xFFF)
-#define DMM_VER_X_COOR_GET_32(x) (((unsigned long)x >> \
- DMM_VER_X_ADDRSHIFT_32) & DMM_VER_X_ADDRMASK_32)
-#define DMM_VER_X_PAGE_COOR_GET_32(x) (DMM_VER_X_COOR_GET_32(x) / \
- DMM_PAGE_DIMM_X_MODE_32)
-
-#define DMM_VER_Y_ADDRSHIFT_32 (0)
-#define DMM_VER_Y_ADDRMASK_32 (0x7FFF)
-#define DMM_VER_Y_COOR_GET_32(x) (((unsigned long)x >> \
- DMM_VER_Y_ADDRSHIFT_32) & DMM_VER_Y_ADDRMASK_32)
-#define DMM_VER_Y_PAGE_COOR_GET_32(x) (DMM_VER_Y_COOR_GET_32(x) / \
- DMM_PAGE_DIMM_Y_MODE_32)
-
-#endif
diff --git a/drivers/media/video/tiler/tiler_pack.c b/drivers/media/video/tiler/tiler_pack.c
deleted file mode 100644
index 6338c2bb2146..000000000000
--- a/drivers/media/video/tiler/tiler_pack.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * tiler_pack.c
- *
- * TILER driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <mach/tiler.h>
-#include "tiler_def.h"
-
-void tiler_alloc_packed(s32 *count, enum tiler_fmt fmt, u32 width, u32 height,
- void **sysptr, void **allocptr, s32 aligned)
-{
- int til_width, bpp, bpt, buf_width, alloc_width, map_width;
- int buf_map_width, n_per_m, m_per_a, i = 0, m, n;
-
- /* Check input parameters for correctness */
- if (!width || !height || !sysptr || !allocptr || !count ||
- *count <= 0 || fmt < TILFMT_8BIT || fmt > TILFMT_32BIT) {
- if (count)
- *count = 0;
- return;
- }
-
- /* tiler page width in pixels, bytes per pixel, tiler page in bytes */
- til_width = fmt == TILFMT_32BIT ? 32 : 64;
- bpp = 1 << (fmt - TILFMT_8BIT);
- bpt = til_width * bpp;
-
- /* width of buffer in tiled pages */
- buf_width = DIVIDE_UP(width, til_width);
-
-	/* :TODO: for now the tiler allocation width is a multiple of 64 */
- alloc_width = ROUND_UP_2P(buf_width, 64);
- map_width = TILER_PAGE / bpt;
-
- /* ensure alignment if needed */
- buf_map_width = ROUND_UP_2P(buf_width, map_width);
-
- /* number of buffers in a map window */
- n_per_m = aligned ? 1 : (buf_map_width / buf_width);
-
- /* number of map windows per allocation */
- m_per_a = alloc_width / buf_map_width;
-
- printk(KERN_INFO "packing %d*%d buffers into an allocation\n",
- n_per_m, m_per_a);
-
- while (i < *count) {
- /* allocate required width of a frame to fit remaining
- frames */
- int n_alloc, m_alloc, tiles, res;
- void *base;
-
- n_alloc = MIN(*count - i, m_per_a * n_per_m);
- m_alloc = DIVIDE_UP(n_alloc, n_per_m);
- tiles = ((m_alloc - 1) * map_width +
- buf_width * (n_alloc - (m_alloc - 1) * n_per_m));
-
- res = tiler_alloc(fmt, til_width * tiles, height,
- (u32 *)sysptr + i);
- if (res != 0)
- break;
-
- /* mark allocation */
- base = allocptr[i] = sysptr[i];
- i++;
-
- /* portion out remaining buffers */
- for (m = 0; m < m_per_a; m++, base += bpt * buf_map_width) {
- for (n = 0; n < n_per_m; n++) {
- /* first buffer is already allocated */
- if (n + m == 0)
- continue;
-
- /* stop if we are done */
- if (i == *count)
- break;
-
- /* set buffer address */
- sysptr[i] = base + bpt * n * buf_width;
- allocptr[i++] = NULL;
- }
- }
- }
-
- /* mark how many buffers we allocated */
- *count = i;
-}
-EXPORT_SYMBOL(tiler_alloc_packed);
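A worked run of the packing arithmetic above, for an assumed 8-bit, 176-pixel-wide buffer:

	/*
	 *   til_width = 64, bpp = 1, bpt = 64 bytes
	 *   buf_width = DIVIDE_UP(176, 64)   = 3 tiler pages
	 *   alloc_width = ROUND_UP_2P(3, 64) = 64
	 *   map_width = TILER_PAGE / bpt     = 4096 / 64 = 64
	 *   buf_map_width = 64, n_per_m = 64 / 3 = 21, m_per_a = 1
	 * so up to 21 unaligned buffers share a single allocation.
	 */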
-
-static int layout_packed_nv12(char *offsets, int y_width, int uv_width,
- void **buf, int blocks, int i,
- void **y_sysptr, void **uv_sysptr,
- void **y_allocptr, void **uv_allocptr)
-{
- int j;
- for (j = 0; j < blocks; j++, offsets += 3) {
- int page_offset = (63 & (int) offsets[0])
- + y_width * ((int) offsets[1])
- + uv_width * (int) offsets[2];
- void *base = buf[offsets[0] >> 6] + 64 * page_offset;
-
- if (j & 1) {
- /* convert 8-bit to 16-bit view */
- /* this formula only works for even ys */
- uv_sysptr[i] = base + (0x3FFF & (unsigned long) base)
- + 0x8000000;
- uv_allocptr[i] = page_offset ? NULL : base;
- i++;
- } else {
- y_sysptr[i] = base;
- y_allocptr[i] = page_offset ? NULL : y_sysptr[i];
- }
- }
- return i;
-}
-
-void tiler_alloc_packed_nv12(s32 *count, u32 width, u32 height, void **y_sysptr,
- void **uv_sysptr, void **y_allocptr,
- void **uv_allocptr, s32 aligned)
-{
-	/* optimized packing table */
-	/* We read this table from beginning to end, checking whether an
-	   optimization meets our requirements (e.g. allocating at least
-	   i buffers, with max y-width w, and alignment a). If it does not,
-	   we move on to the next element; otherwise we do the allocation.
-	   The table is constructed so that if an interim tiler allocation
-	   fails, the next matching rule for the scenario can reuse the
-	   buffers already allocated. */
-
-#define MAX_BUFS_TO_PACK 3
- void *buf[MAX_BUFS_TO_PACK];
- int n_buf, buf_w[MAX_BUFS_TO_PACK];
-
- static const char packing[] = {
- /* min(i), max(w), aligned, buffers to alloc */
- 5, 16, 0, 2,
- /* buffer widths in a + b * w(y) + c * w(uv) */
- 64, 0, 0, 64, 0, 0,
- /* tiler-page offsets in
- a + b * w(y) + c * w(uv) */
- 0, 0, 0, 32, 0, 0,
- 16, 0, 0, 40, 0, 0,
- 64, 0, 0, 96, 0, 0,
- 80, 0, 0, 104, 0, 0,
- 112, 0, 0, 56, 0, 0,
-
- 2, 16, 0, 1,
- 32, 0, 2,
- 0, 0, 0, 32, 0, 0,
- 0, 0, 2, 32, 0, 1,
-
- 2, 20, 0, 1,
- 42, 1, 0,
- 0, 0, 0, 32, 0, 0,
- 42, 0, 0, 21, 0, 0,
-
- 3, 24, 0, 2,
- 48, 0, 1, 32, 1, 0,
- 0, 0, 0, 64, 0, 0,
- 24, 0, 0, 76, 0, 0,
- 96, 0, 0, 48, 0, 0,
-
- 4, 32, 0, 3,
- 48, 0, 1, 32, 1, 0, 32, 1, 0,
- 0, 0, 0, 32, 0, 0,
- 96, 0, 0, 48, 0, 0,
- 64, 0, 0, 128, 0, 0,
- 160, 0, 0, 144, 0, 0,
-
- /* this is needed for soft landing if prior allocation fails
- after two buffers */
- 2, 32, 1, 2,
- 32, 0, 1, 32, 0, 1,
- 0, 0, 0, 32, 0, 0,
- 64, 0, 0, 96, 0, 0,
-
- 1, 32, 1, 1,
- 32, 0, 1,
- 0, 0, 0, 32, 0, 0,
-
- 2, 64, 1, 3,
- 0, 1, 0, 32, 0, 1, 0, 1, 0,
- 0, 0, 0, 64, 0, 0,
- 128, 0, 0, 96, 0, 0,
- /* this is the basic NV12 allocation using 2 buffers */
- 1, 0, 1, 2,
- 0, 1, 0, 0, 0, 1,
- 0, 0, 0, 64, 0, 0,
- 0 };
- int y_width, uv_width, i = 0;
-
- /* Check input parameters for correctness */
- if (!width || !height || !y_sysptr || !y_allocptr || !count ||
- !uv_sysptr || !uv_allocptr || *count <= 0) {
- if (count)
- *count = 0;
- return;
- }
-
- y_width = DIVIDE_UP(width, 64);
- uv_width = DIVIDE_UP(width >> 1, 64);
-
- while (i < *count) {
- int n_alloc = *count - i;
- char *p = (char *)packing;
- n_buf = 0;
-
- /* skip packings that do not apply */
- while (*p) {
- /* see if this packing applies */
- if (p[0] <= n_alloc &&
- (!p[1] || p[1] >= y_width) &&
- (!aligned || p[2])) {
-
- /* allocate buffers */
- while (n_buf < p[3]) {
- buf_w[n_buf] = p[4 + 3 * n_buf] +
- y_width * p[5 + 3 * n_buf] +
- uv_width * p[6 + 3 * n_buf];
-
- if (0 != tiler_alloc(
- TILFMT_8BIT, buf_w[n_buf] * 64,
- height, (u32 *)buf + n_buf))
- break;
- n_buf++;
- }
-
- /* if successfully allocated buffers */
- if (n_buf >= p[3]) {
- i = layout_packed_nv12(p + 4 + 3 * p[3],
- y_width,
- uv_width,
- buf, 2 * p[0], i,
- y_sysptr,
- uv_sysptr,
- y_allocptr,
- uv_allocptr);
- break;
- }
- }
-
- p += 4 + 3 * p[3] + 6 * p[0];
- }
-
- /* if allocation failed free any outstanding buffers and stop */
- if (!*p) {
- while (n_buf > 0)
- tiler_free((unsigned long)(buf[--n_buf]));
- break;
- }
- }
-
- /* mark how many buffers we allocated */
- *count = i;
-}
-EXPORT_SYMBOL(tiler_alloc_packed_nv12);
diff --git a/drivers/media/video/tiler/tiler_rot.c b/drivers/media/video/tiler/tiler_rot.c
deleted file mode 100644
index f1c5f7d7e182..000000000000
--- a/drivers/media/video/tiler/tiler_rot.c
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * tiler_rot.c
- *
- * TILER driver support functions for TI OMAP processors.
- *
- * Copyright (C) 2009-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <mach/tiler.h>
-#include "tiler_def.h"
-
-#define DMM_SHIFT_PER_X_8 0
-#define DMM_SHIFT_PER_Y_8 0
-#define DMM_SHIFT_PER_X_16 0
-#define DMM_SHIFT_PER_Y_16 1
-#define DMM_SHIFT_PER_X_32 1
-#define DMM_SHIFT_PER_Y_32 1
-#define DMM_SHIFT_PER_X_PAGE 6
-#define DMM_SHIFT_PER_Y_PAGE 6
-
-#define DMM_TILER_THE(NAME) (1 << DMM_TILER_##NAME##_BITS)
-#define DMM_TILER_THE_(N, NAME) (1 << DMM_TILER_##NAME##_BITS_(N))
-
-#define DMM_TILER_CONT_WIDTH_BITS 14
-#define DMM_TILER_CONT_HEIGHT_BITS 13
-
-#define DMM_SHIFT_PER_P_(N) (DMM_SHIFT_PER_X_##N + DMM_SHIFT_PER_Y_##N)
-
-#define DMM_TILER_CONT_HEIGHT_BITS_(N) \
- (DMM_TILER_CONT_HEIGHT_BITS - DMM_SHIFT_PER_Y_##N)
-#define DMM_TILER_CONT_WIDTH_BITS_(N) \
- (DMM_TILER_CONT_WIDTH_BITS - DMM_SHIFT_PER_X_##N)
-
-#define DMM_TILER_MASK(bits) ((1 << (bits)) - 1)
-
-#define DMM_TILER_GET_OFFSET_(N, var) \
- ((((u32) var) & DMM_TILER_MASK(DMM_TILER_CONT_WIDTH_BITS + \
- DMM_TILER_CONT_HEIGHT_BITS)) >> DMM_SHIFT_PER_P_(N))
-
-#define DMM_TILER_GET_0_X_(N, var) \
- (DMM_TILER_GET_OFFSET_(N, var) & \
- DMM_TILER_MASK(DMM_TILER_CONT_WIDTH_BITS_(N)))
-#define DMM_TILER_GET_0_Y_(N, var) \
- (DMM_TILER_GET_OFFSET_(N, var) >> DMM_TILER_CONT_WIDTH_BITS_(N))
-#define DMM_TILER_GET_90_X_(N, var) \
- (DMM_TILER_GET_OFFSET_(N, var) & \
- DMM_TILER_MASK(DMM_TILER_CONT_HEIGHT_BITS_(N)))
-#define DMM_TILER_GET_90_Y_(N, var) \
- (DMM_TILER_GET_OFFSET_(N, var) >> DMM_TILER_CONT_HEIGHT_BITS_(N))
-
-#define DMM_TILER_STRIDE_0_(N) \
- (DMM_TILER_THE(CONT_WIDTH) << DMM_SHIFT_PER_Y_##N)
-#define DMM_TILER_STRIDE_90_(N) \
- (DMM_TILER_THE(CONT_HEIGHT) << DMM_SHIFT_PER_X_##N)
-
-static void tiler_get_natural_xy(u32 tsptr, u32 *x, u32 *y)
-{
- u32 x_bits, y_bits, offset;
- enum tiler_fmt fmt;
-
- fmt = TILER_GET_ACC_MODE(tsptr);
-
- switch (fmt) {
- case TILFMT_8BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(8);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(8);
- offset = DMM_TILER_GET_OFFSET_(8, tsptr);
- break;
- case TILFMT_16BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(16);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(16);
- offset = DMM_TILER_GET_OFFSET_(16, tsptr);
- break;
- case TILFMT_32BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(32);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(32);
- offset = DMM_TILER_GET_OFFSET_(32, tsptr);
- break;
- case TILFMT_PAGE:
- default:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(PAGE);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(PAGE);
- offset = DMM_TILER_GET_OFFSET_(PAGE, tsptr);
- break;
- }
-
- if (DMM_GET_ROTATED(tsptr)) {
- *x = offset >> y_bits;
- *y = offset & DMM_TILER_MASK(y_bits);
- } else {
- *x = offset & DMM_TILER_MASK(x_bits);
- *y = offset >> x_bits;
- }
-
- if (DMM_GET_X_INVERTED(tsptr))
- *x ^= DMM_TILER_MASK(x_bits);
- if (DMM_GET_Y_INVERTED(tsptr))
- *y ^= DMM_TILER_MASK(y_bits);
-}
-
-static u32 tiler_get_address(struct tiler_view_orient orient,
- enum tiler_fmt fmt, u32 x, u32 y)
-{
- u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
-
- switch (fmt) {
- case TILFMT_8BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(8);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(8);
- alignment = DMM_SHIFT_PER_P_(8);
- break;
- case TILFMT_16BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(16);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(16);
- alignment = DMM_SHIFT_PER_P_(16);
- break;
- case TILFMT_32BIT:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(32);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(32);
- alignment = DMM_SHIFT_PER_P_(32);
- break;
- case TILFMT_PAGE:
- default:
- x_bits = DMM_TILER_CONT_WIDTH_BITS_(PAGE);
- y_bits = DMM_TILER_CONT_HEIGHT_BITS_(PAGE);
- alignment = DMM_SHIFT_PER_P_(PAGE);
- break;
- }
-
- x_mask = DMM_TILER_MASK(x_bits);
- y_mask = DMM_TILER_MASK(y_bits);
- if (x < 0 || x > x_mask || y < 0 || y > y_mask)
- return 0;
-
- if (orient.x_invert)
- x ^= x_mask;
- if (orient.y_invert)
- y ^= y_mask;
-
- if (orient.rotate_90)
- tmp = ((x << y_bits) + y);
- else
- tmp = ((y << x_bits) + x);
-
- return (u32)
- TIL_ADDR((tmp << alignment), (orient.rotate_90 ? 1 : 0),
- (orient.y_invert ? 1 : 0), (orient.x_invert ? 1 : 0),
- (fmt - 1));
-}
-
-u32 tiler_reorient_addr(u32 tsptr, struct tiler_view_orient orient)
-{
- u32 x, y;
-
- tiler_get_natural_xy(tsptr, &x, &y);
- return tiler_get_address(orient, TILER_GET_ACC_MODE(tsptr), x, y);
-}
-EXPORT_SYMBOL(tiler_reorient_addr);
-
-u32 tiler_get_natural_addr(void *sys_ptr)
-{
- return (u32)sys_ptr & DMM_ALIAS_VIEW_CLEAR;
-}
-EXPORT_SYMBOL(tiler_get_natural_addr);
-
-u32 tiler_reorient_topleft(u32 tsptr, struct tiler_view_orient orient,
- u32 width, u32 height)
-{
- enum tiler_fmt fmt;
- u32 x, y;
-
- fmt = TILER_GET_ACC_MODE(tsptr);
-
- tiler_get_natural_xy(tsptr, &x, &y);
-
- if (DMM_GET_X_INVERTED(tsptr))
- x -= width - 1;
- if (DMM_GET_Y_INVERTED(tsptr))
- y -= height - 1;
-
- if (orient.x_invert)
- x += width - 1;
- if (orient.y_invert)
- y += height - 1;
-
- return tiler_get_address(orient, fmt, x, y);
-}
-EXPORT_SYMBOL(tiler_reorient_topleft);
-
-u32 tiler_stride(u32 tsptr)
-{
- enum tiler_fmt fmt;
-
- fmt = TILER_GET_ACC_MODE(tsptr);
-
- switch (fmt) {
- case TILFMT_8BIT:
- return DMM_GET_ROTATED(tsptr) ?
- DMM_TILER_STRIDE_90_(8) : DMM_TILER_STRIDE_0_(8);
- case TILFMT_16BIT:
- return DMM_GET_ROTATED(tsptr) ?
- DMM_TILER_STRIDE_90_(16) : DMM_TILER_STRIDE_0_(16);
- case TILFMT_32BIT:
- return DMM_GET_ROTATED(tsptr) ?
- DMM_TILER_STRIDE_90_(32) : DMM_TILER_STRIDE_0_(32);
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(tiler_stride);
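The resulting byte strides, worked out from the 14-bit container width and 13-bit container height above:

	/*
	 *   8-bit:  0 deg (1 << 14) << 0 = 16384, 90 deg (1 << 13) << 0 = 8192
	 *   16-bit: 0 deg (1 << 14) << 1 = 32768, 90 deg (1 << 13) << 0 = 8192
	 *   32-bit: 0 deg (1 << 14) << 1 = 32768, 90 deg (1 << 13) << 1 = 16384
	 */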
-
-void tiler_rotate_view(struct tiler_view_orient *orient, u32 rotation)
-{
- rotation = (rotation / 90) & 3;
-
- if (rotation & 2) {
- orient->x_invert = !orient->x_invert;
- orient->y_invert = !orient->y_invert;
- }
-
- if (rotation & 1) {
- if (orient->rotate_90)
- orient->y_invert = !orient->y_invert;
- else
- orient->x_invert = !orient->x_invert;
- orient->rotate_90 = !orient->rotate_90;
- }
-}
-EXPORT_SYMBOL(tiler_rotate_view);
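A short worked call, for illustration:

	/* compose a 270-degree rotation onto the identity orientation */
	struct tiler_view_orient o = { 0 };
	tiler_rotate_view(&o, 270);
	/* rotation = 3: bit 1 flips both inverts, bit 0 then undoes
	   x_invert and sets rotate_90, so o ends up as { .rotate_90 = 1,
	   .x_invert = 0, .y_invert = 1 } */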
diff --git a/drivers/media/video/tiler/tmm-pat.c b/drivers/media/video/tiler/tmm-pat.c
new file mode 100644
index 000000000000..d33866b125ec
--- /dev/null
+++ b/drivers/media/video/tiler/tmm-pat.c
@@ -0,0 +1,300 @@
+/*
+ * tmm-pat.c
+ *
+ * DMM driver support functions for TI TILER hardware block.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>, David Sin <dsin@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <asm/cacheflush.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "tmm.h"
+
+static int param_set_mem(const char *val, struct kernel_param *kp);
+
+/* Memory limit up to which free pages are cached; TILER is expected to
+   eventually use this much memory */
+static u32 cache_limit = CONFIG_TILER_CACHE_LIMIT << 20;
+
+param_check_uint(cache, &cache_limit);
+module_param_call(cache, param_set_mem, param_get_uint, &cache_limit, 0644);
+__MODULE_PARM_TYPE(cache, "uint");
+MODULE_PARM_DESC(cache, "Cache free pages if total memory is under this limit");
+
+/* global state - statically initialized */
+static LIST_HEAD(free_list); /* page cache: list of free pages */
+static u32 total_mem; /* total memory allocated (free & used) */
+static u32 refs; /* number of tmm_pat instances */
+static DEFINE_MUTEX(mtx); /* global mutex */
+
+/* the page struct pointer and physical address of each page */
+struct mem {
+ struct list_head list;
+ struct page *pg; /* page struct */
+ u32 pa; /* physical address */
+};
+
+/* Used to keep track of mem per tmm_pat_get_pages call */
+struct fast {
+ struct list_head list;
+ struct mem **mem; /* array of page info */
+ u32 *pa; /* array of physical addresses */
+ u32 num; /* number of pages */
+};
+
+/* TMM PAT private structure */
+struct dmm_mem {
+ struct list_head fast_list;
+ struct dmm *dmm;
+};
+
+/* read mem values for a param */
+static int param_set_mem(const char *val, struct kernel_param *kp)
+{
+ u32 a;
+ char *p;
+
+ /* must specify memory */
+ if (!val)
+ return -EINVAL;
+
+ /* parse value */
+ a = memparse(val, &p);
+ if (p == val || *p)
+ return -EINVAL;
+
+ /* store parsed value */
+ *(uint *)kp->arg = a;
+ return 0;
+}
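Because the value goes through memparse(), the usual K/M/G suffixes work: loading the module with cache=40M caps the free-page cache at 40 MiB, and with the 0644 permissions above the same string can be written to the parameter file under /sys/module/<module>/parameters/cache at runtime (the module name is not shown in this patch).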
+
+/*
+ * Free the pages tracked by a fast structure. Pages are moved to the free
+ * list while total allocated memory is under cache_limit; otherwise they
+ * are freed outright.
+ */
+static void free_fast(struct fast *f)
+{
+ s32 i = 0;
+
+ /* mutex is locked */
+ for (i = 0; i < f->num; i++) {
+ if (total_mem < cache_limit) {
+ /* cache free page if under the limit */
+ list_add(&f->mem[i]->list, &free_list);
+ } else {
+ /* otherwise, free */
+ total_mem -= PAGE_SIZE;
+ __free_page(f->mem[i]->pg);
+ }
+ }
+ kfree(f->pa);
+ kfree(f->mem);
+ /* remove only if element was added */
+ if (f->list.next)
+ list_del(&f->list);
+ kfree(f);
+}
+
+/* allocate and flush a page */
+static struct mem *alloc_mem(void)
+{
+ struct mem *m = kmalloc(sizeof(*m), GFP_KERNEL);
+ if (!m)
+ return NULL;
+ memset(m, 0, sizeof(*m));
+
+ m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
+ if (!m->pg) {
+ kfree(m);
+ return NULL;
+ }
+
+ m->pa = page_to_phys(m->pg);
+
+ /* flush the cache entry for each page we allocate. */
+ dmac_flush_range(page_address(m->pg),
+ page_address(m->pg) + PAGE_SIZE);
+ outer_flush_range(m->pa, m->pa + PAGE_SIZE);
+
+ return m;
+}
+
+static void free_page_cache(void)
+{
+ struct mem *m, *m_;
+
+ /* mutex is locked */
+ list_for_each_entry_safe(m, m_, &free_list, list) {
+ __free_page(m->pg);
+ total_mem -= PAGE_SIZE;
+ list_del(&m->list);
+ kfree(m);
+ }
+}
+
+static void tmm_pat_deinit(struct tmm *tmm)
+{
+ struct fast *f, *f_;
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+ mutex_lock(&mtx);
+
+ /* free all outstanding used memory */
+ list_for_each_entry_safe(f, f_, &pvt->fast_list, list)
+ free_fast(f);
+
+ /* if this is the last tmm_pat, free all memory */
+ if (--refs == 0)
+ free_page_cache();
+
+ mutex_unlock(&mtx);
+}
+
+static u32 *tmm_pat_get_pages(struct tmm *tmm, u32 n)
+{
+ struct mem *m;
+ struct fast *f;
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+ memset(f, 0, sizeof(*f));
+
+ /* array of mem struct pointers */
+ f->mem = kmalloc(n * sizeof(*f->mem), GFP_KERNEL);
+
+ /* array of physical addresses */
+ f->pa = kmalloc(n * sizeof(*f->pa), GFP_KERNEL);
+
+ /* no pages have been allocated yet (needed for cleanup) */
+ f->num = 0;
+
+ if (!f->mem || !f->pa)
+ goto cleanup;
+
+ memset(f->mem, 0, n * sizeof(*f->mem));
+ memset(f->pa, 0, n * sizeof(*f->pa));
+
+ /* fill out fast struct mem array with free pages */
+ mutex_lock(&mtx);
+ while (f->num < n) {
+ /* if there is a free cached page use it */
+ if (!list_empty(&free_list)) {
+ /* unbind first element from list */
+ m = list_first_entry(&free_list, typeof(*m), list);
+ list_del(&m->list);
+ } else {
+ mutex_unlock(&mtx);
+
+			/* mutex stays unlocked while we allocate and flush */
+ m = alloc_mem();
+ if (!m)
+ goto cleanup;
+
+ mutex_lock(&mtx);
+ total_mem += PAGE_SIZE;
+ }
+
+ f->mem[f->num] = m;
+ f->pa[f->num++] = m->pa;
+ }
+
+ list_add(&f->list, &pvt->fast_list);
+ mutex_unlock(&mtx);
+ return f->pa;
+
+cleanup:
+ free_fast(f);
+ return NULL;
+}
+
+static void tmm_pat_free_pages(struct tmm *tmm, u32 *page_list)
+{
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+ struct fast *f, *f_;
+
+ mutex_lock(&mtx);
+ /* find fast struct based on 1st page */
+ list_for_each_entry_safe(f, f_, &pvt->fast_list, list) {
+ if (f->pa[0] == page_list[0]) {
+ free_fast(f);
+ break;
+ }
+ }
+ mutex_unlock(&mtx);
+}
+
+static s32 tmm_pat_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
+{
+ struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
+ struct pat pat_desc = {0};
+
+ /* send pat descriptor to dmm driver */
+ pat_desc.ctrl.dir = 0;
+ pat_desc.ctrl.ini = 0;
+ pat_desc.ctrl.lut_id = 0;
+ pat_desc.ctrl.start = 1;
+ pat_desc.ctrl.sync = 0;
+ pat_desc.area = area;
+ pat_desc.next = NULL;
+
+ /* must be a 16-byte aligned physical address */
+ pat_desc.data = page_pa;
+ return dmm_pat_refill(pvt->dmm, &pat_desc, MANUAL);
+}
+
+struct tmm *tmm_pat_init(u32 pat_id)
+{
+ struct tmm *tmm = NULL;
+ struct dmm_mem *pvt = NULL;
+
+ struct dmm *dmm = dmm_pat_init(pat_id);
+ if (dmm)
+ tmm = kmalloc(sizeof(*tmm), GFP_KERNEL);
+ if (tmm)
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (pvt) {
+ /* private data */
+ pvt->dmm = dmm;
+ INIT_LIST_HEAD(&pvt->fast_list);
+
+		/* increase the tmm_pat reference count */
+ mutex_lock(&mtx);
+ refs++;
+ mutex_unlock(&mtx);
+
+ /* public data */
+ tmm->pvt = pvt;
+ tmm->deinit = tmm_pat_deinit;
+ tmm->get = tmm_pat_get_pages;
+ tmm->free = tmm_pat_free_pages;
+ tmm->map = tmm_pat_map;
+ tmm->clear = NULL; /* not yet supported */
+
+ return tmm;
+ }
+
+ kfree(pvt);
+ kfree(tmm);
+ dmm_pat_release(dmm);
+ return NULL;
+}
+EXPORT_SYMBOL(tmm_pat_init);
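A minimal consumer sketch; the tmm_free() and tmm_deinit() wrappers are assumed to parallel the tmm_get() inline shown in tmm.h below:

	struct tmm *tmm = tmm_pat_init(0);
	if (tmm) {
		u32 *pages = tmm_get(tmm, 16);	/* 16 physical page addresses */
		if (pages)
			tmm_free(tmm, pages);
		tmm_deinit(tmm);
	}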
diff --git a/drivers/media/video/dmm/tmm.h b/drivers/media/video/tiler/tmm.h
index deaeca51a905..fbdc1e23d0e8 100644
--- a/drivers/media/video/dmm/tmm.h
+++ b/drivers/media/video/tiler/tmm.h
@@ -1,7 +1,9 @@
/*
* tmm.h
*
- * TMM interface definition for TI TILER.
+ * TMM interface definition for TI TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
*
* Copyright (C) 2009-2010 Texas Instruments, Inc.
*
@@ -24,11 +26,11 @@ struct tmm {
void *pvt;
/* function table */
- u32 *(*get) (struct tmm *tmm, s32 num_pages);
- void (*free) (struct tmm *tmm, u32 *pages);
- s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
- void (*clear) (struct tmm *tmm, struct pat_area area);
- void (*deinit) (struct tmm *tmm);
+ u32 *(*get) (struct tmm *tmm, u32 num_pages);
+ void (*free) (struct tmm *tmm, u32 *pages);
+ s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
+ void (*clear) (struct tmm *tmm, struct pat_area area);
+ void (*deinit) (struct tmm *tmm);
};
/**
@@ -36,7 +38,7 @@ struct tmm {
* @return a pointer to a list of physical page addresses.
*/
static inline
-u32 *tmm_get(struct tmm *tmm, s32 num_pages)
+u32 *tmm_get(struct tmm *tmm, u32 num_pages)
{
if (tmm && tmm->pvt)
return tmm->get(tmm, num_pages);