From 355c54d2e70093f09910d2ecf343023aefc219e1 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:02 -0700 Subject: sgi-xp: define is_shub() and is_uv() macros Define the is_shub()/is_uv() macros if they've not already been defined. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 03a87a307e32..83627eac4128 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -25,6 +25,22 @@ #define DBUG_ON(condition) #endif +#ifndef is_shub1 +#define is_shub1() 0 +#endif + +#ifndef is_shub2 +#define is_shub2() 0 +#endif + +#ifndef is_shub +#define is_shub() (is_shub1() || is_shub2()) +#endif + +#ifndef is_uv +#define is_uv() 0 +#endif + /* * Define the maximum number of logically defined partitions the system * can support. It is constrained by the maximum number of hardware -- cgit v1.2.3 From da9705259848b968cdf6151b977334fe7b5b0461 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:03 -0700 Subject: sgi-xp: define xpSalError reason code Define xpSalError reason code. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 83627eac4128..21cb8a31def1 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -249,8 +249,9 @@ enum xp_retval { xpDisconnected, /* 51: channel disconnected (closed) */ xpBteCopyError, /* 52: bte_copy() returned error */ + xpSalError, /* 53: sn SAL error */ - xpUnknownReason /* 53: unknown reason - must be last in enum */ + xpUnknownReason /* 54: unknown reason - must be last in enum */ }; /* -- cgit v1.2.3 From 78ce1bbe446e9b46dcd6c1e60a4768448a8ce355 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:03 -0700 Subject: sgi-xp: define BYTES_PER_WORD Add a BYTES_PER_WORD #define. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 21cb8a31def1..867fb4863d5a 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -19,6 +19,9 @@ #include #include +/* >>> Add this #define to some linux header file some day. */ +#define BYTES_PER_WORD sizeof(void *) + #ifdef USE_DBUG_ON #define DBUG_ON(condition) BUG_ON(condition) #else -- cgit v1.2.3 From bc63d387e4f5dbbe4ea0c5ade862c38073fd7fa3 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:04 -0700 Subject: sgi-xp: support runtime selection of xp_max_npartitions Support runtime selection of the max number of partitions based on the hardware being run on. 
Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/Makefile | 3 +- drivers/misc/sgi-xp/xp.h | 53 ++++++++++++------- drivers/misc/sgi-xp/xp_main.c | 84 ++++++++++++----------------- drivers/misc/sgi-xp/xp_sn2.c | 92 ++++++++++++++++++++++++++++++++ drivers/misc/sgi-xp/xp_uv.c | 30 +++++++++++ drivers/misc/sgi-xp/xpc.h | 12 +++-- drivers/misc/sgi-xp/xpc_channel.c | 20 +++---- drivers/misc/sgi-xp/xpc_main.c | 103 +++++++++++++++++------------------- drivers/misc/sgi-xp/xpc_partition.c | 16 ++---- drivers/misc/sgi-xp/xpnet.c | 4 +- 10 files changed, 266 insertions(+), 151 deletions(-) create mode 100644 drivers/misc/sgi-xp/xp_sn2.c create mode 100644 drivers/misc/sgi-xp/xp_uv.c (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile index b6e40a7958ce..b50f29217813 100644 --- a/drivers/misc/sgi-xp/Makefile +++ b/drivers/misc/sgi-xp/Makefile @@ -3,7 +3,8 @@ # obj-$(CONFIG_SGI_XP) += xp.o -xp-y := xp_main.o xp_nofault.o +xp-y := xp_main.o xp_uv.o +xp-$(CONFIG_IA64) += xp_sn2.o xp_nofault.o obj-$(CONFIG_SGI_XP) += xpc.o xpc-y := xpc_main.o xpc_channel.o xpc_partition.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 867fb4863d5a..51087e111887 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -18,6 +18,9 @@ #include #include #include +#ifdef CONFIG_IA64 +#include +#endif /* >>> Add this #define to some linux header file some day. */ #define BYTES_PER_WORD sizeof(void *) @@ -45,17 +48,18 @@ #endif /* - * Define the maximum number of logically defined partitions the system - * can support. It is constrained by the maximum number of hardware - * partitionable regions. The term 'region' in this context refers to the - * minimum number of nodes that can comprise an access protection grouping. - * The access protection is in regards to memory, IPI and IOI. + * Define the maximum number of partitions the system can possibly support. + * It is based on the maximum number of hardware partitionable regions. The + * term 'region' in this context refers to the minimum number of nodes that + * can comprise an access protection grouping. The access protection is in + * regards to memory, IPI and IOI. * * The maximum number of hardware partitionable regions is equal to the * maximum number of nodes in the entire system divided by the minimum number * of nodes that comprise an access protection grouping. */ -#define XP_MAX_PARTITIONS 64 +#define XP_MAX_NPARTITIONS_SN2 64 +#define XP_MAX_NPARTITIONS_UV 256 /* * Define the number of u64s required to represent all the C-brick nasids @@ -112,24 +116,28 @@ xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) * other partition that is currently up. Over these channels, kernel-level * `users' can communicate with their counterparts on the other partitions. * - * The maxinum number of channels is limited to eight. For performance reasons, - * the internal cross partition structures require sixteen bytes per channel, - * and eight allows all of this interface-shared info to fit in one cache line. +>>> The following described limitation of a max of eight channels possible +>>> pertains only to ia64-sn2. THIS ISN'T TRUE SINCE I'M PLANNING TO JUST +>>> TIE INTO THE EXISTING MECHANISM ONCE THE CHANNEL MESSAGES ARE RECEIVED. +>>> THE 128-BYTE CACHELINE PERFORMANCE ISSUE IS TIED TO IA64-SN2. * - * XPC_NCHANNELS reflects the total number of channels currently defined. 
* If the need for additional channels arises, one can simply increase - * XPC_NCHANNELS accordingly. If the day should come where that number - * exceeds the MAXIMUM number of channels allowed (eight), then one will need - * to make changes to the XPC code to allow for this. + * XPC_MAX_NCHANNELS accordingly. If the day should come where that number + * exceeds the absolute MAXIMUM number of channels possible (eight), then one + * will need to make changes to the XPC code to accommodate for this. + * + * The absolute maximum number of channels possible is currently limited to + * eight for performance reasons. The internal cross partition structures + * require sixteen bytes per channel, and eight allows all of this + * interface-shared info to fit in one 128-byte cacheline. */ #define XPC_MEM_CHANNEL 0 /* memory channel number */ #define XPC_NET_CHANNEL 1 /* network channel number */ -#define XPC_NCHANNELS 2 /* #of defined channels */ -#define XPC_MAX_NCHANNELS 8 /* max #of channels allowed */ +#define XPC_MAX_NCHANNELS 2 /* max #of channels allowed */ -#if XPC_NCHANNELS > XPC_MAX_NCHANNELS -#error XPC_NCHANNELS exceeds MAXIMUM allowed. +#if XPC_MAX_NCHANNELS > 8 +#error XPC_MAX_NCHANNELS exceeds absolute MAXIMUM possible. #endif /* @@ -254,7 +262,8 @@ enum xp_retval { xpBteCopyError, /* 52: bte_copy() returned error */ xpSalError, /* 53: sn SAL error */ - xpUnknownReason /* 54: unknown reason - must be last in enum */ + xpUnsupported, /* 54: unsupported functionality or resource */ + xpUnknownReason /* 55: unknown reason - must be last in enum */ }; /* @@ -397,8 +406,16 @@ xpc_partid_to_nasids(short partid, void *nasids) return xpc_interface.partid_to_nasids(partid, nasids); } +extern short xp_max_npartitions; + extern u64 xp_nofault_PIOR_target; extern int xp_nofault_PIOR(void *); extern int xp_error_PIOR(void); +extern struct device *xp; +extern enum xp_retval xp_init_sn2(void); +extern enum xp_retval xp_init_uv(void); +extern void xp_exit_sn2(void); +extern void xp_exit_uv(void); + #endif /* _DRIVERS_MISC_SGIXP_XP_H */ diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 196480b691a1..c5cec606377d 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -15,28 +15,32 @@ */ #include -#include #include -#include -#include -#include +#include #include "xp.h" -/* - * The export of xp_nofault_PIOR needs to happen here since it is defined - * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is - * defined here. - */ -EXPORT_SYMBOL_GPL(xp_nofault_PIOR); +/* define the XP debug device structures to be used with dev_dbg() et al */ -u64 xp_nofault_PIOR_target; -EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target); +struct device_driver xp_dbg_name = { + .name = "xp" +}; + +struct device xp_dbg_subname = { + .bus_id = {0}, /* set to "" */ + .driver = &xp_dbg_name +}; + +struct device *xp = &xp_dbg_subname; + +/* max #of partitions possible */ +short xp_max_npartitions; +EXPORT_SYMBOL_GPL(xp_max_npartitions); /* * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level * users of XPC. 
*/ -struct xpc_registration xpc_registrations[XPC_NCHANNELS]; +struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; EXPORT_SYMBOL_GPL(xpc_registrations); /* @@ -135,7 +139,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, { struct xpc_registration *registration; - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); DBUG_ON(payload_size == 0 || nentries == 0); DBUG_ON(func == NULL); DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); @@ -185,7 +189,7 @@ xpc_disconnect(int ch_number) { struct xpc_registration *registration; - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); registration = &xpc_registrations[ch_number]; @@ -221,39 +225,21 @@ EXPORT_SYMBOL_GPL(xpc_disconnect); int __init xp_init(void) { - int ret, ch_number; - u64 func_addr = *(u64 *)xp_nofault_PIOR; - u64 err_func_addr = *(u64 *)xp_error_PIOR; - - if (!ia64_platform_is("sn2")) - return -ENODEV; + enum xp_retval ret; + int ch_number; - /* - * Register a nofault code region which performs a cross-partition - * PIO read. If the PIO read times out, the MCA handler will consume - * the error and return to a kernel-provided instruction to indicate - * an error. This PIO read exists because it is guaranteed to timeout - * if the destination is down (AMO operations do not timeout on at - * least some CPUs on Shubs <= v1.2, which unfortunately we have to - * work around). - */ - ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, - 1, 1); - if (ret != 0) { - printk(KERN_ERR "XP: can't register nofault code, error=%d\n", - ret); - } - /* - * Setup the nofault PIO read target. (There is no special reason why - * SH_IPI_ACCESS was selected.) - */ - if (is_shub2()) - xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + if (is_shub()) + ret = xp_init_sn2(); + else if (is_uv()) + ret = xp_init_uv(); else - xp_nofault_PIOR_target = SH1_IPI_ACCESS; + ret = xpUnsupported; + + if (ret != xpSuccess) + return -ENODEV; /* initialize the connection registration mutex */ - for (ch_number = 0; ch_number < XPC_NCHANNELS; ch_number++) + for (ch_number = 0; ch_number < XPC_MAX_NCHANNELS; ch_number++) mutex_init(&xpc_registrations[ch_number].mutex); return 0; @@ -264,12 +250,10 @@ module_init(xp_init); void __exit xp_exit(void) { - u64 func_addr = *(u64 *)xp_nofault_PIOR; - u64 err_func_addr = *(u64 *)xp_error_PIOR; - - /* unregister the PIO read nofault code region */ - (void)sn_register_nofault_code(func_addr, err_func_addr, - err_func_addr, 1, 0); + if (is_shub()) + xp_exit_sn2(); + else if (is_uv()) + xp_exit_uv(); } module_exit(xp_exit); diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c new file mode 100644 index 000000000000..b92579356a64 --- /dev/null +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -0,0 +1,92 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) sn2-based functions. + * + * Architecture specific implementation of common functions. + */ + +#include +#include +#include "xp.h" + +/* + * The export of xp_nofault_PIOR needs to happen here since it is defined + * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is + * defined here. 
+ */ +EXPORT_SYMBOL_GPL(xp_nofault_PIOR); + +u64 xp_nofault_PIOR_target; +EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target); + +/* + * Register a nofault code region which performs a cross-partition PIO read. + * If the PIO read times out, the MCA handler will consume the error and + * return to a kernel-provided instruction to indicate an error. This PIO read + * exists because it is guaranteed to timeout if the destination is down + * (AMO operations do not timeout on at least some CPUs on Shubs <= v1.2, + * which unfortunately we have to work around). + */ +static enum xp_retval +xp_register_nofault_code_sn2(void) +{ + int ret; + u64 func_addr; + u64 err_func_addr; + + func_addr = *(u64 *)xp_nofault_PIOR; + err_func_addr = *(u64 *)xp_error_PIOR; + ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr, + 1, 1); + if (ret != 0) { + dev_err(xp, "can't register nofault code, error=%d\n", ret); + return xpSalError; + } + /* + * Setup the nofault PIO read target. (There is no special reason why + * SH_IPI_ACCESS was selected.) + */ + if (is_shub1()) + xp_nofault_PIOR_target = SH1_IPI_ACCESS; + else if (is_shub2()) + xp_nofault_PIOR_target = SH2_IPI_ACCESS0; + + return xpSuccess; +} + +void +xp_unregister_nofault_code_sn2(void) +{ + u64 func_addr = *(u64 *)xp_nofault_PIOR; + u64 err_func_addr = *(u64 *)xp_error_PIOR; + + /* unregister the PIO read nofault code region */ + (void)sn_register_nofault_code(func_addr, err_func_addr, + err_func_addr, 1, 0); +} + +enum xp_retval +xp_init_sn2(void) +{ + BUG_ON(!is_shub()); + + xp_max_npartitions = XP_MAX_NPARTITIONS_SN2; + + return xp_register_nofault_code_sn2(); +} + +void +xp_exit_sn2(void) +{ + BUG_ON(!is_shub()); + + xp_unregister_nofault_code_sn2(); +} + diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c new file mode 100644 index 000000000000..30888be2cdb0 --- /dev/null +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -0,0 +1,30 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition (XP) uv-based functions. + * + * Architecture specific implementation of common functions. + * + */ + +#include "xp.h" + +enum xp_retval +xp_init_uv(void) +{ + BUG_ON(!is_uv()); + + xp_max_npartitions = XP_MAX_NPARTITIONS_UV; +} + +void +xp_exit_uv(void) +{ + BUG_ON(!is_uv()); +} diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 11ac267ed68f..0f2affd01df1 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -210,7 +210,7 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars) * the XPC running on the remote partition). 
*/ #define XPC_NOTIFY_IRQ_AMOS 0 -#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS) +#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2) #define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) #define XPC_DISENGAGE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) @@ -285,7 +285,7 @@ struct xpc_gp { }; #define XPC_GP_SIZE \ - L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS) + L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_MAX_NCHANNELS) /* * Define a structure that contains arguments associated with opening and @@ -300,7 +300,8 @@ struct xpc_openclose_args { }; #define XPC_OPENCLOSE_ARGS_SIZE \ - L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS) + L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \ + XPC_MAX_NCHANNELS) /* struct xpc_msg flags */ @@ -637,7 +638,7 @@ extern int xpc_exiting; extern struct xpc_vars *xpc_vars; extern struct xpc_rsvd_page *xpc_rsvd_page; extern struct xpc_vars_part *xpc_vars_part; -extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; +extern struct xpc_partition *xpc_partitions; extern char *xpc_remote_copy_buffer; extern void *xpc_remote_copy_buffer_base; extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); @@ -1104,13 +1105,14 @@ xpc_IPI_send_local_msgrequest(struct xpc_channel *ch) } /* +>>> this block comment needs to be moved and re-written. * Memory for XPC's AMO variables is allocated by the MSPEC driver. These * pages are located in the lowest granule. The lowest granule uses 4k pages * for cached references and an alternate TLB handler to never provide a * cacheable mapping for the entire region. This will prevent speculative * reading of cached copies of our lines from being issued which will cause * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 - * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an + * AMO variables (based on xp_max_npartitions) for message notification and an * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition * activation and 2 AMO variables for partition deactivation. */ diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 9c90c2d55c08..12d8eb6957a7 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -110,14 +110,14 @@ xpc_setup_infrastructure(struct xpc_partition *part) * Allocate all of the channel structures as a contiguous chunk of * memory. 
*/ - part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_NCHANNELS, + part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, GFP_KERNEL); if (part->channels == NULL) { dev_err(xpc_chan, "can't get memory for channels\n"); return xpNoMemory; } - part->nchannels = XPC_NCHANNELS; + part->nchannels = XPC_MAX_NCHANNELS; /* allocate all the required GET/PUT values */ @@ -1432,9 +1432,9 @@ xpc_initiate_connect(int ch_number) struct xpc_partition *part; struct xpc_channel *ch; - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; if (xpc_part_ref(part)) { @@ -1488,10 +1488,10 @@ xpc_initiate_disconnect(int ch_number) struct xpc_partition *part; struct xpc_channel *ch; - DBUG_ON(ch_number < 0 || ch_number >= XPC_NCHANNELS); + DBUG_ON(ch_number < 0 || ch_number >= XPC_MAX_NCHANNELS); /* initiate the channel disconnect for every active partition */ - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; if (xpc_part_ref(part)) { @@ -1734,7 +1734,7 @@ xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload) enum xp_retval ret = xpUnknownReason; struct xpc_msg *msg = NULL; - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); *payload = NULL; @@ -1918,7 +1918,7 @@ xpc_initiate_send(short partid, int ch_number, void *payload) dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, partid, ch_number); - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); DBUG_ON(msg == NULL); @@ -1968,7 +1968,7 @@ xpc_initiate_send_notify(short partid, int ch_number, void *payload, dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, partid, ch_number); - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); DBUG_ON(msg == NULL); DBUG_ON(func == NULL); @@ -2210,7 +2210,7 @@ xpc_initiate_received(short partid, int ch_number, void *payload) struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); s64 get, msg_number = msg->number; - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); ch = &part->channels[ch_number]; diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index c3b4227f48a5..a05c7c7da228 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -433,7 +433,7 @@ xpc_activating(void *__partid) struct xpc_partition *part = &xpc_partitions[partid]; unsigned long irq_flags; - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); spin_lock_irqsave(&part->act_lock, irq_flags); @@ -544,7 +544,7 @@ xpc_notify_IRQ_handler(int irq, void *dev_id) short partid = (short)(u64)dev_id; struct xpc_partition *part = &xpc_partitions[partid]; - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); if (xpc_part_ref(part)) { xpc_check_for_channel_activity(part); @@ -815,7 
+815,7 @@ xpc_disconnect_wait(int ch_number) int wakeup_channel_mgr; /* now wait for all callouts to the caller's function to cease */ - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; if (!xpc_part_ref(part)) @@ -895,7 +895,7 @@ xpc_do_exit(enum xp_retval reason) do { active_part_count = 0; - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; if (xpc_partition_disengaged(part) && @@ -956,11 +956,8 @@ xpc_do_exit(enum xp_retval reason) DBUG_ON(xpc_vars->heartbeating_to_mask != 0); if (reason == xpUnloading) { - /* take ourselves off of the reboot_notifier_list */ - (void)unregister_reboot_notifier(&xpc_reboot_notifier); - - /* take ourselves off of the die_notifier list */ (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); } /* close down protections for IPI operations */ @@ -972,6 +969,7 @@ xpc_do_exit(enum xp_retval reason) if (xpc_sysctl) unregister_sysctl_table(xpc_sysctl); + kfree(xpc_partitions); kfree(xpc_remote_copy_buffer_base); } @@ -1017,7 +1015,7 @@ xpc_die_disengage(void) xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */ - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part-> @@ -1053,7 +1051,8 @@ xpc_die_disengage(void) time = rtc_time(); if (time >= disengage_request_timeout) { - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; + partid++) { if (engaged & (1UL << partid)) { dev_info(xpc_part, "disengage from " "remote partition %d timed " @@ -1132,18 +1131,26 @@ xpc_init(void) if (!ia64_platform_is("sn2")) return -ENODEV; + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); + buf_size = max(XPC_RP_VARS_SIZE, XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, GFP_KERNEL, &xpc_remote_copy_buffer_base); - if (xpc_remote_copy_buffer == NULL) + if (xpc_remote_copy_buffer == NULL) { + dev_err(xpc_part, "can't get memory for remote copy buffer\n"); return -ENOMEM; + } - snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); - snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); - - xpc_sysctl = register_sysctl_table(xpc_sys_dir); + xpc_partitions = kzalloc(sizeof(struct xpc_partition) * + xp_max_npartitions, GFP_KERNEL); + if (xpc_partitions == NULL) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + ret = -ENOMEM; + goto out_1; + } /* * The first few fields of each entry of xpc_partitions[] need to @@ -1153,7 +1160,7 @@ xpc_init(void) * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING * PARTITION HAS BEEN ACTIVATED. */ - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { part = &xpc_partitions[partid]; DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); @@ -1173,6 +1180,8 @@ xpc_init(void) atomic_set(&part->references, 0); } + xpc_sysctl = register_sysctl_table(xpc_sys_dir); + /* * Open up protections for IPI operations (and AMO operations on * Shub 1.1 systems). 
@@ -1196,14 +1205,8 @@ xpc_init(void) if (ret != 0) { dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " "errno=%d\n", -ret); - - xpc_restrict_IPI_ops(); - - if (xpc_sysctl) - unregister_sysctl_table(xpc_sysctl); - - kfree(xpc_remote_copy_buffer_base); - return -EBUSY; + ret = -EBUSY; + goto out_2; } /* @@ -1213,16 +1216,9 @@ xpc_init(void) */ xpc_rsvd_page = xpc_rsvd_page_init(); if (xpc_rsvd_page == NULL) { - dev_err(xpc_part, "could not setup our reserved page\n"); - - free_irq(SGI_XPC_ACTIVATE, NULL); - xpc_restrict_IPI_ops(); - - if (xpc_sysctl) - unregister_sysctl_table(xpc_sysctl); - - kfree(xpc_remote_copy_buffer_base); - return -EBUSY; + dev_err(xpc_part, "can't setup our reserved page\n"); + ret = -EBUSY; + goto out_3; } /* add ourselves to the reboot_notifier_list */ @@ -1245,25 +1241,8 @@ xpc_init(void) kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME); if (IS_ERR(kthread)) { dev_err(xpc_part, "failed while forking hb check thread\n"); - - /* indicate to others that our reserved page is uninitialized */ - xpc_rsvd_page->vars_pa = 0; - - /* take ourselves off of the reboot_notifier_list */ - (void)unregister_reboot_notifier(&xpc_reboot_notifier); - - /* take ourselves off of the die_notifier list */ - (void)unregister_die_notifier(&xpc_die_notifier); - - del_timer_sync(&xpc_hb_timer); - free_irq(SGI_XPC_ACTIVATE, NULL); - xpc_restrict_IPI_ops(); - - if (xpc_sysctl) - unregister_sysctl_table(xpc_sysctl); - - kfree(xpc_remote_copy_buffer_base); - return -EBUSY; + ret = -EBUSY; + goto out_4; } /* @@ -1290,6 +1269,24 @@ xpc_init(void) xpc_initiate_partid_to_nasids); return 0; + + /* initialization was not successful */ +out_4: + /* indicate to others that our reserved page is uninitialized */ + xpc_rsvd_page->vars_pa = 0; + del_timer_sync(&xpc_hb_timer); + (void)unregister_die_notifier(&xpc_die_notifier); + (void)unregister_reboot_notifier(&xpc_reboot_notifier); +out_3: + free_irq(SGI_XPC_ACTIVATE, NULL); +out_2: + xpc_restrict_IPI_ops(); + if (xpc_sysctl) + unregister_sysctl_table(xpc_sysctl); + kfree(xpc_partitions); +out_1: + kfree(xpc_remote_copy_buffer_base); + return ret; } module_init(xpc_init); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 7dd4b5812c42..02a858eddd8d 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -51,13 +51,7 @@ struct xpc_vars_part *xpc_vars_part; static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ static int xp_nasid_mask_words; /* actual size in words of nasid mask */ -/* - * For performance reasons, each entry of xpc_partitions[] is cacheline - * aligned. And xpc_partitions[] is padded with an additional entry at the - * end so that the last legitimate entry doesn't share its cacheline with - * another variable. 
- */ -struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1]; +struct xpc_partition *xpc_partitions; /* * Generic buffer used to store a local copy of portions of a remote @@ -261,7 +255,7 @@ xpc_rsvd_page_init(void) /* clear xpc_vars_part */ memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * - XP_MAX_PARTITIONS); + xp_max_npartitions); /* initialize the activate IRQ related AMO variables */ for (i = 0; i < xp_nasid_mask_words; i++) @@ -408,7 +402,7 @@ xpc_check_remote_hb(void) remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; - for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) { + for (partid = 0; partid < xp_max_npartitions; partid++) { if (xpc_exiting) break; @@ -487,10 +481,8 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, /* check that the partid is for another partition */ - if (remote_rp->partid < 1 || - remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { + if (remote_rp->partid < 0 || remote_rp->partid >= xp_max_npartitions) return xpInvalidPartid; - } if (remote_rp->partid == sn_partition_id) return xpLocalPartid; diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 822dc8e8d7f0..cc252f201b25 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -287,7 +287,7 @@ xpnet_connection_activity(enum xp_retval reason, short partid, int channel, { long bp; - DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); + DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(channel != XPC_NET_CHANNEL); switch (reason) { @@ -513,7 +513,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* * Main send loop. */ - for (dest_partid = 1; dp && dest_partid < XP_MAX_PARTITIONS; + for (dest_partid = 0; dp && dest_partid < xp_max_npartitions; dest_partid++) { if (!(dp & (1UL << (dest_partid - 1)))) { -- cgit v1.2.3 From 908787db9b95f548270af18d83d62b9d2020ca10 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:05 -0700 Subject: sgi-xp: create a common xp_remote_memcpy() function Create a common remote memcpy function that maps to what the hardware booted supports. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 43 ++--------------------------- drivers/misc/sgi-xp/xp_main.c | 3 ++ drivers/misc/sgi-xp/xp_sn2.c | 46 +++++++++++++++++++++++++++++++ drivers/misc/sgi-xp/xp_uv.c | 11 ++++++++ drivers/misc/sgi-xp/xpc.h | 7 ----- drivers/misc/sgi-xp/xpc_channel.c | 20 ++++++-------- drivers/misc/sgi-xp/xpc_partition.c | 55 ++++++++++++++++--------------------- drivers/misc/sgi-xp/xpnet.c | 28 +++++++++---------- 8 files changed, 107 insertions(+), 106 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 51087e111887..c42196a1a6b7 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -17,7 +17,6 @@ #include #include #include -#include #ifdef CONFIG_IA64 #include #endif @@ -71,46 +70,6 @@ #define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) #define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) -/* - * Wrapper for bte_copy() that should it return a failure status will retry - * the bte_copy() once in the hope that the failure was due to a temporary - * aberration (i.e., the link going down temporarily). - * - * src - physical address of the source of the transfer. - * vdst - virtual address of the destination of the transfer. - * len - number of bytes to transfer from source to destination. 
- * mode - see bte_copy() for definition. - * notification - see bte_copy() for definition. - * - * Note: xp_bte_copy() should never be called while holding a spinlock. - */ -static inline bte_result_t -xp_bte_copy(u64 src, u64 vdst, u64 len, u64 mode, void *notification) -{ - bte_result_t ret; - u64 pdst = ia64_tpa(vdst); - - /* - * Ensure that the physically mapped memory is contiguous. - * - * We do this by ensuring that the memory is from region 7 only. - * If the need should arise to use memory from one of the other - * regions, then modify the BUG_ON() statement to ensure that the - * memory from that region is always physically contiguous. - */ - BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); - - ret = bte_copy(src, pdst, len, mode, notification); - if ((ret != BTE_SUCCESS) && BTE_ERROR_RETRY(ret)) { - if (!in_interrupt()) - cond_resched(); - - ret = bte_copy(src, pdst, len, mode, notification); - } - - return ret; -} - /* * XPC establishes channel connections between the local partition and any * other partition that is currently up. Over these channels, kernel-level @@ -408,6 +367,8 @@ xpc_partid_to_nasids(short partid, void *nasids) extern short xp_max_npartitions; +extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t); + extern u64 xp_nofault_PIOR_target; extern int xp_nofault_PIOR(void *); extern int xp_error_PIOR(void); diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index c5cec606377d..6f25613b27e3 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -36,6 +36,9 @@ struct device *xp = &xp_dbg_subname; short xp_max_npartitions; EXPORT_SYMBOL_GPL(xp_max_npartitions); +enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len); +EXPORT_SYMBOL_GPL(xp_remote_memcpy); + /* * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level * users of XPC. diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index b92579356a64..3d553fa73f4d 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -13,6 +13,7 @@ */ #include +#include #include #include "xp.h" @@ -72,6 +73,49 @@ xp_unregister_nofault_code_sn2(void) err_func_addr, 1, 0); } +/* + * Wrapper for bte_copy(). + * + * vdst - virtual address of the destination of the transfer. + * psrc - physical address of the source of the transfer. + * len - number of bytes to transfer from source to destination. + * + * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock. + */ +static enum xp_retval +xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len) +{ + bte_result_t ret; + u64 pdst = ia64_tpa(vdst); + /* >>> What are the rules governing the src and dst addresses passed in? + * >>> Currently we're assuming that dst is a virtual address and src + * >>> is a physical address, is this appropriate? Can we allow them to + * >>> be whatever and we make the change here without damaging the + * >>> addresses? + */ + + /* + * Ensure that the physically mapped memory is contiguous. + * + * We do this by ensuring that the memory is from region 7 only. + * If the need should arise to use memory from one of the other + * regions, then modify the BUG_ON() statement to ensure that the + * memory from that region is always physically contiguous. 
+ */ + BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); + + ret = bte_copy((u64)psrc, pdst, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL); + if (ret == BTE_SUCCESS) + return xpSuccess; + + if (is_shub2()) + dev_err(xp, "bte_copy() on shub2 failed, error=0x%x\n", ret); + else + dev_err(xp, "bte_copy() failed, error=%d\n", ret); + + return xpBteCopyError; +} + enum xp_retval xp_init_sn2(void) { @@ -79,6 +123,8 @@ xp_init_sn2(void) xp_max_npartitions = XP_MAX_NPARTITIONS_SN2; + xp_remote_memcpy = xp_remote_memcpy_sn2; + return xp_register_nofault_code_sn2(); } diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index 30888be2cdb0..dca519fdef98 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -15,12 +15,23 @@ #include "xp.h" +static enum xp_retval +xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len) +{ + /* >>> this function needs fleshing out */ + return xpUnsupported; +} + enum xp_retval xp_init_uv(void) { BUG_ON(!is_uv()); xp_max_npartitions = XP_MAX_NPARTITIONS_UV; + + xp_remote_memcpy = xp_remote_memcpy_uv; + + return xpSuccess; } void diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 0f2affd01df1..60388bed7701 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -1125,12 +1124,6 @@ xpc_IPI_init(int index) return amo; } -static inline enum xp_retval -xpc_map_bte_errors(bte_result_t error) -{ - return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError); -} - /* * Check to see if there is any channel activity to/from the specified * partition. diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 12d8eb6957a7..9e79ad7eafe5 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include "xpc.h" @@ -252,13 +251,13 @@ xpc_setup_infrastructure(struct xpc_partition *part) * * src must be a cacheline aligned physical address on the remote partition. * dst must be a cacheline aligned virtual address on this partition. 
- * cnt must be an cacheline sized + * cnt must be cacheline sized */ static enum xp_retval xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, const void *src, size_t cnt) { - bte_result_t bte_ret; + enum xp_retval ret; DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); @@ -267,15 +266,12 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, if (part->act_state == XPC_P_DEACTIVATING) return part->reason; - bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, - (BTE_NORMAL | BTE_WACQUIRE), NULL); - if (bte_ret == BTE_SUCCESS) - return xpSuccess; - - dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", - XPC_PARTID(part), bte_ret); - - return xpc_map_bte_errors(bte_ret); + ret = xp_remote_memcpy(dst, src, cnt); + if (ret != xpSuccess) { + dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," + " ret=%d\n", XPC_PARTID(part), ret); + } + return ret; } /* diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 02a858eddd8d..6c82f2050974 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -92,7 +91,7 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) static u64 xpc_get_rsvd_page_pa(int nasid) { - bte_result_t bte_res; + enum xp_retval ret; s64 status; u64 cookie = 0; u64 rp_pa = nasid; /* seed with nasid */ @@ -113,6 +112,7 @@ xpc_get_rsvd_page_pa(int nasid) if (status != SALRET_MORE_PASSES) break; + /* >>> L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ if (L1_CACHE_ALIGN(len) > buf_len) { kfree(buf_base); buf_len = L1_CACHE_ALIGN(len); @@ -127,10 +127,9 @@ xpc_get_rsvd_page_pa(int nasid) } } - bte_res = xp_bte_copy(rp_pa, buf, buf_len, - (BTE_NOTIFY | BTE_WACQUIRE), NULL); - if (bte_res != BTE_SUCCESS) { - dev_dbg(xpc_part, "xp_bte_copy failed %i\n", bte_res); + ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len); + if (ret != xpSuccess) { + dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); status = SALRET_ERROR; break; } @@ -398,7 +397,7 @@ xpc_check_remote_hb(void) struct xpc_vars *remote_vars; struct xpc_partition *part; short partid; - bte_result_t bres; + enum xp_retval ret; remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; @@ -418,13 +417,11 @@ xpc_check_remote_hb(void) } /* pull the remote_hb cache line */ - bres = xp_bte_copy(part->remote_vars_pa, - (u64)remote_vars, - XPC_RP_VARS_SIZE, - (BTE_NOTIFY | BTE_WACQUIRE), NULL); - if (bres != BTE_SUCCESS) { - XPC_DEACTIVATE_PARTITION(part, - xpc_map_bte_errors(bres)); + ret = xp_remote_memcpy(remote_vars, + (void *)part->remote_vars_pa, + XPC_RP_VARS_SIZE); + if (ret != xpSuccess) { + XPC_DEACTIVATE_PARTITION(part, ret); continue; } @@ -457,7 +454,8 @@ static enum xp_retval xpc_get_remote_rp(int nasid, u64 *discovered_nasids, struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) { - int bres, i; + int i; + enum xp_retval ret; /* get the reserved page's physical address */ @@ -466,11 +464,10 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, return xpNoRsvdPageAddr; /* pull over the reserved page header and part_nasids mask */ - bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, - XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes, - (BTE_NOTIFY | BTE_WACQUIRE), NULL); - if (bres != BTE_SUCCESS) - return xpc_map_bte_errors(bres); + ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, + XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes); + if (ret != 
xpSuccess) + return ret; if (discovered_nasids != NULL) { u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); @@ -504,16 +501,16 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, static enum xp_retval xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) { - int bres; + enum xp_retval ret; if (remote_vars_pa == 0) return xpVarsNotSet; /* pull over the cross partition variables */ - bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, - (BTE_NOTIFY | BTE_WACQUIRE), NULL); - if (bres != BTE_SUCCESS) - return xpc_map_bte_errors(bres); + ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa, + XPC_RP_VARS_SIZE); + if (ret != xpSuccess) + return ret; if (XPC_VERSION_MAJOR(remote_vars->version) != XPC_VERSION_MAJOR(XPC_V_VERSION)) { @@ -1148,7 +1145,6 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) { struct xpc_partition *part; u64 part_nasid_pa; - int bte_res; part = &xpc_partitions[partid]; if (part->remote_rp_pa == 0) @@ -1158,9 +1154,6 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); - bte_res = xp_bte_copy(part_nasid_pa, (u64)nasid_mask, - xp_nasid_mask_bytes, (BTE_NOTIFY | BTE_WACQUIRE), - NULL); - - return xpc_map_bte_errors(bte_res); + return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, + xp_nasid_mask_bytes); } diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index cc252f201b25..9c540eb1847d 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -169,7 +168,7 @@ static void xpnet_receive(short partid, int channel, struct xpnet_message *msg) { struct sk_buff *skb; - bte_result_t bret; + enum xp_retval ret; struct xpnet_dev_private *priv = (struct xpnet_dev_private *)xpnet_device->priv; @@ -201,7 +200,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) /* * The allocated skb has some reserved space. - * In order to use bte_copy, we need to get the + * In order to use xp_remote_memcpy(), we need to get the * skb->data pointer moved forward. */ skb_reserve(skb, (L1_CACHE_BYTES - ((u64)skb->data & @@ -227,25 +226,24 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) (size_t)msg->embedded_bytes); } else { dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" - "bte_copy(0x%p, 0x%p, %hu)\n", (void *)msg->buf_pa, - (void *)__pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), - msg->size); + "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", (void *) + ((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + (void *)msg->buf_pa, msg->size); - bret = bte_copy(msg->buf_pa, - __pa((u64)skb->data & ~(L1_CACHE_BYTES - 1)), - msg->size, (BTE_NOTIFY | BTE_WACQUIRE), NULL); + ret = xp_remote_memcpy((void *)((u64)skb->data & + ~(L1_CACHE_BYTES - 1)), + (void *)msg->buf_pa, msg->size); - if (bret != BTE_SUCCESS) { + if (ret != xpSuccess) { /* * >>> Need better way of cleaning skb. Currently skb * >>> appears in_use and we can't just call * >>> dev_kfree_skb. 
*/ - dev_err(xpnet, "bte_copy(0x%p, 0x%p, 0x%hx) returned " - "error=0x%x\n", (void *)msg->buf_pa, - (void *)__pa((u64)skb->data & - ~(L1_CACHE_BYTES - 1)), - msg->size, bret); + dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) " + "returned error=0x%x\n", (void *) + ((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + (void *)msg->buf_pa, msg->size, ret); xpc_received(partid, channel, (void *)msg); -- cgit v1.2.3 From 94bd2708d4a95d7da5a1c7c28a063eccd127fb69 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:05 -0700 Subject: sgi-xp: prepare xpc_rsvd_page to work on either sn2 or uv hardware Prepare XPC's reserved page header to work for either sn2 or uv. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/Makefile | 3 +- drivers/misc/sgi-xp/xp.h | 5 +- drivers/misc/sgi-xp/xpc.h | 57 +++++++++----- drivers/misc/sgi-xp/xpc_main.c | 27 ++++++- drivers/misc/sgi-xp/xpc_partition.c | 149 +++++++++++------------------------- drivers/misc/sgi-xp/xpc_sn2.c | 111 +++++++++++++++++++++++++++ drivers/misc/sgi-xp/xpc_uv.c | 48 ++++++++++++ 7 files changed, 267 insertions(+), 133 deletions(-) create mode 100644 drivers/misc/sgi-xp/xpc_sn2.c create mode 100644 drivers/misc/sgi-xp/xpc_uv.c (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile index b50f29217813..b3eeff31ebf8 100644 --- a/drivers/misc/sgi-xp/Makefile +++ b/drivers/misc/sgi-xp/Makefile @@ -7,6 +7,7 @@ xp-y := xp_main.o xp_uv.o xp-$(CONFIG_IA64) += xp_sn2.o xp_nofault.o obj-$(CONFIG_SGI_XP) += xpc.o -xpc-y := xpc_main.o xpc_channel.o xpc_partition.o +xpc-y := xpc_main.o xpc_uv.o xpc_channel.o xpc_partition.o +xpc-$(CONFIG_IA64) += xpc_sn2.o obj-$(CONFIG_SGI_XP) += xpnet.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index c42196a1a6b7..0f75592896dd 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -220,9 +220,10 @@ enum xp_retval { xpBteCopyError, /* 52: bte_copy() returned error */ xpSalError, /* 53: sn SAL error */ + xpRsvdPageNotSet, /* 54: the reserved page is not set up */ - xpUnsupported, /* 54: unsupported functionality or resource */ - xpUnknownReason /* 55: unknown reason - must be last in enum */ + xpUnsupported, /* 55: unsupported functionality or resource */ + xpUnknownReason /* 56: unknown reason - must be last in enum */ }; /* diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 60388bed7701..94b52bb8151e 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -71,11 +71,11 @@ * * reserved page header * - * The first cacheline of the reserved page contains the header - * (struct xpc_rsvd_page). Before SAL initialization has completed, + * The first two 64-byte cachelines of the reserved page contain the + * header (struct xpc_rsvd_page). Before SAL initialization has completed, * SAL has set up the following fields of the reserved page header: - * SAL_signature, SAL_version, partid, and nasids_size. The other - * fields are set up by XPC. (xpc_rsvd_page points to the local + * SAL_signature, SAL_version, SAL_partid, and SAL_nasids_size. The + * other fields are set up by XPC. (xpc_rsvd_page points to the local * partition's reserved page.) * * part_nasids mask @@ -89,11 +89,11 @@ * nasids. The part_nasids mask is located starting at the first cacheline * following the reserved page header. The mach_nasids mask follows right * after the part_nasids mask. 
The size in bytes of each mask is reflected - * by the reserved page header field 'nasids_size'. (Local partition's + * by the reserved page header field 'SAL_nasids_size'. (Local partition's * mask pointers are xpc_part_nasids and xpc_mach_nasids.) * - * vars - * vars part + * vars (ia64-sn2 only) + * vars part (ia64-sn2 only) * * Immediately following the mach_nasids mask are the XPC variables * required by other partitions. First are those that are generic to all @@ -101,25 +101,31 @@ * which are partition specific (vars part). These are setup by XPC. * (Local partition's vars pointers are xpc_vars and xpc_vars_part.) * - * Note: Until vars_pa is set, the partition XPC code has not been initialized. + * Note: Until 'stamp' is set non-zero, the partition XPC code has not been + * initialized. */ struct xpc_rsvd_page { u64 SAL_signature; /* SAL: unique signature */ u64 SAL_version; /* SAL: version */ - u8 partid; /* SAL: partition ID */ + short SAL_partid; /* SAL: partition ID */ + short max_npartitions; /* value of XPC_MAX_PARTITIONS */ u8 version; - u8 pad1[6]; /* align to next u64 in cacheline */ - u64 vars_pa; /* physical address of struct xpc_vars */ + u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ + union { + u64 vars_pa; /* physical address of struct xpc_vars */ + u64 activate_mq_gpa; /* global phys address of activate_mq */ + } sn; struct timespec stamp; /* time when reserved page was setup by XPC */ - u64 pad2[9]; /* align to last u64 in cacheline */ - u64 nasids_size; /* SAL: size of each nasid mask in bytes */ + u64 pad2[9]; /* align to last u64 in 2nd 64-byte cacheline */ + u64 SAL_nasids_size; /* SAL: size of each nasid mask in bytes */ }; -#define XPC_RP_VERSION _XPC_VERSION(1, 1) /* version 1.1 of the reserved page */ +#define XPC_RP_VERSION _XPC_VERSION(2, 0) /* version 2.0 of the reserved page */ #define XPC_SUPPORTS_RP_STAMP(_version) \ (_version >= _XPC_VERSION(1, 1)) +#define ZERO_STAMP ((struct timespec){0, 0}) /* * compare stamps - the return value is: * @@ -218,10 +224,10 @@ xpc_disallow_hb(short partid, struct xpc_vars *vars) * * An array of these structures, one per partition, will be defined. As a * partition becomes active XPC will copy the array entry corresponding to - * itself from that partition. It is desirable that the size of this - * structure evenly divide into a cacheline, such that none of the entries - * in this array crosses a cacheline boundary. As it is now, each entry - * occupies half a cacheline. + * itself from that partition. It is desirable that the size of this structure + * evenly divides into a 128-byte cacheline, such that none of the entries in + * this array crosses a 128-byte cacheline boundary. As it is now, each entry + * occupies a 64-byte cacheline. 
*/ struct xpc_vars_part { u64 magic; @@ -632,16 +638,25 @@ extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); +extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); + +/* found in xpc_sn2.c */ +extern void xpc_init_sn2(void); +extern struct xpc_vars *xpc_vars; /*>>> eliminate from here */ +extern struct xpc_vars_part *xpc_vars_part; /*>>> eliminate from here */ + +/* found in xpc_uv.c */ +extern void xpc_init_uv(void); + /* found in xpc_partition.c */ extern int xpc_exiting; -extern struct xpc_vars *xpc_vars; +extern int xp_nasid_mask_words; extern struct xpc_rsvd_page *xpc_rsvd_page; -extern struct xpc_vars_part *xpc_vars_part; extern struct xpc_partition *xpc_partitions; extern char *xpc_remote_copy_buffer; extern void *xpc_remote_copy_buffer_base; extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); -extern struct xpc_rsvd_page *xpc_rsvd_page_init(void); +extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void); extern void xpc_allow_IPI_ops(void); extern void xpc_restrict_IPI_ops(void); extern int xpc_identify_act_IRQ_sender(void); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index a05c7c7da228..2180f1f7e087 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -175,6 +175,8 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; +enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); + /* * Timer function to enforce the timelimit on the partition disengage request. */ @@ -949,7 +951,7 @@ xpc_do_exit(enum xp_retval reason) DBUG_ON(xpc_partition_engaged(-1UL)); /* indicate to others that our reserved page is uninitialized */ - xpc_rsvd_page->vars_pa = 0; + xpc_rsvd_page->stamp = ZERO_STAMP; /* now it's time to eliminate our heartbeat */ del_timer_sync(&xpc_hb_timer); @@ -1128,8 +1130,24 @@ xpc_init(void) struct task_struct *kthread; size_t buf_size; - if (!ia64_platform_is("sn2")) + if (is_shub()) { + /* + * The ia64-sn2 architecture supports at most 64 partitions. + * And the inability to unregister remote AMOs restricts us + * further to only support exactly 64 partitions on this + * architecture, no less. + */ + if (xp_max_npartitions != 64) + return -EINVAL; + + xpc_init_sn2(); + + } else if (is_uv()) { + xpc_init_uv(); + + } else { return -ENODEV; + } snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); @@ -1214,7 +1232,7 @@ xpc_init(void) * other partitions to discover we are alive and establish initial * communications. 
*/ - xpc_rsvd_page = xpc_rsvd_page_init(); + xpc_rsvd_page = xpc_setup_rsvd_page(); if (xpc_rsvd_page == NULL) { dev_err(xpc_part, "can't setup our reserved page\n"); ret = -EBUSY; @@ -1273,7 +1291,8 @@ xpc_init(void) /* initialization was not successful */ out_4: /* indicate to others that our reserved page is uninitialized */ - xpc_rsvd_page->vars_pa = 0; + xpc_rsvd_page->stamp = ZERO_STAMP; + del_timer_sync(&xpc_hb_timer); (void)unregister_die_notifier(&xpc_die_notifier); (void)unregister_reboot_notifier(&xpc_reboot_notifier); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 6c82f2050974..1db84cb49143 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -44,11 +43,10 @@ u64 xpc_prot_vec[MAX_NUMNODES]; struct xpc_rsvd_page *xpc_rsvd_page; static u64 *xpc_part_nasids; static u64 *xpc_mach_nasids; -struct xpc_vars *xpc_vars; -struct xpc_vars_part *xpc_vars_part; -static int xp_nasid_mask_bytes; /* actual size in bytes of nasid mask */ -static int xp_nasid_mask_words; /* actual size in words of nasid mask */ +/* >>> next two variables should be 'xpc_' if they remain here */ +static int xp_sizeof_nasid_mask; /* actual size in bytes of nasid mask */ +int xp_nasid_mask_words; /* actual size in words of nasid mask */ struct xpc_partition *xpc_partitions; @@ -150,12 +148,10 @@ xpc_get_rsvd_page_pa(int nasid) * communications. */ struct xpc_rsvd_page * -xpc_rsvd_page_init(void) +xpc_setup_rsvd_page(void) { struct xpc_rsvd_page *rp; - AMO_t *amos_page; - u64 rp_pa, nasid_array = 0; - int i, ret; + u64 rp_pa; /* get the local reserved page's address */ @@ -168,110 +164,44 @@ xpc_rsvd_page_init(void) } rp = (struct xpc_rsvd_page *)__va(rp_pa); - if (rp->partid != sn_partition_id) { - dev_err(xpc_part, "the reserved page's partid of %d should be " - "%d\n", rp->partid, sn_partition_id); + if (rp->SAL_version < 3) { + /* SAL_versions < 3 had a SAL_partid defined as a u8 */ + rp->SAL_partid &= 0xff; + } + BUG_ON(rp->SAL_partid != sn_partition_id); + + if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) { + dev_err(xpc_part, "the reserved page's partid of %d is outside " + "supported range (< 0 || >= %d)\n", rp->SAL_partid, + xp_max_npartitions); return NULL; } rp->version = XPC_RP_VERSION; + rp->max_npartitions = xp_max_npartitions; /* establish the actual sizes of the nasid masks */ if (rp->SAL_version == 1) { /* SAL_version 1 didn't set the nasids_size field */ - rp->nasids_size = 128; + rp->SAL_nasids_size = 128; } - xp_nasid_mask_bytes = rp->nasids_size; - xp_nasid_mask_words = xp_nasid_mask_bytes / 8; + xp_sizeof_nasid_mask = rp->SAL_nasids_size; + xp_nasid_mask_words = DIV_ROUND_UP(xp_sizeof_nasid_mask, + BYTES_PER_WORD); /* setup the pointers to the various items in the reserved page */ xpc_part_nasids = XPC_RP_PART_NASIDS(rp); xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); - xpc_vars = XPC_RP_VARS(rp); - xpc_vars_part = XPC_RP_VARS_PART(rp); - /* - * Before clearing xpc_vars, see if a page of AMOs had been previously - * allocated. If not we'll need to allocate one and set permissions - * so that cross-partition AMOs are allowed. - * - * The allocated AMO page needs MCA reporting to remain disabled after - * XPC has unloaded. 
To make this work, we keep a copy of the pointer - * to this page (i.e., amos_page) in the struct xpc_vars structure, - * which is pointed to by the reserved page, and re-use that saved copy - * on subsequent loads of XPC. This AMO page is never freed, and its - * memory protections are never restricted. - */ - amos_page = xpc_vars->amos_page; - if (amos_page == NULL) { - amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1)); - if (amos_page == NULL) { - dev_err(xpc_part, "can't allocate page of AMOs\n"); - return NULL; - } - - /* - * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems - * when xpc_allow_IPI_ops() is called via xpc_hb_init(). - */ - if (!enable_shub_wars_1_1()) { - ret = sn_change_memprotect(ia64_tpa((u64)amos_page), - PAGE_SIZE, - SN_MEMPROT_ACCESS_CLASS_1, - &nasid_array); - if (ret != 0) { - dev_err(xpc_part, "can't change memory " - "protections\n"); - uncached_free_page(__IA64_UNCACHED_OFFSET | - TO_PHYS((u64)amos_page), 1); - return NULL; - } - } - } else if (!IS_AMO_ADDRESS((u64)amos_page)) { - /* - * EFI's XPBOOT can also set amos_page in the reserved page, - * but it happens to leave it as an uncached physical address - * and we need it to be an uncached virtual, so we'll have to - * convert it. - */ - if (!IS_AMO_PHYS_ADDRESS((u64)amos_page)) { - dev_err(xpc_part, "previously used amos_page address " - "is bad = 0x%p\n", (void *)amos_page); - return NULL; - } - amos_page = (AMO_t *)TO_AMO((u64)amos_page); - } - - /* clear xpc_vars */ - memset(xpc_vars, 0, sizeof(struct xpc_vars)); - - xpc_vars->version = XPC_V_VERSION; - xpc_vars->act_nasid = cpuid_to_nasid(0); - xpc_vars->act_phys_cpuid = cpu_physical_id(0); - xpc_vars->vars_part_pa = __pa(xpc_vars_part); - xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); - xpc_vars->amos_page = amos_page; /* save for next load of XPC */ - - /* clear xpc_vars_part */ - memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * - xp_max_npartitions); - - /* initialize the activate IRQ related AMO variables */ - for (i = 0; i < xp_nasid_mask_words; i++) - (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); - - /* initialize the engaged remote partitions related AMO variables */ - (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); - (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); - - /* timestamp of when reserved page was setup by XPC */ - rp->stamp = CURRENT_TIME; + if (xpc_rsvd_page_init(rp) != xpSuccess) + return NULL; /* + * Set timestamp of when reserved page was setup by XPC. * This signifies to the remote partition that our reserved * page is initialized. 
*/ - rp->vars_pa = __pa(xpc_vars); + rp->stamp = CURRENT_TIME; return rp; } @@ -465,7 +395,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, /* pull over the reserved page header and part_nasids mask */ ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, - XPC_RP_HEADER_SIZE + xp_nasid_mask_bytes); + XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask); if (ret != xpSuccess) return ret; @@ -476,19 +406,28 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, discovered_nasids[i] |= remote_part_nasids[i]; } - /* check that the partid is for another partition */ + /* check that the partid is valid and is for another partition */ - if (remote_rp->partid < 0 || remote_rp->partid >= xp_max_npartitions) + if (remote_rp->SAL_partid < 0 || + remote_rp->SAL_partid >= xp_max_npartitions) { return xpInvalidPartid; + } - if (remote_rp->partid == sn_partition_id) + if (remote_rp->SAL_partid == sn_partition_id) return xpLocalPartid; + /* see if the rest of the reserved page has been set up by XPC */ + if (timespec_equal(&remote_rp->stamp, &ZERO_STAMP)) + return xpRsvdPageNotSet; + if (XPC_VERSION_MAJOR(remote_rp->version) != XPC_VERSION_MAJOR(XPC_RP_VERSION)) { return xpBadVersion; } + if (remote_rp->max_npartitions <= sn_partition_id) + return xpInvalidPartid; + return xpSuccess; } @@ -592,7 +531,7 @@ xpc_identify_act_IRQ_req(int nasid) int remote_rp_version; int reactivate = 0; int stamp_diff; - struct timespec remote_rp_stamp = { 0, 0 }; + struct timespec remote_rp_stamp = { 0, 0 }; /*>>> ZERO_STAMP */ short partid; struct xpc_partition *part; enum xp_retval ret; @@ -608,12 +547,12 @@ xpc_identify_act_IRQ_req(int nasid) return; } - remote_vars_pa = remote_rp->vars_pa; + remote_vars_pa = remote_rp->sn.vars_pa; remote_rp_version = remote_rp->version; if (XPC_SUPPORTS_RP_STAMP(remote_rp_version)) remote_rp_stamp = remote_rp->stamp; - partid = remote_rp->partid; + partid = remote_rp->SAL_partid; part = &xpc_partitions[partid]; /* pull over the cross partition variables */ @@ -977,7 +916,7 @@ xpc_discovery(void) enum xp_retval ret; remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + - xp_nasid_mask_bytes, + xp_sizeof_nasid_mask, GFP_KERNEL, &remote_rp_base); if (remote_rp == NULL) return; @@ -1063,9 +1002,9 @@ xpc_discovery(void) continue; } - remote_vars_pa = remote_rp->vars_pa; + remote_vars_pa = remote_rp->sn.vars_pa; - partid = remote_rp->partid; + partid = remote_rp->SAL_partid; part = &xpc_partitions[partid]; /* pull over the cross partition variables */ @@ -1155,5 +1094,5 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, - xp_nasid_mask_bytes); + xp_sizeof_nasid_mask); } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c new file mode 100644 index 000000000000..5a37348715c7 --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -0,0 +1,111 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) sn2-based functions. + * + * Architecture specific implementation of common functions. 
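
For reference, the mask sizing a few hunks above works out as follows (SAL_version 1 hardwires a 128-byte nasid mask, and BYTES_PER_WORD is sizeof(void *)); a standalone check, not driver code:

#include <stdio.h>

#define BYTES_PER_WORD		sizeof(void *)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	size_t sizeof_nasid_mask = 128;	/* the SAL_version 1 default above */
	size_t words = DIV_ROUND_UP(sizeof_nasid_mask, BYTES_PER_WORD);

	/* on a 64-bit build: 128 bytes / 8 bytes per word == 16 words */
	printf("%zu-byte mask -> %zu words\n", sizeof_nasid_mask, words);
	return 0;
}
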
+ * + */ + +#include +#include +#include +#include "xpc.h" + +struct xpc_vars *xpc_vars; +struct xpc_vars_part *xpc_vars_part; + +static enum xp_retval +xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) +{ + AMO_t *amos_page; + u64 nasid_array = 0; + int i; + int ret; + + xpc_vars = XPC_RP_VARS(rp); + + rp->sn.vars_pa = __pa(xpc_vars); + + xpc_vars_part = XPC_RP_VARS_PART(rp); + + /* + * Before clearing xpc_vars, see if a page of AMOs had been previously + * allocated. If not we'll need to allocate one and set permissions + * so that cross-partition AMOs are allowed. + * + * The allocated AMO page needs MCA reporting to remain disabled after + * XPC has unloaded. To make this work, we keep a copy of the pointer + * to this page (i.e., amos_page) in the struct xpc_vars structure, + * which is pointed to by the reserved page, and re-use that saved copy + * on subsequent loads of XPC. This AMO page is never freed, and its + * memory protections are never restricted. + */ + amos_page = xpc_vars->amos_page; + if (amos_page == NULL) { + amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1)); + if (amos_page == NULL) { + dev_err(xpc_part, "can't allocate page of AMOs\n"); + return xpNoMemory; + } + + /* + * Open up AMO-R/W to cpu. This is done for Shub 1.1 systems + * when xpc_allow_IPI_ops() is called via xpc_hb_init(). + */ + if (!enable_shub_wars_1_1()) { + ret = sn_change_memprotect(ia64_tpa((u64)amos_page), + PAGE_SIZE, + SN_MEMPROT_ACCESS_CLASS_1, + &nasid_array); + if (ret != 0) { + dev_err(xpc_part, "can't change memory " + "protections\n"); + uncached_free_page(__IA64_UNCACHED_OFFSET | + TO_PHYS((u64)amos_page), 1); + return xpSalError; + } + } + } + + /* clear xpc_vars */ + memset(xpc_vars, 0, sizeof(struct xpc_vars)); + + xpc_vars->version = XPC_V_VERSION; + xpc_vars->act_nasid = cpuid_to_nasid(0); + xpc_vars->act_phys_cpuid = cpu_physical_id(0); + xpc_vars->vars_part_pa = __pa(xpc_vars_part); + xpc_vars->amos_page_pa = ia64_tpa((u64)amos_page); + xpc_vars->amos_page = amos_page; /* save for next load of XPC */ + + /* clear xpc_vars_part */ + memset((u64 *)xpc_vars_part, 0, sizeof(struct xpc_vars_part) * + xp_max_npartitions); + + /* initialize the activate IRQ related AMO variables */ + for (i = 0; i < xp_nasid_mask_words; i++) + (void)xpc_IPI_init(XPC_ACTIVATE_IRQ_AMOS + i); + + /* initialize the engaged remote partitions related AMO variables */ + (void)xpc_IPI_init(XPC_ENGAGED_PARTITIONS_AMO); + (void)xpc_IPI_init(XPC_DISENGAGE_REQUEST_AMO); + + return xpSuccess; +} + +void +xpc_init_sn2(void) +{ + xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; +} + +void +xpc_exit_sn2(void) +{ +} diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c new file mode 100644 index 000000000000..8327cd4017ec --- /dev/null +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -0,0 +1,48 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved. + */ + +/* + * Cross Partition Communication (XPC) uv-based functions. + * + * Architecture specific implementation of common functions. 
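
xpc_rsvd_page_init_sn2() above preserves the AMO page across module reloads: the first load allocates it, a pointer is parked in xpc_vars (which lives in the reserved page and outlives XPC), and later loads pick the same page back up. A compressed, runnable sketch of that allocate-once pattern, with malloc() standing in for the uncached-page allocator:

#include <stdio.h>
#include <stdlib.h>

/* 'vars' stands in for the reserved page, which outlives the module */
struct vars { void *amos_page; };
static struct vars vars;

static void *get_amos_page(void)
{
	if (vars.amos_page == NULL)		/* first load: allocate */
		vars.amos_page = malloc(4096);
	return vars.amos_page;			/* later loads: reuse */
}

int main(void)
{
	void *first = get_amos_page();
	void *again = get_amos_page();		/* simulated reload */

	printf("same page reused: %d\n", first == again);	/* 1 */
	return 0;
}
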
+ * + */ + +#include + +/* >>> #include */ +/* >>> uv_gpa() is defined in */ +#define uv_gpa(_a) ((unsigned long)_a) + +/* >>> temporarily define next three items for xpc.h */ +#define SGI_XPC_ACTIVATE 23 +#define SGI_XPC_NOTIFY 24 +#define sn_send_IPI_phys(_a, _b, _c, _d) + +#include "xpc.h" + +static void *xpc_activate_mq; + +static enum xp_retval +xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) +{ + /* >>> need to have established xpc_activate_mq earlier */ + rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq); + return xpSuccess; +} + +void +xpc_init_uv(void) +{ + xpc_rsvd_page_init = xpc_rsvd_page_init_uv; +} + +void +xpc_exit_uv(void) +{ +} -- cgit v1.2.3 From 97bf1aa1e1bb18de9bb1987c6eb9ad751bf08aab Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:08 -0700 Subject: sgi-xp: move xpc_allocate() into xpc_send()/xpc_send_notify() Move xpc_allocate() functionality into xpc_send()/xpc_send_notify() so xpc_allocate() no longer needs to be called by XPNET. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 44 ++++++--------- drivers/misc/sgi-xp/xp_main.c | 23 +++----- drivers/misc/sgi-xp/xpc.h | 9 +-- drivers/misc/sgi-xp/xpc_channel.c | 112 +++++++++++++------------------------- drivers/misc/sgi-xp/xpc_main.c | 14 ++--- drivers/misc/sgi-xp/xpc_sn2.c | 64 ++++++++++------------ drivers/misc/sgi-xp/xpnet.c | 11 ++-- 7 files changed, 106 insertions(+), 171 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 0f75592896dd..43bf2470850e 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -116,12 +116,6 @@ * The size of the payload is defined by the user via xpc_connect(). A user- * defined message resides in the payload area. * - * The user should have no dealings with the message header, but only the - * message's payload. When a message entry is allocated (via xpc_allocate()) - * a pointer to the payload area is returned and not the actual beginning of - * the XPC message. The user then constructs a message in the payload area - * and passes that pointer as an argument on xpc_send() or xpc_send_notify(). - * * The size of a message entry (within a message queue) must be a cacheline * sized multiple in order to facilitate the BTE transfer of messages from one * message queue to another. 
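
xpc_init_sn2() and xpc_init_uv() above establish the shape of this whole series: one global function pointer per hardware-specific operation, filled in at init time according to is_shub()/is_uv(). A self-contained sketch of that dispatch; the probe stub is hypothetical:

#include <stdio.h>

enum xp_retval { xpSuccess, xpUnsupported };
struct xpc_rsvd_page;				/* opaque in this sketch */

/* one pointer per arch-specific operation, as in xpc_main.c */
static enum xp_retval (*rsvd_page_init)(struct xpc_rsvd_page *rp);

static enum xp_retval rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
{
	(void)rp;
	printf("sn2 setup\n");
	return xpSuccess;
}

static enum xp_retval rsvd_page_init_uv(struct xpc_rsvd_page *rp)
{
	(void)rp;
	printf("uv setup\n");
	return xpSuccess;
}

static int is_shub_stub(void) { return 1; }	/* hypothetical probe */

int main(void)
{
	rsvd_page_init = is_shub_stub() ? rsvd_page_init_sn2
					: rsvd_page_init_uv;
	return rsvd_page_init(NULL) == xpSuccess ? 0 : 1;
}
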
A macro, XPC_MSG_SIZE(), is provided for the user @@ -221,9 +215,10 @@ enum xp_retval { xpBteCopyError, /* 52: bte_copy() returned error */ xpSalError, /* 53: sn SAL error */ xpRsvdPageNotSet, /* 54: the reserved page is not set up */ + xpPayloadTooBig, /* 55: payload too large for message slot */ - xpUnsupported, /* 55: unsupported functionality or resource */ - xpUnknownReason /* 56: unknown reason - must be last in enum */ + xpUnsupported, /* 56: unsupported functionality or resource */ + xpUnknownReason /* 57: unknown reason - must be last in enum */ }; /* @@ -304,16 +299,15 @@ struct xpc_registration { #define XPC_CHANNEL_REGISTERED(_c) (xpc_registrations[_c].func != NULL) -/* the following are valid xpc_allocate() flags */ +/* the following are valid xpc_send() or xpc_send_notify() flags */ #define XPC_WAIT 0 /* wait flag */ #define XPC_NOWAIT 1 /* no wait flag */ struct xpc_interface { void (*connect) (int); void (*disconnect) (int); - enum xp_retval (*allocate) (short, int, u32, void **); - enum xp_retval (*send) (short, int, void *); - enum xp_retval (*send_notify) (short, int, void *, + enum xp_retval (*send) (short, int, u32, void *, u16); + enum xp_retval (*send_notify) (short, int, u32, void *, u16, xpc_notify_func, void *); void (*received) (short, int, void *); enum xp_retval (*partid_to_nasids) (short, void *); @@ -323,10 +317,9 @@ extern struct xpc_interface xpc_interface; extern void xpc_set_interface(void (*)(int), void (*)(int), - enum xp_retval (*)(short, int, u32, void **), - enum xp_retval (*)(short, int, void *), - enum xp_retval (*)(short, int, void *, - xpc_notify_func, void *), + enum xp_retval (*)(short, int, u32, void *, u16), + enum xp_retval (*)(short, int, u32, void *, u16, + xpc_notify_func, void *), void (*)(short, int, void *), enum xp_retval (*)(short, void *)); extern void xpc_clear_interface(void); @@ -336,22 +329,19 @@ extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16, extern void xpc_disconnect(int); static inline enum xp_retval -xpc_allocate(short partid, int ch_number, u32 flags, void **payload) -{ - return xpc_interface.allocate(partid, ch_number, flags, payload); -} - -static inline enum xp_retval -xpc_send(short partid, int ch_number, void *payload) +xpc_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) { - return xpc_interface.send(partid, ch_number, payload); + return xpc_interface.send(partid, ch_number, flags, payload, + payload_size); } static inline enum xp_retval -xpc_send_notify(short partid, int ch_number, void *payload, - xpc_notify_func func, void *key) +xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) { - return xpc_interface.send_notify(partid, ch_number, payload, func, key); + return xpc_interface.send_notify(partid, ch_number, flags, payload, + payload_size, func, key); } static inline void diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 6f25613b27e3..9c0ce2f15ff6 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -58,10 +58,9 @@ xpc_notloaded(void) struct xpc_interface xpc_interface = { (void (*)(int))xpc_notloaded, (void (*)(int))xpc_notloaded, - (enum xp_retval(*)(short, int, u32, void **))xpc_notloaded, - (enum xp_retval(*)(short, int, void *))xpc_notloaded, - (enum xp_retval(*)(short, int, void *, xpc_notify_func, void *)) - xpc_notloaded, + (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, + (enum xp_retval(*)(short, int, u32, 
void *, u16, xpc_notify_func, + void *))xpc_notloaded, (void (*)(short, int, void *))xpc_notloaded, (enum xp_retval(*)(short, void *))xpc_notloaded }; @@ -73,16 +72,14 @@ EXPORT_SYMBOL_GPL(xpc_interface); void xpc_set_interface(void (*connect) (int), void (*disconnect) (int), - enum xp_retval (*allocate) (short, int, u32, void **), - enum xp_retval (*send) (short, int, void *), - enum xp_retval (*send_notify) (short, int, void *, + enum xp_retval (*send) (short, int, u32, void *, u16), + enum xp_retval (*send_notify) (short, int, u32, void *, u16, xpc_notify_func, void *), void (*received) (short, int, void *), enum xp_retval (*partid_to_nasids) (short, void *)) { xpc_interface.connect = connect; xpc_interface.disconnect = disconnect; - xpc_interface.allocate = allocate; xpc_interface.send = send; xpc_interface.send_notify = send_notify; xpc_interface.received = received; @@ -98,13 +95,11 @@ xpc_clear_interface(void) { xpc_interface.connect = (void (*)(int))xpc_notloaded; xpc_interface.disconnect = (void (*)(int))xpc_notloaded; - xpc_interface.allocate = (enum xp_retval(*)(short, int, u32, - void **))xpc_notloaded; - xpc_interface.send = (enum xp_retval(*)(short, int, void *)) + xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) xpc_notloaded; - xpc_interface.send_notify = (enum xp_retval(*)(short, int, void *, - xpc_notify_func, - void *))xpc_notloaded; + xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, + u16, xpc_notify_func, + void *))xpc_notloaded; xpc_interface.received = (void (*)(short, int, void *)) xpc_notloaded; xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 56bf5dcc391d..6b622b091bde 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -624,9 +624,7 @@ extern void (*xpc_IPI_send_closereply) (struct xpc_channel *, unsigned long *); extern void (*xpc_IPI_send_openrequest) (struct xpc_channel *, unsigned long *); extern void (*xpc_IPI_send_openreply) (struct xpc_channel *, unsigned long *); -extern enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *, u32, - struct xpc_msg **); -extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, struct xpc_msg *, +extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16, u8, xpc_notify_func, void *); extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *); @@ -664,9 +662,8 @@ extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); extern void xpc_initiate_connect(int); extern void xpc_initiate_disconnect(int); extern enum xp_retval xpc_allocate_msg_wait(struct xpc_channel *); -extern enum xp_retval xpc_initiate_allocate(short, int, u32, void **); -extern enum xp_retval xpc_initiate_send(short, int, void *); -extern enum xp_retval xpc_initiate_send_notify(short, int, void *, +extern enum xp_retval xpc_initiate_send(short, int, u32, void *, u16); +extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16, xpc_notify_func, void *); extern void xpc_initiate_received(short, int, void *); extern void xpc_process_channel_activity(struct xpc_partition *); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 26c5e12c1220..55182c8dd32a 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -1192,87 +1192,54 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) } /* - * Allocate an entry for a message from the message queue associated with the - * specified 
channel. NOTE that this routine can sleep waiting for a message - * entry to become available. To not sleep, pass in the XPC_NOWAIT flag. + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. * - * Arguments: + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. * - * partid - ID of partition to which the channel is connected. - * ch_number - channel #. - * flags - see xpc.h for valid flags. - * payload - address of the allocated payload area pointer (filled in on - * return) in which the user-defined message is constructed. - */ -enum xp_retval -xpc_initiate_allocate(short partid, int ch_number, u32 flags, void **payload) -{ - struct xpc_partition *part = &xpc_partitions[partid]; - enum xp_retval ret = xpUnknownReason; - struct xpc_msg *msg = NULL; - - DBUG_ON(partid < 0 || partid >= xp_max_npartitions); - DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); - - *payload = NULL; - - if (xpc_part_ref(part)) { - ret = xpc_allocate_msg(&part->channels[ch_number], flags, &msg); - xpc_part_deref(part); - - if (msg != NULL) - *payload = &msg->payload; - } - - return ret; -} - -/* - * Send a message previously allocated using xpc_initiate_allocate() on the - * specified channel connected to the specified partition. - * - * This routine will not wait for the message to be received, nor will - * notification be given when it does happen. Once this routine has returned - * the message entry allocated via xpc_initiate_allocate() is no longer - * accessable to the caller. - * - * This routine, although called by users, does not call xpc_part_ref() to - * ensure that the partition infrastructure is in place. It relies on the - * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). + * Once sent, this routine will not wait for the message to be received, nor + * will notification be given when it does happen. * * Arguments: * * partid - ID of partition to which the channel is connected. * ch_number - channel # to send message on. - * payload - pointer to the payload area allocated via - * xpc_initiate_allocate(). + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. */ enum xp_retval -xpc_initiate_send(short partid, int ch_number, void *payload) +xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size) { struct xpc_partition *part = &xpc_partitions[partid]; - struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); - enum xp_retval ret; + enum xp_retval ret = xpUnknownReason; - dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, partid, ch_number); DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); - DBUG_ON(msg == NULL); + DBUG_ON(payload == NULL); - ret = xpc_send_msg(&part->channels[ch_number], msg, 0, NULL, NULL); + if (xpc_part_ref(part)) { + ret = xpc_send_msg(&part->channels[ch_number], flags, payload, + payload_size, 0, NULL, NULL); + xpc_part_deref(part); + } return ret; } /* - * Send a message previously allocated using xpc_initiate_allocate on the - * specified channel connected to the specified partition. + * Send a message that contains the user's payload on the specified channel + * connected to the specified partition. 
* - * This routine will not wait for the message to be sent. Once this routine - * has returned the message entry allocated via xpc_initiate_allocate() is no - * longer accessable to the caller. + * NOTE that this routine can sleep waiting for a message entry to become + * available. To not sleep, pass in the XPC_NOWAIT flag. + * + * This routine will not wait for the message to be sent or received. * * Once the remote end of the channel has received the message, the function * passed as an argument to xpc_initiate_send_notify() will be called. This @@ -1282,38 +1249,37 @@ xpc_initiate_send(short partid, int ch_number, void *payload) * * If this routine returns an error, the caller's function will NOT be called. * - * This routine, although called by users, does not call xpc_part_ref() to - * ensure that the partition infrastructure is in place. It relies on the - * fact that we called xpc_msgqueue_ref() in xpc_allocate_msg(). - * * Arguments: * * partid - ID of partition to which the channel is connected. * ch_number - channel # to send message on. - * payload - pointer to the payload area allocated via - * xpc_initiate_allocate(). + * flags - see xp.h for valid flags. + * payload - pointer to the payload which is to be sent. + * payload_size - size of the payload in bytes. * func - function to call with asynchronous notification of message * receipt. THIS FUNCTION MUST BE NON-BLOCKING. * key - user-defined key to be passed to the function when it's called. */ enum xp_retval -xpc_initiate_send_notify(short partid, int ch_number, void *payload, - xpc_notify_func func, void *key) +xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, + u16 payload_size, xpc_notify_func func, void *key) { struct xpc_partition *part = &xpc_partitions[partid]; - struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); - enum xp_retval ret; + enum xp_retval ret = xpUnknownReason; - dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, + dev_dbg(xpc_chan, "payload=0x%p, partid=%d, channel=%d\n", payload, partid, ch_number); DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); - DBUG_ON(msg == NULL); + DBUG_ON(payload == NULL); DBUG_ON(func == NULL); - ret = xpc_send_msg(&part->channels[ch_number], msg, XPC_N_CALL, - func, key); + if (xpc_part_ref(part)) { + ret = xpc_send_msg(&part->channels[ch_number], flags, payload, + payload_size, XPC_N_CALL, func, key); + xpc_part_deref(part); + } return ret; } @@ -1372,7 +1338,7 @@ xpc_deliver_msg(struct xpc_channel *ch) * partid - ID of partition to which the channel is connected. * ch_number - channel # message received on. * payload - pointer to the payload area allocated via - * xpc_initiate_allocate(). + * xpc_initiate_send() or xpc_initiate_send_notify(). 
*/ void xpc_initiate_received(short partid, int ch_number, void *payload) diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 4a6eb3774759..aae90f5933b5 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -217,12 +217,9 @@ void (*xpc_IPI_send_openrequest) (struct xpc_channel *ch, void (*xpc_IPI_send_openreply) (struct xpc_channel *ch, unsigned long *irq_flags); -enum xp_retval (*xpc_allocate_msg) (struct xpc_channel *ch, u32 flags, - struct xpc_msg **address_of_msg); - -enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, struct xpc_msg *msg, - u8 notify_type, xpc_notify_func func, - void *key); +enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, + void *payload, u16 payload_size, u8 notify_type, + xpc_notify_func func, void *key); void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg); /* @@ -1286,9 +1283,8 @@ xpc_init(void) /* set the interface to point at XPC's functions */ xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect, - xpc_initiate_allocate, xpc_initiate_send, - xpc_initiate_send_notify, xpc_initiate_received, - xpc_initiate_partid_to_nasids); + xpc_initiate_send, xpc_initiate_send_notify, + xpc_initiate_received, xpc_initiate_partid_to_nasids); return 0; diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 7216df36bc73..db67d348b35c 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -1532,18 +1532,6 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, enum xp_retval ret; s64 put; - /* this reference will be dropped in xpc_send_msg_sn2() */ - xpc_msgqueue_ref(ch); - - if (ch->flags & XPC_C_DISCONNECTING) { - xpc_msgqueue_deref(ch); - return ch->reason; - } - if (!(ch->flags & XPC_C_CONNECTED)) { - xpc_msgqueue_deref(ch); - return xpNotConnected; - } - /* * Get the next available message entry from the local message queue. * If none are available, we'll make sure that we grab the latest @@ -1582,16 +1570,12 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, if (ret == xpTimeout) xpc_IPI_send_local_msgrequest_sn2(ch); - if (flags & XPC_NOWAIT) { - xpc_msgqueue_deref(ch); + if (flags & XPC_NOWAIT) return xpNoWait; - } ret = xpc_allocate_msg_wait(ch); - if (ret != xpInterrupted && ret != xpTimeout) { - xpc_msgqueue_deref(ch); + if (ret != xpInterrupted && ret != xpTimeout) return ret; - } } /* get the message's address and initialize it */ @@ -1606,7 +1590,6 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, (void *)msg, msg->number, ch->partid, ch->number); *address_of_msg = msg; - return xpSuccess; } @@ -1616,24 +1599,38 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, * message is being sent to. 
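
With allocation folded into the send path, a client now builds its message in its own buffer and passes the payload plus its size. A sketch of a caller of the reworked API, assuming xp.h; the channel number, payload struct and callback are hypothetical:

/* caller-side sketch only; builds against xp.h, not part of the patch */
#include "xp.h"

struct my_msg {
	int opcode;
	char data[32];
};

/* xpc_notify_func callback: must not block */
static void my_msg_sent(enum xp_retval reason, short partid, int ch_number,
			void *key)
{
}

static enum xp_retval
send_example(short partid)
{
	struct my_msg msg = { .opcode = 1 };
	int my_channel = 3;			/* hypothetical channel # */

	/* fire and forget; may sleep waiting for a free message slot */
	(void)xpc_send(partid, my_channel, XPC_WAIT, &msg, sizeof(msg));

	/* or: no sleeping, with notification once the far side has it */
	return xpc_send_notify(partid, my_channel, XPC_NOWAIT, &msg,
			       sizeof(msg), my_msg_sent, NULL);
}

One thing worth flagging in the xpnet hunk of this patch: xpc_send_notify() is handed &msg and sizeof(msg) + embedded_bytes - 1, but msg is a pointer into the local msg_buffer, so those describe the pointer variable rather than the message itself; presumably msg and an XPNET_MSG_SIZE-based length were intended.
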
*/ static enum xp_retval -xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, - xpc_notify_func func, void *key) +xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) { enum xp_retval ret = xpSuccess; + struct xpc_msg *msg = msg; struct xpc_notify *notify = notify; - s64 put, msg_number = msg->number; + s64 msg_number; + s64 put; DBUG_ON(notify_type == XPC_N_CALL && func == NULL); - DBUG_ON((((u64)msg - (u64)ch->local_msgqueue) / ch->msg_size) != - msg_number % ch->local_nentries); - DBUG_ON(msg->flags & XPC_M_READY); + + if (XPC_MSG_SIZE(payload_size) > ch->msg_size) + return xpPayloadTooBig; + + xpc_msgqueue_ref(ch); if (ch->flags & XPC_C_DISCONNECTING) { - /* drop the reference grabbed in xpc_allocate_msg_sn2() */ - xpc_msgqueue_deref(ch); - return ch->reason; + ret = ch->reason; + goto out_1; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + ret = xpNotConnected; + goto out_1; } + ret = xpc_allocate_msg_sn2(ch, flags, &msg); + if (ret != xpSuccess) + goto out_1; + + msg_number = msg->number; + if (notify_type != 0) { /* * Tell the remote side to send an ACK interrupt when the @@ -1663,13 +1660,12 @@ xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, atomic_dec(&ch->n_to_notify); ret = ch->reason; } - - /* drop reference grabbed in xpc_allocate_msg_sn2() */ - xpc_msgqueue_deref(ch); - return ret; + goto out_1; } } + memcpy(&msg->payload, payload, payload_size); + msg->flags |= XPC_M_READY; /* @@ -1684,7 +1680,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, if (put == msg_number) xpc_send_msgs_sn2(ch, put); - /* drop the reference grabbed in xpc_allocate_msg_sn2() */ +out_1: xpc_msgqueue_deref(ch); return ret; } @@ -1821,8 +1817,6 @@ xpc_init_sn2(void) xpc_IPI_send_openrequest = xpc_IPI_send_openrequest_sn2; xpc_IPI_send_openreply = xpc_IPI_send_openreply_sn2; - xpc_allocate_msg = xpc_allocate_msg_sn2; - xpc_send_msg = xpc_send_msg_sn2; xpc_received_msg = xpc_received_msg_sn2; } diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 9c540eb1847d..f9356ba73155 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -438,7 +438,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct xpnet_pending_msg *queued_msg; enum xp_retval ret; - struct xpnet_message *msg; + u8 msg_buffer[XPNET_MSG_SIZE]; + struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer[0]; u64 start_addr, end_addr; long dp; u8 second_mac_octet; @@ -524,11 +525,6 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) /* found a partition to send to */ - ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, - XPC_NOWAIT, (void **)&msg); - if (unlikely(ret != xpSuccess)) - continue; - msg->embedded_bytes = embedded_bytes; if (unlikely(embedded_bytes != 0)) { msg->version = XPNET_VERSION_EMBED; @@ -553,7 +549,8 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) atomic_inc(&queued_msg->use_count); - ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, + ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, + &msg, sizeof(msg) + embedded_bytes - 1, xpnet_send_completed, queued_msg); if (unlikely(ret != xpSuccess)) { atomic_dec(&queued_msg->use_count); -- cgit v1.2.3 From ee6665e3b6e1283c30ae240732af1345bc02154e Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:13 -0700 Subject: sgi-xp: isolate remote copy buffer 
to sn2 only Make the remote copy buffer an sn2 only item. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 10 ----- drivers/misc/sgi-xp/xpc.h | 39 ++++------------ drivers/misc/sgi-xp/xpc_main.c | 31 ++++--------- drivers/misc/sgi-xp/xpc_partition.c | 31 +++++-------- drivers/misc/sgi-xp/xpc_sn2.c | 88 +++++++++++++++++++++++++++++-------- 5 files changed, 97 insertions(+), 102 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 43bf2470850e..955b5b913235 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -60,16 +60,6 @@ #define XP_MAX_NPARTITIONS_SN2 64 #define XP_MAX_NPARTITIONS_UV 256 -/* - * Define the number of u64s required to represent all the C-brick nasids - * as a bitmap. The cross-partition kernel modules deal only with - * C-brick nasids, thus the need for bitmaps which don't account for - * odd-numbered (non C-brick) nasids. - */ -#define XP_MAX_PHYSNODE_ID (MAX_NUMALINK_NODES / 2) -#define XP_NASID_MASK_BYTES ((XP_MAX_PHYSNODE_ID + 7) / 8) -#define XP_NASID_MASK_WORDS ((XP_MAX_PHYSNODE_ID + 63) / 64) - /* * XPC establishes channel connections between the local partition and any * other partition that is currently up. Over these channels, kernel-level diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index defd08881184..2111723553bf 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -150,26 +150,6 @@ struct xpc_vars_sn2 { #define XPC_V_VERSION _XPC_VERSION(3, 1) /* version 3.1 of the cross vars */ -/* - * The following pertains to ia64-sn2 only. - * - * Memory for XPC's amo variables is allocated by the MSPEC driver. These - * pages are located in the lowest granule. The lowest granule uses 4k pages - * for cached references and an alternate TLB handler to never provide a - * cacheable mapping for the entire region. This will prevent speculative - * reading of cached copies of our lines from being issued which will cause - * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 - * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of - * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS) to identify - * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote - * partitions (i.e., XPCs) consider themselves currently engaged with the - * local XPC and 1 amo variable to request partition deactivation. - */ -#define XPC_NOTIFY_IRQ_AMOS 0 -#define XPC_ACTIVATE_IRQ_AMOS (XPC_NOTIFY_IRQ_AMOS + XP_MAX_NPARTITIONS_SN2) -#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS) -#define XPC_DEACTIVATE_REQUEST_AMO (XPC_ENGAGED_PARTITIONS_AMO + 1) - /* * The following structure describes the per partition specific variables. 
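
Further down, this patch also reflows the XPC_PACK_ARGS()/XPC_UNPACK_ARG*() helpers (whitespace only). Since packed u64 arguments are easy to misread, a quick standalone check of what they compute, with u64 spelled uint64_t:

#include <stdio.h>
#include <stdint.h>

#define XPC_PACK_ARGS(_arg1, _arg2) \
	((((uint64_t)_arg1) & 0xffffffff) | \
	 ((((uint64_t)_arg2) & 0xffffffff) << 32))
#define XPC_UNPACK_ARG1(_args)	(((uint64_t)_args) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args)	((((uint64_t)_args) >> 32) & 0xffffffff)

int main(void)
{
	uint64_t args = XPC_PACK_ARGS(0x11111111u, 0x22222222u);

	/* prints 0x2222222211111111 0x11111111 0x22222222 */
	printf("0x%llx 0x%llx 0x%llx\n",
	       (unsigned long long)args,
	       (unsigned long long)XPC_UNPACK_ARG1(args),
	       (unsigned long long)XPC_UNPACK_ARG2(args));
	return 0;
}
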
* @@ -214,9 +194,10 @@ struct xpc_vars_part_sn2 { #define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2)) #define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) -#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words) -#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *)(XPC_RP_MACH_NASIDS(_rp) + \ - xp_nasid_mask_words)) +#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xpc_nasid_mask_words) +#define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \ + (XPC_RP_MACH_NASIDS(_rp) + \ + xpc_nasid_mask_words)) /* * Functions registered by add_timer() or called by kernel_thread() only @@ -225,11 +206,11 @@ struct xpc_vars_part_sn2 { * the passed argument. */ #define XPC_PACK_ARGS(_arg1, _arg2) \ - ((((u64) _arg1) & 0xffffffff) | \ - ((((u64) _arg2) & 0xffffffff) << 32)) + ((((u64)_arg1) & 0xffffffff) | \ + ((((u64)_arg2) & 0xffffffff) << 32)) -#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff) -#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff) +#define XPC_UNPACK_ARG1(_args) (((u64)_args) & 0xffffffff) +#define XPC_UNPACK_ARG2(_args) ((((u64)_args) >> 32) & 0xffffffff) /* * Define a Get/Put value pair (pointers) used with a message queue. @@ -710,12 +691,10 @@ extern void xpc_exit_uv(void); /* found in xpc_partition.c */ extern int xpc_exiting; -extern int xp_nasid_mask_words; +extern int xpc_nasid_mask_words; extern struct xpc_rsvd_page *xpc_rsvd_page; extern u64 *xpc_mach_nasids; extern struct xpc_partition *xpc_partitions; -extern char *xpc_remote_copy_buffer; -extern void *xpc_remote_copy_buffer_base; extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void); extern int xpc_identify_activate_IRQ_sender(void); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index b5f3c5e59db0..36dfccea5247 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -877,7 +877,6 @@ xpc_do_exit(enum xp_retval reason) unregister_sysctl_table(xpc_sysctl); kfree(xpc_partitions); - kfree(xpc_remote_copy_buffer_base); if (is_shub()) xpc_exit_sn2(); @@ -1031,7 +1030,9 @@ xpc_init(void) short partid; struct xpc_partition *part; struct task_struct *kthread; - size_t buf_size; + + snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); + snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); if (is_shub()) { /* @@ -1054,26 +1055,12 @@ xpc_init(void) return -ENODEV; } - snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); - snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan"); - - buf_size = max(XPC_RP_VARS_SIZE, - XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES); - xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size, - GFP_KERNEL, - &xpc_remote_copy_buffer_base); - if (xpc_remote_copy_buffer == NULL) { - dev_err(xpc_part, "can't get memory for remote copy buffer\n"); - ret = -ENOMEM; - goto out_1; - } - xpc_partitions = kzalloc(sizeof(struct xpc_partition) * xp_max_npartitions, GFP_KERNEL); if (xpc_partitions == NULL) { dev_err(xpc_part, "can't get memory for partition structure\n"); ret = -ENOMEM; - goto out_2; + goto out_1; } /* @@ -1115,7 +1102,7 @@ xpc_init(void) if (xpc_rsvd_page == NULL) { dev_err(xpc_part, "can't setup our reserved page\n"); ret = -EBUSY; - goto out_3; + goto out_2; } /* add ourselves to the reboot_notifier_list */ @@ -1136,7 +1123,7 @@ xpc_init(void) if (IS_ERR(kthread)) { dev_err(xpc_part, "failed while forking hb check thread\n"); ret = -EBUSY; - goto out_4; + goto out_3; } /* @@ -1164,18 +1151,16 @@ 
xpc_init(void) return 0; /* initialization was not successful */ -out_4: +out_3: /* indicate to others that our reserved page is uninitialized */ xpc_rsvd_page->stamp = 0; (void)unregister_die_notifier(&xpc_die_notifier); (void)unregister_reboot_notifier(&xpc_reboot_notifier); -out_3: +out_2: if (xpc_sysctl) unregister_sysctl_table(xpc_sysctl); kfree(xpc_partitions); -out_2: - kfree(xpc_remote_copy_buffer_base); out_1: if (is_shub()) xpc_exit_sn2(); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index c769ab8f74ef..9f104450478f 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -34,20 +34,11 @@ struct xpc_rsvd_page *xpc_rsvd_page; static u64 *xpc_part_nasids; u64 *xpc_mach_nasids; -/* >>> next two variables should be 'xpc_' if they remain here */ -static int xp_sizeof_nasid_mask; /* actual size in bytes of nasid mask */ -int xp_nasid_mask_words; /* actual size in words of nasid mask */ +static int xpc_sizeof_nasid_mask; /* actual size in bytes of nasid mask */ +int xpc_nasid_mask_words; /* actual size in words of nasid mask */ struct xpc_partition *xpc_partitions; -/* - * Generic buffer used to store a local copy of portions of a remote - * partition's reserved page (either its header and part_nasids mask, - * or its vars). - */ -char *xpc_remote_copy_buffer; -void *xpc_remote_copy_buffer_base; - /* * Guarantee that the kmalloc'd memory is cacheline aligned. */ @@ -176,9 +167,9 @@ xpc_setup_rsvd_page(void) /* SAL_version 1 didn't set the nasids_size field */ rp->SAL_nasids_size = 128; } - xp_sizeof_nasid_mask = rp->SAL_nasids_size; - xp_nasid_mask_words = DIV_ROUND_UP(xp_sizeof_nasid_mask, - BYTES_PER_WORD); + xpc_sizeof_nasid_mask = rp->SAL_nasids_size; + xpc_nasid_mask_words = DIV_ROUND_UP(xpc_sizeof_nasid_mask, + BYTES_PER_WORD); /* setup the pointers to the various items in the reserved page */ xpc_part_nasids = XPC_RP_PART_NASIDS(rp); @@ -222,14 +213,14 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, /* pull over the reserved page header and part_nasids mask */ ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, - XPC_RP_HEADER_SIZE + xp_sizeof_nasid_mask); + XPC_RP_HEADER_SIZE + xpc_sizeof_nasid_mask); if (ret != xpSuccess) return ret; if (discovered_nasids != NULL) { u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); - for (i = 0; i < xp_nasid_mask_words; i++) + for (i = 0; i < xpc_nasid_mask_words; i++) discovered_nasids[i] |= remote_part_nasids[i]; } @@ -414,12 +405,12 @@ xpc_discovery(void) enum xp_retval ret; remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + - xp_sizeof_nasid_mask, + xpc_sizeof_nasid_mask, GFP_KERNEL, &remote_rp_base); if (remote_rp == NULL) return; - discovered_nasids = kzalloc(sizeof(u64) * xp_nasid_mask_words, + discovered_nasids = kzalloc(sizeof(u64) * xpc_nasid_mask_words, GFP_KERNEL); if (discovered_nasids == NULL) { kfree(remote_rp_base); @@ -521,10 +512,10 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) if (part->remote_rp_pa == 0) return xpPartitionDown; - memset(nasid_mask, 0, XP_NASID_MASK_BYTES); + memset(nasid_mask, 0, xpc_sizeof_nasid_mask); part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, - xp_sizeof_nasid_mask); + xpc_sizeof_nasid_mask); } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index e5dc8c44c6fb..9c0c29a2ac86 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -19,6 +19,43 @@ 
#include #include "xpc.h" +/* + * Define the number of u64s required to represent all the C-brick nasids + * as a bitmap. The cross-partition kernel modules deal only with + * C-brick nasids, thus the need for bitmaps which don't account for + * odd-numbered (non C-brick) nasids. + */ +#define XPC_MAX_PHYSNODES_SN2 (MAX_NUMALINK_NODES / 2) +#define XP_NASID_MASK_BYTES_SN2 ((XPC_MAX_PHYSNODES_SN2 + 7) / 8) +#define XP_NASID_MASK_WORDS_SN2 ((XPC_MAX_PHYSNODES_SN2 + 63) / 64) + +/* + * Memory for XPC's amo variables is allocated by the MSPEC driver. These + * pages are located in the lowest granule. The lowest granule uses 4k pages + * for cached references and an alternate TLB handler to never provide a + * cacheable mapping for the entire region. This will prevent speculative + * reading of cached copies of our lines from being issued which will cause + * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64 + * amo variables (based on XP_MAX_NPARTITIONS_SN2) to identify the senders of + * NOTIFY IRQs, 128 amo variables (based on XP_NASID_MASK_WORDS_SN2) to identify + * the senders of ACTIVATE IRQs, 1 amo variable to identify which remote + * partitions (i.e., XPCs) consider themselves currently engaged with the + * local XPC and 1 amo variable to request partition deactivation. + */ +#define XPC_NOTIFY_IRQ_AMOS_SN2 0 +#define XPC_ACTIVATE_IRQ_AMOS_SN2 (XPC_NOTIFY_IRQ_AMOS_SN2 + \ + XP_MAX_NPARTITIONS_SN2) +#define XPC_ENGAGED_PARTITIONS_AMO_SN2 (XPC_ACTIVATE_IRQ_AMOS_SN2 + \ + XP_NASID_MASK_WORDS_SN2) +#define XPC_DEACTIVATE_REQUEST_AMO_SN2 (XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1) + +/* + * Buffer used to store a local copy of portions of a remote partition's + * reserved page (either its header and part_nasids mask, or its vars). + */ +static char *xpc_remote_copy_buffer_sn2; +static void *xpc_remote_copy_buffer_base_sn2; + static struct xpc_vars_sn2 *xpc_vars; /* >>> Add _sn2 suffix? */ static struct xpc_vars_part_sn2 *xpc_vars_part; /* >>> Add _sn2 suffix? 
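
The layout established above is a set of fixed offsets into a single array of amo variables. Plugging in the counts the comment states for sn2 (64 partitions, 128 nasid-mask words), the indices come out as below; a standalone check that mirrors the defines:

#include <stdio.h>

/* mirror of the sn2 amo layout, using the counts from the comment */
#define XP_MAX_NPARTITIONS_SN2		64
#define XP_NASID_MASK_WORDS_SN2		128

#define XPC_NOTIFY_IRQ_AMOS_SN2		0
#define XPC_ACTIVATE_IRQ_AMOS_SN2	(XPC_NOTIFY_IRQ_AMOS_SN2 + \
					 XP_MAX_NPARTITIONS_SN2)
#define XPC_ENGAGED_PARTITIONS_AMO_SN2	(XPC_ACTIVATE_IRQ_AMOS_SN2 + \
					 XP_NASID_MASK_WORDS_SN2)
#define XPC_DEACTIVATE_REQUEST_AMO_SN2	(XPC_ENGAGED_PARTITIONS_AMO_SN2 + 1)

int main(void)
{
	/* prints: notify 0, activate 64, engaged 192, deactivate 193 */
	printf("notify %d, activate %d, engaged %d, deactivate %d\n",
	       XPC_NOTIFY_IRQ_AMOS_SN2, XPC_ACTIVATE_IRQ_AMOS_SN2,
	       XPC_ENGAGED_PARTITIONS_AMO_SN2,
	       XPC_DEACTIVATE_REQUEST_AMO_SN2);
	return 0;
}
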
*/ @@ -176,7 +213,7 @@ xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid, int w_index = XPC_NASID_W_INDEX(from_nasid); int b_index = XPC_NASID_B_INDEX(from_nasid); struct amo *amos = (struct amo *)__va(amos_page_pa + - (XPC_ACTIVATE_IRQ_AMOS * + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); (void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid, @@ -189,7 +226,7 @@ xpc_send_local_activate_IRQ_sn2(int from_nasid) int w_index = XPC_NASID_W_INDEX(from_nasid); int b_index = XPC_NASID_B_INDEX(from_nasid); struct amo *amos = (struct amo *)__va(xpc_vars->amos_page_pa + - (XPC_ACTIVATE_IRQ_AMOS * + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); /* fake the sending and receipt of an activate IRQ from remote nasid */ @@ -395,7 +432,7 @@ xpc_indicate_partition_engaged_sn2(struct xpc_partition *part) { unsigned long irq_flags; struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa + - (XPC_ENGAGED_PARTITIONS_AMO * + (XPC_ENGAGED_PARTITIONS_AMO_SN2 * sizeof(struct amo))); local_irq_save(irq_flags); @@ -422,7 +459,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part) struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; unsigned long irq_flags; struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa + - (XPC_ENGAGED_PARTITIONS_AMO * + (XPC_ENGAGED_PARTITIONS_AMO_SN2 * sizeof(struct amo))); local_irq_save(irq_flags); @@ -455,7 +492,7 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part) static int xpc_partition_engaged_sn2(short partid) { - struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; + struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2; /* our partition's amo variable ANDed with partid mask */ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & @@ -465,7 +502,7 @@ xpc_partition_engaged_sn2(short partid) static int xpc_any_partition_engaged_sn2(void) { - struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; + struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2; /* our partition's amo variable */ return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0; @@ -474,7 +511,7 @@ xpc_any_partition_engaged_sn2(void) static void xpc_assume_partition_disengaged_sn2(short partid) { - struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO; + struct amo *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO_SN2; /* clear bit(s) based on partid mask in our partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, @@ -599,12 +636,12 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) xp_max_npartitions); /* initialize the activate IRQ related amo variables */ - for (i = 0; i < xp_nasid_mask_words; i++) - (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS + i); + for (i = 0; i < xpc_nasid_mask_words; i++) + (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i); /* initialize the engaged remote partitions related amo variables */ - (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO); - (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO); + (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2); + (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2); return xpSuccess; } @@ -657,7 +694,7 @@ xpc_check_remote_hb_sn2(void) short partid; enum xp_retval ret; - remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer; + remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2; for (partid = 0; partid < xp_max_npartitions; partid++) { @@ -749,7 +786,7 @@ 
xpc_request_partition_deactivation_sn2(struct xpc_partition *part) struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; unsigned long irq_flags; struct amo *amo = (struct amo *)__va(part_sn2->remote_amos_page_pa + - (XPC_DEACTIVATE_REQUEST_AMO * + (XPC_DEACTIVATE_REQUEST_AMO_SN2 * sizeof(struct amo))); local_irq_save(irq_flags); @@ -784,7 +821,7 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part) { unsigned long irq_flags; struct amo *amo = (struct amo *)__va(part->sn.sn2.remote_amos_page_pa + - (XPC_DEACTIVATE_REQUEST_AMO * + (XPC_DEACTIVATE_REQUEST_AMO_SN2 * sizeof(struct amo))); local_irq_save(irq_flags); @@ -808,7 +845,7 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part) static int xpc_partition_deactivation_requested_sn2(short partid) { - struct amo *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO; + struct amo *amo = xpc_vars->amos_page + XPC_DEACTIVATE_REQUEST_AMO_SN2; /* our partition's amo variable ANDed with partid mask */ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & @@ -898,7 +935,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) /* pull over the reserved page structure */ - remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; + remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer_sn2; ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); if (ret != xpSuccess) { @@ -917,7 +954,7 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) /* pull over the cross partition variables */ - remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer; + remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2; ret = xpc_get_remote_vars_sn2(remote_vars_pa, remote_vars); if (ret != xpSuccess) { @@ -996,10 +1033,10 @@ xpc_identify_activate_IRQ_sender_sn2(void) int n_IRQs_detected = 0; struct amo *act_amos; - act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS; + act_amos = xpc_vars->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2; /* scan through act amo variable looking for non-zero entries */ - for (word = 0; word < xp_nasid_mask_words; word++) { + for (word = 0; word < xpc_nasid_mask_words; word++) { if (xpc_exiting) break; @@ -2334,6 +2371,7 @@ int xpc_init_sn2(void) { int ret; + size_t buf_size; xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; @@ -2378,6 +2416,16 @@ xpc_init_sn2(void) xpc_send_msg = xpc_send_msg_sn2; xpc_received_msg = xpc_received_msg_sn2; + buf_size = max(XPC_RP_VARS_SIZE, + XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2); + xpc_remote_copy_buffer_sn2 = xpc_kmalloc_cacheline_aligned(buf_size, + GFP_KERNEL, + &xpc_remote_copy_buffer_base_sn2); + if (xpc_remote_copy_buffer_sn2 == NULL) { + dev_err(xpc_part, "can't get memory for remote copy buffer\n"); + return -ENOMEM; + } + /* open up protections for IPI and [potentially] amo operations */ xpc_allow_IPI_ops_sn2(); xpc_allow_amo_ops_shub_wars_1_1_sn2(); @@ -2394,6 +2442,7 @@ xpc_init_sn2(void) dev_err(xpc_part, "can't register ACTIVATE IRQ handler, " "errno=%d\n", -ret); xpc_disallow_IPI_ops_sn2(); + kfree(xpc_remote_copy_buffer_base_sn2); } return ret; } @@ -2403,4 +2452,5 @@ xpc_exit_sn2(void) { free_irq(SGI_XPC_ACTIVATE, NULL); xpc_disallow_IPI_ops_sn2(); + kfree(xpc_remote_copy_buffer_base_sn2); } -- cgit v1.2.3 From ea57f80c8c0e59cfc5095f7e856ce7c8e6ac2984 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:14 -0700 Subject: sgi-xp: eliminate '>>>' in comments Comments in /drivers/misc/sgi-xp has been using '>>>' as a means to draw attention to 
something that needs to be done or considered. To avoid colliding with git rejects, '>>>' will now be replaced by '!!!' to indicate something to do, and by '???' to indicate something to be considered. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 11 +++-------- drivers/misc/sgi-xp/xp_sn2.c | 10 +++++----- drivers/misc/sgi-xp/xp_uv.c | 2 +- drivers/misc/sgi-xp/xpc.h | 14 +++++++++----- drivers/misc/sgi-xp/xpc_channel.c | 2 +- drivers/misc/sgi-xp/xpc_partition.c | 2 +- drivers/misc/sgi-xp/xpc_sn2.c | 8 ++++---- drivers/misc/sgi-xp/xpc_uv.c | 32 ++++++++++++++++---------------- drivers/misc/sgi-xp/xpnet.c | 6 +++--- 9 files changed, 43 insertions(+), 44 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 955b5b913235..0ca81f16646f 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -21,7 +21,7 @@ #include #endif -/* >>> Add this #define to some linux header file some day. */ +/* ??? Add this #define to some linux header file some day? */ #define BYTES_PER_WORD sizeof(void *) #ifdef USE_DBUG_ON @@ -65,18 +65,13 @@ * other partition that is currently up. Over these channels, kernel-level * `users' can communicate with their counterparts on the other partitions. * ->>> The following described limitation of a max of eight channels possible ->>> pertains only to ia64-sn2. THIS ISN'T TRUE SINCE I'M PLANNING TO JUST ->>> TIE INTO THE EXISTING MECHANISM ONCE THE CHANNEL MESSAGES ARE RECEIVED. ->>> THE 128-BYTE CACHELINE PERFORMANCE ISSUE IS TIED TO IA64-SN2. - * * If the need for additional channels arises, one can simply increase * XPC_MAX_NCHANNELS accordingly. If the day should come where that number * exceeds the absolute MAXIMUM number of channels possible (eight), then one * will need to make changes to the XPC code to accommodate for this. * - * The absolute maximum number of channels possible is currently limited to - * eight for performance reasons. The internal cross partition structures + * The absolute maximum number of channels possible is limited to eight for + * performance reasons on sn2 hardware. The internal cross partition structures * require sixteen bytes per channel, and eight allows all of this * interface-shared info to fit in one 128-byte cacheline. */ diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index 1fcfdebca2c5..baabc1cb3fee 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -87,11 +87,11 @@ xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len) { bte_result_t ret; u64 pdst = ia64_tpa(vdst); - /* >>> What are the rules governing the src and dst addresses passed in? - * >>> Currently we're assuming that dst is a virtual address and src - * >>> is a physical address, is this appropriate? Can we allow them to - * >>> be whatever and we make the change here without damaging the - * >>> addresses? + /* ??? What are the rules governing the src and dst addresses passed in? + * ??? Currently we're assuming that dst is a virtual address and src + * ??? is a physical address, is this appropriate? Can we allow them to + * ??? be whatever and we make the change here without damaging the + * ??? addresses? 
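
The sn2 implementation already answers part of the ??? question above: vdst is run through ia64_tpa(), so it must be a kernel virtual address, while psrc is handed to bte_copy() untranslated, i.e. already physical. A kernel-side wrapper that names the asymmetry explicitly; illustrative only, not a proposed API:

/* illustrative: states the contract the sn2 code currently implies */
static enum xp_retval
pull_from_remote(void *vdst,	/* local, kernel virtual */
		 u64 src_pa,	/* remote, physical */
		 size_t len)
{
	/* the cast mirrors existing call sites such as xpc_get_remote_rp() */
	return xp_remote_memcpy(vdst, (void *)src_pa, len);
}
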
*/ /* diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index dca519fdef98..382b1b6bcc0b 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -18,7 +18,7 @@ static enum xp_retval xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len) { - /* >>> this function needs fleshing out */ + /* !!! this function needs fleshing out */ return xpUnsupported; } diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 2111723553bf..0f516c3e3e61 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -276,9 +276,12 @@ struct xpc_notify { * There is an array of these structures for each remote partition. It is * allocated at the time a partition becomes active. The array contains one * of these structures for each potential channel connection to that partition. + */ + +/* + * The following is sn2 only. * ->>> sn2 only!!! - * Each of these structures manages two message queues (circular buffers). + * Each channel structure manages two message queues (circular buffers). * They are allocated at the time a channel connection is made. One of * these message queues (local_msgqueue) holds the locally created messages * that are destined for the remote partition. The other of these message @@ -345,6 +348,7 @@ struct xpc_notify { * new messages, by the clearing of the message flags of the acknowledged * messages. */ + struct xpc_channel_sn2 { /* various flavors of local and remote Get/Put values */ @@ -359,7 +363,7 @@ struct xpc_channel_sn2 { }; struct xpc_channel_uv { - /* >>> code is coming */ + /* !!! code is coming */ }; struct xpc_channel { @@ -500,7 +504,7 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl) } /* - * Manages channels on a partition basis. There is one of these structures + * Manage channels on a partition basis. There is one of these structures * for each partition (a partition will never utilize the structure that * represents itself). */ @@ -535,7 +539,7 @@ struct xpc_partition_sn2 { }; struct xpc_partition_uv { - /* >>> code is coming */ + /* !!! code is coming */ }; struct xpc_partition { diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 1c73423665bd..f1afc0a7c33f 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -129,7 +129,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) /* wake those waiting for notify completion */ if (atomic_read(&ch->n_to_notify) > 0) { - /* >>> we do callout while holding ch->lock */ + /* we do callout while holding ch->lock, callout can't block */ xpc_notify_senders_of_disconnect(ch); } diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 9f104450478f..73a92957b800 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -91,7 +91,7 @@ xpc_get_rsvd_page_pa(int nasid) if (status != SALRET_MORE_PASSES) break; - /* >>> L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ + /* !!! L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ if (L1_CACHE_ALIGN(len) > buf_len) { kfree(buf_base); buf_len = L1_CACHE_ALIGN(len); diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 63fe59a5bfac..e42c3038203c 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -75,7 +75,7 @@ xpc_allow_IPI_ops_sn2(void) int node; int nasid; - /* >>> The following should get moved into SAL. */ + /* !!! The following should get moved into SAL. 
*/ if (is_shub2()) { xpc_sh2_IPI_access0_sn2 = (u64)HUB_L((u64 *)LOCAL_MMR_ADDR(SH2_IPI_ACCESS0)); @@ -118,7 +118,7 @@ xpc_disallow_IPI_ops_sn2(void) int node; int nasid; - /* >>> The following should get moved into SAL. */ + /* !!! The following should get moved into SAL. */ if (is_shub2()) { for_each_online_node(node) { nasid = cnodeid_to_nasid(node); @@ -1360,7 +1360,7 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part) * dst must be a cacheline aligned virtual address on this partition. * cnt must be cacheline sized */ -/* >>> Replace this function by call to xp_remote_memcpy() or bte_copy()? */ +/* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */ static enum xp_retval xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst, const void *src, size_t cnt) @@ -2242,7 +2242,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, notify->key = key; notify->type = notify_type; - /* >>> is a mb() needed here? */ + /* ??? Is a mb() needed here? */ if (ch->flags & XPC_C_DISCONNECTING) { /* diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 1401b0f45dcb..2aec1dfbb3db 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -15,8 +15,8 @@ #include -/* >>> #include */ -/* >>> uv_gpa() is defined in */ +/* !!! #include */ +/* !!! uv_gpa() is defined in */ #define uv_gpa(_a) ((unsigned long)_a) #include "xpc.h" @@ -29,16 +29,16 @@ static void xpc_send_local_activate_IRQ_uv(struct xpc_partition *part) { /* - * >>> make our side think that the remote parition sent an activate - * >>> message our way. Also do what the activate IRQ handler would - * >>> do had one really been sent. + * !!! Make our side think that the remote parition sent an activate + * !!! message our way. Also do what the activate IRQ handler would + * !!! do had one really been sent. */ } static enum xp_retval xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) { - /* >>> need to have established xpc_activate_mq earlier */ + /* !!! need to have established xpc_activate_mq earlier */ rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq); return xpSuccess; } @@ -46,7 +46,7 @@ xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) static void xpc_increment_heartbeat_uv(void) { - /* >>> send heartbeat msg to xpc_heartbeating_to_mask partids */ + /* !!! send heartbeat msg to xpc_heartbeating_to_mask partids */ } static void @@ -59,7 +59,7 @@ xpc_heartbeat_init_uv(void) static void xpc_heartbeat_exit_uv(void) { - /* >>> send heartbeat_offline msg to xpc_heartbeating_to_mask partids */ + /* !!! send heartbeat_offline msg to xpc_heartbeating_to_mask partids */ } static void @@ -70,9 +70,9 @@ xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, struct xpc_partition *part = &xpc_partitions[partid]; /* - * >>> setup part structure with the bits of info we can glean from the rp - * >>> part->remote_rp_pa = remote_rp_pa; - * >>> part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa; + * !!! Setup part structure with the bits of info we can glean from the rp: + * !!! part->remote_rp_pa = remote_rp_pa; + * !!! part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa; */ xpc_send_local_activate_IRQ_uv(part); @@ -91,7 +91,7 @@ xpc_request_partition_reactivation_uv(struct xpc_partition *part) static enum xp_retval xpc_setup_infrastructure_uv(struct xpc_partition *part) { - /* >>> this function needs fleshing out */ + /* !!! 
this function needs fleshing out */ return xpUnsupported; } @@ -102,28 +102,28 @@ xpc_setup_infrastructure_uv(struct xpc_partition *part) static void xpc_teardown_infrastructure_uv(struct xpc_partition *part) { - /* >>> this function needs fleshing out */ + /* !!! this function needs fleshing out */ return; } static enum xp_retval xpc_make_first_contact_uv(struct xpc_partition *part) { - /* >>> this function needs fleshing out */ + /* !!! this function needs fleshing out */ return xpUnsupported; } static u64 xpc_get_chctl_all_flags_uv(struct xpc_partition *part) { - /* >>> this function needs fleshing out */ + /* !!! this function needs fleshing out */ return 0UL; } static struct xpc_msg * xpc_get_deliverable_msg_uv(struct xpc_channel *ch) { - /* >>> this function needs fleshing out */ + /* !!! this function needs fleshing out */ return NULL; } diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index c5f59a6dae52..07c89c4e2c25 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -229,9 +229,9 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) if (ret != xpSuccess) { /* - * >>> Need better way of cleaning skb. Currently skb - * >>> appears in_use and we can't just call - * >>> dev_kfree_skb. + * !!! Need better way of cleaning skb. Currently skb + * !!! appears in_use and we can't just call + * !!! dev_kfree_skb. */ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) " "returned error=0x%x\n", (void *) -- cgit v1.2.3 From 04de741885bc7565a28150e82c56a56e544440e6 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:14 -0700 Subject: sgi-xp: use standard bitops macros and functions Change sgi-xp to use the standard bitops macros and functions instead of trying to invent its own mechanism. Signed-off-by: Dean Nelson Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 3 -- drivers/misc/sgi-xp/xpc.h | 43 ++++++++-------------- drivers/misc/sgi-xp/xpc_partition.c | 43 +++++++++++----------- drivers/misc/sgi-xp/xpc_sn2.c | 73 ++++++++++++++++++++----------------- 4 files changed, 76 insertions(+), 86 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 0ca81f16646f..3054fae8b023 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -21,9 +21,6 @@ #include #endif -/* ??? Add this #define to some linux header file some day? */ -#define BYTES_PER_WORD sizeof(void *) - #ifdef USE_DBUG_ON #define DBUG_ON(condition) BUG_ON(condition) #else diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 0f516c3e3e61..0907934cdd8f 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -35,23 +35,7 @@ #define XPC_VERSION_MAJOR(_v) ((_v) >> 4) #define XPC_VERSION_MINOR(_v) ((_v) & 0xf) -/* - * The next macros define word or bit representations for given - * C-brick nasid in either the SAL provided bit array representing - * nasids in the partition/machine or the array of amo structures used - * for inter-partition initiation communications. - * - * For SN2 machines, C-Bricks are alway even numbered NASIDs. As - * such, some space will be saved by insisting that nasid information - * passed from SAL always be packed for C-Bricks and the - * cross-partition interrupts use the same packing scheme. 
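
Since C-bricks are always even-numbered nasids, the packing stores nasid/2, and the macros removed below are then exactly the standard BIT_WORD()/BIT_MASK() arithmetic applied to nasid/2 — which is why test_bit(nasid / 2, mask) can replace XPC_NASID_IN_ARRAY(). A standalone check of that equivalence (assuming 64-bit longs; BIT_WORD()/BIT_MASK() are spelled out locally rather than pulled from kernel headers):

#include <assert.h>

#define BITS_PER_LONG   64      /* assumption: 64-bit kernel */
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))

/* the macros being removed by this patch */
#define XPC_NASID_W_INDEX(_n)   (((_n) / 64) / 2)
#define XPC_NASID_B_INDEX(_n)   (((_n) / 2) & (64 - 1))
#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)

int main(void)
{
        int nasid;

        /* C-brick nasids are always even, hence the divide-by-2 packing */
        for (nasid = 0; nasid < 2048; nasid += 2) {
                assert(XPC_NASID_W_INDEX(nasid) == BIT_WORD(nasid / 2));
                assert(BIT_MASK(nasid / 2) ==
                       1UL << XPC_NASID_B_INDEX(nasid));
                assert(XPC_NASID_FROM_W_B(XPC_NASID_W_INDEX(nasid),
                                          XPC_NASID_B_INDEX(nasid)) == nasid);
        }
        return 0;
}
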
- */ -#define XPC_NASID_W_INDEX(_n) (((_n) / 64) / 2) -#define XPC_NASID_B_INDEX(_n) (((_n) / 2) & (64 - 1)) -#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \ - (1UL << XPC_NASID_B_INDEX(_n))) -#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2) - +/* define frequency of the heartbeat and frequency how often it's checked */ #define XPC_HB_DEFAULT_INTERVAL 5 /* incr HB every x secs */ #define XPC_HB_CHECK_DEFAULT_INTERVAL 20 /* check HB every x secs */ @@ -86,11 +70,13 @@ * the actual nasids in the entire machine (mach_nasids). We're only * interested in the even numbered nasids (which contain the processors * and/or memory), so we only need half as many bits to represent the - * nasids. The part_nasids mask is located starting at the first cacheline - * following the reserved page header. The mach_nasids mask follows right - * after the part_nasids mask. The size in bytes of each mask is reflected - * by the reserved page header field 'SAL_nasids_size'. (Local partition's - * mask pointers are xpc_part_nasids and xpc_mach_nasids.) + * nasids. When mapping nasid to bit in a mask (or bit to nasid) be sure + * to either divide or multiply by 2. The part_nasids mask is located + * starting at the first cacheline following the reserved page header. The + * mach_nasids mask follows right after the part_nasids mask. The size in + * bytes of each mask is reflected by the reserved page header field + * 'SAL_nasids_size'. (Local partition's mask pointers are xpc_part_nasids + * and xpc_mach_nasids.) * * vars (ia64-sn2 only) * vars part (ia64-sn2 only) @@ -194,10 +180,11 @@ struct xpc_vars_part_sn2 { #define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2)) #define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) -#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xpc_nasid_mask_words) +#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \ + xpc_nasid_mask_nlongs) #define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \ (XPC_RP_MACH_NASIDS(_rp) + \ - xpc_nasid_mask_words)) + xpc_nasid_mask_nlongs)) /* * Functions registered by add_timer() or called by kernel_thread() only @@ -695,9 +682,9 @@ extern void xpc_exit_uv(void); /* found in xpc_partition.c */ extern int xpc_exiting; -extern int xpc_nasid_mask_words; +extern int xpc_nasid_mask_nlongs; extern struct xpc_rsvd_page *xpc_rsvd_page; -extern u64 *xpc_mach_nasids; +extern unsigned long *xpc_mach_nasids; extern struct xpc_partition *xpc_partitions; extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void); @@ -706,8 +693,8 @@ extern int xpc_partition_disengaged(struct xpc_partition *); extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); extern void xpc_mark_partition_inactive(struct xpc_partition *); extern void xpc_discovery(void); -extern enum xp_retval xpc_get_remote_rp(int, u64 *, struct xpc_rsvd_page *, - u64 *); +extern enum xp_retval xpc_get_remote_rp(int, unsigned long *, + struct xpc_rsvd_page *, u64 *); extern void xpc_deactivate_partition(const int, struct xpc_partition *, enum xp_retval); extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 73a92957b800..ca6784f55970 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -31,11 +31,11 @@ int xpc_exiting; /* this partition's reserved page pointers */ struct xpc_rsvd_page *xpc_rsvd_page; -static 
u64 *xpc_part_nasids; -u64 *xpc_mach_nasids; +static unsigned long *xpc_part_nasids; +unsigned long *xpc_mach_nasids; -static int xpc_sizeof_nasid_mask; /* actual size in bytes of nasid mask */ -int xpc_nasid_mask_words; /* actual size in words of nasid mask */ +static int xpc_nasid_mask_nbytes; /* #of bytes in nasid mask */ +int xpc_nasid_mask_nlongs; /* #of longs in nasid mask */ struct xpc_partition *xpc_partitions; @@ -167,9 +167,9 @@ xpc_setup_rsvd_page(void) /* SAL_version 1 didn't set the nasids_size field */ rp->SAL_nasids_size = 128; } - xpc_sizeof_nasid_mask = rp->SAL_nasids_size; - xpc_nasid_mask_words = DIV_ROUND_UP(xpc_sizeof_nasid_mask, - BYTES_PER_WORD); + xpc_nasid_mask_nbytes = rp->SAL_nasids_size; + xpc_nasid_mask_nlongs = BITS_TO_LONGS(rp->SAL_nasids_size * + BITS_PER_BYTE); /* setup the pointers to the various items in the reserved page */ xpc_part_nasids = XPC_RP_PART_NASIDS(rp); @@ -199,10 +199,10 @@ xpc_setup_rsvd_page(void) * part_nasids mask. */ enum xp_retval -xpc_get_remote_rp(int nasid, u64 *discovered_nasids, +xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) { - int i; + int l; enum xp_retval ret; /* get the reserved page's physical address */ @@ -213,15 +213,16 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, /* pull over the reserved page header and part_nasids mask */ ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, - XPC_RP_HEADER_SIZE + xpc_sizeof_nasid_mask); + XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes); if (ret != xpSuccess) return ret; if (discovered_nasids != NULL) { - u64 *remote_part_nasids = XPC_RP_PART_NASIDS(remote_rp); + unsigned long *remote_part_nasids = + XPC_RP_PART_NASIDS(remote_rp); - for (i = 0; i < xpc_nasid_mask_words; i++) - discovered_nasids[i] |= remote_part_nasids[i]; + for (l = 0; l < xpc_nasid_mask_nlongs; l++) + discovered_nasids[l] |= remote_part_nasids[l]; } /* see if the reserved page has been set up by XPC */ @@ -401,16 +402,16 @@ xpc_discovery(void) int max_regions; int nasid; struct xpc_rsvd_page *rp; - u64 *discovered_nasids; + unsigned long *discovered_nasids; enum xp_retval ret; remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + - xpc_sizeof_nasid_mask, + xpc_nasid_mask_nbytes, GFP_KERNEL, &remote_rp_base); if (remote_rp == NULL) return; - discovered_nasids = kzalloc(sizeof(u64) * xpc_nasid_mask_words, + discovered_nasids = kzalloc(sizeof(long) * xpc_nasid_mask_nlongs, GFP_KERNEL); if (discovered_nasids == NULL) { kfree(remote_rp_base); @@ -453,21 +454,21 @@ xpc_discovery(void) dev_dbg(xpc_part, "checking nasid %d\n", nasid); - if (XPC_NASID_IN_ARRAY(nasid, xpc_part_nasids)) { + if (test_bit(nasid / 2, xpc_part_nasids)) { dev_dbg(xpc_part, "PROM indicates Nasid %d is " "part of the local partition; skipping " "region\n", nasid); break; } - if (!(XPC_NASID_IN_ARRAY(nasid, xpc_mach_nasids))) { + if (!(test_bit(nasid / 2, xpc_mach_nasids))) { dev_dbg(xpc_part, "PROM indicates Nasid %d was " "not on Numa-Link network at reset\n", nasid); continue; } - if (XPC_NASID_IN_ARRAY(nasid, discovered_nasids)) { + if (test_bit(nasid / 2, discovered_nasids)) { dev_dbg(xpc_part, "Nasid %d is part of a " "partition which was previously " "discovered\n", nasid); @@ -512,10 +513,10 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) if (part->remote_rp_pa == 0) return xpPartitionDown; - memset(nasid_mask, 0, xpc_sizeof_nasid_mask); + memset(nasid_mask, 0, xpc_nasid_mask_nbytes); part_nasid_pa = 
(u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, - xpc_sizeof_nasid_mask); + xpc_nasid_mask_nbytes); } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index e42c3038203c..f82889f60151 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -210,28 +210,26 @@ static void xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid, int to_phys_cpuid) { - int w_index = XPC_NASID_W_INDEX(from_nasid); - int b_index = XPC_NASID_B_INDEX(from_nasid); struct amo *amos = (struct amo *)__va(amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); - (void)xpc_send_IRQ_sn2(&amos[w_index], (1UL << b_index), to_nasid, + (void)xpc_send_IRQ_sn2(&amos[BIT_WORD(from_nasid / 2)], + BIT_MASK(from_nasid / 2), to_nasid, to_phys_cpuid, SGI_XPC_ACTIVATE); } static void xpc_send_local_activate_IRQ_sn2(int from_nasid) { - int w_index = XPC_NASID_W_INDEX(from_nasid); - int b_index = XPC_NASID_B_INDEX(from_nasid); struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); /* fake the sending and receipt of an activate IRQ from remote nasid */ - FETCHOP_STORE_OP(TO_AMO((u64)&amos[w_index].variable), FETCHOP_OR, - (1UL << b_index)); + FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable), + FETCHOP_OR, BIT_MASK(from_nasid / 2)); + atomic_inc(&xpc_activate_IRQ_rcvd); wake_up_interruptible(&xpc_activate_IRQ_wq); } @@ -439,7 +437,8 @@ xpc_indicate_partition_engaged_sn2(struct xpc_partition *part) /* set bit corresponding to our partid in remote partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, - (1UL << sn_partition_id)); + BIT(sn_partition_id)); + /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we @@ -466,7 +465,8 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part) /* clear bit corresponding to our partid in remote partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, - ~(1UL << sn_partition_id)); + ~BIT(sn_partition_id)); + /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. 
If we @@ -497,7 +497,7 @@ xpc_partition_engaged_sn2(short partid) /* our partition's amo variable ANDed with partid mask */ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & - (1UL << partid)) != 0; + BIT(partid)) != 0; } static int @@ -518,7 +518,7 @@ xpc_assume_partition_disengaged_sn2(short partid) /* clear bit(s) based on partid mask in our partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, - ~(1UL << partid)); + ~BIT(partid)); } /* original protection values for each node */ @@ -639,7 +639,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) xp_max_npartitions); /* initialize the activate IRQ related amo variables */ - for (i = 0; i < xpc_nasid_mask_words; i++) + for (i = 0; i < xpc_nasid_mask_nlongs; i++) (void)xpc_init_IRQ_amo_sn2(XPC_ACTIVATE_IRQ_AMOS_SN2 + i); /* initialize the engaged remote partitions related amo variables */ @@ -796,7 +796,8 @@ xpc_request_partition_deactivation_sn2(struct xpc_partition *part) /* set bit corresponding to our partid in remote partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_OR, - (1UL << sn_partition_id)); + BIT(sn_partition_id)); + /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we @@ -831,7 +832,8 @@ xpc_cancel_partition_deactivation_request_sn2(struct xpc_partition *part) /* clear bit corresponding to our partid in remote partition's amo */ FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, - ~(1UL << sn_partition_id)); + ~BIT(sn_partition_id)); + /* * We must always use the nofault function regardless of whether we * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we @@ -853,7 +855,7 @@ xpc_partition_deactivation_requested_sn2(short partid) /* our partition's amo variable ANDed with partid mask */ return (FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) & - (1UL << partid)) != 0; + BIT(partid)) != 0; } /* @@ -1031,28 +1033,31 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) int xpc_identify_activate_IRQ_sender_sn2(void) { - int word, bit; - u64 nasid_mask; + int l; + int b; + unsigned long nasid_mask_long; u64 nasid; /* remote nasid */ int n_IRQs_detected = 0; struct amo *act_amos; act_amos = xpc_vars_sn2->amos_page + XPC_ACTIVATE_IRQ_AMOS_SN2; - /* scan through act amo variable looking for non-zero entries */ - for (word = 0; word < xpc_nasid_mask_words; word++) { + /* scan through activate amo variables looking for non-zero entries */ + for (l = 0; l < xpc_nasid_mask_nlongs; l++) { if (xpc_exiting) break; - nasid_mask = xpc_receive_IRQ_amo_sn2(&act_amos[word]); - if (nasid_mask == 0) { - /* no IRQs from nasids in this variable */ + nasid_mask_long = xpc_receive_IRQ_amo_sn2(&act_amos[l]); + + b = find_first_bit(&nasid_mask_long, BITS_PER_LONG); + if (b >= BITS_PER_LONG) { + /* no IRQs from nasids in this amo variable */ continue; } - dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", word, - nasid_mask); + dev_dbg(xpc_part, "amo[%d] gave back 0x%lx\n", l, + nasid_mask_long); /* * If this nasid has been added to the machine since @@ -1060,19 +1065,19 @@ xpc_identify_activate_IRQ_sender_sn2(void) * remote nasid in our reserved pages machine mask. * This is used in the event of module reload. 
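
The rewritten scan below visits only the set bits of each amo value via find_first_bit()/find_next_bit() rather than testing all 64 positions. A userspace sketch of the same walk, with a toy single-long stand-in for find_next_bit() (the kernel versions take arbitrary-length bitmaps):

#include <stdio.h>

#define BITS_PER_LONG 64

/* toy stand-in for the kernel's find_next_bit(), one long only */
static int toy_find_next_bit(unsigned long word, int offset)
{
        while (offset < BITS_PER_LONG && !(word & (1UL << offset)))
                offset++;
        return offset;          /* BITS_PER_LONG means no bit found */
}

int main(void)
{
        /* pretend nasids 6 and 34 raised activate IRQs: bits 3 and 17 */
        unsigned long nasid_mask_long = (1UL << 3) | (1UL << 17);
        int l = 0;              /* index of this long within the mask */
        int b;

        for (b = toy_find_next_bit(nasid_mask_long, 0);
             b < BITS_PER_LONG;
             b = toy_find_next_bit(nasid_mask_long, b + 1))
                printf("interrupt from nasid %d\n",
                       (l * BITS_PER_LONG + b) * 2);
        return 0;
}

The (l * BITS_PER_LONG + b) * 2 conversion is the round trip of the nasid/2 packing: word index and bit position back to an even nasid.
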
*/ - xpc_mach_nasids[word] |= nasid_mask; + xpc_mach_nasids[l] |= nasid_mask_long; /* locate the nasid(s) which sent interrupts */ - for (bit = 0; bit < (8 * sizeof(u64)); bit++) { - if (nasid_mask & (1UL << bit)) { - n_IRQs_detected++; - nasid = XPC_NASID_FROM_W_B(word, bit); - dev_dbg(xpc_part, "interrupt from nasid %ld\n", - nasid); - xpc_identify_activate_IRQ_req_sn2(nasid); - } - } + do { + n_IRQs_detected++; + nasid = (l * BITS_PER_LONG + b) * 2; + dev_dbg(xpc_part, "interrupt from nasid %ld\n", nasid); + xpc_identify_activate_IRQ_req_sn2(nasid); + + b = find_next_bit(&nasid_mask_long, BITS_PER_LONG, + b + 1); + } while (b < BITS_PER_LONG); } return n_IRQs_detected; } -- cgit v1.2.3 From 261f3b4979db88d29fc86aad9f76fbc0c2c6d21a Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:16 -0700 Subject: sgi-xp: enable building of XPC/XPNET on x86_64 Get XPC/XPNET to build on x86_64. Trying to modprobe them up on a non-UV or sn2 system will result in a -ENODEV. Signed-off-by: Dean Nelson Cc: Jack Steiner Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/Kconfig | 2 +- drivers/misc/sgi-xp/Makefile | 14 ++++++++--- drivers/misc/sgi-xp/xp.h | 32 +++++++++++++++--------- drivers/misc/sgi-xp/xp_main.c | 10 +++++++- drivers/misc/sgi-xp/xp_sn2.c | 10 ++++++++ drivers/misc/sgi-xp/xpc.h | 34 ++++++++++++------------- drivers/misc/sgi-xp/xpc_channel.c | 18 +++++--------- drivers/misc/sgi-xp/xpc_main.c | 49 +++++++++++++++++++------------------ drivers/misc/sgi-xp/xpc_partition.c | 47 +++++++++++++++-------------------- drivers/misc/sgi-xp/xpc_sn2.c | 30 +++++++++++++++++++---- drivers/misc/sgi-xp/xpc_uv.c | 7 ++---- drivers/misc/sgi-xp/xpnet.c | 28 +++++++++------------ 12 files changed, 155 insertions(+), 126 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 4b288f43ca8a..fa50e9ede0e6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -426,7 +426,7 @@ config ENCLOSURE_SERVICES config SGI_XP tristate "Support communication between SGI SSIs" - depends on IA64_GENERIC || IA64_SGI_SN2 + depends on IA64_GENERIC || IA64_SGI_SN2 || IA64_SGI_UV || (X86_64 && SMP) select IA64_UNCACHED_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 select GENERIC_ALLOCATOR if IA64_GENERIC || IA64_SGI_SN2 ---help--- diff --git a/drivers/misc/sgi-xp/Makefile b/drivers/misc/sgi-xp/Makefile index b3eeff31ebf8..35ce28578075 100644 --- a/drivers/misc/sgi-xp/Makefile +++ b/drivers/misc/sgi-xp/Makefile @@ -3,11 +3,17 @@ # obj-$(CONFIG_SGI_XP) += xp.o -xp-y := xp_main.o xp_uv.o -xp-$(CONFIG_IA64) += xp_sn2.o xp_nofault.o +xp-y := xp_main.o +xp-$(CONFIG_IA64_SGI_SN2) += xp_sn2.o xp_nofault.o +xp-$(CONFIG_IA64_GENERIC) += xp_sn2.o xp_nofault.o xp_uv.o +xp-$(CONFIG_IA64_SGI_UV) += xp_uv.o +xp-$(CONFIG_X86_64) += xp_uv.o obj-$(CONFIG_SGI_XP) += xpc.o -xpc-y := xpc_main.o xpc_uv.o xpc_channel.o xpc_partition.o -xpc-$(CONFIG_IA64) += xpc_sn2.o +xpc-y := xpc_main.o xpc_channel.o xpc_partition.o +xpc-$(CONFIG_IA64_SGI_SN2) += xpc_sn2.o +xpc-$(CONFIG_IA64_GENERIC) += xpc_sn2.o xpc_uv.o +xpc-$(CONFIG_IA64_SGI_UV) += xpc_uv.o +xpc-$(CONFIG_X86_64) += xpc_uv.o obj-$(CONFIG_SGI_XP) += xpnet.o diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 3054fae8b023..01bf1a2cd8ed 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -13,18 +13,17 @@ #ifndef _DRIVERS_MISC_SGIXP_XP_H #define _DRIVERS_MISC_SGIXP_XP_H -#include -#include #include -#include + #ifdef CONFIG_IA64 -#include 
+#include +#include /* defines is_shub1() and is_shub2() */ +#define is_shub() ia64_platform_is("sn2") +#define is_uv() ia64_platform_is("uv") #endif - -#ifdef USE_DBUG_ON -#define DBUG_ON(condition) BUG_ON(condition) -#else -#define DBUG_ON(condition) +#ifdef CONFIG_X86_64 +#include +#define is_uv() is_uv_system() #endif #ifndef is_shub1 @@ -36,13 +35,19 @@ #endif #ifndef is_shub -#define is_shub() (is_shub1() || is_shub2()) +#define is_shub() 0 #endif #ifndef is_uv #define is_uv() 0 #endif +#ifdef USE_DBUG_ON +#define DBUG_ON(condition) BUG_ON(condition) +#else +#define DBUG_ON(condition) +#endif + /* * Define the maximum number of partitions the system can possibly support. * It is based on the maximum number of hardware partitionable regions. The @@ -200,7 +205,9 @@ enum xp_retval { xpPayloadTooBig, /* 55: payload too large for message slot */ xpUnsupported, /* 56: unsupported functionality or resource */ - xpUnknownReason /* 57: unknown reason - must be last in enum */ + xpNeedMoreInfo, /* 57: more info is needed by SAL */ + + xpUnknownReason /* 58: unknown reason - must be last in enum */ }; /* @@ -339,8 +346,11 @@ xpc_partid_to_nasids(short partid, void *nasids) } extern short xp_max_npartitions; +extern short xp_partition_id; +extern u8 xp_region_size; extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t); +extern int (*xp_cpu_to_nasid) (int); extern u64 xp_nofault_PIOR_target; extern int xp_nofault_PIOR(void *); diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 9c0ce2f15ff6..c34b23fe498f 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -14,7 +14,6 @@ * */ -#include #include #include #include "xp.h" @@ -36,9 +35,18 @@ struct device *xp = &xp_dbg_subname; short xp_max_npartitions; EXPORT_SYMBOL_GPL(xp_max_npartitions); +short xp_partition_id; +EXPORT_SYMBOL_GPL(xp_partition_id); + +u8 xp_region_size; +EXPORT_SYMBOL_GPL(xp_region_size); + enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len); EXPORT_SYMBOL_GPL(xp_remote_memcpy); +int (*xp_cpu_to_nasid) (int cpuid); +EXPORT_SYMBOL_GPL(xp_cpu_to_nasid); + /* * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level * users of XPC. diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index baabc1cb3fee..c6a1ede7d6e6 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -12,6 +12,7 @@ * Architecture specific implementation of common functions. 
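
With is_shub()/is_uv() defined for every configuration, platform selection happens at runtime instead of build time. A sketch of the dispatch this enables (the real xp_init() lives in xp_main.c, which these hunks show only in part, so the exact body here is an assumption; the -ENODEV matches the commit message):

int __init
xp_init(void)
{
        enum xp_retval ret;

        if (is_shub())
                ret = xp_init_sn2();    /* IA64_SGI_SN2 / IA64_GENERIC */
        else if (is_uv())
                ret = xp_init_uv();     /* IA64_SGI_UV / X86_64 */
        else
                ret = xpUnsupported;

        /* modprobe on other hardware fails, per the commit message */
        return (ret == xpSuccess) ? 0 : -ENODEV;
}
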
*/ +#include #include #include #include @@ -116,14 +117,23 @@ xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len) return xpBteCopyError; } +static int +xp_cpu_to_nasid_sn2(int cpuid) +{ + return cpuid_to_nasid(cpuid); +} + enum xp_retval xp_init_sn2(void) { BUG_ON(!is_shub()); xp_max_npartitions = XP_MAX_NPARTITIONS_SN2; + xp_partition_id = sn_partition_id; + xp_region_size = sn_region_size; xp_remote_memcpy = xp_remote_memcpy_sn2; + xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; return xp_register_nofault_code_sn2(); } diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index e194d3140f68..96408fcf5a1e 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -13,17 +13,10 @@ #ifndef _DRIVERS_MISC_SGIXP_XPC_H #define _DRIVERS_MISC_SGIXP_XPC_H -#include -#include -#include -#include +#include #include -#include -#include -#include -#include -#include -#include +#include +#include #include "xp.h" /* @@ -179,7 +172,8 @@ struct xpc_vars_part_sn2 { #define XPC_RP_HEADER_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page)) #define XPC_RP_VARS_SIZE L1_CACHE_ALIGN(sizeof(struct xpc_vars_sn2)) -#define XPC_RP_PART_NASIDS(_rp) ((u64 *)((u8 *)(_rp) + XPC_RP_HEADER_SIZE)) +#define XPC_RP_PART_NASIDS(_rp) ((unsigned long *)((u8 *)(_rp) + \ + XPC_RP_HEADER_SIZE)) #define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + \ xpc_nasid_mask_nlongs) #define XPC_RP_VARS(_rp) ((struct xpc_vars_sn2 *) \ @@ -202,13 +196,13 @@ struct xpc_vars_part_sn2 { /* * Define a Get/Put value pair (pointers) used with a message queue. */ -struct xpc_gp { +struct xpc_gp_sn2 { s64 get; /* Get value */ s64 put; /* Put value */ }; #define XPC_GP_SIZE \ - L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_MAX_NCHANNELS) + L1_CACHE_ALIGN(sizeof(struct xpc_gp_sn2) * XPC_MAX_NCHANNELS) /* * Define a structure that contains arguments associated with opening and @@ -340,10 +334,10 @@ struct xpc_channel_sn2 { /* various flavors of local and remote Get/Put values */ - struct xpc_gp *local_GP; /* local Get/Put values */ - struct xpc_gp remote_GP; /* remote Get/Put values */ - struct xpc_gp w_local_GP; /* working local Get/Put values */ - struct xpc_gp w_remote_GP; /* working remote Get/Put values */ + struct xpc_gp_sn2 *local_GP; /* local Get/Put values */ + struct xpc_gp_sn2 remote_GP; /* remote Get/Put values */ + struct xpc_gp_sn2 w_local_GP; /* working local Get/Put values */ + struct xpc_gp_sn2 w_remote_GP; /* working remote Get/Put values */ s64 next_msg_to_pull; /* Put value of next msg to pull */ struct mutex msg_to_pull_mutex; /* next msg to pull serialization */ @@ -506,9 +500,9 @@ struct xpc_partition_sn2 { u8 remote_vars_version; /* version# of partition's vars */ void *local_GPs_base; /* base address of kmalloc'd space */ - struct xpc_gp *local_GPs; /* local Get/Put values */ + struct xpc_gp_sn2 *local_GPs; /* local Get/Put values */ void *remote_GPs_base; /* base address of kmalloc'd space */ - struct xpc_gp *remote_GPs; /* copy of remote partition's local */ + struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */ /* Get/Put values */ u64 remote_GPs_pa; /* phys address of remote partition's local */ /* Get/Put values */ @@ -629,6 +623,8 @@ extern void xpc_activate_partition(struct xpc_partition *); extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); +extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64, u64 *, u64 *, + size_t *); extern enum xp_retval 
(*xpc_rsvd_page_init) (struct xpc_rsvd_page *); extern void (*xpc_heartbeat_init) (void); extern void (*xpc_heartbeat_exit) (void); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index f1afc0a7c33f..0615efbe0070 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -14,14 +14,7 @@ * */ -#include -#include -#include -#include -#include -#include -#include -#include +#include #include "xpc.h" /* @@ -373,8 +366,9 @@ again: dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa=" "0x%lx, local_nentries=%d, remote_nentries=%d) " "received from partid=%d, channel=%d\n", - args->local_msgqueue_pa, args->local_nentries, - args->remote_nentries, ch->partid, ch->number); + (unsigned long)args->local_msgqueue_pa, + args->local_nentries, args->remote_nentries, + ch->partid, ch->number); if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { spin_unlock_irqrestore(&ch->lock, irq_flags); @@ -940,7 +934,7 @@ xpc_deliver_msg(struct xpc_channel *ch) if (ch->func != NULL) { dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " "msg_number=%ld, partid=%d, channel=%d\n", - (void *)msg, msg->number, ch->partid, + msg, (signed long)msg->number, ch->partid, ch->number); /* deliver the message to its intended recipient */ @@ -949,7 +943,7 @@ xpc_deliver_msg(struct xpc_channel *ch) dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " "msg_number=%ld, partid=%d, channel=%d\n", - (void *)msg, msg->number, ch->partid, + msg, (signed long)msg->number, ch->partid, ch->number); } diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index e7ff9e1670f6..f7478cc3572d 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -43,19 +43,13 @@ * */ -#include #include -#include -#include -#include +#include +#include #include #include -#include #include #include -#include -#include -#include #include "xpc.h" /* define two XPC debug device structures to be used with dev_dbg() et al */ @@ -175,6 +169,8 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; +enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie, + u64 *paddr, size_t *len); enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); void (*xpc_heartbeat_init) (void); void (*xpc_heartbeat_exit) (void); @@ -920,7 +916,8 @@ xpc_die_deactivate(void) struct xpc_partition *part; short partid; int any_engaged; - long time, printmsg_time, disengage_timeout; + long keep_waiting; + long wait_to_print; /* keep xpc_hb_checker thread from doing anything (just in case) */ xpc_exiting = 1; @@ -937,16 +934,17 @@ xpc_die_deactivate(void) } } - time = rtc_time(); - printmsg_time = time + - (XPC_DEACTIVATE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second); - disengage_timeout = time + - (xpc_disengage_timelimit * sn_rtc_cycles_per_second); - /* * Though we requested that all other partitions deactivate from us, - * we only wait until they've all disengaged. + * we only wait until they've all disengaged or we've reached the + * defined timelimit. + * + * Given that one iteration through the following while-loop takes + * approximately 200 microseconds, calculate the #of loops to take + * before bailing and the #of loops before printing a waiting message. 
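
Concretely, udelay(200) gives 5 passes per millisecond, so the counts work out as follows (assuming the default 90-second disengage timelimit and a 10-second print interval, both defined elsewhere in the driver):

/* one pass costs udelay(200): 5 passes per ms, 5000 per second */
keep_waiting  = 90 * 1000 * 5;  /* 450000 passes ~= 90 seconds */
wait_to_print = 10 * 1000 * 5;  /*  50000 passes ~= 10 seconds */
/* seconds remaining for the dev_info() is keep_waiting / (1000 * 5) */
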
*/ + keep_waiting = xpc_disengage_timelimit * 1000 * 5; + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5; while (1) { any_engaged = xpc_any_partition_engaged(); @@ -955,8 +953,7 @@ xpc_die_deactivate(void) break; } - time = rtc_time(); - if (time >= disengage_timeout) { + if (!keep_waiting--) { for (partid = 0; partid < xp_max_npartitions; partid++) { if (xpc_partition_engaged(partid)) { @@ -968,15 +965,15 @@ xpc_die_deactivate(void) break; } - if (time >= printmsg_time) { + if (!wait_to_print--) { dev_info(xpc_part, "waiting for remote partitions to " "deactivate, timeout in %ld seconds\n", - (disengage_timeout - time) / - sn_rtc_cycles_per_second); - printmsg_time = time + - (XPC_DEACTIVATE_PRINTMSG_INTERVAL * - sn_rtc_cycles_per_second); + keep_waiting / (1000 * 5)); + wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * + 1000 * 5; } + + udelay(200); } } @@ -991,6 +988,7 @@ xpc_die_deactivate(void) static int xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) { +#ifdef CONFIG_IA64 /* !!! temporary kludge */ switch (event) { case DIE_MACHINE_RESTART: case DIE_MACHINE_HALT: @@ -1019,6 +1017,9 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused) xpc_online_heartbeat(); break; } +#else + xpc_die_deactivate(); +#endif return NOTIFY_DONE; } diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index 70d4a00c9723..f84d66410205 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -15,15 +15,8 @@ * */ -#include -#include -#include -#include -#include -#include -#include -#include -#include +#include +#include #include "xpc.h" /* XPC is exiting flag */ @@ -71,24 +64,23 @@ static u64 xpc_get_rsvd_page_pa(int nasid) { enum xp_retval ret; - s64 status; u64 cookie = 0; u64 rp_pa = nasid; /* seed with nasid */ - u64 len = 0; + size_t len = 0; u64 buf = buf; u64 buf_len = 0; void *buf_base = NULL; while (1) { - status = sn_partition_reserved_page_pa(buf, &cookie, &rp_pa, - &len); + ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, + &len); - dev_dbg(xpc_part, "SAL returned with status=%li, cookie=" - "0x%016lx, address=0x%016lx, len=0x%016lx\n", - status, cookie, rp_pa, len); + dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " + "address=0x%016lx, len=0x%016lx\n", ret, + (unsigned long)cookie, (unsigned long)rp_pa, len); - if (status != SALRET_MORE_PASSES) + if (ret != xpNeedMoreInfo) break; /* !!! 
L1_CACHE_ALIGN() is only a sn2-bte_copy requirement */ @@ -100,8 +92,9 @@ xpc_get_rsvd_page_pa(int nasid) &buf_base); if (buf_base == NULL) { dev_err(xpc_part, "unable to kmalloc " - "len=0x%016lx\n", buf_len); - status = SALRET_ERROR; + "len=0x%016lx\n", + (unsigned long)buf_len); + ret = xpNoMemory; break; } } @@ -109,17 +102,17 @@ xpc_get_rsvd_page_pa(int nasid) ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len); if (ret != xpSuccess) { dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); - status = SALRET_ERROR; break; } } kfree(buf_base); - if (status != SALRET_OK) + if (ret != xpSuccess) rp_pa = 0; - dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); + dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", + (unsigned long)rp_pa); return rp_pa; } @@ -138,7 +131,7 @@ xpc_setup_rsvd_page(void) /* get the local reserved page's address */ preempt_disable(); - rp_pa = xpc_get_rsvd_page_pa(cpuid_to_nasid(smp_processor_id())); + rp_pa = xpc_get_rsvd_page_pa(xp_cpu_to_nasid(smp_processor_id())); preempt_enable(); if (rp_pa == 0) { dev_err(xpc_part, "SAL failed to locate the reserved page\n"); @@ -150,7 +143,7 @@ xpc_setup_rsvd_page(void) /* SAL_versions < 3 had a SAL_partid defined as a u8 */ rp->SAL_partid &= 0xff; } - BUG_ON(rp->SAL_partid != sn_partition_id); + BUG_ON(rp->SAL_partid != xp_partition_id); if (rp->SAL_partid < 0 || rp->SAL_partid >= xp_max_npartitions) { dev_err(xpc_part, "the reserved page's partid of %d is outside " @@ -237,11 +230,11 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, /* check that both remote and local partids are valid for each side */ if (remote_rp->SAL_partid < 0 || remote_rp->SAL_partid >= xp_max_npartitions || - remote_rp->max_npartitions <= sn_partition_id) { + remote_rp->max_npartitions <= xp_partition_id) { return xpInvalidPartid; } - if (remote_rp->SAL_partid == sn_partition_id) + if (remote_rp->SAL_partid == xp_partition_id) return xpLocalPartid; return xpSuccess; @@ -426,7 +419,7 @@ xpc_discovery(void) * protection is in regards to memory, IOI and IPI. */ max_regions = 64; - region_size = sn_region_size; + region_size = xp_region_size; switch (region_size) { case 128: diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 4b5f69edf0d2..fde870aebcb9 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -13,9 +13,9 @@ * */ -#include #include #include +#include #include #include "xpc.h" @@ -176,7 +176,7 @@ xpc_send_IRQ_sn2(struct amo *amo, u64 flag, int nasid, int phys_cpuid, local_irq_restore(irq_flags); - return ((ret == 0) ? xpSuccess : xpPioReadError); + return (ret == 0) ? 
xpSuccess : xpPioReadError; } static struct amo * @@ -284,7 +284,7 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id) short partid = (short)(u64)dev_id; struct xpc_partition *part = &xpc_partitions[partid]; - DBUG_ON(partid < 0 || partid >= xp_max_npartitions); + DBUG_ON(partid < 0 || partid >= XP_MAX_NPARTITIONS_SN2); if (xpc_part_ref(part)) { xpc_check_for_sent_chctl_flags_sn2(part); @@ -576,6 +576,25 @@ xpc_allow_amo_ops_shub_wars_1_1_sn2(void) } } +static enum xp_retval +xpc_get_partition_rsvd_page_pa_sn2(u64 buf, u64 *cookie, u64 *paddr, + size_t *len) +{ + s64 status; + enum xp_retval ret; + + status = sn_partition_reserved_page_pa(buf, cookie, paddr, len); + if (status == SALRET_OK) + ret = xpSuccess; + else if (status == SALRET_MORE_PASSES) + ret = xpNeedMoreInfo; + else + ret = xpSalError; + + return ret; +} + + static enum xp_retval xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) { @@ -636,7 +655,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) /* clear xpc_vars_part_sn2 */ memset((u64 *)xpc_vars_part_sn2, 0, sizeof(struct xpc_vars_part_sn2) * - xp_max_npartitions); + XP_MAX_NPARTITIONS_SN2); /* initialize the activate IRQ related amo variables */ for (i = 0; i < xpc_nasid_mask_nlongs; i++) @@ -699,7 +718,7 @@ xpc_check_remote_hb_sn2(void) remote_vars = (struct xpc_vars_sn2 *)xpc_remote_copy_buffer_sn2; - for (partid = 0; partid < xp_max_npartitions; partid++) { + for (partid = 0; partid < XP_MAX_NPARTITIONS_SN2; partid++) { if (xpc_exiting) break; @@ -2386,6 +2405,7 @@ xpc_init_sn2(void) int ret; size_t buf_size; + xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 2aec1dfbb3db..232867aa6929 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -14,11 +14,8 @@ */ #include - -/* !!! #include */ -/* !!! 
uv_gpa() is defined in */ -#define uv_gpa(_a) ((unsigned long)_a) - +#include +#include "../sgi-gru/grukservices.h" #include "xpc.h" static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 07c89c4e2c25..49385f441705 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -21,17 +21,8 @@ */ #include -#include -#include -#include #include #include -#include -#include -#include -#include -#include -#include #include "xp.h" /* @@ -175,8 +166,9 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) return; } - dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, - msg->leadin_ignore, msg->tailout_ignore); + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", + (unsigned long)msg->buf_pa, msg->size, msg->leadin_ignore, + msg->tailout_ignore); /* reserve an extra cache line */ skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); @@ -320,8 +312,10 @@ xpnet_dev_open(struct net_device *dev) dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, - XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, XPNET_MAX_KTHREADS, - XPNET_MAX_IDLE_KTHREADS); + (unsigned long)XPNET_MSG_SIZE, + (unsigned long)XPNET_MSG_NENTRIES, + (unsigned long)XPNET_MAX_KTHREADS, + (unsigned long)XPNET_MAX_IDLE_KTHREADS); ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, @@ -439,8 +433,8 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, dev_dbg(xpnet, "sending XPC message to %d:%d\n" KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", - dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, - msg->leadin_ignore, msg->tailout_ignore); + dest_partid, XPC_NET_CHANNEL, (unsigned long)msg->buf_pa, + msg->size, msg->leadin_ignore, msg->tailout_ignore); atomic_inc(&queued_msg->use_count); @@ -602,8 +596,8 @@ xpnet_init(void) */ xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */ - xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = sn_partition_id; - xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (sn_partition_id >> 8); + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id; + xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8); /* * ether_setup() sets this to a multicast device. We are -- cgit v1.2.3 From a812dcc3a298eef650c381e094e2cf41a4ecc9ad Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:16 -0700 Subject: sgi-xp: add usage of GRU driver by xpc_remote_memcpy() Add UV support to xpc_remote_memcpy(), which involves interfacing to the GRU driver. 
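
Both ends of the copy are now expressed as global physical addresses, with the new xp_pa() hook converting local virtual buffers. A sketch of the resulting calling convention (pull_remote_data and its arguments are invented for illustration):

/* hypothetical caller; not part of the patch */
static enum xp_retval
pull_remote_data(void *dst, unsigned long src_gpa, size_t len)
{
        /* dst is a local virtual address; xp_pa() turns it into a
         * global physical address, so both arguments to the copy
         * now have the same address type */
        return xp_remote_memcpy(xp_pa(dst), src_gpa, len);
}

On sn2, xp_pa() is __pa() and the copy is bte_copy(); on UV it is uv_gpa() and gru_copy_gpa(), as the hunks below show.
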
Signed-off-by: Dean Nelson Cc: Jack Steiner Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 8 +++-- drivers/misc/sgi-xp/xp_main.c | 6 +++- drivers/misc/sgi-xp/xp_sn2.c | 50 +++++++++++++-------------- drivers/misc/sgi-xp/xp_uv.c | 27 +++++++++++++-- drivers/misc/sgi-xp/xpc.h | 44 ++++++++++++------------ drivers/misc/sgi-xp/xpc_channel.c | 5 ++- drivers/misc/sgi-xp/xpc_main.c | 8 +++-- drivers/misc/sgi-xp/xpc_partition.c | 37 ++++++++++---------- drivers/misc/sgi-xp/xpc_sn2.c | 68 ++++++++++++++++++++----------------- drivers/misc/sgi-xp/xpc_uv.c | 2 +- drivers/misc/sgi-xp/xpnet.c | 26 ++++++-------- 11 files changed, 154 insertions(+), 127 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 01bf1a2cd8ed..45d0a08c2ddd 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -207,7 +207,9 @@ enum xp_retval { xpUnsupported, /* 56: unsupported functionality or resource */ xpNeedMoreInfo, /* 57: more info is needed by SAL */ - xpUnknownReason /* 58: unknown reason - must be last in enum */ + xpGruCopyError, /* 58: gru_copy_gru() returned error */ + + xpUnknownReason /* 59: unknown reason - must be last in enum */ }; /* @@ -349,7 +351,9 @@ extern short xp_max_npartitions; extern short xp_partition_id; extern u8 xp_region_size; -extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t); +extern unsigned long (*xp_pa) (void *); +extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long, + size_t); extern int (*xp_cpu_to_nasid) (int); extern u64 xp_nofault_PIOR_target; diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index c34b23fe498f..f86ad3af26b7 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -41,7 +41,11 @@ EXPORT_SYMBOL_GPL(xp_partition_id); u8 xp_region_size; EXPORT_SYMBOL_GPL(xp_region_size); -enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len); +unsigned long (*xp_pa) (void *addr); +EXPORT_SYMBOL_GPL(xp_pa); + +enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa, + const unsigned long src_gpa, size_t len); EXPORT_SYMBOL_GPL(xp_remote_memcpy); int (*xp_cpu_to_nasid) (int cpuid); diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c index c6a1ede7d6e6..1440134caf31 100644 --- a/drivers/misc/sgi-xp/xp_sn2.c +++ b/drivers/misc/sgi-xp/xp_sn2.c @@ -63,7 +63,7 @@ xp_register_nofault_code_sn2(void) return xpSuccess; } -void +static void xp_unregister_nofault_code_sn2(void) { u64 func_addr = *(u64 *)xp_nofault_PIOR; @@ -74,45 +74,42 @@ xp_unregister_nofault_code_sn2(void) err_func_addr, 1, 0); } +/* + * Convert a virtual memory address to a physical memory address. + */ +static unsigned long +xp_pa_sn2(void *addr) +{ + return __pa(addr); +} + /* * Wrapper for bte_copy(). * - * vdst - virtual address of the destination of the transfer. - * psrc - physical address of the source of the transfer. + * dst_pa - physical address of the destination of the transfer. + * src_pa - physical address of the source of the transfer. * len - number of bytes to transfer from source to destination. * * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock. */ static enum xp_retval -xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len) +xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa, + size_t len) { bte_result_t ret; - u64 pdst = ia64_tpa(vdst); - /* ??? 
What are the rules governing the src and dst addresses passed in? - * ??? Currently we're assuming that dst is a virtual address and src - * ??? is a physical address, is this appropriate? Can we allow them to - * ??? be whatever and we make the change here without damaging the - * ??? addresses? - */ - /* - * Ensure that the physically mapped memory is contiguous. - * - * We do this by ensuring that the memory is from region 7 only. - * If the need should arise to use memory from one of the other - * regions, then modify the BUG_ON() statement to ensure that the - * memory from that region is always physically contiguous. - */ - BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL); - - ret = bte_copy((u64)psrc, pdst, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL); + ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL); if (ret == BTE_SUCCESS) return xpSuccess; - if (is_shub2()) - dev_err(xp, "bte_copy() on shub2 failed, error=0x%x\n", ret); - else - dev_err(xp, "bte_copy() failed, error=%d\n", ret); + if (is_shub2()) { + dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa=" + "0x%016lx src_pa=0x%016lx len=%ld\\n", ret, dst_pa, + src_pa, len); + } else { + dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx " + "src_pa=0x%016lx len=%ld\\n", ret, dst_pa, src_pa, len); + } return xpBteCopyError; } @@ -132,6 +129,7 @@ xp_init_sn2(void) xp_partition_id = sn_partition_id; xp_region_size = sn_region_size; + xp_pa = xp_pa_sn2; xp_remote_memcpy = xp_remote_memcpy_sn2; xp_cpu_to_nasid = xp_cpu_to_nasid_sn2; diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index 382b1b6bcc0b..44f2c2b58c2f 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -13,13 +13,33 @@ * */ +#include +#include +#include "../sgi-gru/grukservices.h" #include "xp.h" +/* + * Convert a virtual memory address to a physical memory address. + */ +static unsigned long +xp_pa_uv(void *addr) +{ + return uv_gpa(addr); +} + static enum xp_retval -xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len) +xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, + size_t len) { - /* !!! 
this function needs fleshing out */ - return xpUnsupported; + int ret; + + ret = gru_copy_gpa(dst_gpa, src_gpa, len); + if (ret == 0) + return xpSuccess; + + dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx " + "len=%ld\n", dst_gpa, src_gpa, len); + return xpGruCopyError; } enum xp_retval @@ -29,6 +49,7 @@ xp_init_uv(void) xp_max_npartitions = XP_MAX_NPARTITIONS_UV; + xp_pa = xp_pa_uv; xp_remote_memcpy = xp_remote_memcpy_uv; return xpSuccess; diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 96408fcf5a1e..49e26993345b 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -91,8 +91,8 @@ struct xpc_rsvd_page { u8 version; u8 pad1[3]; /* align to next u64 in 1st 64-byte cacheline */ union { - u64 vars_pa; /* physical address of struct xpc_vars */ - u64 activate_mq_gpa; /* global phys address of activate_mq */ + unsigned long vars_pa; /* phys address of struct xpc_vars */ + unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */ } sn; unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */ u64 pad2[10]; /* align to last u64 in 2nd 64-byte cacheline */ @@ -122,8 +122,8 @@ struct xpc_vars_sn2 { u64 heartbeat_offline; /* if 0, heartbeat should be changing */ int activate_IRQ_nasid; int activate_IRQ_phys_cpuid; - u64 vars_part_pa; - u64 amos_page_pa; /* paddr of page of amos from MSPEC driver */ + unsigned long vars_part_pa; + unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */ struct amo *amos_page; /* vaddr of page of amos from MSPEC driver */ }; @@ -142,10 +142,10 @@ struct xpc_vars_sn2 { struct xpc_vars_part_sn2 { u64 magic; - u64 openclose_args_pa; /* physical address of open and close args */ - u64 GPs_pa; /* physical address of Get/Put values */ + unsigned long openclose_args_pa; /* phys addr of open and close args */ + unsigned long GPs_pa; /* physical address of Get/Put values */ - u64 chctl_amo_pa; /* physical address of chctl flags' amo */ + unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */ @@ -213,7 +213,7 @@ struct xpc_openclose_args { u16 msg_size; /* sizeof each message entry */ u16 remote_nentries; /* #of message entries in remote msg queue */ u16 local_nentries; /* #of message entries in local msg queue */ - u64 local_msgqueue_pa; /* physical address of local message queue */ + unsigned long local_msgqueue_pa; /* phys addr of local message queue */ }; #define XPC_OPENCLOSE_ARGS_SIZE \ @@ -366,8 +366,8 @@ struct xpc_channel { void *remote_msgqueue_base; /* base address of kmalloc'd space */ struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ /* local message queue */ - u64 remote_msgqueue_pa; /* phys addr of remote partition's */ - /* local message queue */ + unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ atomic_t references; /* #of external references to queues */ @@ -491,12 +491,12 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl) */ struct xpc_partition_sn2 { - u64 remote_amos_page_pa; /* phys addr of partition's amos page */ + unsigned long remote_amos_page_pa; /* paddr of partition's amos page */ int activate_IRQ_nasid; /* active partition's act/deact nasid */ int activate_IRQ_phys_cpuid; /* active part's act/deact phys cpuid */ - u64 remote_vars_pa; /* phys addr of partition's vars */ - u64 remote_vars_part_pa; /* phys addr of 
partition's vars part */ + unsigned long remote_vars_pa; /* phys addr of partition's vars */ + unsigned long remote_vars_part_pa; /* paddr of partition's vars part */ u8 remote_vars_version; /* version# of partition's vars */ void *local_GPs_base; /* base address of kmalloc'd space */ @@ -504,10 +504,10 @@ struct xpc_partition_sn2 { void *remote_GPs_base; /* base address of kmalloc'd space */ struct xpc_gp_sn2 *remote_GPs; /* copy of remote partition's local */ /* Get/Put values */ - u64 remote_GPs_pa; /* phys address of remote partition's local */ - /* Get/Put values */ + unsigned long remote_GPs_pa; /* phys addr of remote partition's local */ + /* Get/Put values */ - u64 remote_openclose_args_pa; /* phys addr of remote's args */ + unsigned long remote_openclose_args_pa; /* phys addr of remote's args */ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */ int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */ @@ -529,7 +529,7 @@ struct xpc_partition { u8 remote_rp_version; /* version# of partition's rsvd pg */ unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */ - u64 remote_rp_pa; /* phys addr of partition's rsvd pg */ + unsigned long remote_rp_pa; /* phys addr of partition's rsvd pg */ u64 last_heartbeat; /* HB at last read */ u32 activate_IRQ_rcvd; /* IRQs since activation */ spinlock_t act_lock; /* protect updating of act_state */ @@ -623,7 +623,8 @@ extern void xpc_activate_partition(struct xpc_partition *); extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); -extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64, u64 *, u64 *, +extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, + unsigned long *, size_t *); extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); extern void (*xpc_heartbeat_init) (void); @@ -640,8 +641,8 @@ extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *); extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); -extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, u64, - int); +extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, + unsigned long, int); extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); extern void (*xpc_cancel_partition_deactivation_request) ( @@ -690,7 +691,8 @@ extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); extern void xpc_mark_partition_inactive(struct xpc_partition *); extern void xpc_discovery(void); extern enum xp_retval xpc_get_remote_rp(int, unsigned long *, - struct xpc_rsvd_page *, u64 *); + struct xpc_rsvd_page *, + unsigned long *); extern void xpc_deactivate_partition(const int, struct xpc_partition *, enum xp_retval); extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 0615efbe0070..d7a15f1a78a5 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -366,9 +366,8 @@ again: dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa=" "0x%lx, local_nentries=%d, remote_nentries=%d) " "received from partid=%d, channel=%d\n", - (unsigned long)args->local_msgqueue_pa, - 
args->local_nentries, args->remote_nentries, - ch->partid, ch->number); + args->local_msgqueue_pa, args->local_nentries, + args->remote_nentries, ch->partid, ch->number); if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) { spin_unlock_irqrestore(&ch->lock, irq_flags); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index f7478cc3572d..dc686110aef7 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -169,8 +169,9 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; -enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie, - u64 *paddr, size_t *len); +enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, + unsigned long *rp_pa, + size_t *len); enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); void (*xpc_heartbeat_init) (void); void (*xpc_heartbeat_exit) (void); @@ -189,7 +190,8 @@ int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, - u64 remote_rp_pa, int nasid); + unsigned long remote_rp_pa, + int nasid); void (*xpc_request_partition_reactivation) (struct xpc_partition *part); void (*xpc_request_partition_deactivation) (struct xpc_partition *part); void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index f84d66410205..f150dbfcfcc7 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -60,15 +60,15 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) * Given a nasid, get the physical address of the partition's reserved page * for that nasid. This function returns 0 on any error. 
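
One construct worth flagging in the function below: the "void *buf = buf;" self-assignment is the old GCC idiom for silencing a may-be-used-uninitialized warning. buf is handed to the first SAL pass before any storage exists, and only points at real memory once an xpNeedMoreInfo pass has allocated it:

void *buf = buf;        /* self-init quiets GCC's uninitialized-use
                         * warning; real storage is only attached on
                         * the first xpNeedMoreInfo pass */
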
*/ -static u64 +static unsigned long xpc_get_rsvd_page_pa(int nasid) { enum xp_retval ret; u64 cookie = 0; - u64 rp_pa = nasid; /* seed with nasid */ + unsigned long rp_pa = nasid; /* seed with nasid */ size_t len = 0; - u64 buf = buf; - u64 buf_len = 0; + size_t buf_len = 0; + void *buf = buf; void *buf_base = NULL; while (1) { @@ -78,7 +78,7 @@ xpc_get_rsvd_page_pa(int nasid) dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, " "address=0x%016lx, len=0x%016lx\n", ret, - (unsigned long)cookie, (unsigned long)rp_pa, len); + (unsigned long)cookie, rp_pa, len); if (ret != xpNeedMoreInfo) break; @@ -87,19 +87,17 @@ xpc_get_rsvd_page_pa(int nasid) if (L1_CACHE_ALIGN(len) > buf_len) { kfree(buf_base); buf_len = L1_CACHE_ALIGN(len); - buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len, - GFP_KERNEL, - &buf_base); + buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL, + &buf_base); if (buf_base == NULL) { dev_err(xpc_part, "unable to kmalloc " - "len=0x%016lx\n", - (unsigned long)buf_len); + "len=0x%016lx\n", buf_len); ret = xpNoMemory; break; } } - ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len); + ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len); if (ret != xpSuccess) { dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret); break; @@ -111,8 +109,7 @@ xpc_get_rsvd_page_pa(int nasid) if (ret != xpSuccess) rp_pa = 0; - dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", - (unsigned long)rp_pa); + dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa); return rp_pa; } @@ -125,7 +122,7 @@ struct xpc_rsvd_page * xpc_setup_rsvd_page(void) { struct xpc_rsvd_page *rp; - u64 rp_pa; + unsigned long rp_pa; unsigned long new_ts_jiffies; /* get the local reserved page's address */ @@ -193,7 +190,7 @@ xpc_setup_rsvd_page(void) */ enum xp_retval xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, - struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) + struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa) { int l; enum xp_retval ret; @@ -205,7 +202,7 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids, return xpNoRsvdPageAddr; /* pull over the reserved page header and part_nasids mask */ - ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa, + ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa, XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes); if (ret != xpSuccess) return ret; @@ -389,7 +386,7 @@ xpc_discovery(void) { void *remote_rp_base; struct xpc_rsvd_page *remote_rp; - u64 remote_rp_pa; + unsigned long remote_rp_pa; int region; int region_size; int max_regions; @@ -500,7 +497,7 @@ enum xp_retval xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) { struct xpc_partition *part; - u64 part_nasid_pa; + unsigned long part_nasid_pa; part = &xpc_partitions[partid]; if (part->remote_rp_pa == 0) @@ -508,8 +505,8 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask) memset(nasid_mask, 0, xpc_nasid_mask_nbytes); - part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa); + part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa); - return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa, + return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa, xpc_nasid_mask_nbytes); } diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index fde870aebcb9..1571a7cdf9d0 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -207,8 +207,8 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) * Flag the appropriate amo variable and send an IRQ to the specified 
node. */ static void -xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid, - int to_phys_cpuid) +xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid, + int to_nasid, int to_phys_cpuid) { struct amo *amos = (struct amo *)__va(amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * @@ -404,7 +404,7 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) args->remote_nentries = ch->remote_nentries; args->local_nentries = ch->local_nentries; - args->local_msgqueue_pa = __pa(ch->local_msgqueue); + args->local_msgqueue_pa = xp_pa(ch->local_msgqueue); XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags); } @@ -577,13 +577,13 @@ xpc_allow_amo_ops_shub_wars_1_1_sn2(void) } static enum xp_retval -xpc_get_partition_rsvd_page_pa_sn2(u64 buf, u64 *cookie, u64 *paddr, +xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa, size_t *len) { s64 status; enum xp_retval ret; - status = sn_partition_reserved_page_pa(buf, cookie, paddr, len); + status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len); if (status == SALRET_OK) ret = xpSuccess; else if (status == SALRET_MORE_PASSES) @@ -604,7 +604,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) xpc_vars_sn2 = XPC_RP_VARS(rp); - rp->sn.vars_pa = __pa(xpc_vars_sn2); + rp->sn.vars_pa = xp_pa(xpc_vars_sn2); /* vars_part array follows immediately after vars */ xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) + @@ -649,7 +649,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) xpc_vars_sn2->version = XPC_V_VERSION; xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0); xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0); - xpc_vars_sn2->vars_part_pa = __pa(xpc_vars_part_sn2); + xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2); xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page); xpc_vars_sn2->amos_page = amos_page; /* save for next load of XPC */ @@ -734,8 +734,8 @@ xpc_check_remote_hb_sn2(void) } /* pull the remote_hb cache line */ - ret = xp_remote_memcpy(remote_vars, - (void *)part->sn.sn2.remote_vars_pa, + ret = xp_remote_memcpy(xp_pa(remote_vars), + part->sn.sn2.remote_vars_pa, XPC_RP_VARS_SIZE); if (ret != xpSuccess) { XPC_DEACTIVATE_PARTITION(part, ret); @@ -768,7 +768,8 @@ xpc_check_remote_hb_sn2(void) * assumed to be of size XPC_RP_VARS_SIZE. 
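For illustration, the change that recurs throughout this patch: xp_remote_memcpy() now takes physical addresses for both source and destination rather than virtual pointers, and local buffers are converted explicitly with xp_pa(), which replaces __pa() one-for-one in the sn2 paths above. A minimal caller in the new style follows; pull_remote_struct() is a stand-in name, not code from the patch.

static enum xp_retval
pull_remote_struct(void *local_buf, unsigned long remote_pa, size_t len)
{
	/* both addresses are physical now; only the local virtual
	 * buffer needs an explicit xp_pa() conversion
	 */
	return xp_remote_memcpy(xp_pa(local_buf), remote_pa, len);
}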
*/ static enum xp_retval -xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) +xpc_get_remote_vars_sn2(unsigned long remote_vars_pa, + struct xpc_vars_sn2 *remote_vars) { enum xp_retval ret; @@ -776,7 +777,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) return xpVarsNotSet; /* pull over the cross partition variables */ - ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa, + ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa, XPC_RP_VARS_SIZE); if (ret != xpSuccess) return ret; @@ -791,7 +792,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars) static void xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp, - u64 remote_rp_pa, int nasid) + unsigned long remote_rp_pa, int nasid) { xpc_send_local_activate_IRQ_sn2(nasid); } @@ -883,7 +884,8 @@ xpc_partition_deactivation_requested_sn2(short partid) static void xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version, unsigned long *remote_rp_ts_jiffies, - u64 remote_rp_pa, u64 remote_vars_pa, + unsigned long remote_rp_pa, + unsigned long remote_vars_pa, struct xpc_vars_sn2 *remote_vars) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; @@ -948,8 +950,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid) { struct xpc_rsvd_page *remote_rp; struct xpc_vars_sn2 *remote_vars; - u64 remote_rp_pa; - u64 remote_vars_pa; + unsigned long remote_rp_pa; + unsigned long remote_vars_pa; int remote_rp_version; int reactivate = 0; unsigned long remote_rp_ts_jiffies = 0; @@ -1291,11 +1293,11 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) * The setting of the magic # indicates that these per partition * specific variables are ready to be used. */ - xpc_vars_part_sn2[partid].GPs_pa = __pa(part_sn2->local_GPs); + xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs); xpc_vars_part_sn2[partid].openclose_args_pa = - __pa(part->local_openclose_args); + xp_pa(part->local_openclose_args); xpc_vars_part_sn2[partid].chctl_amo_pa = - __pa(part_sn2->local_chctl_amo_va); + xp_pa(part_sn2->local_chctl_amo_va); cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid); xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid = @@ -1382,25 +1384,25 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part) * Create a wrapper that hides the underlying mechanism for pulling a cacheline * (or multiple cachelines) from a remote partition. * - * src must be a cacheline aligned physical address on the remote partition. + * src_pa must be a cacheline aligned physical address on the remote partition. * dst must be a cacheline aligned virtual address on this partition. * cnt must be cacheline sized */ /* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? 
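The wrapper just below asserts that the source address, the destination address, and the byte count are all whole cachelines. Those checks lean on L1_CACHE_ALIGN() rounding its argument up to the next cacheline boundary, so x == L1_CACHE_ALIGN(x) holds exactly when x is already aligned. A self-contained userspace sketch of the idiom, with CACHE_BYTES standing in for the 128-byte sn2 cacheline:

#include <assert.h>
#include <stdio.h>

#define CACHE_BYTES 128UL
#define CACHE_ALIGN(x) (((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))

int main(void)
{
	unsigned long aligned = 0x4000;		/* on a 128-byte boundary */
	unsigned long unaligned = 0x4010;

	assert(aligned == CACHE_ALIGN(aligned));	/* no rounding needed */
	assert(unaligned != CACHE_ALIGN(unaligned));	/* rounds up to 0x4080 */
	printf("0x%lx rounds up to 0x%lx\n", unaligned, CACHE_ALIGN(unaligned));
	return 0;
}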
*/ static enum xp_retval xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst, - const void *src, size_t cnt) + const unsigned long src_pa, size_t cnt) { enum xp_retval ret; - DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src)); - DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst)); + DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa)); + DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst)); DBUG_ON(cnt != L1_CACHE_ALIGN(cnt)); if (part->act_state == XPC_P_DEACTIVATING) return part->reason; - ret = xp_remote_memcpy(dst, src, cnt); + ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt); if (ret != xpSuccess) { dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed," " ret=%d\n", XPC_PARTID(part), ret); @@ -1420,7 +1422,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) struct xpc_vars_part_sn2 *pulled_entry_cacheline = (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer); struct xpc_vars_part_sn2 *pulled_entry; - u64 remote_entry_cacheline_pa, remote_entry_pa; + unsigned long remote_entry_cacheline_pa; + unsigned long remote_entry_pa; short partid = XPC_PARTID(part); enum xp_retval ret; @@ -1440,7 +1443,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) (L1_CACHE_BYTES - 1))); ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline, - (void *)remote_entry_cacheline_pa, + remote_entry_cacheline_pa, L1_CACHE_BYTES); if (ret != xpSuccess) { dev_dbg(xpc_chan, "failed to pull XPC vars_part from " @@ -1587,7 +1590,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) if (xpc_any_openclose_chctl_flags_set(&chctl)) { ret = xpc_pull_remote_cachelines_sn2(part, part-> remote_openclose_args, - (void *)part_sn2-> + part_sn2-> remote_openclose_args_pa, XPC_OPENCLOSE_ARGS_SIZE); if (ret != xpSuccess) { @@ -1604,7 +1607,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) if (xpc_any_msg_chctl_flags_set(&chctl)) { ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs, - (void *)part_sn2->remote_GPs_pa, + part_sn2->remote_GPs_pa, XPC_GP_SIZE); if (ret != xpSuccess) { XPC_DEACTIVATE_PARTITION(part, ret); @@ -1971,8 +1974,10 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) { struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *remote_msg, *msg; - u32 msg_index, nmsgs; + unsigned long remote_msg_pa; + struct xpc_msg *msg; + u32 msg_index; + u32 nmsgs; u64 msg_offset; enum xp_retval ret; @@ -1996,10 +2001,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) msg_offset = msg_index * ch->msg_size; msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); - remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa + - msg_offset); + remote_msg_pa = ch->remote_msgqueue_pa + msg_offset; - ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg, + ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa, nmsgs * ch->msg_size); if (ret != xpSuccess) { diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 232867aa6929..c2d4ddd6e955 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -61,7 +61,7 @@ xpc_heartbeat_exit_uv(void) static void xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, - u64 remote_rp_pa, int nasid) + unsigned long remote_rp_pa, int nasid) { short partid = remote_rp->SAL_partid; struct xpc_partition *part = &xpc_partitions[partid]; diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 49385f441705..4f5d62230116 100644 --- 
a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -44,7 +44,7 @@ struct xpnet_message { u16 version; /* Version for this message */ u16 embedded_bytes; /* #of bytes embedded in XPC message */ u32 magic; /* Special number indicating this is xpnet */ - u64 buf_pa; /* phys address of buffer to retrieve */ + unsigned long buf_pa; /* phys address of buffer to retrieve */ u32 size; /* #of bytes in buffer */ u8 leadin_ignore; /* #of bytes to ignore at the beginning */ u8 tailout_ignore; /* #of bytes to ignore at the end */ @@ -152,6 +152,7 @@ static void xpnet_receive(short partid, int channel, struct xpnet_message *msg) { struct sk_buff *skb; + void *dst; enum xp_retval ret; struct xpnet_dev_private *priv = (struct xpnet_dev_private *)xpnet_device->priv; @@ -166,9 +167,8 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) return; } - dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", - (unsigned long)msg->buf_pa, msg->size, msg->leadin_ignore, - msg->tailout_ignore); + dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); /* reserve an extra cache line */ skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES); @@ -210,15 +210,12 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) skb_copy_to_linear_data(skb, &msg->data, (size_t)msg->embedded_bytes); } else { + dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1)); dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t" - "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", (void *) - ((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + "xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst, (void *)msg->buf_pa, msg->size); - ret = xp_remote_memcpy((void *)((u64)skb->data & - ~(L1_CACHE_BYTES - 1)), - (void *)msg->buf_pa, msg->size); - + ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size); if (ret != xpSuccess) { /* * !!! Need better way of cleaning skb. Currently skb @@ -226,8 +223,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg) * !!! dev_kfree_skb. */ dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) " - "returned error=0x%x\n", (void *) - ((u64)skb->data & ~(L1_CACHE_BYTES - 1)), + "returned error=0x%x\n", dst, (void *)msg->buf_pa, msg->size, ret); xpc_received(partid, channel, (void *)msg); @@ -428,13 +424,13 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, msg->size = end_addr - start_addr; msg->leadin_ignore = (u64)skb->data - start_addr; msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); - msg->buf_pa = __pa(start_addr); + msg->buf_pa = xp_pa((void *)start_addr); dev_dbg(xpnet, "sending XPC message to %d:%d\n" KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, " "msg->leadin_ignore=%u, msg->tailout_ignore=%u\n", - dest_partid, XPC_NET_CHANNEL, (unsigned long)msg->buf_pa, - msg->size, msg->leadin_ignore, msg->tailout_ignore); + dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size, + msg->leadin_ignore, msg->tailout_ignore); atomic_inc(&queued_msg->use_count); -- cgit v1.2.3 From 5b8669dfd110a62a74eea525a009342f73987ea0 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:18 -0700 Subject: sgi-xp: setup the activate GRU message queue Setup the activate GRU message queue that is used for partition activation and channel connection on UV systems. 
Signed-off-by: Dean Nelson Cc: Jack Steiner Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 3 +- drivers/misc/sgi-xp/xp_uv.c | 10 + drivers/misc/sgi-xp/xpc.h | 158 ++++++-- drivers/misc/sgi-xp/xpc_channel.c | 22 +- drivers/misc/sgi-xp/xpc_main.c | 329 +++++++++++---- drivers/misc/sgi-xp/xpc_partition.c | 28 +- drivers/misc/sgi-xp/xpc_sn2.c | 387 +++++++----------- drivers/misc/sgi-xp/xpc_uv.c | 781 ++++++++++++++++++++++++++++++++++-- 8 files changed, 1328 insertions(+), 390 deletions(-) diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 45d0a08c2ddd..9ac5758f4d08 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -208,8 +208,9 @@ enum xp_retval { xpNeedMoreInfo, /* 57: more info is needed by SAL */ xpGruCopyError, /* 58: gru_copy_gru() returned error */ + xpGruSendMqError, /* 59: gru send message queue related error */ - xpUnknownReason /* 59: unknown reason - must be last in enum */ + xpUnknownReason /* 60: unknown reason - must be last in enum */ }; /* diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c index 44f2c2b58c2f..d9f7ce2510bc 100644 --- a/drivers/misc/sgi-xp/xp_uv.c +++ b/drivers/misc/sgi-xp/xp_uv.c @@ -42,15 +42,25 @@ xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa, return xpGruCopyError; } +static int +xp_cpu_to_nasid_uv(int cpuid) +{ + /* ??? Is this same as sn2 nasid in mach/part bitmaps set up by SAL? */ + return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid)); +} + enum xp_retval xp_init_uv(void) { BUG_ON(!is_uv()); xp_max_npartitions = XP_MAX_NPARTITIONS_UV; + xp_partition_id = 0; /* !!! not correct value */ + xp_region_size = 0; /* !!! not correct value */ xp_pa = xp_pa_uv; xp_remote_memcpy = xp_remote_memcpy_uv; + xp_cpu_to_nasid = xp_cpu_to_nasid_uv; return xpSuccess; } diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 1e48f7765050..4c26181defff 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -164,8 +164,8 @@ struct xpc_vars_part_sn2 { * MAGIC2 indicates that this partition has pulled the remote partition's * per partition variables that pertain to this partition. */ -#define XPC_VP_MAGIC1 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ -#define XPC_VP_MAGIC2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ +#define XPC_VP_MAGIC1_SN2 0x0053524156435058L /* 'XPCVARS\0'L (little endian) */ +#define XPC_VP_MAGIC2_SN2 0x0073726176435058L /* 'XPCvars\0'L (little endian) */ /* the reserved page sizes and offsets */ @@ -180,6 +180,80 @@ struct xpc_vars_part_sn2 { (XPC_RP_MACH_NASIDS(_rp) + \ xpc_nasid_mask_nlongs)) +/* + * The activate_mq is used to send/receive messages that affect XPC's heartbeat, + * partition active state, and channel state. This is UV only.
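Since only the message layouts appear in this hunk, here is a sketch of how a receiver might demultiplex them: every message begins with the common xpc_activate_mq_msghdr_uv defined just below, so a handler can switch on the header's type field and recast to the full message. The function name and bodies are hypothetical, not the patch's actual receive path.

static void
handle_activate_mq_msg_sketch(struct xpc_activate_mq_msghdr_uv *hdr)
{
	switch (hdr->type) {
	case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg =
		    (struct xpc_activate_mq_msg_heartbeat_req_uv *)hdr;

		/* record msg->heartbeat for partition hdr->partid */
		(void)msg;
		break;
	}
	case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: {
		struct xpc_activate_mq_msg_activate_req_uv *msg =
		    (struct xpc_activate_mq_msg_activate_req_uv *)hdr;

		/* stash msg->rp_gpa and msg->activate_mq_gpa, then mark
		 * partition hdr->partid as requesting activation
		 */
		(void)msg;
		break;
	}
	default:
		/* unrecognized type: ignore it, or deactivate the sender */
		break;
	}
}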
+ */ +struct xpc_activate_mq_msghdr_uv { + short partid; /* sender's partid */ + u8 act_state; /* sender's act_state at time msg sent */ + u8 type; /* message's type */ + unsigned long rp_ts_jiffies; /* timestamp of sender's rp setup by XPC */ +}; + +/* activate_mq defined message types */ +#define XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV 0 +#define XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV 1 +#define XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV 2 +#define XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV 3 + +#define XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV 4 +#define XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV 5 + +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV 6 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV 7 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV 8 +#define XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV 9 + +#define XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV 10 +#define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11 + +struct xpc_activate_mq_msg_uv { + struct xpc_activate_mq_msghdr_uv header; +}; + +struct xpc_activate_mq_msg_heartbeat_req_uv { + struct xpc_activate_mq_msghdr_uv header; + u64 heartbeat; +}; + +struct xpc_activate_mq_msg_activate_req_uv { + struct xpc_activate_mq_msghdr_uv header; + unsigned long rp_gpa; + unsigned long activate_mq_gpa; +}; + +struct xpc_activate_mq_msg_deactivate_req_uv { + struct xpc_activate_mq_msghdr_uv header; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closerequest_uv { + struct xpc_activate_mq_msghdr_uv header; + short ch_number; + enum xp_retval reason; +}; + +struct xpc_activate_mq_msg_chctl_closereply_uv { + struct xpc_activate_mq_msghdr_uv header; + short ch_number; +}; + +struct xpc_activate_mq_msg_chctl_openrequest_uv { + struct xpc_activate_mq_msghdr_uv header; + short ch_number; + short msg_size; /* size of notify_mq's messages */ + short local_nentries; /* ??? Is this needed? What is? */ +}; + +struct xpc_activate_mq_msg_chctl_openreply_uv { + struct xpc_activate_mq_msghdr_uv header; + short ch_number; + short remote_nentries; /* ??? Is this needed? What is? */ + short local_nentries; /* ??? Is this needed? What is? */ + unsigned long local_notify_mq_gpa; +}; + /* * Functions registered by add_timer() or called by kernel_thread() only * allow for a single 64-bit argument. The following macros can be used to @@ -331,6 +405,18 @@ struct xpc_notify { */ struct xpc_channel_sn2 { + struct xpc_openclose_args *local_openclose_args; /* args passed on */ + /* opening or closing of channel */ + + void *local_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *local_msgqueue; /* local message queue */ + void *remote_msgqueue_base; /* base address of kmalloc'd space */ + struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ + /* local message queue */ + unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */ + /* local message queue */ + + struct xpc_notify *notify_queue; /* notify queue for messages sent */ /* various flavors of local and remote Get/Put values */ @@ -344,13 +430,14 @@ struct xpc_channel_sn2 { }; struct xpc_channel_uv { - /* !!! 
code is coming */ + unsigned long remote_notify_mq_gpa; /* gru phys address of remote */ + /* partition's notify mq */ }; struct xpc_channel { short partid; /* ID of remote partition connected */ spinlock_t lock; /* lock for updating this structure */ - u32 flags; /* general flags */ + unsigned int flags; /* general flags */ enum xp_retval reason; /* reason why channel is disconnect'g */ int reason_line; /* line# disconnect initiated from */ @@ -361,14 +448,6 @@ struct xpc_channel { u16 local_nentries; /* #of msg entries in local msg queue */ u16 remote_nentries; /* #of msg entries in remote msg queue */ - void *local_msgqueue_base; /* base address of kmalloc'd space */ - struct xpc_msg *local_msgqueue; /* local message queue */ - void *remote_msgqueue_base; /* base address of kmalloc'd space */ - struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ - /* local message queue */ - unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */ - /* local message queue */ - atomic_t references; /* #of external references to queues */ atomic_t n_on_msg_allocate_wq; /* #on msg allocation wait queue */ @@ -377,19 +456,13 @@ struct xpc_channel { u8 delayed_chctl_flags; /* chctl flags received, but delayed */ /* action until channel disconnected */ - /* queue of msg senders who want to be notified when msg received */ - atomic_t n_to_notify; /* #of msg senders to notify */ - struct xpc_notify *notify_queue; /* notify queue for messages sent */ xpc_channel_func func; /* user's channel function */ void *key; /* pointer to user's key */ struct completion wdisconnect_wait; /* wait for channel disconnect */ - struct xpc_openclose_args *local_openclose_args; /* args passed on */ - /* opening or closing of channel */ - /* kthread management related fields */ atomic_t kthreads_assigned; /* #of kthreads assigned to channel */ @@ -507,6 +580,8 @@ struct xpc_partition_sn2 { unsigned long remote_GPs_pa; /* phys addr of remote partition's local */ /* Get/Put values */ + void *local_openclose_args_base; /* base address of kmalloc'd space */ + struct xpc_openclose_args *local_openclose_args; /* local's args */ unsigned long remote_openclose_args_pa; /* phys addr of remote's args */ int notify_IRQ_nasid; /* nasid of where to send notify IRQs */ @@ -520,9 +595,27 @@ struct xpc_partition_sn2 { }; struct xpc_partition_uv { - /* !!! 
code is coming */ + unsigned long remote_activate_mq_gpa; /* gru phys address of remote */ + /* partition's activate mq */ + spinlock_t flags_lock; /* protect updating of flags */ + unsigned int flags; /* general flags */ + u8 remote_act_state; /* remote partition's act_state */ + u8 act_state_req; /* act_state request from remote partition */ + enum xp_retval reason; /* reason for deactivate act_state request */ + u64 heartbeat; /* incremented by remote partition */ }; +/* struct xpc_partition_uv flags */ + +#define XPC_P_HEARTBEAT_OFFLINE_UV 0x00000001 +#define XPC_P_ENGAGED_UV 0x00000002 + +/* struct xpc_partition_uv act_state change requests */ + +#define XPC_P_ASR_ACTIVATE_UV 0x01 +#define XPC_P_ASR_REACTIVATE_UV 0x02 +#define XPC_P_ASR_DEACTIVATE_UV 0x03 + struct xpc_partition { /* XPC HB infrastructure */ @@ -556,8 +649,6 @@ struct xpc_partition { union xpc_channel_ctl_flags chctl; /* chctl flags yet to be processed */ spinlock_t chctl_lock; /* chctl flags lock */ - void *local_openclose_args_base; /* base address of kmalloc'd space */ - struct xpc_openclose_args *local_openclose_args; /* local's args */ void *remote_openclose_args_base; /* base address of kmalloc'd space */ struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */ /* args */ @@ -616,17 +707,20 @@ extern struct device *xpc_part; extern struct device *xpc_chan; extern int xpc_disengage_timelimit; extern int xpc_disengage_timedout; -extern atomic_t xpc_activate_IRQ_rcvd; +extern int xpc_activate_IRQ_rcvd; +extern spinlock_t xpc_activate_IRQ_rcvd_lock; extern wait_queue_head_t xpc_activate_IRQ_wq; extern void *xpc_heartbeating_to_mask; +extern void *xpc_kzalloc_cacheline_aligned(size_t, gfp_t, void **); extern void xpc_activate_partition(struct xpc_partition *); extern void xpc_activate_kthreads(struct xpc_channel *, int); extern void xpc_create_kthreads(struct xpc_channel *, int, int); extern void xpc_disconnect_wait(int); +extern int (*xpc_setup_partitions_sn) (void); extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *, unsigned long *, size_t *); -extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *); +extern int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *); extern void (*xpc_heartbeat_init) (void); extern void (*xpc_heartbeat_exit) (void); extern void (*xpc_increment_heartbeat) (void); @@ -635,8 +729,8 @@ extern void (*xpc_online_heartbeat) (void); extern enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *); extern enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *); extern u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *); -extern enum xp_retval (*xpc_allocate_msgqueues) (struct xpc_channel *); -extern void (*xpc_free_msgqueues) (struct xpc_channel *); +extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *); +extern void (*xpc_teardown_msg_structures) (struct xpc_channel *); extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *); @@ -647,9 +741,9 @@ extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); extern void (*xpc_request_partition_deactivation) (struct xpc_partition *); extern void (*xpc_cancel_partition_deactivation_request) ( struct xpc_partition *); -extern void (*xpc_process_activate_IRQ_rcvd) (int); -extern enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *); -extern void (*xpc_teardown_infrastructure) 
(struct xpc_partition *); +extern void (*xpc_process_activate_IRQ_rcvd) (void); +extern enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *); +extern void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *); extern void (*xpc_indicate_partition_engaged) (struct xpc_partition *); extern int (*xpc_partition_engaged) (short); @@ -665,6 +759,9 @@ extern void (*xpc_send_chctl_openrequest) (struct xpc_channel *, unsigned long *); extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); +extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, + unsigned long); + extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16, u8, xpc_notify_func, void *); extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *); @@ -674,7 +771,7 @@ extern int xpc_init_sn2(void); extern void xpc_exit_sn2(void); /* found in xpc_uv.c */ -extern void xpc_init_uv(void); +extern int xpc_init_uv(void); extern void xpc_exit_uv(void); /* found in xpc_partition.c */ @@ -684,7 +781,8 @@ extern struct xpc_rsvd_page *xpc_rsvd_page; extern unsigned long *xpc_mach_nasids; extern struct xpc_partition *xpc_partitions; extern void *xpc_kmalloc_cacheline_aligned(size_t, gfp_t, void **); -extern struct xpc_rsvd_page *xpc_setup_rsvd_page(void); +extern int xpc_setup_rsvd_page(void); +extern void xpc_teardown_rsvd_page(void); extern int xpc_identify_activate_IRQ_sender(void); extern int xpc_partition_disengaged(struct xpc_partition *); extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 17ab75d69e83..73df9fb5ee66 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -39,7 +39,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) if (!(ch->flags & XPC_C_SETUP)) { spin_unlock_irqrestore(&ch->lock, *irq_flags); - ret = xpc_allocate_msgqueues(ch); + ret = xpc_setup_msg_structures(ch); spin_lock_irqsave(&ch->lock, *irq_flags); if (ret != xpSuccess) @@ -62,8 +62,6 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) if (!(ch->flags & XPC_C_ROPENREPLY)) return; - DBUG_ON(ch->remote_msgqueue_pa == 0); - ch->flags = (XPC_C_CONNECTED | XPC_C_SETUP); /* clear all else */ dev_info(xpc_chan, "channel %d to partition %d connected\n", @@ -134,13 +132,23 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) spin_lock_irqsave(&ch->lock, *irq_flags); } + DBUG_ON(atomic_read(&ch->n_to_notify) != 0); + /* it's now safe to free the channel's message queues */ - xpc_free_msgqueues(ch); + xpc_teardown_msg_structures(ch); + + ch->func = NULL; + ch->key = NULL; + ch->msg_size = 0; + ch->local_nentries = 0; + ch->remote_nentries = 0; + ch->kthreads_assigned_limit = 0; + ch->kthreads_idle_limit = 0; /* * Mark the channel disconnected and clear all other flags, including - * XPC_C_SETUP (because of call to xpc_free_msgqueues()) but not - * including XPC_C_WDISCONNECT (if it was set). + * XPC_C_SETUP (because of call to xpc_teardown_msg_structures()) but + * not including XPC_C_WDISCONNECT (if it was set). 
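The assignment just below is the clear-all-but-one flag idiom: every channel flag is wiped except XPC_C_WDISCONNECT, which a waiter still needs to observe. A tiny self-contained demonstration with invented flag values:

#include <assert.h>

#define C_DISCONNECTED	0x01
#define C_SETUP		0x02
#define C_CONNECTED	0x04
#define C_WDISCONNECT	0x80

int main(void)
{
	unsigned int flags = C_CONNECTED | C_SETUP | C_WDISCONNECT;

	/* mark disconnected, preserving only the WDISCONNECT bit */
	flags = C_DISCONNECTED | (flags & C_WDISCONNECT);

	assert(flags == (C_DISCONNECTED | C_WDISCONNECT));
	assert(!(flags & (C_SETUP | C_CONNECTED)));
	return 0;
}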
*/ ch->flags = (XPC_C_DISCONNECTED | (ch->flags & XPC_C_WDISCONNECT)); @@ -395,7 +403,7 @@ again: DBUG_ON(args->remote_nentries == 0); ch->flags |= XPC_C_ROPENREPLY; - ch->remote_msgqueue_pa = args->local_msgqueue_pa; + xpc_save_remote_msgqueue_pa(ch, args->local_msgqueue_pa); if (args->local_nentries < ch->remote_nentries) { dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY: new " diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index b303c130bba8..13ec47928994 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -141,8 +141,9 @@ static struct ctl_table_header *xpc_sysctl; /* non-zero if any remote partition disengage was timed out */ int xpc_disengage_timedout; -/* #of activate IRQs received */ -atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0); +/* #of activate IRQs received and not yet processed */ +int xpc_activate_IRQ_rcvd; +DEFINE_SPINLOCK(xpc_activate_IRQ_rcvd_lock); /* IRQ handler notifies this wait queue on receipt of an IRQ */ DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq); @@ -169,10 +170,11 @@ static struct notifier_block xpc_die_notifier = { .notifier_call = xpc_system_die, }; +int (*xpc_setup_partitions_sn) (void); enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie, unsigned long *rp_pa, size_t *len); -enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp); +int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp); void (*xpc_heartbeat_init) (void); void (*xpc_heartbeat_exit) (void); void (*xpc_increment_heartbeat) (void); @@ -183,8 +185,8 @@ enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part); enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part); void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch); u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part); -enum xp_retval (*xpc_allocate_msgqueues) (struct xpc_channel *ch); -void (*xpc_free_msgqueues) (struct xpc_channel *ch); +enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch); +void (*xpc_teardown_msg_structures) (struct xpc_channel *ch); void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number); int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); @@ -196,9 +198,9 @@ void (*xpc_request_partition_reactivation) (struct xpc_partition *part); void (*xpc_request_partition_deactivation) (struct xpc_partition *part); void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part); -void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected); -enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part); -void (*xpc_teardown_infrastructure) (struct xpc_partition *part); +void (*xpc_process_activate_IRQ_rcvd) (void); +enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part); +void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part); void (*xpc_indicate_partition_engaged) (struct xpc_partition *part); int (*xpc_partition_engaged) (short partid); @@ -215,6 +217,9 @@ void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch, void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, unsigned long *irq_flags); +void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, + unsigned long msgqueue_pa); + enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, void *payload, u16 payload_size, u8 notify_type, xpc_notify_func func, void *key); @@ -308,8 +313,6 @@ xpc_check_remote_hb(void) static int xpc_hb_checker(void *ignore) { - int 
last_IRQ_count = 0; - int new_IRQ_count; int force_IRQ = 0; /* this thread was marked active by xpc_hb_init() */ @@ -325,43 +328,37 @@ xpc_hb_checker(void *ignore) dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have " "been received\n", (int)(xpc_hb_check_timeout - jiffies), - atomic_read(&xpc_activate_IRQ_rcvd) - last_IRQ_count); + xpc_activate_IRQ_rcvd); /* checking of remote heartbeats is skewed by IRQ handling */ if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) { + xpc_hb_check_timeout = jiffies + + (xpc_hb_check_interval * HZ); + dev_dbg(xpc_part, "checking remote heartbeats\n"); xpc_check_remote_hb(); /* - * We need to periodically recheck to ensure no - * IRQ/amo pairs have been missed. That check - * must always reset xpc_hb_check_timeout. + * On sn2 we need to periodically recheck to ensure no + * IRQ/amo pairs have been missed. */ - force_IRQ = 1; + if (is_shub()) + force_IRQ = 1; } /* check for outstanding IRQs */ - new_IRQ_count = atomic_read(&xpc_activate_IRQ_rcvd); - if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) { + if (xpc_activate_IRQ_rcvd > 0 || force_IRQ != 0) { force_IRQ = 0; - - dev_dbg(xpc_part, "found an IRQ to process; will be " - "resetting xpc_hb_check_timeout\n"); - - xpc_process_activate_IRQ_rcvd(new_IRQ_count - - last_IRQ_count); - last_IRQ_count = new_IRQ_count; - - xpc_hb_check_timeout = jiffies + - (xpc_hb_check_interval * HZ); + dev_dbg(xpc_part, "processing activate IRQs " + "received\n"); + xpc_process_activate_IRQ_rcvd(); } /* wait for IRQ or timeout */ (void)wait_event_interruptible(xpc_activate_IRQ_wq, - (last_IRQ_count < atomic_read( - &xpc_activate_IRQ_rcvd) - || time_is_before_eq_jiffies( + (time_is_before_eq_jiffies( xpc_hb_check_timeout) || + xpc_activate_IRQ_rcvd > 0 || xpc_exiting)); } @@ -436,6 +433,153 @@ xpc_channel_mgr(struct xpc_partition *part) } } +/* + * Guarantee that the kzalloc'd memory is cacheline aligned. + */ +void * +xpc_kzalloc_cacheline_aligned(size_t size, gfp_t flags, void **base) +{ + /* see if kzalloc will give us cachline aligned memory by default */ + *base = kzalloc(size, flags); + if (*base == NULL) + return NULL; + + if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) + return *base; + + kfree(*base); + + /* nope, we'll have to do it ourselves */ + *base = kzalloc(size + L1_CACHE_BYTES, flags); + if (*base == NULL) + return NULL; + + return (void *)L1_CACHE_ALIGN((u64)*base); +} + +/* + * Setup the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +static enum xp_retval +xpc_setup_ch_structures(struct xpc_partition *part) +{ + enum xp_retval ret; + int ch_number; + struct xpc_channel *ch; + short partid = XPC_PARTID(part); + + /* + * Allocate all of the channel structures as a contiguous chunk of + * memory. 
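xpc_kzalloc_cacheline_aligned() above first tries a plain kzalloc(), which frequently returns cacheline-aligned memory anyway, and only over-allocates by one cacheline when it has to; either way the caller gets the raw pointer back through *base for the eventual kfree(). The same logic in a self-contained userspace form, with calloc standing in for kzalloc:

#include <stdio.h>
#include <stdlib.h>

#define CACHE_BYTES 128UL
#define CACHE_ALIGN(p) ((((unsigned long)(p)) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))

static void *zalloc_cl_aligned(size_t size, void **base)
{
	*base = calloc(1, size);
	if (*base == NULL)
		return NULL;
	if ((unsigned long)*base == CACHE_ALIGN(*base))
		return *base;		/* already aligned; use it as-is */

	free(*base);
	/* over-allocate so the pointer can be rounded up to a boundary */
	*base = calloc(1, size + CACHE_BYTES);
	if (*base == NULL)
		return NULL;
	return (void *)CACHE_ALIGN(*base);
}

int main(void)
{
	void *base;
	void *p = zalloc_cl_aligned(1000, &base);

	printf("aligned=%p raw=%p\n", p, base);
	free(base);	/* callers free the raw pointer, never the aligned one */
	return 0;
}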
+ */ + DBUG_ON(part->channels != NULL); + part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, + GFP_KERNEL); + if (part->channels == NULL) { + dev_err(xpc_chan, "can't get memory for channels\n"); + return xpNoMemory; + } + + /* allocate the remote open and close args */ + + part->remote_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, + GFP_KERNEL, &part-> + remote_openclose_args_base); + if (part->remote_openclose_args == NULL) { + dev_err(xpc_chan, "can't get memory for remote connect args\n"); + ret = xpNoMemory; + goto out_1; + } + + part->chctl.all_flags = 0; + spin_lock_init(&part->chctl_lock); + + atomic_set(&part->channel_mgr_requests, 1); + init_waitqueue_head(&part->channel_mgr_wq); + + part->nchannels = XPC_MAX_NCHANNELS; + + atomic_set(&part->nchannels_active, 0); + atomic_set(&part->nchannels_engaged, 0); + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch = &part->channels[ch_number]; + + ch->partid = partid; + ch->number = ch_number; + ch->flags = XPC_C_DISCONNECTED; + + atomic_set(&ch->kthreads_assigned, 0); + atomic_set(&ch->kthreads_idle, 0); + atomic_set(&ch->kthreads_active, 0); + + atomic_set(&ch->references, 0); + atomic_set(&ch->n_to_notify, 0); + + spin_lock_init(&ch->lock); + init_completion(&ch->wdisconnect_wait); + + atomic_set(&ch->n_on_msg_allocate_wq, 0); + init_waitqueue_head(&ch->msg_allocate_wq); + init_waitqueue_head(&ch->idle_wq); + } + + ret = xpc_setup_ch_structures_sn(part); + if (ret != xpSuccess) + goto out_2; + + /* + * With the setting of the partition setup_state to XPC_P_SS_SETUP, + * we're declaring that this partition is ready to go. + */ + part->setup_state = XPC_P_SS_SETUP; + + return xpSuccess; + + /* setup of ch structures failed */ +out_2: + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; +out_1: + kfree(part->channels); + part->channels = NULL; + return ret; +} + +/* + * Teardown the channel structures necessary to support XPartition Communication + * between the specified remote partition and the local one. + */ +static void +xpc_teardown_ch_structures(struct xpc_partition *part) +{ + DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); + DBUG_ON(atomic_read(&part->nchannels_active) != 0); + + /* + * Make this partition inaccessible to local processes by marking it + * as no longer setup. Then wait before proceeding with the teardown + * until all existing references cease. 
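The wait_event() just below blocks until every outstanding reference to the partition drains. The counterpart, which this hunk doesn't show, is a deref that wakes the waiter when the last reference goes away during teardown; a sketch of that shape, using the fields visible here (hypothetical body, not the actual xpc_part_deref()):

static void
part_deref_sketch(struct xpc_partition *part)
{
	s32 refs = atomic_dec_return(&part->references);

	DBUG_ON(refs < 0);
	if (refs == 0 && part->setup_state == XPC_P_SS_WTEARDOWN)
		wake_up(&part->teardown_wq);	/* teardown may now proceed */
}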
+ */ + DBUG_ON(part->setup_state != XPC_P_SS_SETUP); + part->setup_state = XPC_P_SS_WTEARDOWN; + + wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); + + /* now we can begin tearing down the infrastructure */ + + xpc_teardown_ch_structures_sn(part); + + kfree(part->remote_openclose_args_base); + part->remote_openclose_args = NULL; + kfree(part->channels); + part->channels = NULL; + + part->setup_state = XPC_P_SS_TORNDOWN; +} + /* * When XPC HB determines that a partition has come up, it will create a new * kthread and that kthread will call this function to attempt to set up the @@ -476,7 +620,7 @@ xpc_activating(void *__partid) xpc_allow_hb(partid); - if (xpc_setup_infrastructure(part) == xpSuccess) { + if (xpc_setup_ch_structures(part) == xpSuccess) { (void)xpc_part_ref(part); /* this will always succeed */ if (xpc_make_first_contact(part) == xpSuccess) { @@ -486,7 +630,7 @@ xpc_activating(void *__partid) } xpc_part_deref(part); - xpc_teardown_infrastructure(part); + xpc_teardown_ch_structures(part); } xpc_disallow_hb(partid); @@ -806,6 +950,56 @@ xpc_disconnect_wait(int ch_number) } } +static int +xpc_setup_partitions(void) +{ + short partid; + struct xpc_partition *part; + + xpc_partitions = kzalloc(sizeof(struct xpc_partition) * + xp_max_npartitions, GFP_KERNEL); + if (xpc_partitions == NULL) { + dev_err(xpc_part, "can't get memory for partition structure\n"); + return -ENOMEM; + } + + /* + * The first few fields of each entry of xpc_partitions[] need to + * be initialized now so that calls to xpc_connect() and + * xpc_disconnect() can be made prior to the activation of any remote + * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE + * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING + * PARTITION HAS BEEN ACTIVATED. + */ + for (partid = 0; partid < xp_max_npartitions; partid++) { + part = &xpc_partitions[partid]; + + DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); + + part->activate_IRQ_rcvd = 0; + spin_lock_init(&part->act_lock); + part->act_state = XPC_P_AS_INACTIVE; + XPC_SET_REASON(part, 0, 0); + + init_timer(&part->disengage_timer); + part->disengage_timer.function = + xpc_timeout_partition_disengage; + part->disengage_timer.data = (unsigned long)part; + + part->setup_state = XPC_P_SS_UNSET; + init_waitqueue_head(&part->teardown_wq); + atomic_set(&part->references, 0); + } + + return xpc_setup_partitions_sn(); +} + +static void +xpc_teardown_partitions(void) +{ + kfree(xpc_partitions); +} + static void xpc_do_exit(enum xp_retval reason) { @@ -892,8 +1086,7 @@ xpc_do_exit(enum xp_retval reason) DBUG_ON(xpc_any_partition_engaged()); DBUG_ON(xpc_any_hbs_allowed() != 0); - /* a zero timestamp indicates our rsvd page is not initialized */ - xpc_rsvd_page->ts_jiffies = 0; + xpc_teardown_rsvd_page(); if (reason == xpUnloading) { (void)unregister_die_notifier(&xpc_die_notifier); @@ -906,7 +1099,7 @@ xpc_do_exit(enum xp_retval reason) if (xpc_sysctl) unregister_sysctl_table(xpc_sysctl); - kfree(xpc_partitions); + xpc_teardown_partitions(); if (is_shub()) xpc_exit_sn2(); @@ -1062,8 +1255,6 @@ int __init xpc_init(void) { int ret; - short partid; - struct xpc_partition *part; struct task_struct *kthread; snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part"); @@ -1076,56 +1267,29 @@ xpc_init(void) * further to only support exactly 64 partitions on this * architecture, no less. 
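xpc_init(), continued just below, picks the backend at runtime: is_shub() routes to xpc_init_sn2() and is_uv() to xpc_init_uv(), and each init fills in the shared function pointers (xp_pa, xp_remote_memcpy, and so on) that the rest of the driver calls through. The pattern in a self-contained userspace miniature, with all names invented for the demo:

#include <stdio.h>

static int (*backend_copy)(unsigned long dst_pa, unsigned long src_pa,
			   unsigned long len);

static int copy_sn2(unsigned long d, unsigned long s, unsigned long n)
{
	(void)d; (void)s; (void)n;
	return 0;		/* the sn2 backend would use bte_copy() */
}

static int copy_uv(unsigned long d, unsigned long s, unsigned long n)
{
	(void)d; (void)s; (void)n;
	return 0;		/* the uv backend would use the GRU */
}

int main(void)
{
	int on_shub = 1;	/* pretend the boot-time check said sn2 */

	backend_copy = on_shub ? copy_sn2 : copy_uv;
	printf("copy ret=%d\n", backend_copy(0, 0, 0));
	return 0;
}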
*/ - if (xp_max_npartitions != 64) - return -EINVAL; - - ret = xpc_init_sn2(); - if (ret != 0) - return ret; + if (xp_max_npartitions != 64) { + dev_err(xpc_part, "max #of partitions not set to 64\n"); + ret = -EINVAL; + } else { + ret = xpc_init_sn2(); + } } else if (is_uv()) { - xpc_init_uv(); + ret = xpc_init_uv(); } else { - return -ENODEV; + ret = -ENODEV; } - xpc_partitions = kzalloc(sizeof(struct xpc_partition) * - xp_max_npartitions, GFP_KERNEL); - if (xpc_partitions == NULL) { + if (ret != 0) + return ret; + + ret = xpc_setup_partitions(); + if (ret != 0) { dev_err(xpc_part, "can't get memory for partition structure\n"); - ret = -ENOMEM; goto out_1; } - /* - * The first few fields of each entry of xpc_partitions[] need to - * be initialized now so that calls to xpc_connect() and - * xpc_disconnect() can be made prior to the activation of any remote - * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE - * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING - * PARTITION HAS BEEN ACTIVATED. - */ - for (partid = 0; partid < xp_max_npartitions; partid++) { - part = &xpc_partitions[partid]; - - DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part)); - - part->activate_IRQ_rcvd = 0; - spin_lock_init(&part->act_lock); - part->act_state = XPC_P_AS_INACTIVE; - XPC_SET_REASON(part, 0, 0); - - init_timer(&part->disengage_timer); - part->disengage_timer.function = - xpc_timeout_partition_disengage; - part->disengage_timer.data = (unsigned long)part; - - part->setup_state = XPC_P_SS_UNSET; - init_waitqueue_head(&part->teardown_wq); - atomic_set(&part->references, 0); - } - xpc_sysctl = register_sysctl_table(xpc_sys_dir); /* @@ -1133,10 +1297,9 @@ xpc_init(void) * other partitions to discover we are alive and establish initial * communications. */ - xpc_rsvd_page = xpc_setup_rsvd_page(); - if (xpc_rsvd_page == NULL) { + ret = xpc_setup_rsvd_page(); + if (ret != 0) { dev_err(xpc_part, "can't setup our reserved page\n"); - ret = -EBUSY; goto out_2; } @@ -1187,15 +1350,15 @@ xpc_init(void) /* initialization was not successful */ out_3: - /* a zero timestamp indicates our rsvd page is not initialized */ - xpc_rsvd_page->ts_jiffies = 0; + xpc_teardown_rsvd_page(); (void)unregister_die_notifier(&xpc_die_notifier); (void)unregister_reboot_notifier(&xpc_reboot_notifier); out_2: if (xpc_sysctl) unregister_sysctl_table(xpc_sysctl); - kfree(xpc_partitions); + + xpc_teardown_partitions(); out_1: if (is_shub()) xpc_exit_sn2(); diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index b5fb21641130..6722f6fe4dc7 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c @@ -73,6 +73,12 @@ xpc_get_rsvd_page_pa(int nasid) while (1) { + /* !!! rp_pa will need to be _gpa on UV. + * ??? So do we save it into the architecture specific parts + * ??? of the xpc_partition structure? Do we rename this + * ??? function or have two versions? Rename rp_pa for UV to + * ??? rp_gpa? + */ ret = xpc_get_partition_rsvd_page_pa(buf, &cookie, &rp_pa, &len); @@ -118,9 +124,10 @@ xpc_get_rsvd_page_pa(int nasid) * other partitions to discover we are alive and establish initial * communications. 
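xpc_setup_rsvd_page(), just below, also changes calling convention: rather than returning the page pointer (NULL on failure), it stores the pointer in the global xpc_rsvd_page and returns 0 or a negative errno (-ESRCH when SAL cannot locate the page, -EINVAL when its contents are bogus), letting xpc_init() propagate the precise failure. The shape of that convention in a self-contained demo:

#include <errno.h>
#include <stdio.h>

struct rsvd_page { int SAL_partid; };

static struct rsvd_page *demo_rsvd_page;	/* stand-in for xpc_rsvd_page */

static int setup_rsvd_page_demo(struct rsvd_page *rp, int max_parts)
{
	if (rp == NULL)
		return -ESRCH;	/* the locator came up empty */
	if (rp->SAL_partid < 0 || rp->SAL_partid >= max_parts)
		return -EINVAL;	/* the page contents are bogus */

	demo_rsvd_page = rp;
	return 0;
}

int main(void)
{
	struct rsvd_page page = { .SAL_partid = 2 };

	printf("ret=%d\n", setup_rsvd_page_demo(&page, 64));
	return 0;
}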
*/ -struct xpc_rsvd_page * +int xpc_setup_rsvd_page(void) { + int ret; struct xpc_rsvd_page *rp; unsigned long rp_pa; unsigned long new_ts_jiffies; @@ -132,7 +139,7 @@ xpc_setup_rsvd_page(void) preempt_enable(); if (rp_pa == 0) { dev_err(xpc_part, "SAL failed to locate the reserved page\n"); - return NULL; + return -ESRCH; } rp = (struct xpc_rsvd_page *)__va(rp_pa); @@ -146,7 +153,7 @@ xpc_setup_rsvd_page(void) dev_err(xpc_part, "the reserved page's partid of %d is outside " "supported range (< 0 || >= %d)\n", rp->SAL_partid, xp_max_npartitions); - return NULL; + return -EINVAL; } rp->version = XPC_RP_VERSION; @@ -165,8 +172,9 @@ xpc_setup_rsvd_page(void) xpc_part_nasids = XPC_RP_PART_NASIDS(rp); xpc_mach_nasids = XPC_RP_MACH_NASIDS(rp); - if (xpc_rsvd_page_init(rp) != xpSuccess) - return NULL; + ret = xpc_setup_rsvd_page_sn(rp); + if (ret != 0) + return ret; /* * Set timestamp of when reserved page was setup by XPC. @@ -178,7 +186,15 @@ xpc_setup_rsvd_page(void) new_ts_jiffies++; rp->ts_jiffies = new_ts_jiffies; - return rp; + xpc_rsvd_page = rp; + return 0; +} + +void +xpc_teardown_rsvd_page(void) +{ + /* a zero timestamp indicates our rsvd page is not initialized */ + xpc_rsvd_page->ts_jiffies = 0; } /* diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index d1ccadc0857d..8b4b0653d9e9 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -53,12 +53,19 @@ * Buffer used to store a local copy of portions of a remote partition's * reserved page (either its header and part_nasids mask, or its vars). */ -static char *xpc_remote_copy_buffer_sn2; static void *xpc_remote_copy_buffer_base_sn2; +static char *xpc_remote_copy_buffer_sn2; static struct xpc_vars_sn2 *xpc_vars_sn2; static struct xpc_vars_part_sn2 *xpc_vars_part_sn2; +static int +xpc_setup_partitions_sn_sn2(void) +{ + /* nothing needs to be done */ + return 0; +} + /* SH_IPI_ACCESS shub register value on startup */ static u64 xpc_sh1_IPI_access_sn2; static u64 xpc_sh2_IPI_access0_sn2; @@ -198,7 +205,12 @@ xpc_init_IRQ_amo_sn2(int index) static irqreturn_t xpc_handle_activate_IRQ_sn2(int irq, void *dev_id) { - atomic_inc(&xpc_activate_IRQ_rcvd); + unsigned long irq_flags; + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + xpc_activate_IRQ_rcvd++; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + wake_up_interruptible(&xpc_activate_IRQ_wq); return IRQ_HANDLED; } @@ -222,6 +234,7 @@ xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid, static void xpc_send_local_activate_IRQ_sn2(int from_nasid) { + unsigned long irq_flags; struct amo *amos = (struct amo *)__va(xpc_vars_sn2->amos_page_pa + (XPC_ACTIVATE_IRQ_AMOS_SN2 * sizeof(struct amo))); @@ -230,7 +243,10 @@ xpc_send_local_activate_IRQ_sn2(int from_nasid) FETCHOP_STORE_OP(TO_AMO((u64)&amos[BIT_WORD(from_nasid / 2)].variable), FETCHOP_OR, BIT_MASK(from_nasid / 2)); - atomic_inc(&xpc_activate_IRQ_rcvd); + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + xpc_activate_IRQ_rcvd++; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + wake_up_interruptible(&xpc_activate_IRQ_wq); } @@ -375,7 +391,7 @@ static void xpc_send_chctl_closerequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) { - struct xpc_openclose_args *args = ch->local_openclose_args; + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; args->reason = ch->reason; XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_CLOSEREQUEST, irq_flags); @@ -390,7 +406,7 @@ xpc_send_chctl_closereply_sn2(struct 
xpc_channel *ch, unsigned long *irq_flags) static void xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) { - struct xpc_openclose_args *args = ch->local_openclose_args; + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; args->msg_size = ch->msg_size; args->local_nentries = ch->local_nentries; @@ -400,11 +416,11 @@ xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) static void xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags) { - struct xpc_openclose_args *args = ch->local_openclose_args; + struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; args->remote_nentries = ch->remote_nentries; args->local_nentries = ch->local_nentries; - args->local_msgqueue_pa = xp_pa(ch->local_msgqueue); + args->local_msgqueue_pa = xp_pa(ch->sn.sn2.local_msgqueue); XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags); } @@ -420,6 +436,13 @@ xpc_send_chctl_local_msgrequest_sn2(struct xpc_channel *ch) XPC_SEND_LOCAL_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_MSGREQUEST); } +static void +xpc_save_remote_msgqueue_pa_sn2(struct xpc_channel *ch, + unsigned long msgqueue_pa) +{ + ch->sn.sn2.remote_msgqueue_pa = msgqueue_pa; +} + /* * This next set of functions are used to keep track of when a partition is * potentially engaged in accessing memory belonging to another partition. @@ -489,6 +512,17 @@ xpc_indicate_partition_disengaged_sn2(struct xpc_partition *part) part_sn2->activate_IRQ_phys_cpuid); } +static void +xpc_assume_partition_disengaged_sn2(short partid) +{ + struct amo *amo = xpc_vars_sn2->amos_page + + XPC_ENGAGED_PARTITIONS_AMO_SN2; + + /* clear bit(s) based on partid mask in our partition's amo */ + FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, + ~BIT(partid)); +} + static int xpc_partition_engaged_sn2(short partid) { @@ -510,17 +544,6 @@ xpc_any_partition_engaged_sn2(void) return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_LOAD) != 0; } -static void -xpc_assume_partition_disengaged_sn2(short partid) -{ - struct amo *amo = xpc_vars_sn2->amos_page + - XPC_ENGAGED_PARTITIONS_AMO_SN2; - - /* clear bit(s) based on partid mask in our partition's amo */ - FETCHOP_STORE_OP(TO_AMO((u64)&amo->variable), FETCHOP_AND, - ~BIT(partid)); -} - /* original protection values for each node */ static u64 xpc_prot_vec_sn2[MAX_NUMNODES]; @@ -595,8 +618,8 @@ xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa, } -static enum xp_retval -xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) +static int +xpc_setup_rsvd_page_sn_sn2(struct xpc_rsvd_page *rp) { struct amo *amos_page; int i; @@ -627,7 +650,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) amos_page = (struct amo *)TO_AMO(uncached_alloc_page(0, 1)); if (amos_page == NULL) { dev_err(xpc_part, "can't allocate page of amos\n"); - return xpNoMemory; + return -ENOMEM; } /* @@ -639,7 +662,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) dev_err(xpc_part, "can't allow amo operations\n"); uncached_free_page(__IA64_UNCACHED_OFFSET | TO_PHYS((u64)amos_page), 1); - return ret; + return -EPERM; } } @@ -665,7 +688,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp) (void)xpc_init_IRQ_amo_sn2(XPC_ENGAGED_PARTITIONS_AMO_SN2); (void)xpc_init_IRQ_amo_sn2(XPC_DEACTIVATE_REQUEST_AMO_SN2); - return xpSuccess; + return 0; } static void @@ -1082,10 +1105,19 @@ xpc_identify_activate_IRQ_sender_sn2(void) } static void -xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected) +xpc_process_activate_IRQ_rcvd_sn2(void) { + 
unsigned long irq_flags; + int n_IRQs_expected; int n_IRQs_detected; + DBUG_ON(xpc_activate_IRQ_rcvd == 0); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + n_IRQs_expected = xpc_activate_IRQ_rcvd; + xpc_activate_IRQ_rcvd = 0; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + n_IRQs_detected = xpc_identify_activate_IRQ_sender_sn2(); if (n_IRQs_detected < n_IRQs_expected) { /* retry once to help avoid missing amo */ @@ -1094,116 +1126,63 @@ xpc_process_activate_IRQ_rcvd_sn2(int n_IRQs_expected) } /* - * Guarantee that the kzalloc'd memory is cacheline aligned. - */ -static void * -xpc_kzalloc_cacheline_aligned_sn2(size_t size, gfp_t flags, void **base) -{ - /* see if kzalloc will give us cachline aligned memory by default */ - *base = kzalloc(size, flags); - if (*base == NULL) - return NULL; - - if ((u64)*base == L1_CACHE_ALIGN((u64)*base)) - return *base; - - kfree(*base); - - /* nope, we'll have to do it ourselves */ - *base = kzalloc(size + L1_CACHE_BYTES, flags); - if (*base == NULL) - return NULL; - - return (void *)L1_CACHE_ALIGN((u64)*base); -} - -/* - * Setup the infrastructure necessary to support XPartition Communication - * between the specified remote partition and the local one. + * Setup the channel structures that are sn2 specific. */ static enum xp_retval -xpc_setup_infrastructure_sn2(struct xpc_partition *part) +xpc_setup_ch_structures_sn_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + struct xpc_channel_sn2 *ch_sn2; enum xp_retval retval; int ret; int cpuid; int ch_number; - struct xpc_channel *ch; struct timer_list *timer; short partid = XPC_PARTID(part); - /* - * Allocate all of the channel structures as a contiguous chunk of - * memory. - */ - DBUG_ON(part->channels != NULL); - part->channels = kzalloc(sizeof(struct xpc_channel) * XPC_MAX_NCHANNELS, - GFP_KERNEL); - if (part->channels == NULL) { - dev_err(xpc_chan, "can't get memory for channels\n"); - return xpNoMemory; - } - /* allocate all the required GET/PUT values */ part_sn2->local_GPs = - xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL, - &part_sn2->local_GPs_base); + xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, + &part_sn2->local_GPs_base); if (part_sn2->local_GPs == NULL) { dev_err(xpc_chan, "can't get memory for local get/put " "values\n"); - retval = xpNoMemory; - goto out_1; + return xpNoMemory; } part_sn2->remote_GPs = - xpc_kzalloc_cacheline_aligned_sn2(XPC_GP_SIZE, GFP_KERNEL, - &part_sn2->remote_GPs_base); + xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, GFP_KERNEL, + &part_sn2->remote_GPs_base); if (part_sn2->remote_GPs == NULL) { dev_err(xpc_chan, "can't get memory for remote get/put " "values\n"); retval = xpNoMemory; - goto out_2; + goto out_1; } part_sn2->remote_GPs_pa = 0; /* allocate all the required open and close args */ - part->local_openclose_args = - xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE, - GFP_KERNEL, - &part->local_openclose_args_base); - if (part->local_openclose_args == NULL) { + part_sn2->local_openclose_args = + xpc_kzalloc_cacheline_aligned(XPC_OPENCLOSE_ARGS_SIZE, + GFP_KERNEL, &part_sn2-> + local_openclose_args_base); + if (part_sn2->local_openclose_args == NULL) { dev_err(xpc_chan, "can't get memory for local connect args\n"); retval = xpNoMemory; - goto out_3; - } - - part->remote_openclose_args = - xpc_kzalloc_cacheline_aligned_sn2(XPC_OPENCLOSE_ARGS_SIZE, - GFP_KERNEL, - &part->remote_openclose_args_base); - if (part->remote_openclose_args == NULL) { - dev_err(xpc_chan, 
"can't get memory for remote connect args\n"); - retval = xpNoMemory; - goto out_4; + goto out_2; } part_sn2->remote_openclose_args_pa = 0; part_sn2->local_chctl_amo_va = xpc_init_IRQ_amo_sn2(partid); - part->chctl.all_flags = 0; - spin_lock_init(&part->chctl_lock); part_sn2->notify_IRQ_nasid = 0; part_sn2->notify_IRQ_phys_cpuid = 0; part_sn2->remote_chctl_amo_va = NULL; - atomic_set(&part->channel_mgr_requests, 1); - init_waitqueue_head(&part->channel_mgr_wq); - sprintf(part_sn2->notify_IRQ_owner, "xpc%02d", partid); ret = request_irq(SGI_XPC_NOTIFY, xpc_handle_notify_IRQ_sn2, IRQF_SHARED, part_sn2->notify_IRQ_owner, @@ -1212,7 +1191,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) dev_err(xpc_chan, "can't register NOTIFY IRQ handler, " "errno=%d\n", -ret); retval = xpLackOfResources; - goto out_5; + goto out_3; } /* Setup a timer to check for dropped notify IRQs */ @@ -1224,44 +1203,16 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; add_timer(timer); - part->nchannels = XPC_MAX_NCHANNELS; - - atomic_set(&part->nchannels_active, 0); - atomic_set(&part->nchannels_engaged, 0); - for (ch_number = 0; ch_number < part->nchannels; ch_number++) { - ch = &part->channels[ch_number]; - - ch->partid = partid; - ch->number = ch_number; - ch->flags = XPC_C_DISCONNECTED; - - ch->sn.sn2.local_GP = &part_sn2->local_GPs[ch_number]; - ch->local_openclose_args = - &part->local_openclose_args[ch_number]; - - atomic_set(&ch->kthreads_assigned, 0); - atomic_set(&ch->kthreads_idle, 0); - atomic_set(&ch->kthreads_active, 0); + ch_sn2 = &part->channels[ch_number].sn.sn2; - atomic_set(&ch->references, 0); - atomic_set(&ch->n_to_notify, 0); + ch_sn2->local_GP = &part_sn2->local_GPs[ch_number]; + ch_sn2->local_openclose_args = + &part_sn2->local_openclose_args[ch_number]; - spin_lock_init(&ch->lock); - mutex_init(&ch->sn.sn2.msg_to_pull_mutex); - init_completion(&ch->wdisconnect_wait); - - atomic_set(&ch->n_on_msg_allocate_wq, 0); - init_waitqueue_head(&ch->msg_allocate_wq); - init_waitqueue_head(&ch->idle_wq); + mutex_init(&ch_sn2->msg_to_pull_mutex); } - /* - * With the setting of the partition setup_state to XPC_P_SS_SETUP, - * we're declaring that this partition is ready to go. - */ - part->setup_state = XPC_P_SS_SETUP; - /* * Setup the per partition specific variables required by the * remote partition to establish channel connections with us. 
@@ -1271,7 +1222,7 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) */ xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs); xpc_vars_part_sn2[partid].openclose_args_pa = - xp_pa(part->local_openclose_args); + xp_pa(part_sn2->local_openclose_args); xpc_vars_part_sn2[partid].chctl_amo_pa = xp_pa(part_sn2->local_chctl_amo_va); cpuid = raw_smp_processor_id(); /* any CPU in this partition will do */ @@ -1279,80 +1230,48 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part) xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid = cpu_physical_id(cpuid); xpc_vars_part_sn2[partid].nchannels = part->nchannels; - xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1; + xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC1_SN2; return xpSuccess; - /* setup of infrastructure failed */ -out_5: - kfree(part->remote_openclose_args_base); - part->remote_openclose_args = NULL; -out_4: - kfree(part->local_openclose_args_base); - part->local_openclose_args = NULL; + /* setup of ch structures failed */ out_3: + kfree(part_sn2->local_openclose_args_base); + part_sn2->local_openclose_args = NULL; +out_2: kfree(part_sn2->remote_GPs_base); part_sn2->remote_GPs = NULL; -out_2: +out_1: kfree(part_sn2->local_GPs_base); part_sn2->local_GPs = NULL; -out_1: - kfree(part->channels); - part->channels = NULL; return retval; } /* - * Teardown the infrastructure necessary to support XPartition Communication - * between the specified remote partition and the local one. + * Teardown the channel structures that are sn2 specific. */ static void -xpc_teardown_infrastructure_sn2(struct xpc_partition *part) +xpc_teardown_ch_structures_sn_sn2(struct xpc_partition *part) { struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; short partid = XPC_PARTID(part); /* - * We start off by making this partition inaccessible to local - * processes by marking it as no longer setup. Then we make it - * inaccessible to remote processes by clearing the XPC per partition - * specific variable's magic # (which indicates that these variables - * are no longer valid) and by ignoring all XPC notify IRQs sent to - * this partition. + * Indicate that the variables specific to the remote partition are no + * longer available for its use. */ - - DBUG_ON(atomic_read(&part->nchannels_engaged) != 0); - DBUG_ON(atomic_read(&part->nchannels_active) != 0); - DBUG_ON(part->setup_state != XPC_P_SS_SETUP); - part->setup_state = XPC_P_SS_WTEARDOWN; - xpc_vars_part_sn2[partid].magic = 0; - free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); - - /* - * Before proceeding with the teardown we have to wait until all - * existing references cease. - */ - wait_event(part->teardown_wq, (atomic_read(&part->references) == 0)); - - /* now we can begin tearing down the infrastructure */ - - part->setup_state = XPC_P_SS_TORNDOWN; - /* in case we've still got outstanding timers registered... 
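The ordering just below is the point to notice: del_timer_sync() waits out a timer handler that may be mid-flight on another CPU, and free_irq() likewise does not return while the ISR is still running, so by the time the kfree()s execute nothing asynchronous can touch the memory. The same quiesce-then-free rule as a sketch with invented names:

struct quiesce_demo {
	struct timer_list timer;
	unsigned int irq;
	void *args;
};

static void
safe_teardown_sketch(struct quiesce_demo *d, void *dev_id)
{
	del_timer_sync(&d->timer);	/* 1: no timer handler pending or running */
	free_irq(d->irq, dev_id);	/* 2: no ISR running; IRQ unregistered */
	kfree(d->args);			/* 3: only now is freeing safe */
	d->args = NULL;
}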
*/ del_timer_sync(&part_sn2->dropped_notify_IRQ_timer); + free_irq(SGI_XPC_NOTIFY, (void *)(u64)partid); - kfree(part->remote_openclose_args_base); - part->remote_openclose_args = NULL; - kfree(part->local_openclose_args_base); - part->local_openclose_args = NULL; + kfree(part_sn2->local_openclose_args_base); + part_sn2->local_openclose_args = NULL; kfree(part_sn2->remote_GPs_base); part_sn2->remote_GPs = NULL; kfree(part_sn2->local_GPs_base); part_sn2->local_GPs = NULL; - kfree(part->channels); - part->channels = NULL; part_sn2->local_chctl_amo_va = NULL; } @@ -1429,8 +1348,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) /* see if they've been set up yet */ - if (pulled_entry->magic != XPC_VP_MAGIC1 && - pulled_entry->magic != XPC_VP_MAGIC2) { + if (pulled_entry->magic != XPC_VP_MAGIC1_SN2 && + pulled_entry->magic != XPC_VP_MAGIC2_SN2) { if (pulled_entry->magic != 0) { dev_dbg(xpc_chan, "partition %d's XPC vars_part for " @@ -1443,7 +1362,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) return xpRetry; } - if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1) { + if (xpc_vars_part_sn2[partid].magic == XPC_VP_MAGIC1_SN2) { /* validate the variables */ @@ -1473,10 +1392,10 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part) /* let the other side know that we've pulled their variables */ - xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2; + xpc_vars_part_sn2[partid].magic = XPC_VP_MAGIC2_SN2; } - if (pulled_entry->magic == XPC_VP_MAGIC1) + if (pulled_entry->magic == XPC_VP_MAGIC1_SN2) return xpRetry; return xpSuccess; @@ -1605,6 +1524,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part) static enum xp_retval xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) { + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; @@ -1612,17 +1532,17 @@ xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) for (nentries = ch->local_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->msg_size; - ch->local_msgqueue = - xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL, - &ch->local_msgqueue_base); - if (ch->local_msgqueue == NULL) + ch_sn2->local_msgqueue = + xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, + &ch_sn2->local_msgqueue_base); + if (ch_sn2->local_msgqueue == NULL) continue; nbytes = nentries * sizeof(struct xpc_notify); - ch->notify_queue = kzalloc(nbytes, GFP_KERNEL); - if (ch->notify_queue == NULL) { - kfree(ch->local_msgqueue_base); - ch->local_msgqueue = NULL; + ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL); + if (ch_sn2->notify_queue == NULL) { + kfree(ch_sn2->local_msgqueue_base); + ch_sn2->local_msgqueue = NULL; continue; } @@ -1649,6 +1569,7 @@ xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) static enum xp_retval xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) { + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long irq_flags; int nentries; size_t nbytes; @@ -1658,10 +1579,10 @@ xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) for (nentries = ch->remote_nentries; nentries > 0; nentries--) { nbytes = nentries * ch->msg_size; - ch->remote_msgqueue = - xpc_kzalloc_cacheline_aligned_sn2(nbytes, GFP_KERNEL, - &ch->remote_msgqueue_base); - if (ch->remote_msgqueue == NULL) + ch_sn2->remote_msgqueue = + xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2-> + remote_msgqueue_base); + if (ch_sn2->remote_msgqueue == NULL) continue; spin_lock_irqsave(&ch->lock, irq_flags); @@ -1687,8 +1608,9 @@ xpc_allocate_remote_msgqueue_sn2(struct xpc_channel 
*ch) * Note: Assumes all of the channel sizes are filled in. */ static enum xp_retval -xpc_allocate_msgqueues_sn2(struct xpc_channel *ch) +xpc_setup_msg_structures_sn2(struct xpc_channel *ch) { + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; enum xp_retval ret; DBUG_ON(ch->flags & XPC_C_SETUP); @@ -1698,10 +1620,10 @@ xpc_allocate_msgqueues_sn2(struct xpc_channel *ch) ret = xpc_allocate_remote_msgqueue_sn2(ch); if (ret != xpSuccess) { - kfree(ch->local_msgqueue_base); - ch->local_msgqueue = NULL; - kfree(ch->notify_queue); - ch->notify_queue = NULL; + kfree(ch_sn2->local_msgqueue_base); + ch_sn2->local_msgqueue = NULL; + kfree(ch_sn2->notify_queue); + ch_sn2->notify_queue = NULL; } } return ret; @@ -1715,21 +1637,13 @@ xpc_allocate_msgqueues_sn2(struct xpc_channel *ch) * they're cleared when XPC_C_DISCONNECTED is cleared. */ static void -xpc_free_msgqueues_sn2(struct xpc_channel *ch) +xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; DBUG_ON(!spin_is_locked(&ch->lock)); - DBUG_ON(atomic_read(&ch->n_to_notify) != 0); - ch->remote_msgqueue_pa = 0; - ch->func = NULL; - ch->key = NULL; - ch->msg_size = 0; - ch->local_nentries = 0; - ch->remote_nentries = 0; - ch->kthreads_assigned_limit = 0; - ch->kthreads_idle_limit = 0; + ch_sn2->remote_msgqueue_pa = 0; ch_sn2->local_GP->get = 0; ch_sn2->local_GP->put = 0; @@ -1745,12 +1659,12 @@ xpc_free_msgqueues_sn2(struct xpc_channel *ch) dev_dbg(xpc_chan, "ch->flags=0x%x, partid=%d, channel=%d\n", ch->flags, ch->partid, ch->number); - kfree(ch->local_msgqueue_base); - ch->local_msgqueue = NULL; - kfree(ch->remote_msgqueue_base); - ch->remote_msgqueue = NULL; - kfree(ch->notify_queue); - ch->notify_queue = NULL; + kfree(ch_sn2->local_msgqueue_base); + ch_sn2->local_msgqueue = NULL; + kfree(ch_sn2->remote_msgqueue_base); + ch_sn2->remote_msgqueue = NULL; + kfree(ch_sn2->notify_queue); + ch_sn2->notify_queue = NULL; } } @@ -1766,7 +1680,7 @@ xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) while (++get < put && atomic_read(&ch->n_to_notify) > 0) { - notify = &ch->notify_queue[get % ch->local_nentries]; + notify = &ch->sn.sn2.notify_queue[get % ch->local_nentries]; /* * See if the notify entry indicates it was associated with @@ -1818,7 +1732,7 @@ xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) get = ch_sn2->w_remote_GP.get; do { - msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + (get % ch->local_nentries) * ch->msg_size); msg->flags = 0; @@ -1837,7 +1751,7 @@ xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) put = ch_sn2->w_remote_GP.put; do { - msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + + msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + (put % ch->remote_nentries) * ch->msg_size); msg->flags = 0; @@ -1976,8 +1890,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) } msg_offset = msg_index * ch->msg_size; - msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset); - remote_msg_pa = ch->remote_msgqueue_pa + msg_offset; + msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + + msg_offset); + remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset; ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa, nmsgs * ch->msg_size); @@ -2001,7 +1916,7 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) /* return the message we were looking for */ msg_offset = (get % ch->remote_nentries) * ch->msg_size; - msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + 
msg_offset); + msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + msg_offset); return msg; } @@ -2080,7 +1995,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) if (put == ch_sn2->w_local_GP.put) break; - msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + (put % ch->local_nentries) * ch->msg_size); @@ -2182,7 +2097,7 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, } /* get the message's address and initialize it */ - msg = (struct xpc_msg *)((u64)ch->local_msgqueue + + msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + (put % ch->local_nentries) * ch->msg_size); DBUG_ON(msg->flags != 0); @@ -2207,6 +2122,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, void *key) { enum xp_retval ret = xpSuccess; + struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; struct xpc_msg *msg = msg; struct xpc_notify *notify = notify; s64 msg_number; @@ -2243,7 +2159,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, atomic_inc(&ch->n_to_notify); - notify = &ch->notify_queue[msg_number % ch->local_nentries]; + notify = &ch_sn2->notify_queue[msg_number % ch->local_nentries]; notify->func = func; notify->key = key; notify->type = notify_type; @@ -2279,7 +2195,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, /* see if the message is next in line to be sent, if so send it */ - put = ch->sn.sn2.local_GP->put; + put = ch_sn2->local_GP->put; if (put == msg_number) xpc_send_msgs_sn2(ch, put); @@ -2307,7 +2223,7 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) if (get == ch_sn2->w_local_GP.get) break; - msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + + msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + (get % ch->remote_nentries) * ch->msg_size); @@ -2385,8 +2301,9 @@ xpc_init_sn2(void) int ret; size_t buf_size; + xpc_setup_partitions_sn = xpc_setup_partitions_sn_sn2; xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_sn2; - xpc_rsvd_page_init = xpc_rsvd_page_init_sn2; + xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_sn2; xpc_increment_heartbeat = xpc_increment_heartbeat_sn2; xpc_offline_heartbeat = xpc_offline_heartbeat_sn2; xpc_online_heartbeat = xpc_online_heartbeat_sn2; @@ -2403,29 +2320,33 @@ xpc_init_sn2(void) xpc_cancel_partition_deactivation_request_sn2; xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2; - xpc_setup_infrastructure = xpc_setup_infrastructure_sn2; - xpc_teardown_infrastructure = xpc_teardown_infrastructure_sn2; + xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_sn2; + xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_sn2; xpc_make_first_contact = xpc_make_first_contact_sn2; + xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_sn2; - xpc_allocate_msgqueues = xpc_allocate_msgqueues_sn2; - xpc_free_msgqueues = xpc_free_msgqueues_sn2; + xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; + xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; + xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; + xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; + + xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_sn2; + + xpc_setup_msg_structures = xpc_setup_msg_structures_sn2; + xpc_teardown_msg_structures = xpc_teardown_msg_structures_sn2; + xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2; 
xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2; - xpc_partition_engaged = xpc_partition_engaged_sn2; - xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; xpc_indicate_partition_disengaged = xpc_indicate_partition_disengaged_sn2; + xpc_partition_engaged = xpc_partition_engaged_sn2; + xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; - xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_sn2; - xpc_send_chctl_closereply = xpc_send_chctl_closereply_sn2; - xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_sn2; - xpc_send_chctl_openreply = xpc_send_chctl_openreply_sn2; - xpc_send_msg = xpc_send_msg_sn2; xpc_received_msg = xpc_received_msg_sn2; diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index c2d4ddd6e955..689cb5c68ccf 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -14,41 +14,528 @@ */ #include +#include +#include +#include +#include #include +#include "../sgi-gru/gru.h" #include "../sgi-gru/grukservices.h" #include "xpc.h" +static atomic64_t xpc_heartbeat_uv; static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); -static void *xpc_activate_mq; +#define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES) +#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES) + +#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_ACTIVATE_MSG_SIZE_UV) +#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \ + XPC_NOTIFY_MSG_SIZE_UV) + +static void *xpc_activate_mq_uv; +static void *xpc_notify_mq_uv; + +static int +xpc_setup_partitions_sn_uv(void) +{ + short partid; + struct xpc_partition_uv *part_uv; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + + spin_lock_init(&part_uv->flags_lock); + part_uv->remote_act_state = XPC_P_AS_INACTIVE; + } + return 0; +} + +static void * +xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, + irq_handler_t irq_handler) +{ + int ret; + int nid; + int mq_order; + struct page *page; + void *mq; + + nid = cpu_to_node(cpuid); + mq_order = get_order(mq_size); + page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + mq_order); + if (page == NULL) + return NULL; + + mq = page_address(page); + ret = gru_create_message_queue(mq, mq_size); + if (ret != 0) { + dev_err(xpc_part, "gru_create_message_queue() returned " + "error=%d\n", ret); + free_pages((unsigned long)mq, mq_order); + return NULL; + } + + /* !!! Need to do some other things to set up IRQ */ + + ret = request_irq(irq, irq_handler, 0, "xpc", NULL); + if (ret != 0) { + dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n", + irq, ret); + free_pages((unsigned long)mq, mq_order); + return NULL; + } + + /* !!! enable generation of irq when GRU mq op occurs to this mq */ + + /* ??? allow other partitions to access GRU mq? */ + + return mq; +} static void -xpc_send_local_activate_IRQ_uv(struct xpc_partition *part) +xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq) +{ + /* ??? disallow other partitions to access GRU mq? */ + + /* !!! 
disable generation of irq when GRU mq op occurs to this mq */ + + free_irq(irq, NULL); + + free_pages((unsigned long)mq, get_order(mq_size)); +} + +static enum xp_retval +xpc_send_gru_msg(unsigned long mq_gpa, void *msg, size_t msg_size) { + enum xp_retval xp_ret; + int ret; + + while (1) { + ret = gru_send_message_gpa(mq_gpa, msg, msg_size); + if (ret == MQE_OK) { + xp_ret = xpSuccess; + break; + } + + if (ret == MQE_QUEUE_FULL) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_QUEUE_FULL\n"); + /* !!! handle QLimit reached; delay & try again */ + /* ??? Do we add a limit to the number of retries? */ + (void)msleep_interruptible(10); + } else if (ret == MQE_CONGESTION) { + dev_dbg(xpc_chan, "gru_send_message_gpa() returned " + "error=MQE_CONGESTION\n"); + /* !!! handle LB Overflow; simply try again */ + /* ??? Do we add a limit to the number of retries? */ + } else { + /* !!! Currently this is MQE_UNEXPECTED_CB_ERR */ + dev_err(xpc_chan, "gru_send_message_gpa() returned " + "error=%d\n", ret); + xp_ret = xpGruSendMqError; + break; + } + } + return xp_ret; +} + +static void +xpc_process_activate_IRQ_rcvd_uv(void) +{ + unsigned long irq_flags; + short partid; + struct xpc_partition *part; + u8 act_state_req; + + DBUG_ON(xpc_activate_IRQ_rcvd == 0); + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part = &xpc_partitions[partid]; + + if (part->sn.uv.act_state_req == 0) + continue; + + xpc_activate_IRQ_rcvd--; + BUG_ON(xpc_activate_IRQ_rcvd < 0); + + act_state_req = part->sn.uv.act_state_req; + part->sn.uv.act_state_req = 0; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + if (act_state_req == XPC_P_ASR_ACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else if (part->act_state == XPC_P_AS_DEACTIVATING) + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_REACTIVATE_UV) { + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_activate_partition(part); + else + XPC_DEACTIVATE_PARTITION(part, xpReactivating); + + } else if (act_state_req == XPC_P_ASR_DEACTIVATE_UV) { + XPC_DEACTIVATE_PARTITION(part, part->sn.uv.reason); + + } else { + BUG(); + } + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (xpc_activate_IRQ_rcvd == 0) + break; + } + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + +} + +static irqreturn_t +xpc_handle_activate_IRQ_uv(int irq, void *dev_id) +{ + unsigned long irq_flags; + struct xpc_activate_mq_msghdr_uv *msg_hdr; + short partid; + struct xpc_partition *part; + struct xpc_partition_uv *part_uv; + struct xpc_openclose_args *args; + int wakeup_hb_checker = 0; + + while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { + + partid = msg_hdr->partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() invalid partid=0x%x passed in message\n", partid); + gru_free_message(xpc_activate_mq_uv, msg_hdr); + continue; + } + part = &xpc_partitions[partid]; + part_uv = &part->sn.uv; + + part_uv->remote_act_state = msg_hdr->act_state; + + switch (msg_hdr->type) { + case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: + /* syncing of remote_act_state was just done above */ + break; + + case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) + msg_hdr; + part_uv->heartbeat = msg->heartbeat; + break; + } +
case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) + msg_hdr; + part_uv->heartbeat = msg->heartbeat; + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) + msg_hdr; + part_uv->heartbeat = msg->heartbeat; + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_activate_req_uv *msg; + + /* + * ??? Do we deal here with ts_jiffies being different + * ??? if act_state != XPC_P_AS_INACTIVE instead of + * ??? below? + */ + msg = (struct xpc_activate_mq_msg_activate_req_uv *) + msg_hdr; + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; + part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ + part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; + part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + wakeup_hb_checker++; + break; + } + case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_deactivate_req_uv *msg; + + msg = (struct xpc_activate_mq_msg_deactivate_req_uv *) + msg_hdr; + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = msg->reason; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + wakeup_hb_checker++; + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + + msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv + *)msg_hdr; + args = &part->remote_openclose_args[msg->ch_number]; + args->reason = msg->reason; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= + XPC_CHCTL_CLOSEREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { + struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + + msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *) + msg_hdr; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= + XPC_CHCTL_CLOSEREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + + msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv + *)msg_hdr; + args = &part->remote_openclose_args[msg->ch_number]; + args->msg_size = msg->msg_size; + args->local_nentries = msg->local_nentries; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= + XPC_CHCTL_OPENREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { + struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + + msg = (struct 
xpc_activate_mq_msg_chctl_openreply_uv *) + msg_hdr; + args = &part->remote_openclose_args[msg->ch_number]; + args->remote_nentries = msg->remote_nentries; + args->local_nentries = msg->local_nentries; + args->local_msgqueue_pa = msg->local_notify_mq_gpa; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= + XPC_CHCTL_OPENREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + default: + dev_err(xpc_part, "received unknown activate_mq msg " + "type=%d from partition=%d\n", msg_hdr->type, + partid); + } + + if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && + part->remote_rp_ts_jiffies != 0) { + /* + * ??? Does what we do here need to be sensitive to + * ??? act_state or remote_act_state? + */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, + irq_flags); + wakeup_hb_checker++; + } + + gru_free_message(xpc_activate_mq_uv, msg_hdr); + } + + if (wakeup_hb_checker) + wake_up_interruptible(&xpc_activate_IRQ_wq); + + return IRQ_HANDLED; +} + +static enum xp_retval +xpc_send_activate_IRQ_uv(struct xpc_partition *part, void *msg, size_t msg_size, + int msg_type) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr = msg; + + DBUG_ON(msg_size > XPC_ACTIVATE_MSG_SIZE_UV); + + msg_hdr->type = msg_type; + msg_hdr->partid = XPC_PARTID(part); + msg_hdr->act_state = part->act_state; + msg_hdr->rp_ts_jiffies = xpc_rsvd_page->ts_jiffies; + + /* ??? Is holding a spin_lock (ch->lock) during this call a bad idea? */ + return xpc_send_gru_msg(part->sn.uv.remote_activate_mq_gpa, msg, + msg_size); +} + +static void +xpc_send_activate_IRQ_part_uv(struct xpc_partition *part, void *msg, + size_t msg_size, int msg_type) +{ + enum xp_retval ret; + + ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); + if (unlikely(ret != xpSuccess)) + XPC_DEACTIVATE_PARTITION(part, ret); +} + +static void +xpc_send_activate_IRQ_ch_uv(struct xpc_channel *ch, unsigned long *irq_flags, + void *msg, size_t msg_size, int msg_type) +{ + struct xpc_partition *part = &xpc_partitions[ch->partid]; + enum xp_retval ret; + + ret = xpc_send_activate_IRQ_uv(part, msg, msg_size, msg_type); + if (unlikely(ret != xpSuccess)) { + if (irq_flags != NULL) + spin_unlock_irqrestore(&ch->lock, *irq_flags); + + XPC_DEACTIVATE_PARTITION(part, ret); + + if (irq_flags != NULL) + spin_lock_irqsave(&ch->lock, *irq_flags); + } +} + +static void +xpc_send_local_activate_IRQ_uv(struct xpc_partition *part, int act_state_req) +{ + unsigned long irq_flags; + struct xpc_partition_uv *part_uv = &part->sn.uv; + /* * !!! Make our side think that the remote partition sent an activate - * !!! message our way. Also do what the activate IRQ handler would + * !!! message our way by doing what the activate IRQ handler would * !!! do had one really been sent.
*/ + + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = act_state_req; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); } static enum xp_retval -xpc_rsvd_page_init_uv(struct xpc_rsvd_page *rp) +xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa, + size_t *len) { - /* !!! need to have established xpc_activate_mq earlier */ - rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq); - return xpSuccess; + /* !!! call the UV version of sn_partition_reserved_page_pa() */ + return xpUnsupported; +} + +static int +xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp) +{ + rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv); + return 0; +} + +static void +xpc_send_heartbeat_uv(int msg_type) +{ + short partid; + struct xpc_partition *part; + struct xpc_activate_mq_msg_heartbeat_req_uv msg; + + /* + * !!! On uv we're broadcasting a heartbeat message every 5 seconds. + * !!! Whereas on sn2 we're bte_copy'ng the heartbeat info every 20 + * !!! seconds. This is an increase in numalink traffic. + * ??? Is this good? + */ + + msg.heartbeat = atomic64_inc_return(&xpc_heartbeat_uv); + + partid = find_first_bit(xpc_heartbeating_to_mask_uv, + XP_MAX_NPARTITIONS_UV); + + while (partid < XP_MAX_NPARTITIONS_UV) { + part = &xpc_partitions[partid]; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + msg_type); + + partid = find_next_bit(xpc_heartbeating_to_mask_uv, + XP_MAX_NPARTITIONS_UV, partid + 1); + } } static void xpc_increment_heartbeat_uv(void) { - /* !!! send heartbeat msg to xpc_heartbeating_to_mask partids */ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV); +} + +static void +xpc_offline_heartbeat_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); +} + +static void +xpc_online_heartbeat_uv(void) +{ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV); } static void xpc_heartbeat_init_uv(void) { + atomic64_set(&xpc_heartbeat_uv, 0); bitmap_zero(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV); xpc_heartbeating_to_mask = &xpc_heartbeating_to_mask_uv[0]; } @@ -56,48 +543,94 @@ xpc_heartbeat_init_uv(void) static void xpc_heartbeat_exit_uv(void) { - /* !!! send heartbeat_offline msg to xpc_heartbeating_to_mask partids */ + xpc_send_heartbeat_uv(XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV); +} + +static enum xp_retval +xpc_get_remote_heartbeat_uv(struct xpc_partition *part) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + enum xp_retval ret = xpNoHeartbeat; + + if (part_uv->remote_act_state != XPC_P_AS_INACTIVE && + part_uv->remote_act_state != XPC_P_AS_DEACTIVATING) { + + if (part_uv->heartbeat != part->last_heartbeat || + (part_uv->flags & XPC_P_HEARTBEAT_OFFLINE_UV)) { + + part->last_heartbeat = part_uv->heartbeat; + ret = xpSuccess; + } + } + return ret; } static void xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp, - unsigned long remote_rp_pa, int nasid) + unsigned long remote_rp_gpa, int nasid) { short partid = remote_rp->SAL_partid; struct xpc_partition *part = &xpc_partitions[partid]; + struct xpc_activate_mq_msg_activate_req_uv msg; -/* - * !!! Setup part structure with the bits of info we can glean from the rp: - * !!! part->remote_rp_pa = remote_rp_pa; - * !!! part->sn.uv.activate_mq_gpa = remote_rp->sn.activate_mq_gpa; - */ + part->remote_rp_pa = remote_rp_gpa; /* !!! 
_pa here is really _gpa */ + part->remote_rp_ts_jiffies = remote_rp->ts_jiffies; + part->sn.uv.remote_activate_mq_gpa = remote_rp->sn.activate_mq_gpa; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? + */ + if (part->sn.uv.remote_act_state == XPC_P_AS_INACTIVE) { + msg.rp_gpa = uv_gpa(xpc_rsvd_page); + msg.activate_mq_gpa = xpc_rsvd_page->sn.activate_mq_gpa; + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV); + } - xpc_send_local_activate_IRQ_uv(part); + if (part->act_state == XPC_P_AS_INACTIVE) + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); } static void xpc_request_partition_reactivation_uv(struct xpc_partition *part) { - xpc_send_local_activate_IRQ_uv(part); + xpc_send_local_activate_IRQ_uv(part, XPC_P_ASR_ACTIVATE_UV); +} + +static void +xpc_request_partition_deactivation_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_deactivate_req_uv msg; + + /* + * ??? Is it a good idea to make this conditional on what is + * ??? potentially stale state information? + */ + if (part->sn.uv.remote_act_state != XPC_P_AS_DEACTIVATING && + part->sn.uv.remote_act_state != XPC_P_AS_INACTIVE) { + + msg.reason = part->reason; + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV); + } } /* - * Setup the infrastructure necessary to support XPartition Communication - * between the specified remote partition and the local one. + * Setup the channel structures that are uv specific. */ static enum xp_retval -xpc_setup_infrastructure_uv(struct xpc_partition *part) +xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) { /* !!! this function needs fleshing out */ return xpUnsupported; } /* - * Teardown the infrastructure necessary to support XPartition Communication - * between the specified remote partition and the local one. + * Teardown the channel structures that are uv specific. */ static void -xpc_teardown_infrastructure_uv(struct xpc_partition *part) +xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part) { /* !!! this function needs fleshing out */ return; @@ -106,15 +639,163 @@ xpc_teardown_infrastructure_uv(struct xpc_partition *part) static enum xp_retval xpc_make_first_contact_uv(struct xpc_partition *part) { - /* !!! this function needs fleshing out */ - return xpUnsupported; + struct xpc_activate_mq_msg_uv msg; + + /* + * We send a sync msg to get the remote partition's remote_act_state + * updated to our current act_state which at this point should + * be XPC_P_AS_ACTIVATING. + */ + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV); + + while (part->sn.uv.remote_act_state != XPC_P_AS_ACTIVATING) { + + dev_dbg(xpc_part, "waiting to make first contact with " + "partition %d\n", XPC_PARTID(part)); + + /* wait a 1/4 of a second or so */ + (void)msleep_interruptible(250); + + if (part->act_state == XPC_P_AS_DEACTIVATING) + return part->reason; + } + + return xpSuccess; } static u64 xpc_get_chctl_all_flags_uv(struct xpc_partition *part) { + unsigned long irq_flags; + union xpc_channel_ctl_flags chctl; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + chctl = part->chctl; + if (chctl.all_flags != 0) + part->chctl.all_flags = 0; + + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + return chctl.all_flags; +} + +static enum xp_retval +xpc_setup_msg_structures_uv(struct xpc_channel *ch) +{ + /* !!! 
this function needs fleshing out */ + return xpUnsupported; +} + +static void +xpc_teardown_msg_structures_uv(struct xpc_channel *ch) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + ch_uv->remote_notify_mq_gpa = 0; + /* !!! this function needs fleshing out */ - return 0UL; +} + +static void +xpc_send_chctl_closerequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closerequest_uv msg; + + msg.ch_number = ch->number; + msg.reason = ch->reason; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV); +} + +static void +xpc_send_chctl_closereply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_closereply_uv msg; + + msg.ch_number = ch->number; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV); +} + +static void +xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openrequest_uv msg; + + msg.ch_number = ch->number; + msg.msg_size = ch->msg_size; + msg.local_nentries = ch->local_nentries; + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV); +} + +static void +xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) +{ + struct xpc_activate_mq_msg_chctl_openreply_uv msg; + + msg.ch_number = ch->number; + msg.local_nentries = ch->local_nentries; + msg.remote_nentries = ch->remote_nentries; + msg.local_notify_mq_gpa = uv_gpa(xpc_notify_mq_uv); + xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); +} + +static void +xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, + unsigned long msgqueue_pa) +{ + ch->sn.uv.remote_notify_mq_gpa = msgqueue_pa; +} + +static void +xpc_indicate_partition_engaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV); +} + +static void +xpc_indicate_partition_disengaged_uv(struct xpc_partition *part) +{ + struct xpc_activate_mq_msg_uv msg; + + xpc_send_activate_IRQ_part_uv(part, &msg, sizeof(msg), + XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV); +} + +static void +xpc_assume_partition_disengaged_uv(short partid) +{ + struct xpc_partition_uv *part_uv = &xpc_partitions[partid].sn.uv; + unsigned long irq_flags; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); +} + +static int +xpc_partition_engaged_uv(short partid) +{ + return (xpc_partitions[partid].sn.uv.flags & XPC_P_ENGAGED_UV) != 0; +} + +static int +xpc_any_partition_engaged_uv(void) +{ + struct xpc_partition_uv *part_uv; + short partid; + + for (partid = 0; partid < XP_MAX_NPARTITIONS_UV; partid++) { + part_uv = &xpc_partitions[partid].sn.uv; + if ((part_uv->flags & XPC_P_ENGAGED_UV) != 0) + return 1; + } + return 0; } static struct xpc_msg * @@ -124,24 +805,64 @@ xpc_get_deliverable_msg_uv(struct xpc_channel *ch) return NULL; } -void +int xpc_init_uv(void) { - xpc_rsvd_page_init = xpc_rsvd_page_init_uv; + xpc_setup_partitions_sn = xpc_setup_partitions_sn_uv; + xpc_process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv; + xpc_get_partition_rsvd_page_pa = xpc_get_partition_rsvd_page_pa_uv; + xpc_setup_rsvd_page_sn = xpc_setup_rsvd_page_sn_uv; xpc_increment_heartbeat = xpc_increment_heartbeat_uv; + 
xpc_offline_heartbeat = xpc_offline_heartbeat_uv; + xpc_online_heartbeat = xpc_online_heartbeat_uv; xpc_heartbeat_init = xpc_heartbeat_init_uv; xpc_heartbeat_exit = xpc_heartbeat_exit_uv; + xpc_get_remote_heartbeat = xpc_get_remote_heartbeat_uv; + xpc_request_partition_activation = xpc_request_partition_activation_uv; xpc_request_partition_reactivation = xpc_request_partition_reactivation_uv; - xpc_setup_infrastructure = xpc_setup_infrastructure_uv; - xpc_teardown_infrastructure = xpc_teardown_infrastructure_uv; + xpc_request_partition_deactivation = + xpc_request_partition_deactivation_uv; + + xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv; + xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv; + xpc_make_first_contact = xpc_make_first_contact_uv; + xpc_get_chctl_all_flags = xpc_get_chctl_all_flags_uv; + xpc_send_chctl_closerequest = xpc_send_chctl_closerequest_uv; + xpc_send_chctl_closereply = xpc_send_chctl_closereply_uv; + xpc_send_chctl_openrequest = xpc_send_chctl_openrequest_uv; + xpc_send_chctl_openreply = xpc_send_chctl_openreply_uv; + + xpc_save_remote_msgqueue_pa = xpc_save_remote_msgqueue_pa_uv; + + xpc_setup_msg_structures = xpc_setup_msg_structures_uv; + xpc_teardown_msg_structures = xpc_teardown_msg_structures_uv; + + xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_uv; + xpc_indicate_partition_disengaged = + xpc_indicate_partition_disengaged_uv; + xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_uv; + xpc_partition_engaged = xpc_partition_engaged_uv; + xpc_any_partition_engaged = xpc_any_partition_engaged_uv; + xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv; + + /* ??? The cpuid argument's value is 0, is that what we want? */ + /* !!! The irq argument's value isn't correct. */ + xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0, + xpc_handle_activate_IRQ_uv); + if (xpc_activate_mq_uv == NULL) + return -ENOMEM; + + return 0; } void xpc_exit_uv(void) { + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); } -- cgit v1.2.3 From bd3e64c1759e4930315ebf022611468ee9621486 Mon Sep 17 00:00:00 2001 From: Dean Nelson Date: Tue, 29 Jul 2008 22:34:19 -0700 Subject: sgi-xp: setup the notify GRU message queue Setup the notify GRU message queue that is used for sending user messages on UV systems. 
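As a rough sketch of what travels over this queue, the fragment below fills in the per-message header added by this patch (struct xpc_notify_mq_msg_uv, defined further down); build_notify_msg() itself and the exact meaning given to hdr.size are assumptions, not code from the patch:

#include <linux/string.h>

/* hypothetical helper; the message layout comes from this patch */
static void
build_notify_msg(struct xpc_notify_mq_msg_uv *msg, short partid, u8 ch_number,
		 unsigned int slot_number, const void *payload,
		 size_t payload_size)
{
	msg->hdr.partid = partid;		/* sender's partition id */
	msg->hdr.ch_number = ch_number;		/* channel the msg is for */
	msg->hdr.msg_slot_number = slot_number;	/* slot it fills remotely */
	msg->hdr.size = XPC_MSG_HDR_MAX_SIZE + payload_size;	/* assumed */
	memcpy(&msg->payload, payload, payload_size);
	/* as sent, the whole message must be 1 or 2 GRU_CACHE_LINE_BYTES */
}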
Signed-off-by: Dean Nelson Cc: Jack Steiner Cc: "Luck, Tony" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/misc/sgi-xp/xp.h | 45 +- drivers/misc/sgi-xp/xp_main.c | 7 +- drivers/misc/sgi-xp/xpc.h | 140 ++++-- drivers/misc/sgi-xp/xpc_channel.c | 63 ++- drivers/misc/sgi-xp/xpc_main.c | 21 +- drivers/misc/sgi-xp/xpc_sn2.c | 178 +++---- drivers/misc/sgi-xp/xpc_uv.c | 951 ++++++++++++++++++++++++++++++-------- drivers/misc/sgi-xp/xpnet.c | 11 +- 8 files changed, 1032 insertions(+), 384 deletions(-) (limited to 'drivers/misc/sgi-xp/xp.h') diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index 9ac5758f4d08..859a5281c61b 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h @@ -87,39 +87,18 @@ #endif /* - * The format of an XPC message is as follows: - * - * +-------+--------------------------------+ - * | flags |////////////////////////////////| - * +-------+--------------------------------+ - * | message # | - * +----------------------------------------+ - * | payload (user-defined message) | - * | | - * : - * | | - * +----------------------------------------+ - * - * The size of the payload is defined by the user via xpc_connect(). A user- - * defined message resides in the payload area. - * - * The size of a message entry (within a message queue) must be a cacheline - * sized multiple in order to facilitate the BTE transfer of messages from one - * message queue to another. A macro, XPC_MSG_SIZE(), is provided for the user + * The macro XPC_MSG_SIZE() is provided for the user * that wants to fit as many msg entries as possible in a given memory size * (e.g. a memory page). */ -struct xpc_msg { - u8 flags; /* FOR XPC INTERNAL USE ONLY */ - u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ - s64 number; /* FOR XPC INTERNAL USE ONLY */ - - u64 payload; /* user defined portion of message */ -}; +#define XPC_MSG_MAX_SIZE 128 +#define XPC_MSG_HDR_MAX_SIZE 16 +#define XPC_MSG_PAYLOAD_MAX_SIZE (XPC_MSG_MAX_SIZE - XPC_MSG_HDR_MAX_SIZE) -#define XPC_MSG_PAYLOAD_OFFSET (u64) (&((struct xpc_msg *)0)->payload) #define XPC_MSG_SIZE(_payload_size) \ - L1_CACHE_ALIGN(XPC_MSG_PAYLOAD_OFFSET + (_payload_size)) + ALIGN(XPC_MSG_HDR_MAX_SIZE + (_payload_size), \ + is_uv() ? 64 : 128) + /* * Define the return values and values passed to user's callout functions. @@ -210,7 +189,10 @@ enum xp_retval { xpGruCopyError, /* 58: gru_copy_gru() returned error */ xpGruSendMqError, /* 59: gru send message queue related error */ - xpUnknownReason /* 60: unknown reason - must be last in enum */ + xpBadChannelNumber, /* 60: invalid channel number */ + xpBadMsgType, /* 61: invalid message type */ + + xpUnknownReason /* 62: unknown reason - must be last in enum */ }; /* @@ -261,6 +243,9 @@ typedef void (*xpc_channel_func) (enum xp_retval reason, short partid, * calling xpc_received(). * * All other reason codes indicate failure. + * + * NOTE: The user defined function must be callable by an interrupt handler + * and thus cannot block.
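Given the NOTE above, a compliant notify function only records the outcome and signals a waiter; in this sketch everything except the xpc_notify_func signature and the xp_retval reason codes is hypothetical:

#include <linux/completion.h>

struct my_request {			/* hypothetical per-message cookie */
	enum xp_retval status;
	struct completion done;
};

/* may be called from an interrupt handler, so it must never block */
static void
my_send_notify(enum xp_retval reason, short partid, int ch_number, void *key)
{
	struct my_request *req = key;	/* the key given at send time */

	req->status = reason;		/* e.g. xpMsgDelivered on success */
	complete(&req->done);		/* safe in interrupt context */
	/* no mutexes, no msleep(), no GFP_KERNEL allocations here */
}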
*/ typedef void (*xpc_notify_func) (enum xp_retval reason, short partid, int ch_number, void *key); @@ -284,7 +269,7 @@ struct xpc_registration { xpc_channel_func func; /* function to call */ void *key; /* pointer to user's key */ u16 nentries; /* #of msg entries in local msg queue */ - u16 msg_size; /* message queue's message size */ + u16 entry_size; /* message queue's message entry size */ u32 assigned_limit; /* limit on #of assigned kthreads */ u32 idle_limit; /* limit on #of idle kthreads */ } ____cacheline_aligned; diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index f86ad3af26b7..66a1d19e08ad 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c @@ -154,6 +154,9 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, DBUG_ON(func == NULL); DBUG_ON(assigned_limit == 0 || idle_limit > assigned_limit); + if (XPC_MSG_SIZE(payload_size) > XPC_MSG_MAX_SIZE) + return xpPayloadTooBig; + registration = &xpc_registrations[ch_number]; if (mutex_lock_interruptible(®istration->mutex) != 0) @@ -166,7 +169,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, } /* register the channel for connection */ - registration->msg_size = XPC_MSG_SIZE(payload_size); + registration->entry_size = XPC_MSG_SIZE(payload_size); registration->nentries = nentries; registration->assigned_limit = assigned_limit; registration->idle_limit = idle_limit; @@ -220,7 +223,7 @@ xpc_disconnect(int ch_number) registration->func = NULL; registration->key = NULL; registration->nentries = 0; - registration->msg_size = 0; + registration->entry_size = 0; registration->assigned_limit = 0; registration->idle_limit = 0; diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h index 4c26181defff..619208d61862 100644 --- a/drivers/misc/sgi-xp/xpc.h +++ b/drivers/misc/sgi-xp/xpc.h @@ -181,8 +181,8 @@ struct xpc_vars_part_sn2 { xpc_nasid_mask_nlongs)) /* - * The activate_mq is used to send/receive messages that affect XPC's heartbeat, - * partition active state, and channel state. This is UV only. + * The activate_mq is used to send/receive GRU messages that affect XPC's + * heartbeat, partition active state, and channel state. This is UV only. 
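Every typed activate_mq message embeds the common header defined just below as its first member, so a receiver can inspect the type before casting to the full message; dispatch_example() is illustrative, not from the patch:

static void
dispatch_example(struct xpc_activate_mq_msghdr_uv *msg_hdr)
{
	if (msg_hdr->type == XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV) {
		struct xpc_activate_mq_msg_heartbeat_req_uv *msg;

		/* valid because the header is the message's first member */
		msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *)msg_hdr;
		/* ... consume msg->heartbeat ... */
	}
}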
*/ struct xpc_activate_mq_msghdr_uv { short partid; /* sender's partid */ @@ -209,45 +209,45 @@ struct xpc_activate_mq_msghdr_uv { #define XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV 11 struct xpc_activate_mq_msg_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; }; struct xpc_activate_mq_msg_heartbeat_req_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; u64 heartbeat; }; struct xpc_activate_mq_msg_activate_req_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; unsigned long rp_gpa; unsigned long activate_mq_gpa; }; struct xpc_activate_mq_msg_deactivate_req_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; enum xp_retval reason; }; struct xpc_activate_mq_msg_chctl_closerequest_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; short ch_number; enum xp_retval reason; }; struct xpc_activate_mq_msg_chctl_closereply_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; short ch_number; }; struct xpc_activate_mq_msg_chctl_openrequest_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; short ch_number; - short msg_size; /* size of notify_mq's messages */ + short entry_size; /* size of notify_mq's GRU messages */ short local_nentries; /* ??? Is this needed? What is? */ }; struct xpc_activate_mq_msg_chctl_openreply_uv { - struct xpc_activate_mq_msghdr_uv header; + struct xpc_activate_mq_msghdr_uv hdr; short ch_number; short remote_nentries; /* ??? Is this needed? What is? */ short local_nentries; /* ??? Is this needed? What is? */ @@ -284,7 +284,7 @@ struct xpc_gp_sn2 { */ struct xpc_openclose_args { u16 reason; /* reason why channel is closing */ - u16 msg_size; /* sizeof each message entry */ + u16 entry_size; /* sizeof each message entry */ u16 remote_nentries; /* #of message entries in remote msg queue */ u16 local_nentries; /* #of message entries in local msg queue */ unsigned long local_msgqueue_pa; /* phys addr of local message queue */ @@ -294,22 +294,79 @@ struct xpc_openclose_args { L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * \ XPC_MAX_NCHANNELS) -/* struct xpc_msg flags */ -#define XPC_M_DONE 0x01 /* msg has been received/consumed */ -#define XPC_M_READY 0x02 /* msg is ready to be sent */ -#define XPC_M_INTERRUPT 0x04 /* send interrupt when msg consumed */ +/* + * Structures to define a fifo singly-linked list. + */ + +struct xpc_fifo_entry_uv { + struct xpc_fifo_entry_uv *next; +}; + +struct xpc_fifo_head_uv { + struct xpc_fifo_entry_uv *first; + struct xpc_fifo_entry_uv *last; + spinlock_t lock; + int n_entries; +}; + +/* + * Define a sn2 styled message. + * + * A user-defined message resides in the payload area. The max size of the + * payload is defined by the user via xpc_connect(). + * + * The size of a message entry (within a message queue) must be a 128-byte + * cacheline sized multiple in order to facilitate the BTE transfer of messages + * from one message queue to another. 
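Concretely, with XPC_MSG_HDR_MAX_SIZE at 16 bytes and XPC_MSG_SIZE() rounding up to 64 bytes on uv and 128 bytes on sn2, entry sizes work out as in this sketch; ALIGN_UP() and msg_entry_size() are illustrative stand-ins for the kernel's ALIGN() and the real macro:

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static unsigned int msg_entry_size(unsigned int payload_size, int on_uv)
{
	unsigned int hdr = 16;			/* XPC_MSG_HDR_MAX_SIZE */
	unsigned int align = on_uv ? 64 : 128;	/* GRU vs BTE cacheline */

	return ALIGN_UP(hdr + payload_size, align);
}

/*
 * msg_entry_size(112, 0) == msg_entry_size(112, 1) == 128; 112 bytes is
 * the largest payload xpc_connect() accepts, since XPC_MSG_SIZE() may
 * not exceed XPC_MSG_MAX_SIZE (128) without returning xpPayloadTooBig.
 * msg_entry_size(120, 1) == 192 would already exceed that cap.
 */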
+ */ +struct xpc_msg_sn2 { + u8 flags; /* FOR XPC INTERNAL USE ONLY */ + u8 reserved[7]; /* FOR XPC INTERNAL USE ONLY */ + s64 number; /* FOR XPC INTERNAL USE ONLY */ + + u64 payload; /* user defined portion of message */ +}; + +/* struct xpc_msg_sn2 flags */ + +#define XPC_M_SN2_DONE 0x01 /* msg has been received/consumed */ +#define XPC_M_SN2_READY 0x02 /* msg is ready to be sent */ +#define XPC_M_SN2_INTERRUPT 0x04 /* send interrupt when msg consumed */ + +/* + * The format of a uv XPC notify_mq GRU message is as follows: + * + * A user-defined message resides in the payload area. The max size of the + * payload is defined by the user via xpc_connect(). + * + * The size of a message (payload and header) sent via the GRU must be either 1 + * or 2 GRU_CACHE_LINE_BYTES in length. + */ -#define XPC_MSG_ADDRESS(_payload) \ - ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET)) +struct xpc_notify_mq_msghdr_uv { + union { + unsigned int gru_msg_hdr; /* FOR GRU INTERNAL USE ONLY */ + struct xpc_fifo_entry_uv next; /* FOR XPC INTERNAL USE ONLY */ + } u; + short partid; /* FOR XPC INTERNAL USE ONLY */ + u8 ch_number; /* FOR XPC INTERNAL USE ONLY */ + u8 size; /* FOR XPC INTERNAL USE ONLY */ + unsigned int msg_slot_number; /* FOR XPC INTERNAL USE ONLY */ +}; + +struct xpc_notify_mq_msg_uv { + struct xpc_notify_mq_msghdr_uv hdr; + unsigned long payload; +}; /* - * Defines notify entry. + * Define sn2's notify entry. * * This is used to notify a message's sender that their message was received * and consumed by the intended recipient. */ -struct xpc_notify { +struct xpc_notify_sn2 { u8 type; /* type of notification */ /* the following two fields are only used if type == XPC_N_CALL */ @@ -317,9 +374,20 @@ struct xpc_notify { void *key; /* pointer to user's key */ }; -/* struct xpc_notify type of notification */ +/* struct xpc_notify_sn2 type of notification */ -#define XPC_N_CALL 0x01 /* notify function provided by user */ +#define XPC_N_CALL 0x01 /* notify function provided by user */ + +/* + * Define uv's version of the notify entry. It additionally is used to allocate + * a msg slot on the remote partition into which is copied a sent message. + */ +struct xpc_send_msg_slot_uv { + struct xpc_fifo_entry_uv next; + unsigned int msg_slot_number; + xpc_notify_func func; /* user's notify function */ + void *key; /* pointer to user's key */ +}; /* * Define the structure that manages all the stuff required by a channel. 
In @@ -409,14 +477,14 @@ struct xpc_channel_sn2 { /* opening or closing of channel */ void *local_msgqueue_base; /* base address of kmalloc'd space */ - struct xpc_msg *local_msgqueue; /* local message queue */ + struct xpc_msg_sn2 *local_msgqueue; /* local message queue */ void *remote_msgqueue_base; /* base address of kmalloc'd space */ - struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */ - /* local message queue */ + struct xpc_msg_sn2 *remote_msgqueue; /* cached copy of remote */ + /* partition's local message queue */ unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */ /* local message queue */ - struct xpc_notify *notify_queue; /* notify queue for messages sent */ + struct xpc_notify_sn2 *notify_queue;/* notify queue for messages sent */ /* various flavors of local and remote Get/Put values */ @@ -432,6 +500,12 @@ struct xpc_channel_sn2 { struct xpc_channel_uv { unsigned long remote_notify_mq_gpa; /* gru phys address of remote */ /* partition's notify mq */ + + struct xpc_send_msg_slot_uv *send_msg_slots; + struct xpc_notify_mq_msg_uv *recv_msg_slots; + + struct xpc_fifo_head_uv msg_slot_free_list; + struct xpc_fifo_head_uv recv_msg_list; /* deliverable payloads */ }; struct xpc_channel { @@ -444,7 +518,7 @@ struct xpc_channel { u16 number; /* channel # */ - u16 msg_size; /* sizeof each msg entry */ + u16 entry_size; /* sizeof each msg entry */ u16 local_nentries; /* #of msg entries in local msg queue */ u16 remote_nentries; /* #of msg entries in remote msg queue */ @@ -733,8 +807,8 @@ extern enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *); extern void (*xpc_teardown_msg_structures) (struct xpc_channel *); extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *); extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int); -extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *); -extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *); +extern int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *); +extern void *(*xpc_get_deliverable_payload) (struct xpc_channel *); extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, unsigned long, int); extern void (*xpc_request_partition_reactivation) (struct xpc_partition *); @@ -762,9 +836,9 @@ extern void (*xpc_send_chctl_openreply) (struct xpc_channel *, unsigned long *); extern void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *, unsigned long); -extern enum xp_retval (*xpc_send_msg) (struct xpc_channel *, u32, void *, u16, - u8, xpc_notify_func, void *); -extern void (*xpc_received_msg) (struct xpc_channel *, struct xpc_msg *); +extern enum xp_retval (*xpc_send_payload) (struct xpc_channel *, u32, void *, + u16, u8, xpc_notify_func, void *); +extern void (*xpc_received_payload) (struct xpc_channel *, void *); /* found in xpc_sn2.c */ extern int xpc_init_sn2(void); @@ -805,7 +879,7 @@ extern enum xp_retval xpc_initiate_send_notify(short, int, u32, void *, u16, extern void xpc_initiate_received(short, int, void *); extern void xpc_process_sent_chctl_flags(struct xpc_partition *); extern void xpc_connected_callout(struct xpc_channel *); -extern void xpc_deliver_msg(struct xpc_channel *); +extern void xpc_deliver_payload(struct xpc_channel *); extern void xpc_disconnect_channel(const int, struct xpc_channel *, enum xp_retval, unsigned long *); extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c index 
73df9fb5ee66..9cd2ebe2a3b6 100644 --- a/drivers/misc/sgi-xp/xpc_channel.c +++ b/drivers/misc/sgi-xp/xpc_channel.c @@ -139,7 +139,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) ch->func = NULL; ch->key = NULL; - ch->msg_size = 0; + ch->entry_size = 0; ch->local_nentries = 0; ch->remote_nentries = 0; ch->kthreads_assigned_limit = 0; @@ -315,9 +315,9 @@ again: if (chctl_flags & XPC_CHCTL_OPENREQUEST) { - dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (msg_size=%d, " + dev_dbg(xpc_chan, "XPC_CHCTL_OPENREQUEST (entry_size=%d, " "local_nentries=%d) received from partid=%d, " - "channel=%d\n", args->msg_size, args->local_nentries, + "channel=%d\n", args->entry_size, args->local_nentries, ch->partid, ch->number); if (part->act_state == XPC_P_AS_DEACTIVATING || @@ -338,10 +338,10 @@ again: /* * The meaningful OPENREQUEST connection state fields are: - * msg_size = size of channel's messages in bytes + * entry_size = size of channel's messages in bytes * local_nentries = remote partition's local_nentries */ - if (args->msg_size == 0 || args->local_nentries == 0) { + if (args->entry_size == 0 || args->local_nentries == 0) { /* assume OPENREQUEST was delayed by mistake */ spin_unlock_irqrestore(&ch->lock, irq_flags); return; @@ -351,14 +351,14 @@ again: ch->remote_nentries = args->local_nentries; if (ch->flags & XPC_C_OPENREQUEST) { - if (args->msg_size != ch->msg_size) { + if (args->entry_size != ch->entry_size) { XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, &irq_flags); spin_unlock_irqrestore(&ch->lock, irq_flags); return; } } else { - ch->msg_size = args->msg_size; + ch->entry_size = args->entry_size; XPC_SET_REASON(ch, 0, 0); ch->flags &= ~XPC_C_DISCONNECTED; @@ -473,7 +473,7 @@ xpc_connect_channel(struct xpc_channel *ch) ch->local_nentries = registration->nentries; if (ch->flags & XPC_C_ROPENREQUEST) { - if (registration->msg_size != ch->msg_size) { + if (registration->entry_size != ch->entry_size) { /* the local and remote sides aren't the same */ /* @@ -492,7 +492,7 @@ xpc_connect_channel(struct xpc_channel *ch) return xpUnequalMsgSizes; } } else { - ch->msg_size = registration->msg_size; + ch->entry_size = registration->entry_size; XPC_SET_REASON(ch, 0, 0); ch->flags &= ~XPC_C_DISCONNECTED; @@ -859,8 +859,8 @@ xpc_initiate_send(short partid, int ch_number, u32 flags, void *payload, DBUG_ON(payload == NULL); if (xpc_part_ref(part)) { - ret = xpc_send_msg(&part->channels[ch_number], flags, payload, - payload_size, 0, NULL, NULL); + ret = xpc_send_payload(&part->channels[ch_number], flags, + payload, payload_size, 0, NULL, NULL); xpc_part_deref(part); } @@ -911,23 +911,24 @@ xpc_initiate_send_notify(short partid, int ch_number, u32 flags, void *payload, DBUG_ON(func == NULL); if (xpc_part_ref(part)) { - ret = xpc_send_msg(&part->channels[ch_number], flags, payload, - payload_size, XPC_N_CALL, func, key); + ret = xpc_send_payload(&part->channels[ch_number], flags, + payload, payload_size, XPC_N_CALL, func, + key); xpc_part_deref(part); } return ret; } /* - * Deliver a message to its intended recipient. + * Deliver a message's payload to its intended recipient. 
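For context, a receiving user's channel function might look like the sketch below; my_channel_func() and consume_payload() are hypothetical, while the xpMsgReceived callout and the xpc_received() acknowledgement are the interfaces this code implements:

static void consume_payload(void *data);	/* hypothetical consumer */

/* hypothetical callback registered via xpc_connect() */
static void
my_channel_func(enum xp_retval reason, short partid, int ch_number,
		void *data, void *key)
{
	if (reason == xpMsgReceived) {
		consume_payload(data);	/* data is the delivered payload */

		/* tell XPC the payload area may now be reclaimed */
		xpc_received(partid, ch_number, data);
	}
}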
*/ void -xpc_deliver_msg(struct xpc_channel *ch) +xpc_deliver_payload(struct xpc_channel *ch) { - struct xpc_msg *msg; + void *payload; - msg = xpc_get_deliverable_msg(ch); - if (msg != NULL) { + payload = xpc_get_deliverable_payload(ch); + if (payload != NULL) { /* * This ref is taken to protect the payload itself from being @@ -939,18 +940,16 @@ xpc_deliver_msg(struct xpc_channel *ch) atomic_inc(&ch->kthreads_active); if (ch->func != NULL) { - dev_dbg(xpc_chan, "ch->func() called, msg=0x%p, " - "msg_number=%ld, partid=%d, channel=%d\n", - msg, (signed long)msg->number, ch->partid, + dev_dbg(xpc_chan, "ch->func() called, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, ch->number); /* deliver the message to its intended recipient */ - ch->func(xpMsgReceived, ch->partid, ch->number, - &msg->payload, ch->key); + ch->func(xpMsgReceived, ch->partid, ch->number, payload, + ch->key); - dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " - "msg_number=%ld, partid=%d, channel=%d\n", - msg, (signed long)msg->number, ch->partid, + dev_dbg(xpc_chan, "ch->func() returned, payload=0x%p " + "partid=%d channel=%d\n", payload, ch->partid, ch->number); } @@ -959,14 +958,11 @@ xpc_deliver_msg(struct xpc_channel *ch) } /* - * Acknowledge receipt of a delivered message. - * - * If a message has XPC_M_INTERRUPT set, send an interrupt to the partition - * that sent the message. + * Acknowledge receipt of a delivered message's payload. * * This function, although called by users, does not call xpc_part_ref() to * ensure that the partition infrastructure is in place. It relies on the - * fact that we called xpc_msgqueue_ref() in xpc_deliver_msg(). + * fact that we called xpc_msgqueue_ref() in xpc_deliver_payload(). * * Arguments: * @@ -980,14 +976,13 @@ xpc_initiate_received(short partid, int ch_number, void *payload) { struct xpc_partition *part = &xpc_partitions[partid]; struct xpc_channel *ch; - struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); DBUG_ON(partid < 0 || partid >= xp_max_npartitions); DBUG_ON(ch_number < 0 || ch_number >= part->nchannels); ch = &part->channels[ch_number]; - xpc_received_msg(ch, msg); + xpc_received_payload(ch, payload); - /* the call to xpc_msgqueue_ref() was done by xpc_deliver_msg() */ + /* the call to xpc_msgqueue_ref() was done by xpc_deliver_payload() */ xpc_msgqueue_deref(ch); } diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 13ec47928994..46325fc84811 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -188,8 +188,8 @@ u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part); enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch); void (*xpc_teardown_msg_structures) (struct xpc_channel *ch); void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number); -int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch); -struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch); +int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch); +void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch); void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp, unsigned long remote_rp_pa, @@ -220,10 +220,11 @@ void (*xpc_send_chctl_openreply) (struct xpc_channel *ch, void (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch, unsigned long msgqueue_pa); -enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags, - void *payload, u16 payload_size, u8 notify_type, - xpc_notify_func func, void *key); -void (*xpc_received_msg) (struct 
xpc_channel *ch, struct xpc_msg *msg); +enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags, + void *payload, u16 payload_size, + u8 notify_type, xpc_notify_func func, + void *key); +void (*xpc_received_payload) (struct xpc_channel *ch, void *payload); /* * Timer function to enforce the timelimit on the partition disengage. @@ -714,9 +715,9 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) do { /* deliver messages to their intended recipients */ - while (xpc_n_of_deliverable_msgs(ch) > 0 && + while (xpc_n_of_deliverable_payloads(ch) > 0 && !(ch->flags & XPC_C_DISCONNECTING)) { - xpc_deliver_msg(ch); + xpc_deliver_payload(ch); } if (atomic_inc_return(&ch->kthreads_idle) > @@ -730,7 +731,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch) "wait_event_interruptible_exclusive()\n"); (void)wait_event_interruptible_exclusive(ch->idle_wq, - (xpc_n_of_deliverable_msgs(ch) > 0 || + (xpc_n_of_deliverable_payloads(ch) > 0 || (ch->flags & XPC_C_DISCONNECTING))); atomic_dec(&ch->kthreads_idle); @@ -775,7 +776,7 @@ xpc_kthread_start(void *args) * additional kthreads to help deliver them. We only * need one less than total #of messages to deliver. */ - n_needed = xpc_n_of_deliverable_msgs(ch) - 1; + n_needed = xpc_n_of_deliverable_payloads(ch) - 1; if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) xpc_activate_kthreads(ch, n_needed); diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 8b4b0653d9e9..b4882ccf6344 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -408,7 +408,7 @@ xpc_send_chctl_openrequest_sn2(struct xpc_channel *ch, unsigned long *irq_flags) { struct xpc_openclose_args *args = ch->sn.sn2.local_openclose_args; - args->msg_size = ch->msg_size; + args->entry_size = ch->entry_size; args->local_nentries = ch->local_nentries; XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREQUEST, irq_flags); } @@ -1531,14 +1531,14 @@ xpc_allocate_local_msgqueue_sn2(struct xpc_channel *ch) for (nentries = ch->local_nentries; nentries > 0; nentries--) { - nbytes = nentries * ch->msg_size; + nbytes = nentries * ch->entry_size; ch_sn2->local_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2->local_msgqueue_base); if (ch_sn2->local_msgqueue == NULL) continue; - nbytes = nentries * sizeof(struct xpc_notify); + nbytes = nentries * sizeof(struct xpc_notify_sn2); ch_sn2->notify_queue = kzalloc(nbytes, GFP_KERNEL); if (ch_sn2->notify_queue == NULL) { kfree(ch_sn2->local_msgqueue_base); @@ -1578,7 +1578,7 @@ xpc_allocate_remote_msgqueue_sn2(struct xpc_channel *ch) for (nentries = ch->remote_nentries; nentries > 0; nentries--) { - nbytes = nentries * ch->msg_size; + nbytes = nentries * ch->entry_size; ch_sn2->remote_msgqueue = xpc_kzalloc_cacheline_aligned(nbytes, GFP_KERNEL, &ch_sn2-> remote_msgqueue_base); @@ -1632,9 +1632,6 @@ xpc_setup_msg_structures_sn2(struct xpc_channel *ch) /* * Free up message queues and other stuff that were allocated for the specified * channel. - * - * Note: ch->reason and ch->reason_line are left set for debugging purposes, - * they're cleared when XPC_C_DISCONNECTED is cleared. 
*/ static void xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) @@ -1674,7 +1671,7 @@ xpc_teardown_msg_structures_sn2(struct xpc_channel *ch) static void xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) { - struct xpc_notify *notify; + struct xpc_notify_sn2 *notify; u8 notify_type; s64 get = ch->sn.sn2.w_remote_GP.get - 1; @@ -1699,17 +1696,16 @@ xpc_notify_senders_sn2(struct xpc_channel *ch, enum xp_retval reason, s64 put) atomic_dec(&ch->n_to_notify); if (notify->func != NULL) { - dev_dbg(xpc_chan, "notify->func() called, notify=0x%p, " - "msg_number=%ld, partid=%d, channel=%d\n", + dev_dbg(xpc_chan, "notify->func() called, notify=0x%p " + "msg_number=%ld partid=%d channel=%d\n", (void *)notify, get, ch->partid, ch->number); notify->func(reason, ch->partid, ch->number, notify->key); - dev_dbg(xpc_chan, "notify->func() returned, " - "notify=0x%p, msg_number=%ld, partid=%d, " - "channel=%d\n", (void *)notify, get, - ch->partid, ch->number); + dev_dbg(xpc_chan, "notify->func() returned, notify=0x%p" + " msg_number=%ld partid=%d channel=%d\n", + (void *)notify, get, ch->partid, ch->number); } } } @@ -1727,14 +1723,14 @@ static inline void xpc_clear_local_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; s64 get; get = ch_sn2->w_remote_GP.get; do { - msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + - (get % ch->local_nentries) * - ch->msg_size); + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + + (get % ch->local_nentries) * + ch->entry_size); msg->flags = 0; } while (++get < ch_sn2->remote_GP.get); } @@ -1746,24 +1742,30 @@ static inline void xpc_clear_remote_msgqueue_flags_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; s64 put; put = ch_sn2->w_remote_GP.put; do { - msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + - (put % ch->remote_nentries) * - ch->msg_size); + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + + (put % ch->remote_nentries) * + ch->entry_size); msg->flags = 0; } while (++put < ch_sn2->remote_GP.put); } +static int +xpc_n_of_deliverable_payloads_sn2(struct xpc_channel *ch) +{ + return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; +} + static void xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) { struct xpc_channel *ch = &part->channels[ch_number]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - int nmsgs_sent; + int npayloads_sent; ch_sn2->remote_GP = part->sn.sn2.remote_GPs[ch_number]; @@ -1835,7 +1837,7 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) if (ch_sn2->w_remote_GP.put != ch_sn2->remote_GP.put) { /* * Clear msg->flags in previously received messages, so that - * they're ready for xpc_get_deliverable_msg(). + * they're ready for xpc_get_deliverable_payload_sn2(). 
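 *
 * Slot addressing is the same modular arithmetic used throughout this
 * file, e.g. for the remote queue:
 *
 *	msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue +
 *				     (put % ch->remote_nentries) *
 *				     ch->entry_size);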
*/ xpc_clear_remote_msgqueue_flags_sn2(ch); @@ -1845,27 +1847,27 @@ xpc_process_msg_chctl_flags_sn2(struct xpc_partition *part, int ch_number) "channel=%d\n", ch_sn2->w_remote_GP.put, ch->partid, ch->number); - nmsgs_sent = ch_sn2->w_remote_GP.put - ch_sn2->w_local_GP.get; - if (nmsgs_sent > 0) { + npayloads_sent = xpc_n_of_deliverable_payloads_sn2(ch); + if (npayloads_sent > 0) { dev_dbg(xpc_chan, "msgs waiting to be copied and " "delivered=%d, partid=%d, channel=%d\n", - nmsgs_sent, ch->partid, ch->number); + npayloads_sent, ch->partid, ch->number); if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) - xpc_activate_kthreads(ch, nmsgs_sent); + xpc_activate_kthreads(ch, npayloads_sent); } } xpc_msgqueue_deref(ch); } -static struct xpc_msg * +static struct xpc_msg_sn2 * xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) { struct xpc_partition *part = &xpc_partitions[ch->partid]; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; unsigned long remote_msg_pa; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; u32 msg_index; u32 nmsgs; u64 msg_offset; @@ -1889,13 +1891,13 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) nmsgs = ch->remote_nentries - msg_index; } - msg_offset = msg_index * ch->msg_size; - msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + + msg_offset = msg_index * ch->entry_size; + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); remote_msg_pa = ch_sn2->remote_msgqueue_pa + msg_offset; ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa, - nmsgs * ch->msg_size); + nmsgs * ch->entry_size); if (ret != xpSuccess) { dev_dbg(xpc_chan, "failed to pull %d msgs starting with" @@ -1915,26 +1917,21 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get) mutex_unlock(&ch_sn2->msg_to_pull_mutex); /* return the message we were looking for */ - msg_offset = (get % ch->remote_nentries) * ch->msg_size; - msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + msg_offset); + msg_offset = (get % ch->remote_nentries) * ch->entry_size; + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->remote_msgqueue + msg_offset); return msg; } -static int -xpc_n_of_deliverable_msgs_sn2(struct xpc_channel *ch) -{ - return ch->sn.sn2.w_remote_GP.put - ch->sn.sn2.w_local_GP.get; -} - /* - * Get a message to be delivered. + * Get the next deliverable message's payload. 
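 *
 * The pointer returned is the payload area embedded in the pulled
 * struct xpc_msg_sn2 (payload = &msg->payload); the ack side later
 * recovers the enclosing message in xpc_received_payload_sn2() via
 *
 *	msg = container_of(payload, struct xpc_msg_sn2, payload);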
*/ -static struct xpc_msg * -xpc_get_deliverable_msg_sn2(struct xpc_channel *ch) +static void * +xpc_get_deliverable_payload_sn2(struct xpc_channel *ch) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg = NULL; + struct xpc_msg_sn2 *msg; + void *payload = NULL; s64 get; do { @@ -1965,15 +1962,16 @@ xpc_get_deliverable_msg_sn2(struct xpc_channel *ch) msg = xpc_pull_remote_msg_sn2(ch, get); DBUG_ON(msg != NULL && msg->number != get); - DBUG_ON(msg != NULL && (msg->flags & XPC_M_DONE)); - DBUG_ON(msg != NULL && !(msg->flags & XPC_M_READY)); + DBUG_ON(msg != NULL && (msg->flags & XPC_M_SN2_DONE)); + DBUG_ON(msg != NULL && !(msg->flags & XPC_M_SN2_READY)); + payload = &msg->payload; break; } } while (1); - return msg; + return payload; } /* @@ -1985,7 +1983,7 @@ static void xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; s64 put = initial_put + 1; int send_msgrequest = 0; @@ -1995,11 +1993,12 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) if (put == ch_sn2->w_local_GP.put) break; - msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + - (put % ch->local_nentries) * - ch->msg_size); + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> + local_msgqueue + (put % + ch->local_nentries) * + ch->entry_size); - if (!(msg->flags & XPC_M_READY)) + if (!(msg->flags & XPC_M_SN2_READY)) break; put++; @@ -2026,7 +2025,7 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) /* * We need to ensure that the message referenced by - * local_GP->put is not XPC_M_READY or that local_GP->put + * local_GP->put is not XPC_M_SN2_READY or that local_GP->put * equals w_local_GP.put, so we'll go have a look. */ initial_put = put; @@ -2042,10 +2041,10 @@ xpc_send_msgs_sn2(struct xpc_channel *ch, s64 initial_put) */ static enum xp_retval xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, - struct xpc_msg **address_of_msg) + struct xpc_msg_sn2 **address_of_msg) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; enum xp_retval ret; s64 put; @@ -2097,8 +2096,9 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, } /* get the message's address and initialize it */ - msg = (struct xpc_msg *)((u64)ch_sn2->local_msgqueue + - (put % ch->local_nentries) * ch->msg_size); + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2->local_msgqueue + + (put % ch->local_nentries) * + ch->entry_size); DBUG_ON(msg->flags != 0); msg->number = put; @@ -2117,20 +2117,20 @@ xpc_allocate_msg_sn2(struct xpc_channel *ch, u32 flags, * partition the message is being sent to. 
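 *
 * Condensed, the send path below is roughly:
 *
 *	if (XPC_MSG_SIZE(payload_size) > ch->entry_size)
 *		return xpPayloadTooBig;
 *	ret = xpc_allocate_msg_sn2(ch, flags, &msg);	/* reserve a slot */
 *	memcpy(&msg->payload, payload, payload_size);
 *	msg->flags |= XPC_M_SN2_READY;
 *	xpc_send_msgs_sn2(ch, put);	/* advance Put, notify the peer */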
*/ static enum xp_retval -xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, - u16 payload_size, u8 notify_type, xpc_notify_func func, - void *key) +xpc_send_payload_sn2(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) { enum xp_retval ret = xpSuccess; struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg = msg; - struct xpc_notify *notify = notify; + struct xpc_msg_sn2 *msg = msg; + struct xpc_notify_sn2 *notify = notify; s64 msg_number; s64 put; DBUG_ON(notify_type == XPC_N_CALL && func == NULL); - if (XPC_MSG_SIZE(payload_size) > ch->msg_size) + if (XPC_MSG_SIZE(payload_size) > ch->entry_size) return xpPayloadTooBig; xpc_msgqueue_ref(ch); @@ -2155,7 +2155,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, * Tell the remote side to send an ACK interrupt when the * message has been delivered. */ - msg->flags |= XPC_M_INTERRUPT; + msg->flags |= XPC_M_SN2_INTERRUPT; atomic_inc(&ch->n_to_notify); @@ -2185,7 +2185,7 @@ xpc_send_msg_sn2(struct xpc_channel *ch, u32 flags, void *payload, memcpy(&msg->payload, payload, payload_size); - msg->flags |= XPC_M_READY; + msg->flags |= XPC_M_SN2_READY; /* * The preceding store of msg->flags must occur before the following @@ -2208,12 +2208,15 @@ out_1: * Now we actually acknowledge the messages that have been delivered and ack'd * by advancing the cached remote message queue's Get value and if requested * send a chctl msgrequest to the message sender's partition. + * + * If a message has XPC_M_SN2_INTERRUPT set, send an interrupt to the partition + * that sent the message. */ static void xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) { struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2; - struct xpc_msg *msg; + struct xpc_msg_sn2 *msg; s64 get = initial_get + 1; int send_msgrequest = 0; @@ -2223,11 +2226,12 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) if (get == ch_sn2->w_local_GP.get) break; - msg = (struct xpc_msg *)((u64)ch_sn2->remote_msgqueue + - (get % ch->remote_nentries) * - ch->msg_size); + msg = (struct xpc_msg_sn2 *)((u64)ch_sn2-> + remote_msgqueue + (get % + ch->remote_nentries) * + ch->entry_size); - if (!(msg->flags & XPC_M_DONE)) + if (!(msg->flags & XPC_M_SN2_DONE)) break; msg_flags |= msg->flags; @@ -2251,11 +2255,11 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) dev_dbg(xpc_chan, "local_GP->get changed to %ld, partid=%d, " "channel=%d\n", get, ch->partid, ch->number); - send_msgrequest = (msg_flags & XPC_M_INTERRUPT); + send_msgrequest = (msg_flags & XPC_M_SN2_INTERRUPT); /* * We need to ensure that the message referenced by - * local_GP->get is not XPC_M_DONE or that local_GP->get + * local_GP->get is not XPC_M_SN2_DONE or that local_GP->get * equals w_local_GP.get, so we'll go have a look. 
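 *
 * (More messages may have been marked XPC_M_SN2_DONE while local_GP->get
 * was being advanced, so reread and loop rather than leave them
 * unacknowledged.)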
*/ initial_get = get; @@ -2266,19 +2270,23 @@ xpc_acknowledge_msgs_sn2(struct xpc_channel *ch, s64 initial_get, u8 msg_flags) } static void -xpc_received_msg_sn2(struct xpc_channel *ch, struct xpc_msg *msg) +xpc_received_payload_sn2(struct xpc_channel *ch, void *payload) { + struct xpc_msg_sn2 *msg; + s64 msg_number; s64 get; - s64 msg_number = msg->number; + + msg = container_of(payload, struct xpc_msg_sn2, payload); + msg_number = msg->number; dev_dbg(xpc_chan, "msg=0x%p, msg_number=%ld, partid=%d, channel=%d\n", (void *)msg, msg_number, ch->partid, ch->number); - DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->msg_size) != + DBUG_ON((((u64)msg - (u64)ch->remote_msgqueue) / ch->entry_size) != msg_number % ch->remote_nentries); - DBUG_ON(msg->flags & XPC_M_DONE); + DBUG_ON(msg->flags & XPC_M_SN2_DONE); - msg->flags |= XPC_M_DONE; + msg->flags |= XPC_M_SN2_DONE; /* * The preceding store of msg->flags must occur before the following @@ -2337,8 +2345,8 @@ xpc_init_sn2(void) xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_sn2; xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_sn2; - xpc_n_of_deliverable_msgs = xpc_n_of_deliverable_msgs_sn2; - xpc_get_deliverable_msg = xpc_get_deliverable_msg_sn2; + xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_sn2; + xpc_get_deliverable_payload = xpc_get_deliverable_payload_sn2; xpc_indicate_partition_engaged = xpc_indicate_partition_engaged_sn2; xpc_indicate_partition_disengaged = @@ -2347,8 +2355,14 @@ xpc_init_sn2(void) xpc_any_partition_engaged = xpc_any_partition_engaged_sn2; xpc_assume_partition_disengaged = xpc_assume_partition_disengaged_sn2; - xpc_send_msg = xpc_send_msg_sn2; - xpc_received_msg = xpc_received_msg_sn2; + xpc_send_payload = xpc_send_payload_sn2; + xpc_received_payload = xpc_received_payload_sn2; + + if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is " + "larger than %d\n", XPC_MSG_HDR_MAX_SIZE); + return -E2BIG; + } buf_size = max(XPC_RP_VARS_SIZE, XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES_SN2); diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 689cb5c68ccf..1ac694c01623 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -66,8 +66,11 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq, mq_order = get_order(mq_size); page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, mq_order); - if (page == NULL) + if (page == NULL) { + dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " + "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); return NULL; + } mq = page_address(page); ret = gru_create_message_queue(mq, mq_size); @@ -193,202 +196,226 @@ xpc_process_activate_IRQ_rcvd_uv(void) } -static irqreturn_t -xpc_handle_activate_IRQ_uv(int irq, void *dev_id) +static void +xpc_handle_activate_mq_msg_uv(struct xpc_partition *part, + struct xpc_activate_mq_msghdr_uv *msg_hdr, + int *wakeup_hb_checker) { unsigned long irq_flags; - struct xpc_activate_mq_msghdr_uv *msg_hdr; - short partid; - struct xpc_partition *part; - struct xpc_partition_uv *part_uv; + struct xpc_partition_uv *part_uv = &part->sn.uv; struct xpc_openclose_args *args; - int wakeup_hb_checker = 0; - while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { + part_uv->remote_act_state = msg_hdr->act_state; - partid = msg_hdr->partid; - if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { - dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() 
invalid" - "partid=0x%x passed in message\n", partid); - gru_free_message(xpc_activate_mq_uv, msg_hdr); - continue; - } - part = &xpc_partitions[partid]; - part_uv = &part->sn.uv; + switch (msg_hdr->type) { + case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: + /* syncing of remote_act_state was just done above */ + break; - part_uv->remote_act_state = msg_hdr->act_state; + case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - switch (msg_hdr->type) { - case XPC_ACTIVATE_MQ_MSG_SYNC_ACT_STATE_UV: - /* syncing of remote_act_state was just done above */ - break; + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + break; + } + case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { + struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + + msg = container_of(msg_hdr, + struct xpc_activate_mq_msg_heartbeat_req_uv, + hdr); + part_uv->heartbeat = msg->heartbeat; + + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + } + case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_activate_req_uv *msg; - case XPC_ACTIVATE_MQ_MSG_INC_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; + /* + * ??? Do we deal here with ts_jiffies being different + * ??? if act_state != XPC_P_AS_INACTIVE instead of + * ??? below? + */ + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_activate_req_uv, hdr); - msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) - msg_hdr; - part_uv->heartbeat = msg->heartbeat; - break; - } - case XPC_ACTIVATE_MQ_MSG_OFFLINE_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - - msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) - msg_hdr; - part_uv->heartbeat = msg->heartbeat; - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags |= XPC_P_HEARTBEAT_OFFLINE_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; - } - case XPC_ACTIVATE_MQ_MSG_ONLINE_HEARTBEAT_UV: { - struct xpc_activate_mq_msg_heartbeat_req_uv *msg; - - msg = (struct xpc_activate_mq_msg_heartbeat_req_uv *) - msg_hdr; - part_uv->heartbeat = msg->heartbeat; - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags &= ~XPC_P_HEARTBEAT_OFFLINE_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; - } - case XPC_ACTIVATE_MQ_MSG_ACTIVATE_REQ_UV: { - struct xpc_activate_mq_msg_activate_req_uv *msg; - - /* - * ??? Do we deal here with ts_jiffies being different - * ??? if act_state != XPC_P_AS_INACTIVE instead of - * ??? below? - */ - msg = (struct xpc_activate_mq_msg_activate_req_uv *) - msg_hdr; - spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - if (part_uv->act_state_req == 0) - xpc_activate_IRQ_rcvd++; - part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; - part->remote_rp_pa = msg->rp_gpa; /* !!! 
_pa is _gpa */ - part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; - part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; - spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - wakeup_hb_checker++; - break; - } - case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { - struct xpc_activate_mq_msg_deactivate_req_uv *msg; - - msg = (struct xpc_activate_mq_msg_deactivate_req_uv *) - msg_hdr; - spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - if (part_uv->act_state_req == 0) - xpc_activate_IRQ_rcvd++; - part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; - part_uv->reason = msg->reason; - spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - wakeup_hb_checker++; - break; - } - case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { - struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_ACTIVATE_UV; + part->remote_rp_pa = msg->rp_gpa; /* !!! _pa is _gpa */ + part->remote_rp_ts_jiffies = msg_hdr->rp_ts_jiffies; + part_uv->remote_activate_mq_gpa = msg->activate_mq_gpa; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); - msg = (struct xpc_activate_mq_msg_chctl_closerequest_uv - *)msg_hdr; - args = &part->remote_openclose_args[msg->ch_number]; - args->reason = msg->reason; + (*wakeup_hb_checker)++; + break; + } + case XPC_ACTIVATE_MQ_MSG_DEACTIVATE_REQ_UV: { + struct xpc_activate_mq_msg_deactivate_req_uv *msg; - spin_lock_irqsave(&part->chctl_lock, irq_flags); - part->chctl.flags[msg->ch_number] |= - XPC_CHCTL_CLOSEREQUEST; - spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_deactivate_req_uv, hdr); - xpc_wakeup_channel_mgr(part); - break; - } - case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { - struct xpc_activate_mq_msg_chctl_closereply_uv *msg; + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = msg->reason; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + (*wakeup_hb_checker)++; + return; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_closerequest_uv *msg; - msg = (struct xpc_activate_mq_msg_chctl_closereply_uv *) - msg_hdr; + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closerequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->reason = msg->reason; - spin_lock_irqsave(&part->chctl_lock, irq_flags); - part->chctl.flags[msg->ch_number] |= - XPC_CHCTL_CLOSEREPLY; - spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); - xpc_wakeup_channel_mgr(part); - break; - } - case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { - struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_CLOSEREPLY_UV: { + struct xpc_activate_mq_msg_chctl_closereply_uv *msg; - msg = (struct xpc_activate_mq_msg_chctl_openrequest_uv - *)msg_hdr; - args = &part->remote_openclose_args[msg->ch_number]; - args->msg_size = msg->msg_size; - args->local_nentries = msg->local_nentries; + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_closereply_uv, + hdr); - 
spin_lock_irqsave(&part->chctl_lock, irq_flags); - part->chctl.flags[msg->ch_number] |= - XPC_CHCTL_OPENREQUEST; - spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_CLOSEREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); - xpc_wakeup_channel_mgr(part); - break; - } - case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { - struct xpc_activate_mq_msg_chctl_openreply_uv *msg; - - msg = (struct xpc_activate_mq_msg_chctl_openreply_uv *) - msg_hdr; - args = &part->remote_openclose_args[msg->ch_number]; - args->remote_nentries = msg->remote_nentries; - args->local_nentries = msg->local_nentries; - args->local_msgqueue_pa = msg->local_notify_mq_gpa; - - spin_lock_irqsave(&part->chctl_lock, irq_flags); - part->chctl.flags[msg->ch_number] |= - XPC_CHCTL_OPENREPLY; - spin_unlock_irqrestore(&part->chctl_lock, irq_flags); - - xpc_wakeup_channel_mgr(part); - break; - } - case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags |= XPC_P_ENGAGED_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV: { + struct xpc_activate_mq_msg_chctl_openrequest_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openrequest_uv, + hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->entry_size = msg->entry_size; + args->local_nentries = msg->local_nentries; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV: { + struct xpc_activate_mq_msg_chctl_openreply_uv *msg; + + msg = container_of(msg_hdr, struct + xpc_activate_mq_msg_chctl_openreply_uv, hdr); + args = &part->remote_openclose_args[msg->ch_number]; + args->remote_nentries = msg->remote_nentries; + args->local_nentries = msg->local_nentries; + args->local_msgqueue_pa = msg->local_notify_mq_gpa; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[msg->ch_number] |= XPC_CHCTL_OPENREPLY; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); + break; + } + case XPC_ACTIVATE_MQ_MSG_MARK_ENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags |= XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: + spin_lock_irqsave(&part_uv->flags_lock, irq_flags); + part_uv->flags &= ~XPC_P_ENGAGED_UV; + spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); + break; + + default: + dev_err(xpc_part, "received unknown activate_mq msg type=%d " + "from partition=%d\n", msg_hdr->type, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadMsgType; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); - case XPC_ACTIVATE_MQ_MSG_MARK_DISENGAGED_UV: - spin_lock_irqsave(&part_uv->flags_lock, irq_flags); - part_uv->flags &= ~XPC_P_ENGAGED_UV; - spin_unlock_irqrestore(&part_uv->flags_lock, irq_flags); - break; + (*wakeup_hb_checker)++; + return; + } - default: - 
dev_err(xpc_part, "received unknown activate_mq msg " - "type=%d from partition=%d\n", msg_hdr->type, - partid); - } + if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && + part->remote_rp_ts_jiffies != 0) { + /* + * ??? Does what we do here need to be sensitive to + * ??? act_state or remote_act_state? + */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); - if (msg_hdr->rp_ts_jiffies != part->remote_rp_ts_jiffies && - part->remote_rp_ts_jiffies != 0) { - /* - * ??? Does what we do here need to be sensitive to - * ??? act_state or remote_act_state? - */ - spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - if (part_uv->act_state_req == 0) - xpc_activate_IRQ_rcvd++; - part_uv->act_state_req = XPC_P_ASR_REACTIVATE_UV; - spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, - irq_flags); - wakeup_hb_checker++; + (*wakeup_hb_checker)++; + } +} + +static irqreturn_t +xpc_handle_activate_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_activate_mq_msghdr_uv *msg_hdr; + short partid; + struct xpc_partition *part; + int wakeup_hb_checker = 0; + + while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) { + + partid = msg_hdr->partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_activate_IRQ_uv() " + "received invalid partid=0x%x in message\n", + partid); + } else { + part = &xpc_partitions[partid]; + if (xpc_part_ref(part)) { + xpc_handle_activate_mq_msg_uv(part, msg_hdr, + &wakeup_hb_checker); + xpc_part_deref(part); + } } gru_free_message(xpc_activate_mq_uv, msg_hdr); @@ -616,14 +643,82 @@ xpc_request_partition_deactivation_uv(struct xpc_partition *part) } } +static void +xpc_cancel_partition_deactivation_request_uv(struct xpc_partition *part) +{ + /* nothing needs to be done */ + return; +} + +static void +xpc_init_fifo_uv(struct xpc_fifo_head_uv *head) +{ + head->first = NULL; + head->last = NULL; + spin_lock_init(&head->lock); + head->n_entries = 0; +} + +static void * +xpc_get_fifo_entry_uv(struct xpc_fifo_head_uv *head) +{ + unsigned long irq_flags; + struct xpc_fifo_entry_uv *first; + + spin_lock_irqsave(&head->lock, irq_flags); + first = head->first; + if (head->first != NULL) { + head->first = first->next; + if (head->first == NULL) + head->last = NULL; + } + head->n_entries++; + spin_unlock_irqrestore(&head->lock, irq_flags); + first->next = NULL; + return first; +} + +static void +xpc_put_fifo_entry_uv(struct xpc_fifo_head_uv *head, + struct xpc_fifo_entry_uv *last) +{ + unsigned long irq_flags; + + last->next = NULL; + spin_lock_irqsave(&head->lock, irq_flags); + if (head->last != NULL) + head->last->next = last; + else + head->first = last; + head->last = last; + head->n_entries--; + BUG_ON(head->n_entries < 0); + spin_unlock_irqrestore(&head->lock, irq_flags); +} + +static int +xpc_n_of_fifo_entries_uv(struct xpc_fifo_head_uv *head) +{ + return head->n_entries; +} + /* * Setup the channel structures that are uv specific. */ static enum xp_retval xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) { - /* !!! 
this function needs fleshing out */ - return xpUnsupported; + struct xpc_channel_uv *ch_uv; + int ch_number; + + for (ch_number = 0; ch_number < part->nchannels; ch_number++) { + ch_uv = &part->channels[ch_number].sn.uv; + + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + xpc_init_fifo_uv(&ch_uv->recv_msg_list); + } + + return xpSuccess; } /* @@ -632,7 +727,7 @@ xpc_setup_ch_structures_sn_uv(struct xpc_partition *part) static void xpc_teardown_ch_structures_sn_uv(struct xpc_partition *part) { - /* !!! this function needs fleshing out */ + /* nothing needs to be done */ return; } @@ -679,21 +774,115 @@ xpc_get_chctl_all_flags_uv(struct xpc_partition *part) return chctl.all_flags; } +static enum xp_retval +xpc_allocate_send_msg_slot_uv(struct xpc_channel *ch) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + struct xpc_send_msg_slot_uv *msg_slot; + unsigned long irq_flags; + int nentries; + int entry; + size_t nbytes; + + for (nentries = ch->local_nentries; nentries > 0; nentries--) { + nbytes = nentries * sizeof(struct xpc_send_msg_slot_uv); + ch_uv->send_msg_slots = kzalloc(nbytes, GFP_KERNEL); + if (ch_uv->send_msg_slots == NULL) + continue; + + for (entry = 0; entry < nentries; entry++) { + msg_slot = &ch_uv->send_msg_slots[entry]; + + msg_slot->msg_slot_number = entry; + xpc_put_fifo_entry_uv(&ch_uv->msg_slot_free_list, + &msg_slot->next); + } + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->local_nentries) + ch->local_nentries = nentries; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpSuccess; + } + + return xpNoMemory; +} + +static enum xp_retval +xpc_allocate_recv_msg_slot_uv(struct xpc_channel *ch) +{ + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + struct xpc_notify_mq_msg_uv *msg_slot; + unsigned long irq_flags; + int nentries; + int entry; + size_t nbytes; + + for (nentries = ch->remote_nentries; nentries > 0; nentries--) { + nbytes = nentries * ch->entry_size; + ch_uv->recv_msg_slots = kzalloc(nbytes, GFP_KERNEL); + if (ch_uv->recv_msg_slots == NULL) + continue; + + for (entry = 0; entry < nentries; entry++) { + msg_slot = ch_uv->recv_msg_slots + entry * + ch->entry_size; + + msg_slot->hdr.msg_slot_number = entry; + } + + spin_lock_irqsave(&ch->lock, irq_flags); + if (nentries < ch->remote_nentries) + ch->remote_nentries = nentries; + spin_unlock_irqrestore(&ch->lock, irq_flags); + return xpSuccess; + } + + return xpNoMemory; +} + +/* + * Allocate msg_slots associated with the channel. + */ static enum xp_retval xpc_setup_msg_structures_uv(struct xpc_channel *ch) { - /* !!! this function needs fleshing out */ - return xpUnsupported; + static enum xp_retval ret; + struct xpc_channel_uv *ch_uv = &ch->sn.uv; + + DBUG_ON(ch->flags & XPC_C_SETUP); + + ret = xpc_allocate_send_msg_slot_uv(ch); + if (ret == xpSuccess) { + + ret = xpc_allocate_recv_msg_slot_uv(ch); + if (ret != xpSuccess) { + kfree(ch_uv->send_msg_slots); + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + } + } + return ret; } +/* + * Free up msg_slots and clear other stuff that were setup for the specified + * channel. + */ static void xpc_teardown_msg_structures_uv(struct xpc_channel *ch) { struct xpc_channel_uv *ch_uv = &ch->sn.uv; + DBUG_ON(!spin_is_locked(&ch->lock)); + ch_uv->remote_notify_mq_gpa = 0; - /* !!! 
this function needs fleshing out */ + if (ch->flags & XPC_C_SETUP) { + xpc_init_fifo_uv(&ch_uv->msg_slot_free_list); + kfree(ch_uv->send_msg_slots); + xpc_init_fifo_uv(&ch_uv->recv_msg_list); + kfree(ch_uv->recv_msg_slots); + } } static void @@ -723,7 +912,7 @@ xpc_send_chctl_openrequest_uv(struct xpc_channel *ch, unsigned long *irq_flags) struct xpc_activate_mq_msg_chctl_openrequest_uv msg; msg.ch_number = ch->number; - msg.msg_size = ch->msg_size; + msg.entry_size = ch->entry_size; msg.local_nentries = ch->local_nentries; xpc_send_activate_IRQ_ch_uv(ch, irq_flags, &msg, sizeof(msg), XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREQUEST_UV); @@ -742,6 +931,18 @@ xpc_send_chctl_openreply_uv(struct xpc_channel *ch, unsigned long *irq_flags) XPC_ACTIVATE_MQ_MSG_CHCTL_OPENREPLY_UV); } +static void +xpc_send_chctl_local_msgrequest_uv(struct xpc_partition *part, int ch_number) +{ + unsigned long irq_flags; + + spin_lock_irqsave(&part->chctl_lock, irq_flags); + part->chctl.flags[ch_number] |= XPC_CHCTL_MSGREQUEST; + spin_unlock_irqrestore(&part->chctl_lock, irq_flags); + + xpc_wakeup_channel_mgr(part); +} + static void xpc_save_remote_msgqueue_pa_uv(struct xpc_channel *ch, unsigned long msgqueue_pa) @@ -798,11 +999,358 @@ xpc_any_partition_engaged_uv(void) return 0; } -static struct xpc_msg * -xpc_get_deliverable_msg_uv(struct xpc_channel *ch) +static enum xp_retval +xpc_allocate_msg_slot_uv(struct xpc_channel *ch, u32 flags, + struct xpc_send_msg_slot_uv **address_of_msg_slot) +{ + enum xp_retval ret; + struct xpc_send_msg_slot_uv *msg_slot; + struct xpc_fifo_entry_uv *entry; + + while (1) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list); + if (entry != NULL) + break; + + if (flags & XPC_NOWAIT) + return xpNoWait; + + ret = xpc_allocate_msg_wait(ch); + if (ret != xpInterrupted && ret != xpTimeout) + return ret; + } + + msg_slot = container_of(entry, struct xpc_send_msg_slot_uv, next); + *address_of_msg_slot = msg_slot; + return xpSuccess; +} + +static void +xpc_free_msg_slot_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot) +{ + xpc_put_fifo_entry_uv(&ch->sn.uv.msg_slot_free_list, &msg_slot->next); + + /* wakeup anyone waiting for a free msg slot */ + if (atomic_read(&ch->n_on_msg_allocate_wq) > 0) + wake_up(&ch->msg_allocate_wq); +} + +static void +xpc_notify_sender_uv(struct xpc_channel *ch, + struct xpc_send_msg_slot_uv *msg_slot, + enum xp_retval reason) +{ + xpc_notify_func func = msg_slot->func; + + if (func != NULL && cmpxchg(&msg_slot->func, func, NULL) == func) { + + atomic_dec(&ch->n_to_notify); + + dev_dbg(xpc_chan, "msg_slot->func() called, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + + func(reason, ch->partid, ch->number, msg_slot->key); + + dev_dbg(xpc_chan, "msg_slot->func() returned, msg_slot=0x%p " + "msg_slot_number=%d partid=%d channel=%d\n", msg_slot, + msg_slot->msg_slot_number, ch->partid, ch->number); + } +} + +static void +xpc_handle_notify_mq_ack_uv(struct xpc_channel *ch, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_send_msg_slot_uv *msg_slot; + int entry = msg->hdr.msg_slot_number % ch->local_nentries; + + msg_slot = &ch->sn.uv.send_msg_slots[entry]; + + BUG_ON(msg_slot->msg_slot_number != msg->hdr.msg_slot_number); + msg_slot->msg_slot_number += ch->local_nentries; + + if (msg_slot->func != NULL) + xpc_notify_sender_uv(ch, msg_slot, xpMsgDelivered); + + xpc_free_msg_slot_uv(ch, msg_slot); +} + +static void +xpc_handle_notify_mq_msg_uv(struct xpc_partition 
*part, + struct xpc_notify_mq_msg_uv *msg) +{ + struct xpc_partition_uv *part_uv = &part->sn.uv; + struct xpc_channel *ch; + struct xpc_channel_uv *ch_uv; + struct xpc_notify_mq_msg_uv *msg_slot; + unsigned long irq_flags; + int ch_number = msg->hdr.ch_number; + + if (unlikely(ch_number >= part->nchannels)) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received invalid " + "channel number=0x%x in message from partid=%d\n", + ch_number, XPC_PARTID(part)); + + /* get hb checker to deactivate from the remote partition */ + spin_lock_irqsave(&xpc_activate_IRQ_rcvd_lock, irq_flags); + if (part_uv->act_state_req == 0) + xpc_activate_IRQ_rcvd++; + part_uv->act_state_req = XPC_P_ASR_DEACTIVATE_UV; + part_uv->reason = xpBadChannelNumber; + spin_unlock_irqrestore(&xpc_activate_IRQ_rcvd_lock, irq_flags); + + wake_up_interruptible(&xpc_activate_IRQ_wq); + return; + } + + ch = &part->channels[ch_number]; + xpc_msgqueue_ref(ch); + + if (!(ch->flags & XPC_C_CONNECTED)) { + xpc_msgqueue_deref(ch); + return; + } + + /* see if we're really dealing with an ACK for a previously sent msg */ + if (msg->hdr.size == 0) { + xpc_handle_notify_mq_ack_uv(ch, msg); + xpc_msgqueue_deref(ch); + return; + } + + /* we're dealing with a normal message sent via the notify_mq */ + ch_uv = &ch->sn.uv; + + msg_slot = (struct xpc_notify_mq_msg_uv *)((u64)ch_uv->recv_msg_slots + + (msg->hdr.msg_slot_number % ch->remote_nentries) * + ch->entry_size); + + BUG_ON(msg->hdr.msg_slot_number != msg_slot->hdr.msg_slot_number); + BUG_ON(msg_slot->hdr.size != 0); + + memcpy(msg_slot, msg, msg->hdr.size); + + xpc_put_fifo_entry_uv(&ch_uv->recv_msg_list, &msg_slot->hdr.u.next); + + if (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) { + /* + * If there is an existing idle kthread get it to deliver + * the payload, otherwise we'll have to get the channel mgr + * for this partition to create a kthread to do the delivery. 
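 *
 * The latter is a purely local request: xpc_send_chctl_local_msgrequest_uv()
 * sets XPC_CHCTL_MSGREQUEST in part->chctl.flags[] and wakes the channel
 * manager, which then calls xpc_process_msg_chctl_flags_uv() to activate
 * kthreads for the deliverable payloads.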
+ */ + if (atomic_read(&ch->kthreads_idle) > 0) + wake_up_nr(&ch->idle_wq, 1); + else + xpc_send_chctl_local_msgrequest_uv(part, ch->number); + } + xpc_msgqueue_deref(ch); +} + +static irqreturn_t +xpc_handle_notify_IRQ_uv(int irq, void *dev_id) +{ + struct xpc_notify_mq_msg_uv *msg; + short partid; + struct xpc_partition *part; + + while ((msg = gru_get_next_message(xpc_notify_mq_uv)) != NULL) { + + partid = msg->hdr.partid; + if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) { + dev_err(xpc_part, "xpc_handle_notify_IRQ_uv() received " + "invalid partid=0x%x in message\n", partid); + } else { + part = &xpc_partitions[partid]; + + if (xpc_part_ref(part)) { + xpc_handle_notify_mq_msg_uv(part, msg); + xpc_part_deref(part); + } + } + + gru_free_message(xpc_notify_mq_uv, msg); + } + + return IRQ_HANDLED; +} + +static int +xpc_n_of_deliverable_payloads_uv(struct xpc_channel *ch) +{ + return xpc_n_of_fifo_entries_uv(&ch->sn.uv.recv_msg_list); +} + +static void +xpc_process_msg_chctl_flags_uv(struct xpc_partition *part, int ch_number) +{ + struct xpc_channel *ch = &part->channels[ch_number]; + int ndeliverable_payloads; + + xpc_msgqueue_ref(ch); + + ndeliverable_payloads = xpc_n_of_deliverable_payloads_uv(ch); + + if (ndeliverable_payloads > 0 && + (ch->flags & XPC_C_CONNECTED) && + (ch->flags & XPC_C_CONNECTEDCALLOUT_MADE)) { + + xpc_activate_kthreads(ch, ndeliverable_payloads); + } + + xpc_msgqueue_deref(ch); +} + +static enum xp_retval +xpc_send_payload_uv(struct xpc_channel *ch, u32 flags, void *payload, + u16 payload_size, u8 notify_type, xpc_notify_func func, + void *key) +{ + enum xp_retval ret = xpSuccess; + struct xpc_send_msg_slot_uv *msg_slot = NULL; + struct xpc_notify_mq_msg_uv *msg; + u8 msg_buffer[XPC_NOTIFY_MSG_SIZE_UV]; + size_t msg_size; + + DBUG_ON(notify_type != XPC_N_CALL); + + msg_size = sizeof(struct xpc_notify_mq_msghdr_uv) + payload_size; + if (msg_size > ch->entry_size) + return xpPayloadTooBig; + + xpc_msgqueue_ref(ch); + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_1; + } + if (!(ch->flags & XPC_C_CONNECTED)) { + ret = xpNotConnected; + goto out_1; + } + + ret = xpc_allocate_msg_slot_uv(ch, flags, &msg_slot); + if (ret != xpSuccess) + goto out_1; + + if (func != NULL) { + atomic_inc(&ch->n_to_notify); + + msg_slot->key = key; + wmb(); /* a non-NULL func must hit memory after the key */ + msg_slot->func = func; + + if (ch->flags & XPC_C_DISCONNECTING) { + ret = ch->reason; + goto out_2; + } + } + + msg = (struct xpc_notify_mq_msg_uv *)&msg_buffer; + msg->hdr.partid = xp_partition_id; + msg->hdr.ch_number = ch->number; + msg->hdr.size = msg_size; + msg->hdr.msg_slot_number = msg_slot->msg_slot_number; + memcpy(&msg->payload, payload, payload_size); + + ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, msg_size); + if (ret == xpSuccess) + goto out_1; + + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); +out_2: + if (func != NULL) { + /* + * Try to NULL the msg_slot's func field. If we fail, then + * xpc_notify_senders_of_disconnect_uv() beat us to it, in which + * case we need to pretend we succeeded to send the message + * since the user will get a callout for the disconnect error + * by xpc_notify_senders_of_disconnect_uv(), and to also get an + * error returned here would confuse them. Additionally, since + * in this case the channel is being disconnected we don't need + * to put the msg_slot back on the free list.
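 *
 * The cmpxchg() below is what arbitrates that race: whichever path
 * NULLs msg_slot->func first owns the one and only notification
 * callout for this message.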
+ */ + if (cmpxchg(&msg_slot->func, func, NULL) != func) { + ret = xpSuccess; + goto out_1; + } + + msg_slot->key = NULL; + atomic_dec(&ch->n_to_notify); + } + xpc_free_msg_slot_uv(ch, msg_slot); +out_1: + xpc_msgqueue_deref(ch); + return ret; +} + +/* + * Tell the callers of xpc_send_notify() that the status of their payloads + * is unknown because the channel is now disconnecting. + * + * We don't worry about putting these msg_slots on the free list since the + * msg_slots themselves are about to be kfree'd. + */ +static void +xpc_notify_senders_of_disconnect_uv(struct xpc_channel *ch) +{ + struct xpc_send_msg_slot_uv *msg_slot; + int entry; + + DBUG_ON(!(ch->flags & XPC_C_DISCONNECTING)); + + for (entry = 0; entry < ch->local_nentries; entry++) { + + if (atomic_read(&ch->n_to_notify) == 0) + break; + + msg_slot = &ch->sn.uv.send_msg_slots[entry]; + if (msg_slot->func != NULL) + xpc_notify_sender_uv(ch, msg_slot, ch->reason); + } +} + +/* + * Get the next deliverable message's payload. + */ +static void * +xpc_get_deliverable_payload_uv(struct xpc_channel *ch) +{ + struct xpc_fifo_entry_uv *entry; + struct xpc_notify_mq_msg_uv *msg; + void *payload = NULL; + + if (!(ch->flags & XPC_C_DISCONNECTING)) { + entry = xpc_get_fifo_entry_uv(&ch->sn.uv.recv_msg_list); + if (entry != NULL) { + msg = container_of(entry, struct xpc_notify_mq_msg_uv, + hdr.u.next); + payload = &msg->payload; + } + } + return payload; +} + +static void +xpc_received_payload_uv(struct xpc_channel *ch, void *payload) { - /* !!! this function needs fleshing out */ - return NULL; + struct xpc_notify_mq_msg_uv *msg; + enum xp_retval ret; + + msg = container_of(payload, struct xpc_notify_mq_msg_uv, payload); + + /* return an ACK to the sender of this message */ + + msg->hdr.partid = xp_partition_id; + msg->hdr.size = 0; /* size of zero indicates this is an ACK */ + + ret = xpc_send_gru_msg(ch->sn.uv.remote_notify_mq_gpa, msg, + sizeof(struct xpc_notify_mq_msghdr_uv)); + if (ret != xpSuccess) + XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); + + msg->hdr.msg_slot_number += ch->remote_nentries; } int @@ -824,6 +1372,8 @@ xpc_init_uv(void) xpc_request_partition_reactivation_uv; xpc_request_partition_deactivation = xpc_request_partition_deactivation_uv; + xpc_cancel_partition_deactivation_request = + xpc_cancel_partition_deactivation_request_uv; xpc_setup_ch_structures_sn = xpc_setup_ch_structures_sn_uv; xpc_teardown_ch_structures_sn = xpc_teardown_ch_structures_sn_uv; @@ -848,7 +1398,18 @@ xpc_init_uv(void) xpc_partition_engaged = xpc_partition_engaged_uv; xpc_any_partition_engaged = xpc_any_partition_engaged_uv; - xpc_get_deliverable_msg = xpc_get_deliverable_msg_uv; + xpc_n_of_deliverable_payloads = xpc_n_of_deliverable_payloads_uv; + xpc_process_msg_chctl_flags = xpc_process_msg_chctl_flags_uv; + xpc_send_payload = xpc_send_payload_uv; + xpc_notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv; + xpc_get_deliverable_payload = xpc_get_deliverable_payload_uv; + xpc_received_payload = xpc_received_payload_uv; + + if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { + dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n", + XPC_MSG_HDR_MAX_SIZE); + return -E2BIG; + } /* ??? The cpuid argument's value is 0, is that what we want? */ /* !!! The irq argument's value isn't correct. */ @@ -857,12 +1418,26 @@ xpc_init_uv(void) if (xpc_activate_mq_uv == NULL) return -ENOMEM; + /* ??? The cpuid argument's value is 0, is that what we want? */ + /* !!! 
The irq argument's value isn't correct. */ + xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0, + xpc_handle_notify_IRQ_uv); + if (xpc_notify_mq_uv == NULL) { + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, + XPC_ACTIVATE_MQ_SIZE_UV, 0); + return -ENOMEM; + } + return 0; } void xpc_exit_uv(void) { + /* !!! The irq argument's value isn't correct. */ + xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0); + /* !!! The irq argument's value isn't correct. */ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0); } diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index 4f5d62230116..71513b3af708 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c @@ -57,11 +57,10 @@ struct xpnet_message { * * XPC expects each message to exist in an individual cacheline. */ -#define XPNET_MSG_SIZE (L1_CACHE_BYTES - XPC_MSG_PAYLOAD_OFFSET) +#define XPNET_MSG_SIZE XPC_MSG_PAYLOAD_MAX_SIZE #define XPNET_MSG_DATA_MAX \ - (XPNET_MSG_SIZE - (u64)(&((struct xpnet_message *)0)->data)) -#define XPNET_MSG_ALIGNED_SIZE (L1_CACHE_ALIGN(XPNET_MSG_SIZE)) -#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPNET_MSG_ALIGNED_SIZE) + (XPNET_MSG_SIZE - offsetof(struct xpnet_message, data)) +#define XPNET_MSG_NENTRIES (PAGE_SIZE / XPC_MSG_MAX_SIZE) #define XPNET_MAX_KTHREADS (XPNET_MSG_NENTRIES + 1) #define XPNET_MAX_IDLE_KTHREADS (XPNET_MSG_NENTRIES + 1) @@ -408,6 +407,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, { u8 msg_buffer[XPNET_MSG_SIZE]; struct xpnet_message *msg = (struct xpnet_message *)&msg_buffer; + u16 msg_size = sizeof(struct xpnet_message); enum xp_retval ret; msg->embedded_bytes = embedded_bytes; @@ -417,6 +417,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, &msg->data, skb->data, (size_t)embedded_bytes); skb_copy_from_linear_data(skb, &msg->data, (size_t)embedded_bytes); + msg_size += embedded_bytes - 1; } else { msg->version = XPNET_VERSION; } @@ -435,7 +436,7 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg, atomic_inc(&queued_msg->use_count); ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, XPC_NOWAIT, msg, - XPNET_MSG_SIZE, xpnet_send_completed, queued_msg); + msg_size, xpnet_send_completed, queued_msg); if (unlikely(ret != xpSuccess)) atomic_dec(&queued_msg->use_count); } -- cgit v1.2.3