Diffstat (limited to 'drivers'): 93 files changed, 7727 insertions, 1523 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile index 0a1b3435f920..7f1d40797e80 100644 --- a/drivers/acpi/acpica/Makefile +++ b/drivers/acpi/acpica/Makefile @@ -158,5 +158,6 @@ acpi-y += \ utresrc.o \ utstate.o \ utxface.o \ + utxfinit.o \ utxferror.o \ utxfmutex.o diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h index 5de4ec72766d..d902d31abc6c 100644 --- a/drivers/acpi/acpica/achware.h +++ b/drivers/acpi/acpica/achware.h @@ -110,8 +110,7 @@ acpi_status acpi_hw_write_port(acpi_io_address address, u32 value, u32 width); /* * hwgpe - GPE support */ -u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, - struct acpi_gpe_register_info *gpe_register_info); +u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info); acpi_status acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action); diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index cc80fe10e8ea..c816ee675094 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h @@ -707,15 +707,18 @@ union acpi_parse_value { u8 disasm_opcode; /* Subtype used for disassembly */\ char aml_op_name[16]) /* Op name (debug only) */ -#define ACPI_DASM_BUFFER 0x00 -#define ACPI_DASM_RESOURCE 0x01 -#define ACPI_DASM_STRING 0x02 -#define ACPI_DASM_UNICODE 0x03 -#define ACPI_DASM_EISAID 0x04 -#define ACPI_DASM_MATCHOP 0x05 -#define ACPI_DASM_LNOT_PREFIX 0x06 -#define ACPI_DASM_LNOT_SUFFIX 0x07 -#define ACPI_DASM_IGNORE 0x08 +/* Flags for disasm_flags field above */ + +#define ACPI_DASM_BUFFER 0x00 /* Buffer is a simple data buffer */ +#define ACPI_DASM_RESOURCE 0x01 /* Buffer is a Resource Descriptor */ +#define ACPI_DASM_STRING 0x02 /* Buffer is a ASCII string */ +#define ACPI_DASM_UNICODE 0x03 /* Buffer is a Unicode string */ +#define ACPI_DASM_PLD_METHOD 0x04 /* Buffer is a _PLD method bit-packed buffer */ +#define ACPI_DASM_EISAID 0x05 /* Integer is an EISAID */ +#define ACPI_DASM_MATCHOP 0x06 /* Parent opcode is a Match() operator */ +#define ACPI_DASM_LNOT_PREFIX 0x07 /* Start of a Lnot_equal (etc.) pair of opcodes */ +#define ACPI_DASM_LNOT_SUFFIX 0x08 /* End of a Lnot_equal (etc.) 
pair of opcodes */ +#define ACPI_DASM_IGNORE 0x09 /* Not used at this time */ /* * Generic operation (for example: If, While, Store) @@ -932,6 +935,7 @@ struct acpi_bit_register_info { #define ACPI_OSI_WIN_VISTA_SP1 0x09 #define ACPI_OSI_WIN_VISTA_SP2 0x0A #define ACPI_OSI_WIN_7 0x0B +#define ACPI_OSI_WIN_8 0x0C #define ACPI_ALWAYS_ILLEGAL 0x00 @@ -1024,6 +1028,7 @@ struct acpi_port_info { ****************************************************************************/ struct acpi_db_method_info { + acpi_handle method; acpi_handle main_thread_gate; acpi_handle thread_complete_gate; acpi_thread_id *threads; diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h index 832b6198652e..a7f68c47f517 100644 --- a/drivers/acpi/acpica/acmacros.h +++ b/drivers/acpi/acpica/acmacros.h @@ -277,10 +277,33 @@ /* Bitfields within ACPI registers */ -#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask) -#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) +#define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) \ + ((val << pos) & mask) -#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask)) +#define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) \ + reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) + +#define ACPI_INSERT_BITS(target, mask, source) \ + target = ((target & (~(mask))) | (source & mask)) + +/* Generic bitfield macros and masks */ + +#define ACPI_GET_BITS(source_ptr, position, mask) \ + ((*source_ptr >> position) & mask) + +#define ACPI_SET_BITS(target_ptr, position, mask, value) \ + (*target_ptr |= ((value & mask) << position)) + +#define ACPI_1BIT_MASK 0x00000001 +#define ACPI_2BIT_MASK 0x00000003 +#define ACPI_3BIT_MASK 0x00000007 +#define ACPI_4BIT_MASK 0x0000000F +#define ACPI_5BIT_MASK 0x0000001F +#define ACPI_6BIT_MASK 0x0000003F +#define ACPI_7BIT_MASK 0x0000007F +#define ACPI_8BIT_MASK 0x000000FF +#define ACPI_16BIT_MASK 0x0000FFFF +#define ACPI_24BIT_MASK 0x00FFFFFF /* * An object of type struct acpi_namespace_node can appear in some contexts diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 552aa3a50c84..557510084c7a 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -230,6 +230,20 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, walk_state->scope_info->common.value = ACPI_TYPE_ANY; break; + case ACPI_TYPE_METHOD: + + /* + * Allow scope change to root during execution of module-level + * code. Root is typed METHOD during this time. + */ + if ((node == acpi_gbl_root_node) && + (walk_state-> + parse_flags & ACPI_PARSE_MODULE_LEVEL)) { + break; + } + + /*lint -fallthrough */ + default: /* All other types are an error */ diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index ae7147724763..89c0114210c0 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -230,6 +230,20 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, walk_state->scope_info->common.value = ACPI_TYPE_ANY; break; + case ACPI_TYPE_METHOD: + + /* + * Allow scope change to root during execution of module-level + * code. Root is typed METHOD during this time. 
+ */ + if ((node == acpi_gbl_root_node) && + (walk_state-> + parse_flags & ACPI_PARSE_MODULE_LEVEL)) { + break; + } + + /*lint -fallthrough */ + default: /* All other types are an error */ diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index afbd5cb391f6..ef0193d74b5d 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c @@ -80,8 +80,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info) return_ACPI_STATUS(AE_NOT_EXIST); } - register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, - gpe_register_info); + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); /* Clear the run bit up front */ @@ -379,6 +378,18 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) */ if (!(gpe_register_info->enable_for_run | gpe_register_info->enable_for_wake)) { + ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, + "Ignore disabled registers for GPE%02X-GPE%02X: " + "RunEnable=%02X, WakeEnable=%02X\n", + gpe_register_info-> + base_gpe_number, + gpe_register_info-> + base_gpe_number + + (ACPI_GPE_REGISTER_WIDTH - 1), + gpe_register_info-> + enable_for_run, + gpe_register_info-> + enable_for_wake)); continue; } @@ -401,9 +412,14 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) } ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, - "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", + "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, " + "RunEnable=%02X, WakeEnable=%02X\n", gpe_register_info->base_gpe_number, - status_reg, enable_reg)); + gpe_register_info->base_gpe_number + + (ACPI_GPE_REGISTER_WIDTH - 1), + status_reg, enable_reg, + gpe_register_info->enable_for_run, + gpe_register_info->enable_for_wake)); /* Check if there is anything active at all in this register */ diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 6affbdb4b88c..87c5f2332260 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c @@ -357,8 +357,7 @@ acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 ac goto unlock_and_exit; } - register_bit = - acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); /* Perform the action */ diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c index 25bd28c4ae8d..db4076580e2b 100644 --- a/drivers/acpi/acpica/hwgpe.c +++ b/drivers/acpi/acpica/hwgpe.c @@ -60,7 +60,6 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, * FUNCTION: acpi_hw_get_gpe_register_bit * * PARAMETERS: gpe_event_info - Info block for the GPE - * gpe_register_info - Info block for the GPE register * * RETURN: Register mask with a one in the GPE bit position * @@ -69,11 +68,10 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, * ******************************************************************************/ -u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info, - struct acpi_gpe_register_info *gpe_register_info) +u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info) { return (u32)1 << (gpe_event_info->gpe_number - - gpe_register_info->base_gpe_number); + gpe_event_info->register_info->base_gpe_number); } /****************************************************************************** @@ -115,8 +113,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) /* Set or clear just the bit that corresponds to this GPE */ - register_bit = 
acpi_hw_get_gpe_register_bit(gpe_event_info, - gpe_register_info); + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); switch (action) { case ACPI_GPE_CONDITIONAL_ENABLE: @@ -178,8 +175,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info) * Write a one to the appropriate bit in the status register to * clear this GPE. */ - register_bit = - acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); status = acpi_hw_write(register_bit, &gpe_register_info->status_address); @@ -222,8 +218,7 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, /* Get the register bitmask for this GPE */ - register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, - gpe_register_info); + register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info); /* GPE currently enabled? (enabled for runtime?) */ diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c index 1f165a750ae2..0ff1ecea5c3a 100644 --- a/drivers/acpi/acpica/hwxfsleep.c +++ b/drivers/acpi/acpica/hwxfsleep.c @@ -381,7 +381,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state) * FUNCTION: acpi_leave_sleep_state_prep * * PARAMETERS: sleep_state - Which sleep state we are exiting - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c index 7ee4e6aeb0a2..2526aaf945ee 100644 --- a/drivers/acpi/acpica/nsdump.c +++ b/drivers/acpi/acpica/nsdump.c @@ -264,7 +264,7 @@ acpi_ns_dump_one_object(acpi_handle obj_handle, switch (type) { case ACPI_TYPE_PROCESSOR: - acpi_os_printf("ID %X Len %.4X Addr %p\n", + acpi_os_printf("ID %02X Len %02X Addr %p\n", obj_desc->processor.proc_id, obj_desc->processor.length, ACPI_CAST_PTR(void, diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c index 74f97d74db1c..70f9d787c82c 100644 --- a/drivers/acpi/acpica/tbinstal.c +++ b/drivers/acpi/acpica/tbinstal.c @@ -350,6 +350,7 @@ struct acpi_table_header *acpi_tb_table_override(struct acpi_table_header acpi_status acpi_tb_resize_root_table_list(void) { struct acpi_table_desc *tables; + u32 table_count; ACPI_FUNCTION_TRACE(tb_resize_root_table_list); @@ -363,8 +364,13 @@ acpi_status acpi_tb_resize_root_table_list(void) /* Increase the Table Array size */ - tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list. - max_table_count + + if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { + table_count = acpi_gbl_root_table_list.max_table_count; + } else { + table_count = acpi_gbl_root_table_list.current_table_count; + } + + tables = ACPI_ALLOCATE_ZEROED(((acpi_size) table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT) * sizeof(struct acpi_table_desc)); if (!tables) { @@ -377,8 +383,8 @@ acpi_status acpi_tb_resize_root_table_list(void) if (acpi_gbl_root_table_list.tables) { ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, - (acpi_size) acpi_gbl_root_table_list. 
- max_table_count * sizeof(struct acpi_table_desc)); + (acpi_size) table_count * + sizeof(struct acpi_table_desc)); if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { ACPI_FREE(acpi_gbl_root_table_list.tables); @@ -386,9 +392,9 @@ acpi_status acpi_tb_resize_root_table_list(void) } acpi_gbl_root_table_list.tables = tables; - acpi_gbl_root_table_list.max_table_count += - ACPI_ROOT_TABLE_SIZE_INCREMENT; - acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED; + acpi_gbl_root_table_list.max_table_count = + table_count + ACPI_ROOT_TABLE_SIZE_INCREMENT; + acpi_gbl_root_table_list.flags |= ACPI_ROOT_ORIGIN_ALLOCATED; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c index 29e51bc01383..21101262e47a 100644 --- a/drivers/acpi/acpica/tbxface.c +++ b/drivers/acpi/acpica/tbxface.c @@ -159,14 +159,12 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array, * DESCRIPTION: Reallocate Root Table List into dynamic memory. Copies the * root list from the previously provided scratch area. Should * be called once dynamic memory allocation is available in the - * kernel + * kernel. * ******************************************************************************/ acpi_status acpi_reallocate_root_table(void) { - struct acpi_table_desc *tables; - acpi_size new_size; - acpi_size current_size; + acpi_status status; ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); @@ -178,39 +176,10 @@ acpi_status acpi_reallocate_root_table(void) return_ACPI_STATUS(AE_SUPPORT); } - /* - * Get the current size of the root table and add the default - * increment to create the new table size. - */ - current_size = (acpi_size) - acpi_gbl_root_table_list.current_table_count * - sizeof(struct acpi_table_desc); - - new_size = current_size + - (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc)); - - /* Create new array and copy the old array */ - - tables = ACPI_ALLOCATE_ZEROED(new_size); - if (!tables) { - return_ACPI_STATUS(AE_NO_MEMORY); - } + acpi_gbl_root_table_list.flags |= ACPI_ROOT_ALLOW_RESIZE; - ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size); - - /* - * Update the root table descriptor. The new size will be the current - * number of tables plus the increment, independent of the reserved - * size of the original table list. 
- */ - acpi_gbl_root_table_list.tables = tables; - acpi_gbl_root_table_list.max_table_count = - acpi_gbl_root_table_list.current_table_count + - ACPI_ROOT_TABLE_SIZE_INCREMENT; - acpi_gbl_root_table_list.flags = - ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE; - - return_ACPI_STATUS(AE_OK); + status = acpi_tb_resize_root_table_list(); + return_ACPI_STATUS(status); } /******************************************************************************* diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c index 34ef0bd7e4b4..676285d6116d 100644 --- a/drivers/acpi/acpica/utosi.c +++ b/drivers/acpi/acpica/utosi.c @@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = { {"Windows 2006 SP1", NULL, 0, ACPI_OSI_WIN_VISTA_SP1}, /* Windows Vista SP1 - Added 09/2009 */ {"Windows 2006 SP2", NULL, 0, ACPI_OSI_WIN_VISTA_SP2}, /* Windows Vista SP2 - Added 09/2010 */ {"Windows 2009", NULL, 0, ACPI_OSI_WIN_7}, /* Windows 7 and Server 2008 R2 - Added 09/2009 */ + {"Windows 2012", NULL, 0, ACPI_OSI_WIN_8}, /* Windows 8 and Server 2012 - Added 08/2012 */ /* Feature Group Strings */ diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c index 534179f1177b..b09632b4f5b3 100644 --- a/drivers/acpi/acpica/utxface.c +++ b/drivers/acpi/acpica/utxface.c @@ -1,6 +1,6 @@ /****************************************************************************** * - * Module Name: utxface - External interfaces for "global" ACPI functions + * Module Name: utxface - External interfaces, miscellaneous utility functions * *****************************************************************************/ @@ -53,271 +53,6 @@ #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("utxface") -#ifndef ACPI_ASL_COMPILER -/******************************************************************************* - * - * FUNCTION: acpi_initialize_subsystem - * - * PARAMETERS: None - * - * RETURN: Status - * - * DESCRIPTION: Initializes all global variables. This is the first function - * called, so any early initialization belongs here. 
- * - ******************************************************************************/ -acpi_status __init acpi_initialize_subsystem(void) -{ - acpi_status status; - - ACPI_FUNCTION_TRACE(acpi_initialize_subsystem); - - acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE; - ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace()); - - /* Initialize the OS-Dependent layer */ - - status = acpi_os_initialize(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization")); - return_ACPI_STATUS(status); - } - - /* Initialize all globals used by the subsystem */ - - status = acpi_ut_init_globals(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "During initialization of globals")); - return_ACPI_STATUS(status); - } - - /* Create the default mutex objects */ - - status = acpi_ut_mutex_initialize(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "During Global Mutex creation")); - return_ACPI_STATUS(status); - } - - /* - * Initialize the namespace manager and - * the root of the namespace tree - */ - status = acpi_ns_root_initialize(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "During Namespace initialization")); - return_ACPI_STATUS(status); - } - - /* Initialize the global OSI interfaces list with the static names */ - - status = acpi_ut_initialize_interfaces(); - if (ACPI_FAILURE(status)) { - ACPI_EXCEPTION((AE_INFO, status, - "During OSI interfaces initialization")); - return_ACPI_STATUS(status); - } - - /* If configured, initialize the AML debugger */ - - ACPI_DEBUGGER_EXEC(status = acpi_db_initialize()); - return_ACPI_STATUS(status); -} - -/******************************************************************************* - * - * FUNCTION: acpi_enable_subsystem - * - * PARAMETERS: flags - Init/enable Options - * - * RETURN: Status - * - * DESCRIPTION: Completes the subsystem initialization including hardware. - * Puts system into ACPI mode if it isn't already. - * - ******************************************************************************/ -acpi_status acpi_enable_subsystem(u32 flags) -{ - acpi_status status = AE_OK; - - ACPI_FUNCTION_TRACE(acpi_enable_subsystem); - -#if (!ACPI_REDUCED_HARDWARE) - - /* Enable ACPI mode */ - - if (!(flags & ACPI_NO_ACPI_ENABLE)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Going into ACPI mode\n")); - - acpi_gbl_original_mode = acpi_hw_get_mode(); - - status = acpi_enable(); - if (ACPI_FAILURE(status)) { - ACPI_WARNING((AE_INFO, "AcpiEnable failed")); - return_ACPI_STATUS(status); - } - } - - /* - * Obtain a permanent mapping for the FACS. This is required for the - * Global Lock and the Firmware Waking Vector - */ - status = acpi_tb_initialize_facs(); - if (ACPI_FAILURE(status)) { - ACPI_WARNING((AE_INFO, "Could not map the FACS table")); - return_ACPI_STATUS(status); - } -#endif /* !ACPI_REDUCED_HARDWARE */ - - /* - * Install the default op_region handlers. These are installed unless - * other handlers have already been installed via the - * install_address_space_handler interface. - */ - if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Installing default address space handlers\n")); - - status = acpi_ev_install_region_handlers(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } -#if (!ACPI_REDUCED_HARDWARE) - /* - * Initialize ACPI Event handling (Fixed and General Purpose) - * - * Note1: We must have the hardware and events initialized before we can - * execute any control methods safely. 
Any control method can require - * ACPI hardware support, so the hardware must be fully initialized before - * any method execution! - * - * Note2: Fixed events are initialized and enabled here. GPEs are - * initialized, but cannot be enabled until after the hardware is - * completely initialized (SCI and global_lock activated) - */ - if (!(flags & ACPI_NO_EVENT_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Initializing ACPI events\n")); - - status = acpi_ev_initialize_events(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } - - /* - * Install the SCI handler and Global Lock handler. This completes the - * hardware initialization. - */ - if (!(flags & ACPI_NO_HANDLER_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Installing SCI/GL handlers\n")); - - status = acpi_ev_install_xrupt_handlers(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } -#endif /* !ACPI_REDUCED_HARDWARE */ - - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_enable_subsystem) - -/******************************************************************************* - * - * FUNCTION: acpi_initialize_objects - * - * PARAMETERS: flags - Init/enable Options - * - * RETURN: Status - * - * DESCRIPTION: Completes namespace initialization by initializing device - * objects and executing AML code for Regions, buffers, etc. - * - ******************************************************************************/ -acpi_status acpi_initialize_objects(u32 flags) -{ - acpi_status status = AE_OK; - - ACPI_FUNCTION_TRACE(acpi_initialize_objects); - - /* - * Run all _REG methods - * - * Note: Any objects accessed by the _REG methods will be automatically - * initialized, even if they contain executable AML (see the call to - * acpi_ns_initialize_objects below). - */ - if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Executing _REG OpRegion methods\n")); - - status = acpi_ev_initialize_op_regions(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } - - /* - * Execute any module-level code that was detected during the table load - * phase. Although illegal since ACPI 2.0, there are many machines that - * contain this type of code. Each block of detected executable AML code - * outside of any control method is wrapped with a temporary control - * method object and placed on a global list. The methods on this list - * are executed below. - */ - acpi_ns_exec_module_code_list(); - - /* - * Initialize the objects that remain uninitialized. This runs the - * executable AML that may be part of the declaration of these objects: - * operation_regions, buffer_fields, Buffers, and Packages. - */ - if (!(flags & ACPI_NO_OBJECT_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Completing Initialization of ACPI Objects\n")); - - status = acpi_ns_initialize_objects(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } - - /* - * Initialize all device objects in the namespace. This runs the device - * _STA and _INI methods. - */ - if (!(flags & ACPI_NO_DEVICE_INIT)) { - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, - "[Init] Initializing ACPI Devices\n")); - - status = acpi_ns_initialize_devices(); - if (ACPI_FAILURE(status)) { - return_ACPI_STATUS(status); - } - } - - /* - * Empty the caches (delete the cached objects) on the assumption that - * the table load filled them up more than they will be at runtime -- - * thus wasting non-paged memory. 
- */ - status = acpi_purge_cached_objects(); - - acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; - return_ACPI_STATUS(status); -} - -ACPI_EXPORT_SYMBOL(acpi_initialize_objects) - -#endif /******************************************************************************* * * FUNCTION: acpi_terminate @@ -683,3 +418,90 @@ acpi_check_address_range(acpi_adr_space_type space_id, ACPI_EXPORT_SYMBOL(acpi_check_address_range) #endif /* !ACPI_ASL_COMPILER */ +/******************************************************************************* + * + * FUNCTION: acpi_decode_pld_buffer + * + * PARAMETERS: in_buffer - Buffer returned by _PLD method + * length - Length of the in_buffer + * return_buffer - Where the decode buffer is returned + * + * RETURN: Status and the decoded _PLD buffer. User must deallocate + * the buffer via ACPI_FREE. + * + * DESCRIPTION: Decode the bit-packed buffer returned by the _PLD method into + * a local struct that is much more useful to an ACPI driver. + * + ******************************************************************************/ +acpi_status +acpi_decode_pld_buffer(u8 *in_buffer, + acpi_size length, struct acpi_pld_info ** return_buffer) +{ + struct acpi_pld_info *pld_info; + u32 *buffer = ACPI_CAST_PTR(u32, in_buffer); + u32 dword; + + /* Parameter validation */ + + if (!in_buffer || !return_buffer || (length < 16)) { + return (AE_BAD_PARAMETER); + } + + pld_info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pld_info)); + if (!pld_info) { + return (AE_NO_MEMORY); + } + + /* First 32-bit DWord */ + + ACPI_MOVE_32_TO_32(&dword, &buffer[0]); + pld_info->revision = ACPI_PLD_GET_REVISION(&dword); + pld_info->ignore_color = ACPI_PLD_GET_IGNORE_COLOR(&dword); + pld_info->color = ACPI_PLD_GET_COLOR(&dword); + + /* Second 32-bit DWord */ + + ACPI_MOVE_32_TO_32(&dword, &buffer[1]); + pld_info->width = ACPI_PLD_GET_WIDTH(&dword); + pld_info->height = ACPI_PLD_GET_HEIGHT(&dword); + + /* Third 32-bit DWord */ + + ACPI_MOVE_32_TO_32(&dword, &buffer[2]); + pld_info->user_visible = ACPI_PLD_GET_USER_VISIBLE(&dword); + pld_info->dock = ACPI_PLD_GET_DOCK(&dword); + pld_info->lid = ACPI_PLD_GET_LID(&dword); + pld_info->panel = ACPI_PLD_GET_PANEL(&dword); + pld_info->vertical_position = ACPI_PLD_GET_VERTICAL(&dword); + pld_info->horizontal_position = ACPI_PLD_GET_HORIZONTAL(&dword); + pld_info->shape = ACPI_PLD_GET_SHAPE(&dword); + pld_info->group_orientation = ACPI_PLD_GET_ORIENTATION(&dword); + pld_info->group_token = ACPI_PLD_GET_TOKEN(&dword); + pld_info->group_position = ACPI_PLD_GET_POSITION(&dword); + pld_info->bay = ACPI_PLD_GET_BAY(&dword); + + /* Fourth 32-bit DWord */ + + ACPI_MOVE_32_TO_32(&dword, &buffer[3]); + pld_info->ejectable = ACPI_PLD_GET_EJECTABLE(&dword); + pld_info->ospm_eject_required = ACPI_PLD_GET_OSPM_EJECT(&dword); + pld_info->cabinet_number = ACPI_PLD_GET_CABINET(&dword); + pld_info->card_cage_number = ACPI_PLD_GET_CARD_CAGE(&dword); + pld_info->reference = ACPI_PLD_GET_REFERENCE(&dword); + pld_info->rotation = ACPI_PLD_GET_ROTATION(&dword); + pld_info->order = ACPI_PLD_GET_ORDER(&dword); + + if (length >= ACPI_PLD_BUFFER_SIZE) { + + /* Fifth 32-bit DWord (Revision 2 of _PLD) */ + + ACPI_MOVE_32_TO_32(&dword, &buffer[4]); + pld_info->vertical_offset = ACPI_PLD_GET_VERT_OFFSET(&dword); + pld_info->horizontal_offset = ACPI_PLD_GET_HORIZ_OFFSET(&dword); + } + + *return_buffer = pld_info; + return (AE_OK); +} + +ACPI_EXPORT_SYMBOL(acpi_decode_pld_buffer) diff --git a/drivers/acpi/acpica/utxfinit.c b/drivers/acpi/acpica/utxfinit.c new file mode 100644 index 
000000000000..14f523627a5e --- /dev/null +++ b/drivers/acpi/acpica/utxfinit.c @@ -0,0 +1,317 @@ +/****************************************************************************** + * + * Module Name: utxfinit - External interfaces for ACPICA initialization + * + *****************************************************************************/ + +/* + * Copyright (C) 2000 - 2012, Intel Corp. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + */ + +#include <linux/export.h> +#include <acpi/acpi.h> +#include "accommon.h" +#include "acevents.h" +#include "acnamesp.h" +#include "acdebug.h" +#include "actables.h" + +#define _COMPONENT ACPI_UTILITIES +ACPI_MODULE_NAME("utxfinit") + +/******************************************************************************* + * + * FUNCTION: acpi_initialize_subsystem + * + * PARAMETERS: None + * + * RETURN: Status + * + * DESCRIPTION: Initializes all global variables. This is the first function + * called, so any early initialization belongs here. 
+ * + ******************************************************************************/ +acpi_status acpi_initialize_subsystem(void) +{ + acpi_status status; + + ACPI_FUNCTION_TRACE(acpi_initialize_subsystem); + + acpi_gbl_startup_flags = ACPI_SUBSYSTEM_INITIALIZE; + ACPI_DEBUG_EXEC(acpi_ut_init_stack_ptr_trace()); + + /* Initialize the OS-Dependent layer */ + + status = acpi_os_initialize(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, "During OSL initialization")); + return_ACPI_STATUS(status); + } + + /* Initialize all globals used by the subsystem */ + + status = acpi_ut_init_globals(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "During initialization of globals")); + return_ACPI_STATUS(status); + } + + /* Create the default mutex objects */ + + status = acpi_ut_mutex_initialize(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "During Global Mutex creation")); + return_ACPI_STATUS(status); + } + + /* + * Initialize the namespace manager and + * the root of the namespace tree + */ + status = acpi_ns_root_initialize(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "During Namespace initialization")); + return_ACPI_STATUS(status); + } + + /* Initialize the global OSI interfaces list with the static names */ + + status = acpi_ut_initialize_interfaces(); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "During OSI interfaces initialization")); + return_ACPI_STATUS(status); + } + + /* If configured, initialize the AML debugger */ + + ACPI_DEBUGGER_EXEC(status = acpi_db_initialize()); + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_initialize_subsystem) + +/******************************************************************************* + * + * FUNCTION: acpi_enable_subsystem + * + * PARAMETERS: flags - Init/enable Options + * + * RETURN: Status + * + * DESCRIPTION: Completes the subsystem initialization including hardware. + * Puts system into ACPI mode if it isn't already. + * + ******************************************************************************/ +acpi_status acpi_enable_subsystem(u32 flags) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE(acpi_enable_subsystem); + +#if (!ACPI_REDUCED_HARDWARE) + + /* Enable ACPI mode */ + + if (!(flags & ACPI_NO_ACPI_ENABLE)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Going into ACPI mode\n")); + + acpi_gbl_original_mode = acpi_hw_get_mode(); + + status = acpi_enable(); + if (ACPI_FAILURE(status)) { + ACPI_WARNING((AE_INFO, "AcpiEnable failed")); + return_ACPI_STATUS(status); + } + } + + /* + * Obtain a permanent mapping for the FACS. This is required for the + * Global Lock and the Firmware Waking Vector + */ + status = acpi_tb_initialize_facs(); + if (ACPI_FAILURE(status)) { + ACPI_WARNING((AE_INFO, "Could not map the FACS table")); + return_ACPI_STATUS(status); + } +#endif /* !ACPI_REDUCED_HARDWARE */ + + /* + * Install the default op_region handlers. These are installed unless + * other handlers have already been installed via the + * install_address_space_handler interface. 
+ */ + if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Installing default address space handlers\n")); + + status = acpi_ev_install_region_handlers(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } +#if (!ACPI_REDUCED_HARDWARE) + /* + * Initialize ACPI Event handling (Fixed and General Purpose) + * + * Note1: We must have the hardware and events initialized before we can + * execute any control methods safely. Any control method can require + * ACPI hardware support, so the hardware must be fully initialized before + * any method execution! + * + * Note2: Fixed events are initialized and enabled here. GPEs are + * initialized, but cannot be enabled until after the hardware is + * completely initialized (SCI and global_lock activated) and the various + * initialization control methods are run (_REG, _STA, _INI) on the + * entire namespace. + */ + if (!(flags & ACPI_NO_EVENT_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Initializing ACPI events\n")); + + status = acpi_ev_initialize_events(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + /* + * Install the SCI handler and Global Lock handler. This completes the + * hardware initialization. + */ + if (!(flags & ACPI_NO_HANDLER_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Installing SCI/GL handlers\n")); + + status = acpi_ev_install_xrupt_handlers(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } +#endif /* !ACPI_REDUCED_HARDWARE */ + + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_enable_subsystem) + +/******************************************************************************* + * + * FUNCTION: acpi_initialize_objects + * + * PARAMETERS: flags - Init/enable Options + * + * RETURN: Status + * + * DESCRIPTION: Completes namespace initialization by initializing device + * objects and executing AML code for Regions, buffers, etc. + * + ******************************************************************************/ +acpi_status acpi_initialize_objects(u32 flags) +{ + acpi_status status = AE_OK; + + ACPI_FUNCTION_TRACE(acpi_initialize_objects); + + /* + * Run all _REG methods + * + * Note: Any objects accessed by the _REG methods will be automatically + * initialized, even if they contain executable AML (see the call to + * acpi_ns_initialize_objects below). + */ + if (!(flags & ACPI_NO_ADDRESS_SPACE_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Executing _REG OpRegion methods\n")); + + status = acpi_ev_initialize_op_regions(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + /* + * Execute any module-level code that was detected during the table load + * phase. Although illegal since ACPI 2.0, there are many machines that + * contain this type of code. Each block of detected executable AML code + * outside of any control method is wrapped with a temporary control + * method object and placed on a global list. The methods on this list + * are executed below. + */ + acpi_ns_exec_module_code_list(); + + /* + * Initialize the objects that remain uninitialized. This runs the + * executable AML that may be part of the declaration of these objects: + * operation_regions, buffer_fields, Buffers, and Packages. 
+ */ + if (!(flags & ACPI_NO_OBJECT_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Completing Initialization of ACPI Objects\n")); + + status = acpi_ns_initialize_objects(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + /* + * Initialize all device objects in the namespace. This runs the device + * _STA and _INI methods. + */ + if (!(flags & ACPI_NO_DEVICE_INIT)) { + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, + "[Init] Initializing ACPI Devices\n")); + + status = acpi_ns_initialize_devices(); + if (ACPI_FAILURE(status)) { + return_ACPI_STATUS(status); + } + } + + /* + * Empty the caches (delete the cached objects) on the assumption that + * the table load filled them up more than they will be at runtime -- + * thus wasting non-paged memory. + */ + status = acpi_purge_cached_objects(); + + acpi_gbl_startup_flags |= ACPI_INITIALIZED_OK; + return_ACPI_STATUS(status); +} +ACPI_EXPORT_SYMBOL(acpi_initialize_objects) diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index e0596954290b..d59175efc428 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -994,8 +994,6 @@ static int __init acpi_bus_init(void) status = acpi_ec_ecdt_probe(); /* Ignore result. Not having an ECDT is not fatal. */ - acpi_bus_osc_support(); - status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION); if (ACPI_FAILURE(status)) { printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n"); @@ -1003,6 +1001,12 @@ static int __init acpi_bus_init(void) } /* + * _OSC method may exist in module level code, + * so it must be run after ACPI_FULL_INITIALIZATION + */ + acpi_bus_osc_support(); + + /* * _PDC control method may load dynamic SSDT tables, * and we need to install the table handler before that. */ diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 314a3b84bbc7..f0d936b65e37 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c @@ -450,15 +450,4 @@ static int acpi_button_remove(struct acpi_device *device, int type) return 0; } -static int __init acpi_button_init(void) -{ - return acpi_bus_register_driver(&acpi_button_driver); -} - -static void __exit acpi_button_exit(void) -{ - acpi_bus_unregister_driver(&acpi_button_driver); -} - -module_init(acpi_button_init); -module_exit(acpi_button_exit); +module_acpi_driver(acpi_button_driver); diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index bc36a476f1ab..3bd6a54702d6 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c @@ -212,24 +212,4 @@ static int acpi_fan_resume(struct device *dev) } #endif -static int __init acpi_fan_init(void) -{ - int result = 0; - - result = acpi_bus_register_driver(&acpi_fan_driver); - if (result < 0) - return -ENODEV; - - return 0; -} - -static void __exit acpi_fan_exit(void) -{ - - acpi_bus_unregister_driver(&acpi_fan_driver); - - return; -} - -module_init(acpi_fan_init); -module_exit(acpi_fan_exit); +module_acpi_driver(acpi_fan_driver); diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 243ee85e4d2e..d1a2d74033e9 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c @@ -25,6 +25,8 @@ static LIST_HEAD(bus_type_list); static DECLARE_RWSEM(bus_type_sem); +#define PHYSICAL_NODE_STRING "physical_node" + int register_acpi_bus_type(struct acpi_bus_type *type) { if (acpi_disabled) @@ -124,84 +126,119 @@ acpi_handle acpi_get_child(acpi_handle parent, u64 address) EXPORT_SYMBOL(acpi_get_child); -/* Link ACPI devices with physical devices */ -static void acpi_glue_data_handler(acpi_handle handle, - void *context) -{ - /* we provide an empty handler */ -} - -/* Note: a success call 
will increase reference count by one */ -struct device *acpi_get_physical_device(acpi_handle handle) -{ - acpi_status status; - struct device *dev; - - status = acpi_get_data(handle, acpi_glue_data_handler, (void **)&dev); - if (ACPI_SUCCESS(status)) - return get_device(dev); - return NULL; -} - -EXPORT_SYMBOL(acpi_get_physical_device); - static int acpi_bind_one(struct device *dev, acpi_handle handle) { struct acpi_device *acpi_dev; acpi_status status; + struct acpi_device_physical_node *physical_node; + char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + int retval = -EINVAL; if (dev->archdata.acpi_handle) { dev_warn(dev, "Drivers changed 'acpi_handle'\n"); return -EINVAL; } + get_device(dev); - status = acpi_attach_data(handle, acpi_glue_data_handler, dev); - if (ACPI_FAILURE(status)) { - put_device(dev); - return -EINVAL; + status = acpi_bus_get_device(handle, &acpi_dev); + if (ACPI_FAILURE(status)) + goto err; + + physical_node = kzalloc(sizeof(struct acpi_device_physical_node), + GFP_KERNEL); + if (!physical_node) { + retval = -ENOMEM; + goto err; } - dev->archdata.acpi_handle = handle; - status = acpi_bus_get_device(handle, &acpi_dev); - if (!ACPI_FAILURE(status)) { - int ret; - - ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj, - "firmware_node"); - ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, - "physical_node"); - if (acpi_dev->wakeup.flags.valid) - device_set_wakeup_capable(dev, true); + mutex_lock(&acpi_dev->physical_node_lock); + /* allocate physical node id according to physical_node_id_bitmap */ + physical_node->node_id = + find_first_zero_bit(acpi_dev->physical_node_id_bitmap, + ACPI_MAX_PHYSICAL_NODE); + if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { + retval = -ENOSPC; + mutex_unlock(&acpi_dev->physical_node_lock); + goto err; } + set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); + physical_node->dev = dev; + list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); + acpi_dev->physical_node_count++; + mutex_unlock(&acpi_dev->physical_node_lock); + + dev->archdata.acpi_handle = handle; + + if (!physical_node->node_id) + strcpy(physical_node_name, PHYSICAL_NODE_STRING); + else + sprintf(physical_node_name, + "physical_node%d", physical_node->node_id); + retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, + physical_node_name); + retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj, + "firmware_node"); + + if (acpi_dev->wakeup.flags.valid) + device_set_wakeup_capable(dev, true); + return 0; + + err: + put_device(dev); + return retval; } static int acpi_unbind_one(struct device *dev) { + struct acpi_device_physical_node *entry; + struct acpi_device *acpi_dev; + acpi_status status; + struct list_head *node, *next; + if (!dev->archdata.acpi_handle) return 0; - if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) { - struct acpi_device *acpi_dev; - /* acpi_get_physical_device increase refcnt by one */ - put_device(dev); + status = acpi_bus_get_device(dev->archdata.acpi_handle, + &acpi_dev); + if (ACPI_FAILURE(status)) + goto err; - if (!acpi_bus_get_device(dev->archdata.acpi_handle, - &acpi_dev)) { - sysfs_remove_link(&dev->kobj, "firmware_node"); - sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node"); - } + mutex_lock(&acpi_dev->physical_node_lock); + list_for_each_safe(node, next, &acpi_dev->physical_node_list) { + char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; + + entry = list_entry(node, struct acpi_device_physical_node, + node); + if (entry->dev != dev) + continue; + + 
list_del(node); + clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap); - acpi_detach_data(dev->archdata.acpi_handle, - acpi_glue_data_handler); + acpi_dev->physical_node_count--; + + if (!entry->node_id) + strcpy(physical_node_name, PHYSICAL_NODE_STRING); + else + sprintf(physical_node_name, + "physical_node%d", entry->node_id); + + sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name); + sysfs_remove_link(&dev->kobj, "firmware_node"); dev->archdata.acpi_handle = NULL; /* acpi_bind_one increase refcnt by one */ put_device(dev); - } else { - dev_err(dev, "Oops, 'acpi_handle' corrupt\n"); + kfree(entry); } + mutex_unlock(&acpi_dev->physical_node_lock); + return 0; + +err: + dev_err(dev, "Oops, 'acpi_handle' corrupt\n"); + return -EINVAL; } static int acpi_platform_notify(struct device *dev) diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c index d0c1967f7597..20a0f2c3ca3b 100644 --- a/drivers/acpi/hed.c +++ b/drivers/acpi/hed.c @@ -86,25 +86,7 @@ static struct acpi_driver acpi_hed_driver = { .notify = acpi_hed_notify, }, }; - -static int __init acpi_hed_init(void) -{ - if (acpi_disabled) - return -ENODEV; - - if (acpi_bus_register_driver(&acpi_hed_driver) < 0) - return -ENODEV; - - return 0; -} - -static void __exit acpi_hed_exit(void) -{ - acpi_bus_unregister_driver(&acpi_hed_driver); -} - -module_init(acpi_hed_init); -module_exit(acpi_hed_exit); +module_acpi_driver(acpi_hed_driver); ACPI_MODULE_NAME("hed"); MODULE_AUTHOR("Huang Ying"); diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index 251c7b6273a9..27adb090bb30 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c @@ -302,26 +302,41 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) list_for_each_safe(node, next, &acpi_wakeup_device_list) { struct acpi_device *dev = container_of(node, struct acpi_device, wakeup_list); - struct device *ldev; + struct acpi_device_physical_node *entry; if (!dev->wakeup.flags.valid) continue; - ldev = acpi_get_physical_device(dev->handle); - seq_printf(seq, "%s\t S%d\t%c%-8s ", + seq_printf(seq, "%s\t S%d\t", dev->pnp.bus_id, - (u32) dev->wakeup.sleep_state, - dev->wakeup.flags.run_wake ? '*' : ' ', - (device_may_wakeup(&dev->dev) - || (ldev && device_may_wakeup(ldev))) ? - "enabled" : "disabled"); - if (ldev) - seq_printf(seq, "%s:%s", - ldev->bus ? ldev->bus->name : "no-bus", - dev_name(ldev)); - seq_printf(seq, "\n"); - put_device(ldev); - + (u32) dev->wakeup.sleep_state); + + if (!dev->physical_node_count) + seq_printf(seq, "%c%-8s\n", + dev->wakeup.flags.run_wake ? + '*' : ' ', "disabled"); + else { + struct device *ldev; + list_for_each_entry(entry, &dev->physical_node_list, + node) { + ldev = get_device(entry->dev); + if (!ldev) + continue; + + if (&entry->node != + dev->physical_node_list.next) + seq_printf(seq, "\t\t"); + + seq_printf(seq, "%c%-8s %s:%s\n", + dev->wakeup.flags.run_wake ? '*' : ' ', + (device_may_wakeup(&dev->dev) || + (ldev && device_may_wakeup(ldev))) ? + "enabled" : "disabled", + ldev->bus ? 
ldev->bus->name : + "no-bus", dev_name(ldev)); + put_device(ldev); + } + } } mutex_unlock(&acpi_device_lock); return 0; @@ -329,12 +344,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) static void physical_device_enable_wakeup(struct acpi_device *adev) { - struct device *dev = acpi_get_physical_device(adev->handle); + struct acpi_device_physical_node *entry; - if (dev && device_can_wakeup(dev)) { - bool enable = !device_may_wakeup(dev); - device_set_wakeup_enable(dev, enable); - } + list_for_each_entry(entry, + &adev->physical_node_list, node) + if (entry->dev && device_can_wakeup(entry->dev)) { + bool enable = !device_may_wakeup(entry->dev); + device_set_wakeup_enable(entry->dev, enable); + } } static ssize_t diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c index f8d2a472795c..cf6129a8af7c 100644 --- a/drivers/acpi/sbshc.c +++ b/drivers/acpi/sbshc.c @@ -310,23 +310,7 @@ static int acpi_smbus_hc_remove(struct acpi_device *device, int type) return 0; } -static int __init acpi_smb_hc_init(void) -{ - int result; - - result = acpi_bus_register_driver(&acpi_smb_hc_driver); - if (result < 0) - return -ENODEV; - return 0; -} - -static void __exit acpi_smb_hc_exit(void) -{ - acpi_bus_unregister_driver(&acpi_smb_hc_driver); -} - -module_init(acpi_smb_hc_init); -module_exit(acpi_smb_hc_exit); +module_acpi_driver(acpi_smb_hc_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alexey Starikovskiy"); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index d1ecca2b641a..1fcb8678665c 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -10,6 +10,7 @@ #include <linux/signal.h> #include <linux/kthread.h> #include <linux/dmi.h> +#include <linux/nls.h> #include <acpi/acpi_drivers.h> @@ -232,8 +233,35 @@ end: } static DEVICE_ATTR(path, 0444, acpi_device_path_show, NULL); +/* sysfs file that shows description text from the ACPI _STR method */ +static ssize_t description_show(struct device *dev, + struct device_attribute *attr, + char *buf) { + struct acpi_device *acpi_dev = to_acpi_device(dev); + int result; + + if (acpi_dev->pnp.str_obj == NULL) + return 0; + + /* + * The _STR object contains a Unicode identifier for a device. + * We need to convert to utf-8 so it can be displayed. + */ + result = utf16s_to_utf8s( + (wchar_t *)acpi_dev->pnp.str_obj->buffer.pointer, + acpi_dev->pnp.str_obj->buffer.length, + UTF16_LITTLE_ENDIAN, buf, + PAGE_SIZE); + + buf[result++] = '\n'; + + return result; +} +static DEVICE_ATTR(description, 0444, description_show, NULL); + static int acpi_device_setup_files(struct acpi_device *dev) { + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; acpi_status status; acpi_handle temp; int result = 0; @@ -257,6 +285,21 @@ static int acpi_device_setup_files(struct acpi_device *dev) goto end; } + /* + * If device has _STR, 'description' file is created + */ + status = acpi_get_handle(dev->handle, "_STR", &temp); + if (ACPI_SUCCESS(status)) { + status = acpi_evaluate_object(dev->handle, "_STR", + NULL, &buffer); + if (ACPI_FAILURE(status)) + buffer.pointer = NULL; + dev->pnp.str_obj = buffer.pointer; + result = device_create_file(&dev->dev, &dev_attr_description); + if (result) + goto end; + } + /* * If device has _EJ0, 'eject' file is created that is used to trigger * hot-removal function from userland. @@ -274,8 +317,15 @@ static void acpi_device_remove_files(struct acpi_device *dev) acpi_handle temp; /* - * If device has _EJ0, 'eject' file is created that is used to trigger - * hot-removal function from userland. 
+ * If device has _STR, remove 'description' file + */ + status = acpi_get_handle(dev->handle, "_STR", &temp); + if (ACPI_SUCCESS(status)) { + kfree(dev->pnp.str_obj); + device_remove_file(&dev->dev, &dev_attr_description); + } + /* + * If device has _EJ0, remove 'eject' file. */ status = acpi_get_handle(dev->handle, "_EJ0", &temp); if (ACPI_SUCCESS(status)) @@ -481,6 +531,8 @@ static int acpi_device_register(struct acpi_device *device) INIT_LIST_HEAD(&device->children); INIT_LIST_HEAD(&device->node); INIT_LIST_HEAD(&device->wakeup_list); + INIT_LIST_HEAD(&device->physical_node_list); + mutex_init(&device->physical_node_lock); new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL); if (!new_bus_id) { diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index f336bca7c450..2572d9715bda 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -240,10 +240,17 @@ acpi_table_parse_entries(char *id, table_end) { if (entry->type == entry_id && (!max_entries || count++ < max_entries)) - if (handler(entry, table_end)) { - early_acpi_os_unmap_memory((char *)table_header, tbl_size); - return -EINVAL; - } + if (handler(entry, table_end)) + goto err; + + /* + * If entry->length is 0, break from this loop to avoid + * infinite loop. + */ + if (entry->length == 0) { + pr_err(PREFIX "[%4.4s:0x%02x] Invalid zero length\n", id, entry_id); + goto err; + } entry = (struct acpi_subtable_header *) ((unsigned long)entry + entry->length); @@ -255,6 +262,9 @@ acpi_table_parse_entries(char *id, early_acpi_os_unmap_memory((char *)table_header, tbl_size); return count; +err: + early_acpi_os_unmap_memory((char *)table_header, tbl_size); + return -EINVAL; } int __init diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c index 3e87c9c538aa..462f7e300363 100644 --- a/drivers/acpi/utils.c +++ b/drivers/acpi/utils.c @@ -384,7 +384,7 @@ acpi_evaluate_reference(acpi_handle handle, EXPORT_SYMBOL(acpi_evaluate_reference); acpi_status -acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld) +acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld) { acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -400,13 +400,16 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld *pld) if (!output || output->type != ACPI_TYPE_PACKAGE || !output->package.count || output->package.elements[0].type != ACPI_TYPE_BUFFER - || output->package.elements[0].buffer.length > sizeof(*pld)) { + || output->package.elements[0].buffer.length < ACPI_PLD_REV1_BUFFER_SIZE) { status = AE_TYPE; goto out; } - memcpy(pld, output->package.elements[0].buffer.pointer, - output->package.elements[0].buffer.length); + status = acpi_decode_pld_buffer( + output->package.elements[0].buffer.pointer, + output->package.elements[0].buffer.length, + pld); + out: kfree(buffer.pointer); return status; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 54a55f03115d..bb3d9be3b1b4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -41,6 +41,8 @@ #include "rbd_types.h" +#define RBD_DEBUG /* Activate rbd_assert() calls */ + /* * The basic unit of block I/O is a sector. 
It is interpreted in a * number of contexts in Linux (blk, bio, genhd), but the default is @@ -50,16 +52,24 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) +/* It might be useful to have this defined elsewhere too */ + +#define U64_MAX ((u64) (~0ULL)) + #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" #define RBD_MINORS_PER_MAJOR 256 /* max minors per blkdev */ #define RBD_MAX_SNAP_NAME_LEN 32 +#define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */ #define RBD_MAX_OPT_LEN 1024 #define RBD_SNAP_HEAD_NAME "-" +#define RBD_IMAGE_ID_LEN_MAX 64 +#define RBD_OBJ_PREFIX_LEN_MAX 64 + /* * An RBD device name will be "rbd#", where the "rbd" comes from * RBD_DRV_NAME above, and # is a unique integer identifier. @@ -69,21 +79,22 @@ #define DEV_NAME_LEN 32 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1) -#define RBD_NOTIFY_TIMEOUT_DEFAULT 10 +#define RBD_READ_ONLY_DEFAULT false /* * block device image metadata (in-memory version) */ struct rbd_image_header { - u64 image_size; + /* These four fields never change for a given rbd image */ char *object_prefix; + u64 features; __u8 obj_order; __u8 crypt_type; __u8 comp_type; - struct ceph_snap_context *snapc; - size_t snap_names_len; - u32 total_snaps; + /* The remaining fields need to be updated occasionally */ + u64 image_size; + struct ceph_snap_context *snapc; char *snap_names; u64 *snap_sizes; @@ -91,7 +102,7 @@ struct rbd_image_header { }; struct rbd_options { - int notify_timeout; + bool read_only; }; /* @@ -99,7 +110,6 @@ struct rbd_options { */ struct rbd_client { struct ceph_client *client; - struct rbd_options *rbd_opts; struct kref kref; struct list_head node; }; @@ -141,6 +151,16 @@ struct rbd_snap { u64 size; struct list_head node; u64 id; + u64 features; +}; + +struct rbd_mapping { + char *snap_name; + u64 snap_id; + u64 size; + u64 features; + bool snap_exists; + bool read_only; }; /* @@ -151,8 +171,9 @@ struct rbd_device { int major; /* blkdev assigned major */ struct gendisk *disk; /* blkdev's gendisk and rq */ - struct request_queue *q; + u32 image_format; /* Either 1 or 2 */ + struct rbd_options rbd_opts; struct rbd_client *rbd_client; char name[DEV_NAME_LEN]; /* blkdev name, e.g. 
rbd3 */ @@ -160,6 +181,8 @@ struct rbd_device { spinlock_t lock; /* queue lock */ struct rbd_image_header header; + char *image_id; + size_t image_id_len; char *image_name; size_t image_name_len; char *header_name; @@ -171,13 +194,8 @@ struct rbd_device { /* protects updating the header */ struct rw_semaphore header_rwsem; - /* name of the snapshot this device reads from */ - char *snap_name; - /* id of the snapshot this device reads from */ - u64 snap_id; /* current snapshot id */ - /* whether the snap_id this device reads from still exists */ - bool snap_exists; - int read_only; + + struct rbd_mapping mapping; struct list_head node; @@ -196,12 +214,10 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); -static int __rbd_init_snaps_header(struct rbd_device *rbd_dev); +static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); +static int rbd_dev_snaps_register(struct rbd_device *rbd_dev); + static void rbd_dev_release(struct device *dev); -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count); static void __rbd_remove_snap_dev(struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, @@ -229,6 +245,18 @@ static struct device rbd_root_dev = { .release = rbd_root_dev_release, }; +#ifdef RBD_DEBUG +#define rbd_assert(expr) \ + if (unlikely(!(expr))) { \ + printk(KERN_ERR "\nAssertion failure in %s() " \ + "at line %d:\n\n" \ + "\trbd_assert(%s);\n\n", \ + __func__, __LINE__, #expr); \ + BUG(); \ + } +#else /* !RBD_DEBUG */ +# define rbd_assert(expr) ((void) 0) +#endif /* !RBD_DEBUG */ static struct device *rbd_get_dev(struct rbd_device *rbd_dev) { @@ -246,11 +274,11 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - if ((mode & FMODE_WRITE) && rbd_dev->read_only) + if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only) return -EROFS; rbd_get_dev(rbd_dev); - set_device_ro(bdev, rbd_dev->read_only); + set_device_ro(bdev, rbd_dev->mapping.read_only); return 0; } @@ -274,8 +302,7 @@ static const struct block_device_operations rbd_bd_ops = { * Initialize an rbd client instance. * We own *ceph_opts. */ -static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts, - struct rbd_options *rbd_opts) +static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts) { struct rbd_client *rbdc; int ret = -ENOMEM; @@ -299,8 +326,6 @@ static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts, if (ret < 0) goto out_err; - rbdc->rbd_opts = rbd_opts; - spin_lock(&rbd_client_list_lock); list_add_tail(&rbdc->node, &rbd_client_list); spin_unlock(&rbd_client_list_lock); @@ -322,36 +347,52 @@ out_opt: } /* - * Find a ceph client with specific addr and configuration. + * Find a ceph client with specific addr and configuration. If + * found, bump its reference count. 
*/ -static struct rbd_client *__rbd_client_find(struct ceph_options *ceph_opts) +static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) { struct rbd_client *client_node; + bool found = false; if (ceph_opts->flags & CEPH_OPT_NOSHARE) return NULL; - list_for_each_entry(client_node, &rbd_client_list, node) - if (!ceph_compare_options(ceph_opts, client_node->client)) - return client_node; - return NULL; + spin_lock(&rbd_client_list_lock); + list_for_each_entry(client_node, &rbd_client_list, node) { + if (!ceph_compare_options(ceph_opts, client_node->client)) { + kref_get(&client_node->kref); + found = true; + break; + } + } + spin_unlock(&rbd_client_list_lock); + + return found ? client_node : NULL; } /* * mount options */ enum { - Opt_notify_timeout, Opt_last_int, /* int args above */ Opt_last_string, /* string args above */ + Opt_read_only, + Opt_read_write, + /* Boolean args above */ + Opt_last_bool, }; static match_table_t rbd_opts_tokens = { - {Opt_notify_timeout, "notify_timeout=%d"}, /* int args above */ /* string args above */ + {Opt_read_only, "mapping.read_only"}, + {Opt_read_only, "ro"}, /* Alternate spelling */ + {Opt_read_write, "read_write"}, + {Opt_read_write, "rw"}, /* Alternate spelling */ + /* Boolean args above */ {-1, NULL} }; @@ -376,16 +417,22 @@ static int parse_rbd_opts_token(char *c, void *private) } else if (token > Opt_last_int && token < Opt_last_string) { dout("got string token %d val %s\n", token, argstr[0].from); + } else if (token > Opt_last_string && token < Opt_last_bool) { + dout("got Boolean token %d\n", token); } else { dout("got token %d\n", token); } switch (token) { - case Opt_notify_timeout: - rbd_opts->notify_timeout = intval; + case Opt_read_only: + rbd_opts->read_only = true; + break; + case Opt_read_write: + rbd_opts->read_only = false; break; default: - BUG_ON(token); + rbd_assert(false); + break; } return 0; } @@ -394,48 +441,33 @@ static int parse_rbd_opts_token(char *c, void *private) * Get a ceph client with specific addr and configuration, if one does * not exist create it. 
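[Editor's note] The rbd_opts_tokens table and parse_rbd_opts_token() above lean on the relative order of the Opt_* enumerators: everything before Opt_last_int takes an integer argument, everything between Opt_last_int and Opt_last_string takes a string, and everything between Opt_last_string and Opt_last_bool is a bare boolean. A minimal userspace sketch of that range-based classification, using only the names visible in this hunk (the real parsing goes through libceph's match_table machinery, which is not reproduced here):

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the Opt_* ordering above; the *_last_* markers split the
 * token space into int, string, and boolean ranges. */
enum {
	Opt_last_int,		/* int args above */
	Opt_last_string,	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_last_bool,		/* Boolean args above */
};

static const char *token_kind(int token)
{
	if (token < Opt_last_int)
		return "int";
	if (token > Opt_last_int && token < Opt_last_string)
		return "string";
	if (token > Opt_last_string && token < Opt_last_bool)
		return "bool";
	return "bare";
}

int main(void)
{
	bool read_only = false;			/* RBD_READ_ONLY_DEFAULT */

	/* Boolean tokens carry no argument; they just flip the flag. */
	if (token_kind(Opt_read_only)[0] == 'b')
		read_only = true;		/* as for "ro" on the command line */

	printf("kind=%s read_only=%d\n", token_kind(Opt_read_only), read_only);
	return 0;
}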
*/ -static struct rbd_client *rbd_get_client(const char *mon_addr, - size_t mon_addr_len, - char *options) +static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr, + size_t mon_addr_len, char *options) { - struct rbd_client *rbdc; + struct rbd_options *rbd_opts = &rbd_dev->rbd_opts; struct ceph_options *ceph_opts; - struct rbd_options *rbd_opts; - - rbd_opts = kzalloc(sizeof(*rbd_opts), GFP_KERNEL); - if (!rbd_opts) - return ERR_PTR(-ENOMEM); + struct rbd_client *rbdc; - rbd_opts->notify_timeout = RBD_NOTIFY_TIMEOUT_DEFAULT; + rbd_opts->read_only = RBD_READ_ONLY_DEFAULT; ceph_opts = ceph_parse_options(options, mon_addr, mon_addr + mon_addr_len, parse_rbd_opts_token, rbd_opts); - if (IS_ERR(ceph_opts)) { - kfree(rbd_opts); - return ERR_CAST(ceph_opts); - } + if (IS_ERR(ceph_opts)) + return PTR_ERR(ceph_opts); - spin_lock(&rbd_client_list_lock); - rbdc = __rbd_client_find(ceph_opts); + rbdc = rbd_client_find(ceph_opts); if (rbdc) { /* using an existing client */ - kref_get(&rbdc->kref); - spin_unlock(&rbd_client_list_lock); - ceph_destroy_options(ceph_opts); - kfree(rbd_opts); - - return rbdc; + } else { + rbdc = rbd_client_create(ceph_opts); + if (IS_ERR(rbdc)) + return PTR_ERR(rbdc); } - spin_unlock(&rbd_client_list_lock); - - rbdc = rbd_client_create(ceph_opts, rbd_opts); + rbd_dev->rbd_client = rbdc; - if (IS_ERR(rbdc)) - kfree(rbd_opts); - - return rbdc; + return 0; } /* @@ -453,7 +485,6 @@ static void rbd_client_release(struct kref *kref) spin_unlock(&rbd_client_list_lock); ceph_destroy_client(rbdc->client); - kfree(rbdc->rbd_opts); kfree(rbdc); } @@ -479,10 +510,38 @@ static void rbd_coll_release(struct kref *kref) kfree(coll); } +static bool rbd_image_format_valid(u32 image_format) +{ + return image_format == 1 || image_format == 2; +} + static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) { - return !memcmp(&ondisk->text, - RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)); + size_t size; + u32 snap_count; + + /* The header has to start with the magic rbd header text */ + if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT))) + return false; + + /* + * The size of a snapshot header has to fit in a size_t, and + * that limits the number of snapshots. + */ + snap_count = le32_to_cpu(ondisk->snap_count); + size = SIZE_MAX - sizeof (struct ceph_snap_context); + if (snap_count > size / sizeof (__le64)) + return false; + + /* + * Not only that, but the size of the entire the snapshot + * header must also be representable in a size_t. + */ + size -= snap_count * sizeof (__le64); + if ((u64) size < le64_to_cpu(ondisk->snap_names_len)) + return false; + + return true; } /* @@ -490,179 +549,203 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) * header. 
*/ static int rbd_header_from_disk(struct rbd_image_header *header, - struct rbd_image_header_ondisk *ondisk, - u32 allocated_snaps) + struct rbd_image_header_ondisk *ondisk) { u32 snap_count; + size_t len; + size_t size; + u32 i; - if (!rbd_dev_ondisk_valid(ondisk)) - return -ENXIO; + memset(header, 0, sizeof (*header)); snap_count = le32_to_cpu(ondisk->snap_count); - if (snap_count > (SIZE_MAX - sizeof(struct ceph_snap_context)) - / sizeof (u64)) - return -EINVAL; - header->snapc = kmalloc(sizeof(struct ceph_snap_context) + - snap_count * sizeof(u64), - GFP_KERNEL); - if (!header->snapc) + + len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); + header->object_prefix = kmalloc(len + 1, GFP_KERNEL); + if (!header->object_prefix) return -ENOMEM; + memcpy(header->object_prefix, ondisk->object_prefix, len); + header->object_prefix[len] = '\0'; if (snap_count) { - header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); - header->snap_names = kmalloc(header->snap_names_len, - GFP_KERNEL); + u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); + + /* Save a copy of the snapshot names */ + + if (snap_names_len > (u64) SIZE_MAX) + return -EIO; + header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); if (!header->snap_names) - goto err_snapc; - header->snap_sizes = kmalloc(snap_count * sizeof(u64), - GFP_KERNEL); + goto out_err; + /* + * Note that rbd_dev_v1_header_read() guarantees + * the ondisk buffer we're working with has + * snap_names_len bytes beyond the end of the + * snapshot id array, this memcpy() is safe. + */ + memcpy(header->snap_names, &ondisk->snaps[snap_count], + snap_names_len); + + /* Record each snapshot's size */ + + size = snap_count * sizeof (*header->snap_sizes); + header->snap_sizes = kmalloc(size, GFP_KERNEL); if (!header->snap_sizes) - goto err_names; + goto out_err; + for (i = 0; i < snap_count; i++) + header->snap_sizes[i] = + le64_to_cpu(ondisk->snaps[i].image_size); } else { WARN_ON(ondisk->snap_names_len); - header->snap_names_len = 0; header->snap_names = NULL; header->snap_sizes = NULL; } - header->object_prefix = kmalloc(sizeof (ondisk->block_name) + 1, - GFP_KERNEL); - if (!header->object_prefix) - goto err_sizes; - - memcpy(header->object_prefix, ondisk->block_name, - sizeof(ondisk->block_name)); - header->object_prefix[sizeof (ondisk->block_name)] = '\0'; - - header->image_size = le64_to_cpu(ondisk->image_size); + header->features = 0; /* No features support in v1 images */ header->obj_order = ondisk->options.order; header->crypt_type = ondisk->options.crypt_type; header->comp_type = ondisk->options.comp_type; + /* Allocate and fill in the snapshot context */ + + header->image_size = le64_to_cpu(ondisk->image_size); + size = sizeof (struct ceph_snap_context); + size += snap_count * sizeof (header->snapc->snaps[0]); + header->snapc = kzalloc(size, GFP_KERNEL); + if (!header->snapc) + goto out_err; + atomic_set(&header->snapc->nref, 1); header->snapc->seq = le64_to_cpu(ondisk->snap_seq); header->snapc->num_snaps = snap_count; - header->total_snaps = snap_count; - - if (snap_count && allocated_snaps == snap_count) { - int i; - - for (i = 0; i < snap_count; i++) { - header->snapc->snaps[i] = - le64_to_cpu(ondisk->snaps[i].id); - header->snap_sizes[i] = - le64_to_cpu(ondisk->snaps[i].image_size); - } - - /* copy snapshot names */ - memcpy(header->snap_names, &ondisk->snaps[snap_count], - header->snap_names_len); - } + for (i = 0; i < snap_count; i++) + header->snapc->snaps[i] = + le64_to_cpu(ondisk->snaps[i].id); return 0; 
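[Editor's note] The validation in rbd_dev_ondisk_valid() above exists to keep the later snapshot-context allocation from overflowing size_t: once the fixed part of the structure is reserved, the 64-bit snapshot id array must fit, and whatever room remains must still cover the snapshot-name block. A standalone sketch of the same arithmetic; the fixed-header struct here is a stand-in, not ceph's real layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the fixed part of struct ceph_snap_context (illustrative
 * size only; the real definition lives in the ceph headers). */
struct snap_context_hdr {
	int nref;
	uint64_t seq;
	uint32_t num_snaps;
};

/*
 * Same shape as the check in rbd_dev_ondisk_valid(): after reserving
 * the fixed header, the 64-bit snapshot ids must fit in a size_t, and
 * whatever room remains must still cover the snapshot-name block.
 */
static bool snap_header_fits(uint32_t snap_count, uint64_t snap_names_len)
{
	size_t size = SIZE_MAX - sizeof(struct snap_context_hdr);

	if (snap_count > size / sizeof(uint64_t))
		return false;		/* id array alone would overflow */

	size -= (size_t)snap_count * sizeof(uint64_t);
	return (uint64_t)size >= snap_names_len;
}

int main(void)
{
	printf("%d\n", snap_header_fits(510, 510 * 32));	/* fits */
	printf("%d\n", snap_header_fits(1, UINT64_MAX));	/* name block too big */
	return 0;
}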
-err_sizes: +out_err: kfree(header->snap_sizes); header->snap_sizes = NULL; -err_names: kfree(header->snap_names); header->snap_names = NULL; -err_snapc: - kfree(header->snapc); - header->snapc = NULL; + kfree(header->object_prefix); + header->object_prefix = NULL; return -ENOMEM; } -static int snap_by_name(struct rbd_image_header *header, const char *snap_name, - u64 *seq, u64 *size) +static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name) { - int i; - char *p = header->snap_names; - for (i = 0; i < header->total_snaps; i++) { - if (!strcmp(snap_name, p)) { + struct rbd_snap *snap; - /* Found it. Pass back its id and/or size */ + list_for_each_entry(snap, &rbd_dev->snaps, node) { + if (!strcmp(snap_name, snap->name)) { + rbd_dev->mapping.snap_id = snap->id; + rbd_dev->mapping.size = snap->size; + rbd_dev->mapping.features = snap->features; - if (seq) - *seq = header->snapc->snaps[i]; - if (size) - *size = header->snap_sizes[i]; - return i; + return 0; } - p += strlen(p) + 1; /* Skip ahead to the next name */ } + return -ENOENT; } -static int rbd_header_set_snap(struct rbd_device *rbd_dev, u64 *size) +static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name) { int ret; - down_write(&rbd_dev->header_rwsem); - - if (!memcmp(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, + if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { - rbd_dev->snap_id = CEPH_NOSNAP; - rbd_dev->snap_exists = false; - rbd_dev->read_only = 0; - if (size) - *size = rbd_dev->header.image_size; + rbd_dev->mapping.snap_id = CEPH_NOSNAP; + rbd_dev->mapping.size = rbd_dev->header.image_size; + rbd_dev->mapping.features = rbd_dev->header.features; + rbd_dev->mapping.snap_exists = false; + rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only; + ret = 0; } else { - u64 snap_id = 0; - - ret = snap_by_name(&rbd_dev->header, rbd_dev->snap_name, - &snap_id, size); + ret = snap_by_name(rbd_dev, snap_name); if (ret < 0) goto done; - rbd_dev->snap_id = snap_id; - rbd_dev->snap_exists = true; - rbd_dev->read_only = 1; + rbd_dev->mapping.snap_exists = true; + rbd_dev->mapping.read_only = true; } - - ret = 0; + rbd_dev->mapping.snap_name = snap_name; done: - up_write(&rbd_dev->header_rwsem); return ret; } static void rbd_header_free(struct rbd_image_header *header) { kfree(header->object_prefix); + header->object_prefix = NULL; kfree(header->snap_sizes); + header->snap_sizes = NULL; kfree(header->snap_names); + header->snap_names = NULL; ceph_put_snap_context(header->snapc); + header->snapc = NULL; } -/* - * get the actual striped segment name, offset and length - */ -static u64 rbd_get_segment(struct rbd_image_header *header, - const char *object_prefix, - u64 ofs, u64 len, - char *seg_name, u64 *segofs) +static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) +{ + char *name; + u64 segment; + int ret; + + name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + if (!name) + return NULL; + segment = offset >> rbd_dev->header.obj_order; + ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx", + rbd_dev->header.object_prefix, segment); + if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) { + pr_err("error formatting segment name for #%llu (%d)\n", + segment, ret); + kfree(name); + name = NULL; + } + + return name; +} + +static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) { - u64 seg = ofs >> header->obj_order; + u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; - if (seg_name) - snprintf(seg_name, RBD_MAX_SEG_NAME_LEN, - "%s.%012llx", object_prefix, 
seg); + return offset & (segment_size - 1); +} + +static u64 rbd_segment_length(struct rbd_device *rbd_dev, + u64 offset, u64 length) +{ + u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; - ofs = ofs & ((1 << header->obj_order) - 1); - len = min_t(u64, len, (1 << header->obj_order) - ofs); + offset &= segment_size - 1; - if (segofs) - *segofs = ofs; + rbd_assert(length <= U64_MAX - offset); + if (offset + length > segment_size) + length = segment_size - offset; - return len; + return length; } static int rbd_get_num_segments(struct rbd_image_header *header, u64 ofs, u64 len) { - u64 start_seg = ofs >> header->obj_order; - u64 end_seg = (ofs + len - 1) >> header->obj_order; + u64 start_seg; + u64 end_seg; + + if (!len) + return 0; + if (len - 1 > U64_MAX - ofs) + return -ERANGE; + + start_seg = ofs >> header->obj_order; + end_seg = (ofs + len - 1) >> header->obj_order; + return end_seg - start_seg + 1; } @@ -724,7 +807,9 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, struct bio_pair **bp, int len, gfp_t gfpmask) { - struct bio *tmp, *old_chain = *old, *new_chain = NULL, *tail = NULL; + struct bio *old_chain = *old; + struct bio *new_chain = NULL; + struct bio *tail; int total = 0; if (*bp) { @@ -733,9 +818,12 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, } while (old_chain && (total < len)) { + struct bio *tmp; + tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs); if (!tmp) goto err_out; + gfpmask &= ~__GFP_WAIT; /* can't wait after the first */ if (total + old_chain->bi_size > len) { struct bio_pair *bp; @@ -763,24 +851,18 @@ static struct bio *bio_chain_clone(struct bio **old, struct bio **next, } tmp->bi_bdev = NULL; - gfpmask &= ~__GFP_WAIT; tmp->bi_next = NULL; - - if (!new_chain) { - new_chain = tail = tmp; - } else { + if (new_chain) tail->bi_next = tmp; - tail = tmp; - } + else + new_chain = tmp; + tail = tmp; old_chain = old_chain->bi_next; total += tmp->bi_size; } - BUG_ON(total < len); - - if (tail) - tail->bi_next = NULL; + rbd_assert(total == len); *old = old_chain; @@ -938,8 +1020,9 @@ static int rbd_do_request(struct request *rq, layout->fl_stripe_count = cpu_to_le32(1); layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER); layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id); - ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, - req, ops); + ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno, + req, ops); + rbd_assert(ret == 0); ceph_osdc_build_request(req, ofs, &len, ops, @@ -1030,8 +1113,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, int flags, struct ceph_osd_req_op *ops, const char *object_name, - u64 ofs, u64 len, - char *buf, + u64 ofs, u64 inbound_size, + char *inbound, struct ceph_osd_request **linger_req, u64 *ver) { @@ -1039,15 +1122,15 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, struct page **pages; int num_pages; - BUG_ON(ops == NULL); + rbd_assert(ops != NULL); - num_pages = calc_pages_for(ofs , len); + num_pages = calc_pages_for(ofs, inbound_size); pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL); if (IS_ERR(pages)) return PTR_ERR(pages); ret = rbd_do_request(NULL, rbd_dev, snapc, snapid, - object_name, ofs, len, NULL, + object_name, ofs, inbound_size, NULL, pages, num_pages, flags, ops, @@ -1057,8 +1140,8 @@ static int rbd_req_sync_op(struct rbd_device *rbd_dev, if (ret < 0) goto done; - if ((flags & CEPH_OSD_FLAG_READ) && buf) - ret = ceph_copy_from_page_vector(pages, buf, ofs, ret); + if ((flags & CEPH_OSD_FLAG_READ) && inbound) + 
ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret); done: ceph_release_page_vector(pages, num_pages); @@ -1085,14 +1168,11 @@ static int rbd_do_op(struct request *rq, struct ceph_osd_req_op *ops; u32 payload_len; - seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); + seg_name = rbd_segment_name(rbd_dev, ofs); if (!seg_name) return -ENOMEM; - - seg_len = rbd_get_segment(&rbd_dev->header, - rbd_dev->header.object_prefix, - ofs, len, - seg_name, &seg_ofs); + seg_len = rbd_segment_length(rbd_dev, ofs, len); + seg_ofs = rbd_segment_offset(rbd_dev, ofs); payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0); @@ -1104,7 +1184,7 @@ static int rbd_do_op(struct request *rq, /* we've taken care of segment sizes earlier when we cloned the bios. We should never have a segment truncated at this point */ - BUG_ON(seg_len < len); + rbd_assert(seg_len == len); ret = rbd_do_request(rq, rbd_dev, snapc, snapid, seg_name, seg_ofs, seg_len, @@ -1306,89 +1386,36 @@ static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev) return ret; } -struct rbd_notify_info { - struct rbd_device *rbd_dev; -}; - -static void rbd_notify_cb(u64 ver, u64 notify_id, u8 opcode, void *data) -{ - struct rbd_device *rbd_dev = (struct rbd_device *)data; - if (!rbd_dev) - return; - - dout("rbd_notify_cb %s notify_id=%llu opcode=%u\n", - rbd_dev->header_name, (unsigned long long) notify_id, - (unsigned int) opcode); -} - /* - * Request sync osd notify - */ -static int rbd_req_sync_notify(struct rbd_device *rbd_dev) -{ - struct ceph_osd_req_op *ops; - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; - struct ceph_osd_event *event; - struct rbd_notify_info info; - int payload_len = sizeof(u32) + sizeof(u32); - int ret; - - ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY, payload_len); - if (!ops) - return -ENOMEM; - - info.rbd_dev = rbd_dev; - - ret = ceph_osdc_create_event(osdc, rbd_notify_cb, 1, - (void *)&info, &event); - if (ret < 0) - goto fail; - - ops[0].watch.ver = 1; - ops[0].watch.flag = 1; - ops[0].watch.cookie = event->cookie; - ops[0].watch.prot_ver = RADOS_NOTIFY_VER; - ops[0].watch.timeout = 12; - - ret = rbd_req_sync_op(rbd_dev, NULL, - CEPH_NOSNAP, - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - ops, - rbd_dev->header_name, - 0, 0, NULL, NULL, NULL); - if (ret < 0) - goto fail_event; - - ret = ceph_osdc_wait_event(event, CEPH_OSD_TIMEOUT_DEFAULT); - dout("ceph_osdc_wait_event returned %d\n", ret); - rbd_destroy_ops(ops); - return 0; - -fail_event: - ceph_osdc_cancel_event(event); -fail: - rbd_destroy_ops(ops); - return ret; -} - -/* - * Request sync osd read + * Synchronous osd object method call */ static int rbd_req_sync_exec(struct rbd_device *rbd_dev, const char *object_name, const char *class_name, const char *method_name, - const char *data, - int len, + const char *outbound, + size_t outbound_size, + char *inbound, + size_t inbound_size, + int flags, u64 *ver) { struct ceph_osd_req_op *ops; int class_name_len = strlen(class_name); int method_name_len = strlen(method_name); + int payload_size; int ret; - ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, - class_name_len + method_name_len + len); + /* + * Any input parameters required by the method we're calling + * will be sent along with the class and method names as + * part of the message payload. That data and its size are + * supplied via the indata and indata_len fields (named from + * the perspective of the server side) in the OSD request + * operation. 
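[Editor's note] The comment above describes where a class method's input parameters travel: in the same op payload as the class and method names, which is why payload_size below is a plain sum of the three pieces. A small sketch of that sizing only; the "rbd"/"get_size" names and the 8-byte little-endian snapshot id are taken from calls made later in this patch, and nothing about the wire encoding itself is implied:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the payload_size computation used for CEPH_OSD_OP_CALL:
 * class name + method name + outbound (indata) bytes. */
static size_t call_payload_size(const char *class_name,
				const char *method_name,
				size_t outbound_size)
{
	return strlen(class_name) + strlen(method_name) + outbound_size;
}

int main(void)
{
	/* e.g. calling rbd.get_size with a __le64 snapshot id as input */
	printf("payload = %zu bytes\n",
	       call_payload_size("rbd", "get_size", sizeof(uint64_t)));
	return 0;
}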
+ */ + payload_size = class_name_len + method_name_len + outbound_size; + ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size); if (!ops) return -ENOMEM; @@ -1397,14 +1424,14 @@ static int rbd_req_sync_exec(struct rbd_device *rbd_dev, ops[0].cls.method_name = method_name; ops[0].cls.method_len = (__u8) method_name_len; ops[0].cls.argc = 0; - ops[0].cls.indata = data; - ops[0].cls.indata_len = len; + ops[0].cls.indata = outbound; + ops[0].cls.indata_len = outbound_size; ret = rbd_req_sync_op(rbd_dev, NULL, CEPH_NOSNAP, - CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - ops, - object_name, 0, 0, NULL, NULL, ver); + flags, ops, + object_name, 0, inbound_size, inbound, + NULL, ver); rbd_destroy_ops(ops); @@ -1446,10 +1473,6 @@ static void rbd_rq_fn(struct request_queue *q) struct rbd_req_coll *coll; struct ceph_snap_context *snapc; - /* peek at request from block layer */ - if (!rq) - break; - dout("fetched request\n"); /* filter out block requests we don't understand */ @@ -1464,7 +1487,7 @@ static void rbd_rq_fn(struct request_queue *q) size = blk_rq_bytes(rq); ofs = blk_rq_pos(rq) * SECTOR_SIZE; rq_bio = rq->bio; - if (do_write && rbd_dev->read_only) { + if (do_write && rbd_dev->mapping.read_only) { __blk_end_request_all(rq, -EROFS); continue; } @@ -1473,7 +1496,8 @@ static void rbd_rq_fn(struct request_queue *q) down_read(&rbd_dev->header_rwsem); - if (rbd_dev->snap_id != CEPH_NOSNAP && !rbd_dev->snap_exists) { + if (rbd_dev->mapping.snap_id != CEPH_NOSNAP && + !rbd_dev->mapping.snap_exists) { up_read(&rbd_dev->header_rwsem); dout("request for non-existent snapshot"); spin_lock_irq(q->queue_lock); @@ -1490,6 +1514,12 @@ static void rbd_rq_fn(struct request_queue *q) size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE); num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size); + if (num_segs <= 0) { + spin_lock_irq(q->queue_lock); + __blk_end_request_all(rq, num_segs); + ceph_put_snap_context(snapc); + continue; + } coll = rbd_alloc_coll(num_segs); if (!coll) { spin_lock_irq(q->queue_lock); @@ -1501,10 +1531,7 @@ static void rbd_rq_fn(struct request_queue *q) do { /* a bio clone to be passed down to OSD req */ dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt); - op_size = rbd_get_segment(&rbd_dev->header, - rbd_dev->header.object_prefix, - ofs, size, - NULL, NULL); + op_size = rbd_segment_length(rbd_dev, ofs, size); kref_get(&coll->kref); bio = bio_chain_clone(&rq_bio, &next_bio, &bp, op_size, GFP_ATOMIC); @@ -1524,7 +1551,7 @@ static void rbd_rq_fn(struct request_queue *q) coll, cur_seg); else rbd_req_read(rq, rbd_dev, - rbd_dev->snap_id, + rbd_dev->mapping.snap_id, ofs, op_size, bio, coll, cur_seg); @@ -1580,8 +1607,6 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) if (!disk) return; - rbd_header_free(&rbd_dev->header); - if (disk->flags & GENHD_FL_UP) del_gendisk(disk); if (disk->queue) @@ -1590,105 +1615,96 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) } /* - * reload the ondisk the header + * Read the complete header for the given rbd device. + * + * Returns a pointer to a dynamically-allocated buffer containing + * the complete and validated header. Caller can pass the address + * of a variable that will be filled in with the version of the + * header object at the time it was read. + * + * Returns a pointer-coded errno if a failure occurs. 
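[Editor's note] The header reader introduced in this hunk has to cope with the object growing between reads: it reads once to learn the snapshot count and name-block size, allocates a buffer that large, re-reads, and loops if the count changed in the meantime. A compact sketch of that retry shape, with a hypothetical fetch() callback standing in for rbd_req_sync_read() and parameterized record sizes rather than the driver's real on-disk layout:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct header_sizes {
	uint32_t snap_count;
	uint64_t names_size;
};

/* Hypothetical fetch callback: reads up to 'size' bytes and reports
 * the snapshot count and name-block size it saw. */
typedef int (*fetch_fn)(void *buf, size_t size, struct header_sizes *seen);

/*
 * Read-until-stable, shaped like the do/while below: allocate for the
 * counts seen last time, re-read, and loop whenever the snapshot count
 * moved under us.
 */
static void *read_full_header(fetch_fn fetch, size_t fixed_size,
			      size_t per_snap_size)
{
	struct header_sizes seen = { 0, 0 };
	void *buf = NULL;
	uint32_t want_count;

	do {
		size_t size = fixed_size +
			      seen.snap_count * per_snap_size +
			      (size_t)seen.names_size;

		free(buf);
		buf = malloc(size);
		if (!buf)
			return NULL;

		want_count = seen.snap_count;
		if (fetch(buf, size, &seen) < 0) {
			free(buf);
			return NULL;
		}
	} while (seen.snap_count != want_count);

	return buf;
}

/* Pretend the image has two snapshots with 16 bytes of names in all. */
static int fake_fetch(void *buf, size_t size, struct header_sizes *seen)
{
	(void)buf; (void)size;
	seen->snap_count = 2;
	seen->names_size = 16;
	return 0;
}

int main(void)
{
	void *header = read_full_header(fake_fetch, 64, 8);

	printf("%s\n", header ? "read settled after one re-read" : "failed");
	free(header);
	return 0;
}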
*/ -static int rbd_read_header(struct rbd_device *rbd_dev, - struct rbd_image_header *header) +static struct rbd_image_header_ondisk * +rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) { - ssize_t rc; - struct rbd_image_header_ondisk *dh; + struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; - u64 ver; - size_t len; + u64 names_size = 0; + u32 want_count; + int ret; /* - * First reads the fixed-size header to determine the number - * of snapshots, then re-reads it, along with all snapshot - * records as well as their stored names. + * The complete header will include an array of its 64-bit + * snapshot ids, followed by the names of those snapshots as + * a contiguous block of NUL-terminated strings. Note that + * the number of snapshots could change by the time we read + * it in, in which case we re-read it. */ - len = sizeof (*dh); - while (1) { - dh = kmalloc(len, GFP_KERNEL); - if (!dh) - return -ENOMEM; - - rc = rbd_req_sync_read(rbd_dev, - CEPH_NOSNAP, + do { + size_t size; + + kfree(ondisk); + + size = sizeof (*ondisk); + size += snap_count * sizeof (struct rbd_image_snap_ondisk); + size += names_size; + ondisk = kmalloc(size, GFP_KERNEL); + if (!ondisk) + return ERR_PTR(-ENOMEM); + + ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP, rbd_dev->header_name, - 0, len, - (char *)dh, &ver); - if (rc < 0) - goto out_dh; - - rc = rbd_header_from_disk(header, dh, snap_count); - if (rc < 0) { - if (rc == -ENXIO) - pr_warning("unrecognized header format" - " for image %s\n", - rbd_dev->image_name); - goto out_dh; + 0, size, + (char *) ondisk, version); + + if (ret < 0) + goto out_err; + if (WARN_ON((size_t) ret < size)) { + ret = -ENXIO; + pr_warning("short header read for image %s" + " (want %zd got %d)\n", + rbd_dev->image_name, size, ret); + goto out_err; + } + if (!rbd_dev_ondisk_valid(ondisk)) { + ret = -ENXIO; + pr_warning("invalid header for image %s\n", + rbd_dev->image_name); + goto out_err; } - if (snap_count == header->total_snaps) - break; + names_size = le64_to_cpu(ondisk->snap_names_len); + want_count = snap_count; + snap_count = le32_to_cpu(ondisk->snap_count); + } while (snap_count != want_count); - snap_count = header->total_snaps; - len = sizeof (*dh) + - snap_count * sizeof(struct rbd_image_snap_ondisk) + - header->snap_names_len; + return ondisk; - rbd_header_free(header); - kfree(dh); - } - header->obj_version = ver; +out_err: + kfree(ondisk); -out_dh: - kfree(dh); - return rc; + return ERR_PTR(ret); } /* - * create a snapshot + * reload the ondisk the header */ -static int rbd_header_add_snap(struct rbd_device *rbd_dev, - const char *snap_name, - gfp_t gfp_flags) +static int rbd_read_header(struct rbd_device *rbd_dev, + struct rbd_image_header *header) { - int name_len = strlen(snap_name); - u64 new_snapid; + struct rbd_image_header_ondisk *ondisk; + u64 ver = 0; int ret; - void *data, *p, *e; - struct ceph_mon_client *monc; - - /* we should create a snapshot only if we're pointing at the head */ - if (rbd_dev->snap_id != CEPH_NOSNAP) - return -EINVAL; - monc = &rbd_dev->rbd_client->client->monc; - ret = ceph_monc_create_snapid(monc, rbd_dev->pool_id, &new_snapid); - dout("created snapid=%llu\n", (unsigned long long) new_snapid); - if (ret < 0) - return ret; - - data = kmalloc(name_len + 16, gfp_flags); - if (!data) - return -ENOMEM; + ondisk = rbd_dev_v1_header_read(rbd_dev, &ver); + if (IS_ERR(ondisk)) + return PTR_ERR(ondisk); + ret = rbd_header_from_disk(header, ondisk); + if (ret >= 0) + header->obj_version = ver; + kfree(ondisk); - p = 
data; - e = data + name_len + 16; - - ceph_encode_string_safe(&p, e, snap_name, name_len, bad); - ceph_encode_64_safe(&p, e, new_snapid, bad); - - ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, - "rbd", "snap_add", - data, p - data, NULL); - - kfree(data); - - return ret < 0 ? ret : 0; -bad: - return -ERANGE; + return ret; } static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev) @@ -1715,11 +1731,15 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver) down_write(&rbd_dev->header_rwsem); /* resized? */ - if (rbd_dev->snap_id == CEPH_NOSNAP) { + if (rbd_dev->mapping.snap_id == CEPH_NOSNAP) { sector_t size = (sector_t) h.image_size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long) size); - set_capacity(rbd_dev->disk, size); + if (size != (sector_t) rbd_dev->mapping.size) { + dout("setting size to %llu sectors", + (unsigned long long) size); + rbd_dev->mapping.size = (u64) size; + set_capacity(rbd_dev->disk, size); + } } /* rbd_dev->header.object_prefix shouldn't change */ @@ -1732,16 +1752,16 @@ static int __rbd_refresh_header(struct rbd_device *rbd_dev, u64 *hver) *hver = h.obj_version; rbd_dev->header.obj_version = h.obj_version; rbd_dev->header.image_size = h.image_size; - rbd_dev->header.total_snaps = h.total_snaps; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; - rbd_dev->header.snap_names_len = h.snap_names_len; rbd_dev->header.snap_sizes = h.snap_sizes; /* Free the extra copy of the object prefix */ WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix)); kfree(h.object_prefix); - ret = __rbd_init_snaps_header(rbd_dev); + ret = rbd_dev_snaps_update(rbd_dev); + if (!ret) + ret = rbd_dev_snaps_register(rbd_dev); up_write(&rbd_dev->header_rwsem); @@ -1763,29 +1783,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; - int rc; u64 segment_size; - u64 total_size = 0; - - /* contact OSD, request size info about the object being mapped */ - rc = rbd_read_header(rbd_dev, &rbd_dev->header); - if (rc) - return rc; - - /* no need to lock here, as rbd_dev is not registered yet */ - rc = __rbd_init_snaps_header(rbd_dev); - if (rc) - return rc; - - rc = rbd_header_set_snap(rbd_dev, &total_size); - if (rc) - return rc; /* create gendisk info */ - rc = -ENOMEM; disk = alloc_disk(RBD_MINORS_PER_MAJOR); if (!disk) - goto out; + return -ENOMEM; snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d", rbd_dev->dev_id); @@ -1795,7 +1798,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->private_data = rbd_dev; /* init rq */ - rc = -ENOMEM; q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock); if (!q) goto out_disk; @@ -1816,20 +1818,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) q->queuedata = rbd_dev; rbd_dev->disk = disk; - rbd_dev->q = q; - /* finally, announce the disk to the world */ - set_capacity(disk, total_size / SECTOR_SIZE); - add_disk(disk); + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); - pr_info("%s: added with size 0x%llx\n", - disk->disk_name, (unsigned long long)total_size); return 0; - out_disk: put_disk(disk); -out: - return rc; + + return -ENOMEM; } /* @@ -1854,6 +1850,19 @@ static ssize_t rbd_size_show(struct device *dev, return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE); } +/* + * Note this shows the features for whatever's mapped, which is not + * necessarily the base image. 
+ */ +static ssize_t rbd_features_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + + return sprintf(buf, "0x%016llx\n", + (unsigned long long) rbd_dev->mapping.features); +} + static ssize_t rbd_major_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1895,13 +1904,25 @@ static ssize_t rbd_name_show(struct device *dev, return sprintf(buf, "%s\n", rbd_dev->image_name); } +static ssize_t rbd_image_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); + + return sprintf(buf, "%s\n", rbd_dev->image_id); +} + +/* + * Shows the name of the currently-mapped snapshot (or + * RBD_SNAP_HEAD_NAME for the base image). + */ static ssize_t rbd_snap_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - return sprintf(buf, "%s\n", rbd_dev->snap_name); + return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name); } static ssize_t rbd_image_refresh(struct device *dev, @@ -1918,25 +1939,27 @@ static ssize_t rbd_image_refresh(struct device *dev, } static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL); +static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL); static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL); static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL); static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL); static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL); static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL); +static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL); static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh); static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL); -static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add); static struct attribute *rbd_attrs[] = { &dev_attr_size.attr, + &dev_attr_features.attr, &dev_attr_major.attr, &dev_attr_client_id.attr, &dev_attr_pool.attr, &dev_attr_pool_id.attr, &dev_attr_name.attr, + &dev_attr_image_id.attr, &dev_attr_current_snap.attr, &dev_attr_refresh.attr, - &dev_attr_create_snap.attr, NULL }; @@ -1982,12 +2005,24 @@ static ssize_t rbd_snap_id_show(struct device *dev, return sprintf(buf, "%llu\n", (unsigned long long)snap->id); } +static ssize_t rbd_snap_features_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); + + return sprintf(buf, "0x%016llx\n", + (unsigned long long) snap->features); +} + static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); +static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL); static struct attribute *rbd_snap_attrs[] = { &dev_attr_snap_size.attr, &dev_attr_snap_id.attr, + &dev_attr_snap_features.attr, NULL, }; @@ -2012,10 +2047,21 @@ static struct device_type rbd_snap_device_type = { .release = rbd_snap_dev_release, }; +static bool rbd_snap_registered(struct rbd_snap *snap) +{ + bool ret = snap->dev.type == &rbd_snap_device_type; + bool reg = device_is_registered(&snap->dev); + + rbd_assert(!ret ^ reg); + + return ret; +} + static void __rbd_remove_snap_dev(struct rbd_snap *snap) { list_del(&snap->node); - device_unregister(&snap->dev); + if (device_is_registered(&snap->dev)) + device_unregister(&snap->dev); } static int rbd_register_snap_dev(struct rbd_snap *snap, @@ -2028,13 +2074,17 @@ static int rbd_register_snap_dev(struct rbd_snap 
*snap, dev->parent = parent; dev->release = rbd_snap_dev_release; dev_set_name(dev, "snap_%s", snap->name); + dout("%s: registering device for snapshot %s\n", __func__, snap->name); + ret = device_register(dev); return ret; } static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, - int i, const char *name) + const char *snap_name, + u64 snap_id, u64 snap_size, + u64 snap_features) { struct rbd_snap *snap; int ret; @@ -2044,17 +2094,13 @@ static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, return ERR_PTR(-ENOMEM); ret = -ENOMEM; - snap->name = kstrdup(name, GFP_KERNEL); + snap->name = kstrdup(snap_name, GFP_KERNEL); if (!snap->name) goto err; - snap->size = rbd_dev->header.snap_sizes[i]; - snap->id = rbd_dev->header.snapc->snaps[i]; - if (device_is_registered(&rbd_dev->dev)) { - ret = rbd_register_snap_dev(snap, &rbd_dev->dev); - if (ret < 0) - goto err; - } + snap->id = snap_id; + snap->size = snap_size; + snap->features = snap_features; return snap; @@ -2065,128 +2111,439 @@ err: return ERR_PTR(ret); } +static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, + u64 *snap_size, u64 *snap_features) +{ + char *snap_name; + + rbd_assert(which < rbd_dev->header.snapc->num_snaps); + + *snap_size = rbd_dev->header.snap_sizes[which]; + *snap_features = 0; /* No features for v1 */ + + /* Skip over names until we find the one we are looking for */ + + snap_name = rbd_dev->header.snap_names; + while (which--) + snap_name += strlen(snap_name) + 1; + + return snap_name; +} + /* - * search for the previous snap in a null delimited string list + * Get the size and object order for an image snapshot, or if + * snap_id is CEPH_NOSNAP, gets this information for the base + * image. */ -const char *rbd_prev_snap_name(const char *name, const char *start) +static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, + u8 *order, u64 *snap_size) { - if (name < start + 2) - return NULL; + __le64 snapid = cpu_to_le64(snap_id); + int ret; + struct { + u8 order; + __le64 size; + } __attribute__ ((packed)) size_buf = { 0 }; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_size", + (char *) &snapid, sizeof (snapid), + (char *) &size_buf, sizeof (size_buf), + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + return ret; + + *order = size_buf.order; + *snap_size = le64_to_cpu(size_buf.size); + + dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", + (unsigned long long) snap_id, (unsigned int) *order, + (unsigned long long) *snap_size); + + return 0; +} + +static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev) +{ + return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP, + &rbd_dev->header.obj_order, + &rbd_dev->header.image_size); +} + +static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) +{ + void *reply_buf; + int ret; + void *p; + + reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL); + if (!reply_buf) + return -ENOMEM; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_object_prefix", + NULL, 0, + reply_buf, RBD_OBJ_PREFIX_LEN_MAX, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; + + p = reply_buf; + rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, + p + RBD_OBJ_PREFIX_LEN_MAX, + NULL, GFP_NOIO); + + if (IS_ERR(rbd_dev->header.object_prefix)) { + ret = PTR_ERR(rbd_dev->header.object_prefix); + rbd_dev->header.object_prefix = NULL; + } else { + 
dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); + } - name -= 2; - while (*name) { - if (name == start) - return start; - name--; +out: + kfree(reply_buf); + + return ret; +} + +static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, + u64 *snap_features) +{ + __le64 snapid = cpu_to_le64(snap_id); + struct { + __le64 features; + __le64 incompat; + } features_buf = { 0 }; + int ret; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_features", + (char *) &snapid, sizeof (snapid), + (char *) &features_buf, sizeof (features_buf), + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + return ret; + *snap_features = le64_to_cpu(features_buf.features); + + dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", + (unsigned long long) snap_id, + (unsigned long long) *snap_features, + (unsigned long long) le64_to_cpu(features_buf.incompat)); + + return 0; +} + +static int rbd_dev_v2_features(struct rbd_device *rbd_dev) +{ + return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP, + &rbd_dev->header.features); +} + +static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) +{ + size_t size; + int ret; + void *reply_buf; + void *p; + void *end; + u64 seq; + u32 snap_count; + struct ceph_snap_context *snapc; + u32 i; + + /* + * We'll need room for the seq value (maximum snapshot id), + * snapshot count, and array of that many snapshot ids. + * For now we have a fixed upper limit on the number we're + * prepared to receive. + */ + size = sizeof (__le64) + sizeof (__le32) + + RBD_MAX_SNAP_COUNT * sizeof (__le64); + reply_buf = kzalloc(size, GFP_KERNEL); + if (!reply_buf) + return -ENOMEM; + + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_snapcontext", + NULL, 0, + reply_buf, size, + CEPH_OSD_FLAG_READ, ver); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; + + ret = -ERANGE; + p = reply_buf; + end = (char *) reply_buf + size; + ceph_decode_64_safe(&p, end, seq, out); + ceph_decode_32_safe(&p, end, snap_count, out); + + /* + * Make sure the reported number of snapshot ids wouldn't go + * beyond the end of our buffer. But before checking that, + * make sure the computed size of the snapshot context we + * allocate is representable in a size_t. + */ + if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context)) + / sizeof (u64)) { + ret = -EINVAL; + goto out; } - return name + 1; + if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) + goto out; + + size = sizeof (struct ceph_snap_context) + + snap_count * sizeof (snapc->snaps[0]); + snapc = kmalloc(size, GFP_KERNEL); + if (!snapc) { + ret = -ENOMEM; + goto out; + } + + atomic_set(&snapc->nref, 1); + snapc->seq = seq; + snapc->num_snaps = snap_count; + for (i = 0; i < snap_count; i++) + snapc->snaps[i] = ceph_decode_64(&p); + + rbd_dev->header.snapc = snapc; + + dout(" snap context seq = %llu, snap_count = %u\n", + (unsigned long long) seq, (unsigned int) snap_count); + +out: + kfree(reply_buf); + + return 0; } -/* - * compare the old list of snapshots that we have to what's in the header - * and update it accordingly. Note that the header holds the snapshots - * in a reverse order (from newest to oldest) and we need to go from - * older to new so that we don't get a duplicate snap name when - * doing the process (e.g., removed snapshot and recreated a new - * one with the same name. 
- */ -static int __rbd_init_snaps_header(struct rbd_device *rbd_dev) +static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) { - const char *name, *first_name; - int i = rbd_dev->header.total_snaps; - struct rbd_snap *snap, *old_snap = NULL; - struct list_head *p, *n; + size_t size; + void *reply_buf; + __le64 snap_id; + int ret; + void *p; + void *end; + size_t snap_name_len; + char *snap_name; + + size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN; + reply_buf = kmalloc(size, GFP_KERNEL); + if (!reply_buf) + return ERR_PTR(-ENOMEM); - first_name = rbd_dev->header.snap_names; - name = first_name + rbd_dev->header.snap_names_len; + snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]); + ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name, + "rbd", "get_snapshot_name", + (char *) &snap_id, sizeof (snap_id), + reply_buf, size, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; - list_for_each_prev_safe(p, n, &rbd_dev->snaps) { - u64 cur_id; + p = reply_buf; + end = (char *) reply_buf + size; + snap_name_len = 0; + snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len, + GFP_KERNEL); + if (IS_ERR(snap_name)) { + ret = PTR_ERR(snap_name); + goto out; + } else { + dout(" snap_id 0x%016llx snap_name = %s\n", + (unsigned long long) le64_to_cpu(snap_id), snap_name); + } + kfree(reply_buf); - old_snap = list_entry(p, struct rbd_snap, node); + return snap_name; +out: + kfree(reply_buf); - if (i) - cur_id = rbd_dev->header.snapc->snaps[i - 1]; + return ERR_PTR(ret); +} - if (!i || old_snap->id < cur_id) { - /* - * old_snap->id was skipped, thus was - * removed. If this rbd_dev is mapped to - * the removed snapshot, record that it no - * longer exists, to prevent further I/O. - */ - if (rbd_dev->snap_id == old_snap->id) - rbd_dev->snap_exists = false; - __rbd_remove_snap_dev(old_snap); - continue; - } - if (old_snap->id == cur_id) { - /* we have this snapshot already */ - i--; - name = rbd_prev_snap_name(name, first_name); +static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, + u64 *snap_size, u64 *snap_features) +{ + __le64 snap_id; + u8 order; + int ret; + + snap_id = rbd_dev->header.snapc->snaps[which]; + ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size); + if (ret) + return ERR_PTR(ret); + ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features); + if (ret) + return ERR_PTR(ret); + + return rbd_dev_v2_snap_name(rbd_dev, which); +} + +static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, + u64 *snap_size, u64 *snap_features) +{ + if (rbd_dev->image_format == 1) + return rbd_dev_v1_snap_info(rbd_dev, which, + snap_size, snap_features); + if (rbd_dev->image_format == 2) + return rbd_dev_v2_snap_info(rbd_dev, which, + snap_size, snap_features); + return ERR_PTR(-EINVAL); +} + +/* + * Scan the rbd device's current snapshot list and compare it to the + * newly-received snapshot context. Remove any existing snapshots + * not present in the new snapshot context. Add a new snapshot for + * any snaphots in the snapshot context not in the current list. + * And verify there are no changes to snapshots we already know + * about. + * + * Assumes the snapshots in the snapshot context are sorted by + * snapshot id, highest id first. (Snapshots in the rbd_dev's list + * are also maintained in that order.) 
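[Editor's note] The update routine that follows is essentially a merge of two sequences sorted highest id first: the device's current snapshot list and the ids in the newly-read snapshot context. Ids only in the old list are removed, ids only in the new context are inserted, and ids present in both are sanity-checked. A simplified sketch of that walk over plain arrays, with none of the device or sysfs handling:

#include <stdint.h>
#include <stdio.h>

/*
 * Both sequences are sorted highest id first, exactly the ordering the
 * comment above relies on.  Ids only in the current list were deleted
 * on the server; ids only in the new context are snapshots we have not
 * seen yet; equal ids are already known.
 */
static void diff_snap_ids(const uint64_t *old_ids, int old_n,
			  const uint64_t *new_ids, int new_n)
{
	int i = 0, j = 0;

	while (i < old_n || j < new_n) {
		if (j >= new_n || (i < old_n && old_ids[i] > new_ids[j])) {
			printf("remove %llu\n", (unsigned long long)old_ids[i]);
			i++;
		} else if (i >= old_n || old_ids[i] < new_ids[j]) {
			printf("add    %llu\n", (unsigned long long)new_ids[j]);
			j++;
		} else {			/* equal: already present */
			i++;
			j++;
		}
	}
}

int main(void)
{
	const uint64_t current[] = { 9, 7, 4 };		/* snap 7 was deleted... */
	const uint64_t context[] = { 12, 9, 4 };	/* ...and snap 12 created */

	diff_snap_ids(current, 3, context, 3);
	return 0;
}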
+ */ +static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) +{ + struct ceph_snap_context *snapc = rbd_dev->header.snapc; + const u32 snap_count = snapc->num_snaps; + struct list_head *head = &rbd_dev->snaps; + struct list_head *links = head->next; + u32 index = 0; + + dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count); + while (index < snap_count || links != head) { + u64 snap_id; + struct rbd_snap *snap; + char *snap_name; + u64 snap_size = 0; + u64 snap_features = 0; + + snap_id = index < snap_count ? snapc->snaps[index] + : CEPH_NOSNAP; + snap = links != head ? list_entry(links, struct rbd_snap, node) + : NULL; + rbd_assert(!snap || snap->id != CEPH_NOSNAP); + + if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) { + struct list_head *next = links->next; + + /* Existing snapshot not in the new snap context */ + + if (rbd_dev->mapping.snap_id == snap->id) + rbd_dev->mapping.snap_exists = false; + __rbd_remove_snap_dev(snap); + dout("%ssnap id %llu has been removed\n", + rbd_dev->mapping.snap_id == snap->id ? + "mapped " : "", + (unsigned long long) snap->id); + + /* Done with this list entry; advance */ + + links = next; continue; } - for (; i > 0; - i--, name = rbd_prev_snap_name(name, first_name)) { - if (!name) { - WARN_ON(1); - return -EINVAL; + + snap_name = rbd_dev_snap_info(rbd_dev, index, + &snap_size, &snap_features); + if (IS_ERR(snap_name)) + return PTR_ERR(snap_name); + + dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count, + (unsigned long long) snap_id); + if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) { + struct rbd_snap *new_snap; + + /* We haven't seen this snapshot before */ + + new_snap = __rbd_add_snap_dev(rbd_dev, snap_name, + snap_id, snap_size, snap_features); + if (IS_ERR(new_snap)) { + int err = PTR_ERR(new_snap); + + dout(" failed to add dev, error %d\n", err); + + return err; } - cur_id = rbd_dev->header.snapc->snaps[i]; - /* snapshot removal? handle it above */ - if (cur_id >= old_snap->id) - break; - /* a new snapshot */ - snap = __rbd_add_snap_dev(rbd_dev, i - 1, name); - if (IS_ERR(snap)) - return PTR_ERR(snap); - - /* note that we add it backward so using n and not p */ - list_add(&snap->node, n); - p = &snap->node; + + /* New goes before existing, or at end of list */ + + dout(" added dev%s\n", snap ? "" : " at end\n"); + if (snap) + list_add_tail(&new_snap->node, &snap->node); + else + list_add_tail(&new_snap->node, head); + } else { + /* Already have this one */ + + dout(" already present\n"); + + rbd_assert(snap->size == snap_size); + rbd_assert(!strcmp(snap->name, snap_name)); + rbd_assert(snap->features == snap_features); + + /* Done with this list entry; advance */ + + links = links->next; } + + /* Advance to the next entry in the snapshot context */ + + index++; } - /* we're done going over the old snap list, just add what's left */ - for (; i > 0; i--) { - name = rbd_prev_snap_name(name, first_name); - if (!name) { - WARN_ON(1); - return -EINVAL; + dout("%s: done\n", __func__); + + return 0; +} + +/* + * Scan the list of snapshots and register the devices for any that + * have not already been registered. 
+ */ +static int rbd_dev_snaps_register(struct rbd_device *rbd_dev) +{ + struct rbd_snap *snap; + int ret = 0; + + dout("%s called\n", __func__); + if (WARN_ON(!device_is_registered(&rbd_dev->dev))) + return -EIO; + + list_for_each_entry(snap, &rbd_dev->snaps, node) { + if (!rbd_snap_registered(snap)) { + ret = rbd_register_snap_dev(snap, &rbd_dev->dev); + if (ret < 0) + break; } - snap = __rbd_add_snap_dev(rbd_dev, i - 1, name); - if (IS_ERR(snap)) - return PTR_ERR(snap); - list_add(&snap->node, &rbd_dev->snaps); } + dout("%s: returning %d\n", __func__, ret); - return 0; + return ret; } static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { - int ret; struct device *dev; - struct rbd_snap *snap; + int ret; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - dev = &rbd_dev->dev; + dev = &rbd_dev->dev; dev->bus = &rbd_bus_type; dev->type = &rbd_device_type; dev->parent = &rbd_root_dev; dev->release = rbd_dev_release; dev_set_name(dev, "%d", rbd_dev->dev_id); ret = device_register(dev); - if (ret < 0) - goto out; - list_for_each_entry(snap, &rbd_dev->snaps, node) { - ret = rbd_register_snap_dev(snap, &rbd_dev->dev); - if (ret < 0) - break; - } -out: mutex_unlock(&ctl_mutex); + return ret; } @@ -2211,33 +2568,37 @@ static int rbd_init_watch_dev(struct rbd_device *rbd_dev) return ret; } -static atomic64_t rbd_id_max = ATOMIC64_INIT(0); +static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0); /* * Get a unique rbd identifier for the given new rbd_dev, and add * the rbd_dev to the global list. The minimum rbd id is 1. */ -static void rbd_id_get(struct rbd_device *rbd_dev) +static void rbd_dev_id_get(struct rbd_device *rbd_dev) { - rbd_dev->dev_id = atomic64_inc_return(&rbd_id_max); + rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max); spin_lock(&rbd_dev_list_lock); list_add_tail(&rbd_dev->node, &rbd_dev_list); spin_unlock(&rbd_dev_list_lock); + dout("rbd_dev %p given dev id %llu\n", rbd_dev, + (unsigned long long) rbd_dev->dev_id); } /* * Remove an rbd_dev from the global list, and record that its * identifier is no longer in use. */ -static void rbd_id_put(struct rbd_device *rbd_dev) +static void rbd_dev_id_put(struct rbd_device *rbd_dev) { struct list_head *tmp; int rbd_id = rbd_dev->dev_id; int max_id; - BUG_ON(rbd_id < 1); + rbd_assert(rbd_id > 0); + dout("rbd_dev %p released dev id %llu\n", rbd_dev, + (unsigned long long) rbd_dev->dev_id); spin_lock(&rbd_dev_list_lock); list_del_init(&rbd_dev->node); @@ -2245,7 +2606,7 @@ static void rbd_id_put(struct rbd_device *rbd_dev) * If the id being "put" is not the current maximum, there * is nothing special we need to do. */ - if (rbd_id != atomic64_read(&rbd_id_max)) { + if (rbd_id != atomic64_read(&rbd_dev_id_max)) { spin_unlock(&rbd_dev_list_lock); return; } @@ -2266,12 +2627,13 @@ static void rbd_id_put(struct rbd_device *rbd_dev) spin_unlock(&rbd_dev_list_lock); /* - * The max id could have been updated by rbd_id_get(), in + * The max id could have been updated by rbd_dev_id_get(), in * which case it now accurately reflects the new maximum. * Be careful not to overwrite the maximum value in that * case. 
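[Editor's note] The id-release path here recomputes the largest id still on the device list and then lowers rbd_dev_id_max with a compare-and-exchange, so a concurrent rbd_dev_id_get() that already bumped the counter simply wins. A userspace analogue of just that step using C11 atomics; the real code uses atomic64_cmpxchg() and recomputes the remaining maximum under rbd_dev_list_lock, which is omitted here:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t dev_id_max;

static uint64_t dev_id_get(void)
{
	/* Equivalent of atomic64_inc_return(): ids are never reused. */
	return atomic_fetch_add(&dev_id_max, 1) + 1;
}

/*
 * Called when 'id' is released and 'remaining_max' is the largest id
 * still in use.  Only shrink the counter if nobody allocated a newer
 * id since we looked; otherwise leave the larger value alone.
 */
static void dev_id_put(uint64_t id, uint64_t remaining_max)
{
	uint64_t expected = id;

	if (atomic_load(&dev_id_max) != id)
		return;		/* not the current maximum: nothing to do */

	/* Fails harmlessly if a concurrent dev_id_get() raced ahead. */
	atomic_compare_exchange_strong(&dev_id_max, &expected, remaining_max);
}

int main(void)
{
	uint64_t a = dev_id_get();	/* 1 */
	uint64_t b = dev_id_get();	/* 2 */

	dev_id_put(b, a);		/* max falls back to 1 */
	printf("max=%llu\n", (unsigned long long)atomic_load(&dev_id_max));
	return 0;
}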
*/ - atomic64_cmpxchg(&rbd_id_max, rbd_id, max_id); + atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id); + dout(" max dev id has been reset\n"); } /* @@ -2360,28 +2722,31 @@ static inline char *dup_token(const char **buf, size_t *lenp) } /* - * This fills in the pool_name, image_name, image_name_len, snap_name, - * rbd_dev, rbd_md_name, and name fields of the given rbd_dev, based - * on the list of monitor addresses and other options provided via - * /sys/bus/rbd/add. + * This fills in the pool_name, image_name, image_name_len, rbd_dev, + * rbd_md_name, and name fields of the given rbd_dev, based on the + * list of monitor addresses and other options provided via + * /sys/bus/rbd/add. Returns a pointer to a dynamically-allocated + * copy of the snapshot name to map if successful, or a + * pointer-coded error otherwise. * * Note: rbd_dev is assumed to have been initially zero-filled. */ -static int rbd_add_parse_args(struct rbd_device *rbd_dev, - const char *buf, - const char **mon_addrs, - size_t *mon_addrs_size, - char *options, - size_t options_size) +static char *rbd_add_parse_args(struct rbd_device *rbd_dev, + const char *buf, + const char **mon_addrs, + size_t *mon_addrs_size, + char *options, + size_t options_size) { size_t len; - int ret; + char *err_ptr = ERR_PTR(-EINVAL); + char *snap_name; /* The first four tokens are required */ len = next_token(&buf); if (!len) - return -EINVAL; + return err_ptr; *mon_addrs_size = len + 1; *mon_addrs = buf; @@ -2389,9 +2754,9 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev, len = copy_token(&buf, options, options_size); if (!len || len >= options_size) - return -EINVAL; + return err_ptr; - ret = -ENOMEM; + err_ptr = ERR_PTR(-ENOMEM); rbd_dev->pool_name = dup_token(&buf, NULL); if (!rbd_dev->pool_name) goto out_err; @@ -2400,41 +2765,227 @@ static int rbd_add_parse_args(struct rbd_device *rbd_dev, if (!rbd_dev->image_name) goto out_err; - /* Create the name of the header object */ + /* Snapshot name is optional */ + len = next_token(&buf); + if (!len) { + buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */ + len = sizeof (RBD_SNAP_HEAD_NAME) - 1; + } + snap_name = kmalloc(len + 1, GFP_KERNEL); + if (!snap_name) + goto out_err; + memcpy(snap_name, buf, len); + *(snap_name + len) = '\0'; - rbd_dev->header_name = kmalloc(rbd_dev->image_name_len - + sizeof (RBD_SUFFIX), - GFP_KERNEL); - if (!rbd_dev->header_name) +dout(" SNAP_NAME is <%s>, len is %zd\n", snap_name, len); + + return snap_name; + +out_err: + kfree(rbd_dev->image_name); + rbd_dev->image_name = NULL; + rbd_dev->image_name_len = 0; + kfree(rbd_dev->pool_name); + rbd_dev->pool_name = NULL; + + return err_ptr; +} + +/* + * An rbd format 2 image has a unique identifier, distinct from the + * name given to it by the user. Internally, that identifier is + * what's used to specify the names of objects related to the image. + * + * A special "rbd id" object is used to map an rbd image name to its + * id. If that object doesn't exist, then there is no v2 rbd image + * with the supplied name. + * + * This function will record the given rbd_dev's image_id field if + * it can be determined, and in that case will return 0. If any + * errors occur a negative errno will be returned and the rbd_dev's + * image_id field will be unchanged (and should be NULL). 
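[Editor's note] The id object described above is named by prepending RBD_ID_PREFIX to the user-visible image name, and its absence is what lets the probe code later in this patch fall back to treating the image as format 1. A small sketch of just the name construction (string handling only, no OSD call, and a hypothetical image name):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RBD_ID_PREFIX	"rbd_id."

/* Build the name of the object that maps an image name to its id,
 * sized the same way as in rbd_dev_image_id(). */
static char *rbd_id_object_name(const char *image_name)
{
	size_t size = sizeof(RBD_ID_PREFIX) + strlen(image_name);
	char *name = malloc(size);

	if (name)
		snprintf(name, size, "%s%s", RBD_ID_PREFIX, image_name);
	return name;
}

int main(void)
{
	char *name = rbd_id_object_name("foo");

	printf("%s\n", name ? name : "(alloc failed)");	/* rbd_id.foo */
	free(name);
	return 0;
}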
+ */ +static int rbd_dev_image_id(struct rbd_device *rbd_dev) +{ + int ret; + size_t size; + char *object_name; + void *response; + void *p; + + /* + * First, see if the format 2 image id file exists, and if + * so, get the image's persistent id from it. + */ + size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len; + object_name = kmalloc(size, GFP_NOIO); + if (!object_name) + return -ENOMEM; + sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name); + dout("rbd id object name is %s\n", object_name); + + /* Response will be an encoded string, which includes a length */ + + size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX; + response = kzalloc(size, GFP_NOIO); + if (!response) { + ret = -ENOMEM; + goto out; + } + + ret = rbd_req_sync_exec(rbd_dev, object_name, + "rbd", "get_id", + NULL, 0, + response, RBD_IMAGE_ID_LEN_MAX, + CEPH_OSD_FLAG_READ, NULL); + dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret); + if (ret < 0) + goto out; + + p = response; + rbd_dev->image_id = ceph_extract_encoded_string(&p, + p + RBD_IMAGE_ID_LEN_MAX, + &rbd_dev->image_id_len, + GFP_NOIO); + if (IS_ERR(rbd_dev->image_id)) { + ret = PTR_ERR(rbd_dev->image_id); + rbd_dev->image_id = NULL; + } else { + dout("image_id is %s\n", rbd_dev->image_id); + } +out: + kfree(response); + kfree(object_name); + + return ret; +} + +static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) +{ + int ret; + size_t size; + + /* Version 1 images have no id; empty string is used */ + + rbd_dev->image_id = kstrdup("", GFP_KERNEL); + if (!rbd_dev->image_id) + return -ENOMEM; + rbd_dev->image_id_len = 0; + + /* Record the header object name for this rbd image. */ + + size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX); + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); + if (!rbd_dev->header_name) { + ret = -ENOMEM; goto out_err; + } sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX); + /* Populate rbd image metadata */ + + ret = rbd_read_header(rbd_dev, &rbd_dev->header); + if (ret < 0) + goto out_err; + rbd_dev->image_format = 1; + + dout("discovered version 1 image, header name is %s\n", + rbd_dev->header_name); + + return 0; + +out_err: + kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; + kfree(rbd_dev->image_id); + rbd_dev->image_id = NULL; + + return ret; +} + +static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) +{ + size_t size; + int ret; + u64 ver = 0; + /* - * The snapshot name is optional. If none is is supplied, - * we use the default value. + * Image id was filled in by the caller. Record the header + * object name for this rbd image. */ - rbd_dev->snap_name = dup_token(&buf, &len); - if (!rbd_dev->snap_name) + size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len; + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); + if (!rbd_dev->header_name) + return -ENOMEM; + sprintf(rbd_dev->header_name, "%s%s", + RBD_HEADER_PREFIX, rbd_dev->image_id); + + /* Get the size and object order for the image */ + + ret = rbd_dev_v2_image_size(rbd_dev); + if (ret < 0) goto out_err; - if (!len) { - /* Replace the empty name with the default */ - kfree(rbd_dev->snap_name); - rbd_dev->snap_name - = kmalloc(sizeof (RBD_SNAP_HEAD_NAME), GFP_KERNEL); - if (!rbd_dev->snap_name) - goto out_err; - memcpy(rbd_dev->snap_name, RBD_SNAP_HEAD_NAME, - sizeof (RBD_SNAP_HEAD_NAME)); - } + /* Get the object prefix (a.k.a. 
block_name) for the image */ - return 0; + ret = rbd_dev_v2_object_prefix(rbd_dev); + if (ret < 0) + goto out_err; + + /* Get the features for the image */ + ret = rbd_dev_v2_features(rbd_dev); + if (ret < 0) + goto out_err; + + /* crypto and compression type aren't (yet) supported for v2 images */ + + rbd_dev->header.crypt_type = 0; + rbd_dev->header.comp_type = 0; + + /* Get the snapshot context, plus the header version */ + + ret = rbd_dev_v2_snap_context(rbd_dev, &ver); + if (ret) + goto out_err; + rbd_dev->header.obj_version = ver; + + rbd_dev->image_format = 2; + + dout("discovered version 2 image, header name is %s\n", + rbd_dev->header_name); + + return -ENOTSUPP; out_err: kfree(rbd_dev->header_name); - kfree(rbd_dev->image_name); - kfree(rbd_dev->pool_name); - rbd_dev->pool_name = NULL; + rbd_dev->header_name = NULL; + kfree(rbd_dev->header.object_prefix); + rbd_dev->header.object_prefix = NULL; + + return ret; +} + +/* + * Probe for the existence of the header object for the given rbd + * device. For format 2 images this includes determining the image + * id. + */ +static int rbd_dev_probe(struct rbd_device *rbd_dev) +{ + int ret; + + /* + * Get the id from the image id object. If it's not a + * format 2 image, we'll get ENOENT back, and we'll assume + * it's a format 1 image. + */ + ret = rbd_dev_image_id(rbd_dev); + if (ret) + ret = rbd_dev_v1_probe(rbd_dev); + else + ret = rbd_dev_v2_probe(rbd_dev); + if (ret) + dout("probe failed, returning %d\n", ret); return ret; } @@ -2449,16 +3000,17 @@ static ssize_t rbd_add(struct bus_type *bus, size_t mon_addrs_size = 0; struct ceph_osd_client *osdc; int rc = -ENOMEM; + char *snap_name; if (!try_module_get(THIS_MODULE)) return -ENODEV; options = kmalloc(count, GFP_KERNEL); if (!options) - goto err_nomem; + goto err_out_mem; rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL); if (!rbd_dev) - goto err_nomem; + goto err_out_mem; /* static rbd_device initialization */ spin_lock_init(&rbd_dev->lock); @@ -2466,27 +3018,18 @@ static ssize_t rbd_add(struct bus_type *bus, INIT_LIST_HEAD(&rbd_dev->snaps); init_rwsem(&rbd_dev->header_rwsem); - /* generate unique id: find highest unique id, add one */ - rbd_id_get(rbd_dev); - - /* Fill in the device name, now that we have its id. 
*/ - BUILD_BUG_ON(DEV_NAME_LEN - < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); - sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id); - /* parse add command */ - rc = rbd_add_parse_args(rbd_dev, buf, &mon_addrs, &mon_addrs_size, - options, count); - if (rc) - goto err_put_id; - - rbd_dev->rbd_client = rbd_get_client(mon_addrs, mon_addrs_size - 1, - options); - if (IS_ERR(rbd_dev->rbd_client)) { - rc = PTR_ERR(rbd_dev->rbd_client); - goto err_put_id; + snap_name = rbd_add_parse_args(rbd_dev, buf, + &mon_addrs, &mon_addrs_size, options, count); + if (IS_ERR(snap_name)) { + rc = PTR_ERR(snap_name); + goto err_out_mem; } + rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options); + if (rc < 0) + goto err_out_args; + /* pick the pool */ osdc = &rbd_dev->rbd_client->client->osdc; rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name); @@ -2494,23 +3037,53 @@ static ssize_t rbd_add(struct bus_type *bus, goto err_out_client; rbd_dev->pool_id = rc; - /* register our block device */ - rc = register_blkdev(0, rbd_dev->name); + rc = rbd_dev_probe(rbd_dev); if (rc < 0) goto err_out_client; + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + + /* no need to lock here, as rbd_dev is not registered yet */ + rc = rbd_dev_snaps_update(rbd_dev); + if (rc) + goto err_out_header; + + rc = rbd_dev_set_mapping(rbd_dev, snap_name); + if (rc) + goto err_out_header; + + /* generate unique id: find highest unique id, add one */ + rbd_dev_id_get(rbd_dev); + + /* Fill in the device name, now that we have its id. */ + BUILD_BUG_ON(DEV_NAME_LEN + < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH); + sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id); + + /* Get our block major device number. */ + + rc = register_blkdev(0, rbd_dev->name); + if (rc < 0) + goto err_out_id; rbd_dev->major = rc; - rc = rbd_bus_add_dev(rbd_dev); + /* Set up the blkdev mapping. */ + + rc = rbd_init_disk(rbd_dev); if (rc) goto err_out_blkdev; + rc = rbd_bus_add_dev(rbd_dev); + if (rc) + goto err_out_disk; + /* * At this point cleanup in the event of an error is the job * of the sysfs code (initiated by rbd_bus_del_dev()). - * - * Set up and announce blkdev mapping. */ - rc = rbd_init_disk(rbd_dev); + + down_write(&rbd_dev->header_rwsem); + rc = rbd_dev_snaps_register(rbd_dev); + up_write(&rbd_dev->header_rwsem); if (rc) goto err_out_bus; @@ -2518,6 +3091,13 @@ static ssize_t rbd_add(struct bus_type *bus, if (rc) goto err_out_bus; + /* Everything's ready. Announce the disk to the world. 
*/ + + add_disk(rbd_dev->disk); + + pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, + (unsigned long long) rbd_dev->mapping.size); + return count; err_out_bus: @@ -2527,19 +3107,23 @@ err_out_bus: kfree(options); return rc; +err_out_disk: + rbd_free_disk(rbd_dev); err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); +err_out_id: + rbd_dev_id_put(rbd_dev); +err_out_header: + rbd_header_free(&rbd_dev->header); err_out_client: + kfree(rbd_dev->header_name); rbd_put_client(rbd_dev); -err_put_id: - if (rbd_dev->pool_name) { - kfree(rbd_dev->snap_name); - kfree(rbd_dev->header_name); - kfree(rbd_dev->image_name); - kfree(rbd_dev->pool_name); - } - rbd_id_put(rbd_dev); -err_nomem: + kfree(rbd_dev->image_id); +err_out_args: + kfree(rbd_dev->mapping.snap_name); + kfree(rbd_dev->image_name); + kfree(rbd_dev->pool_name); +err_out_mem: kfree(rbd_dev); kfree(options); @@ -2585,12 +3169,16 @@ static void rbd_dev_release(struct device *dev) rbd_free_disk(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); + /* release allocated disk header fields */ + rbd_header_free(&rbd_dev->header); + /* done with the id, and with the rbd_dev */ - kfree(rbd_dev->snap_name); + kfree(rbd_dev->mapping.snap_name); + kfree(rbd_dev->image_id); kfree(rbd_dev->header_name); kfree(rbd_dev->pool_name); kfree(rbd_dev->image_name); - rbd_id_put(rbd_dev); + rbd_dev_id_put(rbd_dev); kfree(rbd_dev); /* release module ref */ @@ -2628,47 +3216,7 @@ static ssize_t rbd_remove(struct bus_type *bus, done: mutex_unlock(&ctl_mutex); - return ret; -} -static ssize_t rbd_snap_add(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - int ret; - char *name = kmalloc(count + 1, GFP_KERNEL); - if (!name) - return -ENOMEM; - - snprintf(name, count, "%s", buf); - - mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); - - ret = rbd_header_add_snap(rbd_dev, - name, GFP_KERNEL); - if (ret < 0) - goto err_unlock; - - ret = __rbd_refresh_header(rbd_dev, NULL); - if (ret < 0) - goto err_unlock; - - /* shouldn't hold ctl_mutex when notifying.. notify might - trigger a watch callback that would need to get that mutex */ - mutex_unlock(&ctl_mutex); - - /* make a best effort, don't error if failed */ - rbd_req_sync_notify(rbd_dev); - - ret = count; - kfree(name); - return ret; - -err_unlock: - mutex_unlock(&ctl_mutex); - kfree(name); return ret; } diff --git a/drivers/block/rbd_types.h b/drivers/block/rbd_types.h index 0924e9e41a60..cbe77fa105ba 100644 --- a/drivers/block/rbd_types.h +++ b/drivers/block/rbd_types.h @@ -15,15 +15,30 @@ #include <linux/types.h> +/* For format version 2, rbd image 'foo' consists of objects + * rbd_id.foo - id of image + * rbd_header.<id> - image metadata + * rbd_data.<id>.0000000000000000 + * rbd_data.<id>.0000000000000001 + * ... - data + * Clients do not access header data directly in rbd format 2. + */ + +#define RBD_HEADER_PREFIX "rbd_header." +#define RBD_DATA_PREFIX "rbd_data." +#define RBD_ID_PREFIX "rbd_id." + /* - * rbd image 'foo' consists of objects - * foo.rbd - image metadata - * foo.00000000 - * foo.00000001 - * ... - data + * For format version 1, rbd image 'foo' consists of objects + * foo.rbd - image metadata + * rb.<idhi>.<idlo>.00000000 + * rb.<idhi>.<idlo>.00000001 + * ... - data + * There is no notion of a persistent image id in rbd format 1. 
*/ #define RBD_SUFFIX ".rbd" + #define RBD_DIRECTORY "rbd_directory" #define RBD_INFO "rbd_info" @@ -47,7 +62,7 @@ struct rbd_image_snap_ondisk { struct rbd_image_header_ondisk { char text[40]; - char block_name[24]; + char object_prefix[24]; char signature[4]; char version[8]; struct { diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index c0bbeb470754..0bdde8fba397 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -14,6 +14,9 @@ #define PART_BITS 4 +static bool use_bio; +module_param(use_bio, bool, S_IRUGO); + static int major; static DEFINE_IDA(vd_index_ida); @@ -23,6 +26,7 @@ struct virtio_blk { struct virtio_device *vdev; struct virtqueue *vq; + wait_queue_head_t queue_wait; /* The disk structure for the kernel. */ struct gendisk *disk; @@ -51,53 +55,244 @@ struct virtio_blk struct virtblk_req { struct request *req; + struct bio *bio; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; + struct work_struct work; + struct virtio_blk *vblk; + int flags; u8 status; + struct scatterlist sg[]; +}; + +enum { + VBLK_IS_FLUSH = 1, + VBLK_REQ_FLUSH = 2, + VBLK_REQ_DATA = 4, + VBLK_REQ_FUA = 8, }; -static void blk_done(struct virtqueue *vq) +static inline int virtblk_result(struct virtblk_req *vbr) +{ + switch (vbr->status) { + case VIRTIO_BLK_S_OK: + return 0; + case VIRTIO_BLK_S_UNSUPP: + return -ENOTTY; + default: + return -EIO; + } +} + +static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk, + gfp_t gfp_mask) { - struct virtio_blk *vblk = vq->vdev->priv; struct virtblk_req *vbr; - unsigned int len; - unsigned long flags; - spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); - while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { - int error; + vbr = mempool_alloc(vblk->pool, gfp_mask); + if (!vbr) + return NULL; - switch (vbr->status) { - case VIRTIO_BLK_S_OK: - error = 0; - break; - case VIRTIO_BLK_S_UNSUPP: - error = -ENOTTY; - break; - default: - error = -EIO; + vbr->vblk = vblk; + if (use_bio) + sg_init_table(vbr->sg, vblk->sg_elems); + + return vbr; +} + +static void virtblk_add_buf_wait(struct virtio_blk *vblk, + struct virtblk_req *vbr, + unsigned long out, + unsigned long in) +{ + DEFINE_WAIT(wait); + + for (;;) { + prepare_to_wait_exclusive(&vblk->queue_wait, &wait, + TASK_UNINTERRUPTIBLE); + + spin_lock_irq(vblk->disk->queue->queue_lock); + if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr, + GFP_ATOMIC) < 0) { + spin_unlock_irq(vblk->disk->queue->queue_lock); + io_schedule(); + } else { + virtqueue_kick(vblk->vq); + spin_unlock_irq(vblk->disk->queue->queue_lock); break; } - switch (vbr->req->cmd_type) { - case REQ_TYPE_BLOCK_PC: - vbr->req->resid_len = vbr->in_hdr.residual; - vbr->req->sense_len = vbr->in_hdr.sense_len; - vbr->req->errors = vbr->in_hdr.errors; - break; - case REQ_TYPE_SPECIAL: - vbr->req->errors = (error != 0); - break; - default: - break; + } + + finish_wait(&vblk->queue_wait, &wait); +} + +static inline void virtblk_add_req(struct virtblk_req *vbr, + unsigned int out, unsigned int in) +{ + struct virtio_blk *vblk = vbr->vblk; + + spin_lock_irq(vblk->disk->queue->queue_lock); + if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr, + GFP_ATOMIC) < 0)) { + spin_unlock_irq(vblk->disk->queue->queue_lock); + virtblk_add_buf_wait(vblk, vbr, out, in); + return; + } + virtqueue_kick(vblk->vq); + spin_unlock_irq(vblk->disk->queue->queue_lock); +} + +static int virtblk_bio_send_flush(struct virtblk_req *vbr) +{ + unsigned int out = 0, in = 0; + + vbr->flags |= 
VBLK_IS_FLUSH; + vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; + vbr->out_hdr.sector = 0; + vbr->out_hdr.ioprio = 0; + sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); + sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status)); + + virtblk_add_req(vbr, out, in); + + return 0; +} + +static int virtblk_bio_send_data(struct virtblk_req *vbr) +{ + struct virtio_blk *vblk = vbr->vblk; + unsigned int num, out = 0, in = 0; + struct bio *bio = vbr->bio; + + vbr->flags &= ~VBLK_IS_FLUSH; + vbr->out_hdr.type = 0; + vbr->out_hdr.sector = bio->bi_sector; + vbr->out_hdr.ioprio = bio_prio(bio); + + sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); + + num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out); + + sg_set_buf(&vbr->sg[num + out + in++], &vbr->status, + sizeof(vbr->status)); + + if (num) { + if (bio->bi_rw & REQ_WRITE) { + vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; + out += num; + } else { + vbr->out_hdr.type |= VIRTIO_BLK_T_IN; + in += num; } + } + + virtblk_add_req(vbr, out, in); + + return 0; +} + +static void virtblk_bio_send_data_work(struct work_struct *work) +{ + struct virtblk_req *vbr; + + vbr = container_of(work, struct virtblk_req, work); + + virtblk_bio_send_data(vbr); +} + +static void virtblk_bio_send_flush_work(struct work_struct *work) +{ + struct virtblk_req *vbr; + + vbr = container_of(work, struct virtblk_req, work); + + virtblk_bio_send_flush(vbr); +} + +static inline void virtblk_request_done(struct virtblk_req *vbr) +{ + struct virtio_blk *vblk = vbr->vblk; + struct request *req = vbr->req; + int error = virtblk_result(vbr); + + if (req->cmd_type == REQ_TYPE_BLOCK_PC) { + req->resid_len = vbr->in_hdr.residual; + req->sense_len = vbr->in_hdr.sense_len; + req->errors = vbr->in_hdr.errors; + } else if (req->cmd_type == REQ_TYPE_SPECIAL) { + req->errors = (error != 0); + } + + __blk_end_request_all(req, error); + mempool_free(vbr, vblk->pool); +} + +static inline void virtblk_bio_flush_done(struct virtblk_req *vbr) +{ + struct virtio_blk *vblk = vbr->vblk; + + if (vbr->flags & VBLK_REQ_DATA) { + /* Send out the actual write data */ + INIT_WORK(&vbr->work, virtblk_bio_send_data_work); + queue_work(virtblk_wq, &vbr->work); + } else { + bio_endio(vbr->bio, virtblk_result(vbr)); + mempool_free(vbr, vblk->pool); + } +} + +static inline void virtblk_bio_data_done(struct virtblk_req *vbr) +{ + struct virtio_blk *vblk = vbr->vblk; - __blk_end_request_all(vbr->req, error); + if (unlikely(vbr->flags & VBLK_REQ_FUA)) { + /* Send out a flush before end the bio */ + vbr->flags &= ~VBLK_REQ_DATA; + INIT_WORK(&vbr->work, virtblk_bio_send_flush_work); + queue_work(virtblk_wq, &vbr->work); + } else { + bio_endio(vbr->bio, virtblk_result(vbr)); mempool_free(vbr, vblk->pool); } +} + +static inline void virtblk_bio_done(struct virtblk_req *vbr) +{ + if (unlikely(vbr->flags & VBLK_IS_FLUSH)) + virtblk_bio_flush_done(vbr); + else + virtblk_bio_data_done(vbr); +} + +static void virtblk_done(struct virtqueue *vq) +{ + struct virtio_blk *vblk = vq->vdev->priv; + bool bio_done = false, req_done = false; + struct virtblk_req *vbr; + unsigned long flags; + unsigned int len; + + spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); + do { + virtqueue_disable_cb(vq); + while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) { + if (vbr->bio) { + virtblk_bio_done(vbr); + bio_done = true; + } else { + virtblk_request_done(vbr); + req_done = true; + } + } + } while (!virtqueue_enable_cb(vq)); /* In case queue is stopped waiting for more buffers. 
*/ - blk_start_queue(vblk->disk->queue); + if (req_done) + blk_start_queue(vblk->disk->queue); spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags); + + if (bio_done) + wake_up(&vblk->queue_wait); } static bool do_req(struct request_queue *q, struct virtio_blk *vblk, @@ -106,13 +301,13 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, unsigned long num, out = 0, in = 0; struct virtblk_req *vbr; - vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); + vbr = virtblk_alloc_req(vblk, GFP_ATOMIC); if (!vbr) /* When another request finishes we'll try again. */ return false; vbr->req = req; - + vbr->bio = NULL; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; @@ -172,7 +367,8 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, } } - if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) { + if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, + GFP_ATOMIC) < 0) { mempool_free(vbr, vblk->pool); return false; } @@ -180,7 +376,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk, return true; } -static void do_virtblk_request(struct request_queue *q) +static void virtblk_request(struct request_queue *q) { struct virtio_blk *vblk = q->queuedata; struct request *req; @@ -203,6 +399,34 @@ static void do_virtblk_request(struct request_queue *q) virtqueue_kick(vblk->vq); } +static void virtblk_make_request(struct request_queue *q, struct bio *bio) +{ + struct virtio_blk *vblk = q->queuedata; + struct virtblk_req *vbr; + + BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems); + + vbr = virtblk_alloc_req(vblk, GFP_NOIO); + if (!vbr) { + bio_endio(bio, -ENOMEM); + return; + } + + vbr->bio = bio; + vbr->flags = 0; + if (bio->bi_rw & REQ_FLUSH) + vbr->flags |= VBLK_REQ_FLUSH; + if (bio->bi_rw & REQ_FUA) + vbr->flags |= VBLK_REQ_FUA; + if (bio->bi_size) + vbr->flags |= VBLK_REQ_DATA; + + if (unlikely(vbr->flags & VBLK_REQ_FLUSH)) + virtblk_bio_send_flush(vbr); + else + virtblk_bio_send_data(vbr); +} + /* return id (s/n) string for *disk to *id_str */ static int virtblk_get_id(struct gendisk *disk, char *id_str) @@ -360,7 +584,7 @@ static int init_vq(struct virtio_blk *vblk) int err = 0; /* We expect one virtqueue, for output. 
*/ - vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests"); + vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests"); if (IS_ERR(vblk->vq)) err = PTR_ERR(vblk->vq); @@ -477,6 +701,8 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) struct virtio_blk *vblk; struct request_queue *q; int err, index; + int pool_size; + u64 cap; u32 v, blk_size, sg_elems, opt_io_size; u16 min_io_size; @@ -506,10 +732,12 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) goto out_free_index; } + init_waitqueue_head(&vblk->queue_wait); vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table(vblk->sg, vblk->sg_elems); mutex_init(&vblk->config_lock); + INIT_WORK(&vblk->config_work, virtblk_config_changed_work); vblk->config_enable = true; @@ -517,7 +745,10 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) if (err) goto out_free_vblk; - vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req)); + pool_size = sizeof(struct virtblk_req); + if (use_bio) + pool_size += sizeof(struct scatterlist) * sg_elems; + vblk->pool = mempool_create_kmalloc_pool(1, pool_size); if (!vblk->pool) { err = -ENOMEM; goto out_free_vq; @@ -530,12 +761,14 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) goto out_mempool; } - q = vblk->disk->queue = blk_init_queue(do_virtblk_request, NULL); + q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL); if (!q) { err = -ENOMEM; goto out_put_disk; } + if (use_bio) + blk_queue_make_request(q, virtblk_make_request); q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); @@ -620,7 +853,6 @@ static int __devinit virtblk_probe(struct virtio_device *vdev) if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); - add_disk(vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c index 4fbdceb6f773..a5effd813abd 100644 --- a/drivers/char/hw_random/omap-rng.c +++ b/drivers/char/hw_random/omap-rng.c @@ -18,11 +18,12 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/random.h> -#include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> +#include <linux/slab.h> +#include <linux/pm_runtime.h> #include <asm/io.h> @@ -46,26 +47,36 @@ #define RNG_SYSSTATUS 0x44 /* System status [0] = RESETDONE */ -static void __iomem *rng_base; -static struct clk *rng_ick; -static struct platform_device *rng_dev; +/** + * struct omap_rng_private_data - RNG IP block-specific data + * @base: virtual address of the beginning of the RNG IP block registers + * @mem_res: struct resource * for the IP block registers physical memory + */ +struct omap_rng_private_data { + void __iomem *base; + struct resource *mem_res; +}; -static inline u32 omap_rng_read_reg(int reg) +static inline u32 omap_rng_read_reg(struct omap_rng_private_data *priv, int reg) { - return __raw_readl(rng_base + reg); + return __raw_readl(priv->base + reg); } -static inline void omap_rng_write_reg(int reg, u32 val) +static inline void omap_rng_write_reg(struct omap_rng_private_data *priv, + int reg, u32 val) { - __raw_writel(val, rng_base + reg); + __raw_writel(val, priv->base + reg); } static int omap_rng_data_present(struct hwrng *rng, int wait) { + struct omap_rng_private_data *priv; int data, i; + priv = (struct omap_rng_private_data *)rng->priv; + for (i = 0; i < 20; i++) { - data = 
omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1; + data = omap_rng_read_reg(priv, RNG_STAT_REG) ? 0 : 1; if (data || !wait) break; /* RNG produces data fast enough (2+ MBit/sec, even @@ -80,9 +91,13 @@ static int omap_rng_data_present(struct hwrng *rng, int wait) static int omap_rng_data_read(struct hwrng *rng, u32 *data) { - *data = omap_rng_read_reg(RNG_OUT_REG); + struct omap_rng_private_data *priv; + + priv = (struct omap_rng_private_data *)rng->priv; + + *data = omap_rng_read_reg(priv, RNG_OUT_REG); - return 4; + return sizeof(u32); } static struct hwrng omap_rng_ops = { @@ -93,69 +108,68 @@ static struct hwrng omap_rng_ops = { static int __devinit omap_rng_probe(struct platform_device *pdev) { - struct resource *res; + struct omap_rng_private_data *priv; int ret; - /* - * A bit ugly, and it will never actually happen but there can - * be only one RNG and this catches any bork - */ - if (rng_dev) - return -EBUSY; - - if (cpu_is_omap24xx()) { - rng_ick = clk_get(&pdev->dev, "ick"); - if (IS_ERR(rng_ick)) { - dev_err(&pdev->dev, "Could not get rng_ick\n"); - ret = PTR_ERR(rng_ick); - return ret; - } else - clk_enable(rng_ick); - } + priv = kzalloc(sizeof(struct omap_rng_private_data), GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "could not allocate memory\n"); + return -ENOMEM; + }; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + omap_rng_ops.priv = (unsigned long)priv; + dev_set_drvdata(&pdev->dev, priv); - rng_base = devm_request_and_ioremap(&pdev->dev, res); - if (!rng_base) { + priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!priv->mem_res) { + ret = -ENOENT; + goto err_ioremap; + } + + priv->base = devm_request_and_ioremap(&pdev->dev, priv->mem_res); + if (!priv->base) { ret = -ENOMEM; goto err_ioremap; } - dev_set_drvdata(&pdev->dev, res); + dev_set_drvdata(&pdev->dev, priv); + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); ret = hwrng_register(&omap_rng_ops); if (ret) goto err_register; dev_info(&pdev->dev, "OMAP Random Number Generator ver. 
%02x\n", - omap_rng_read_reg(RNG_REV_REG)); - omap_rng_write_reg(RNG_MASK_REG, 0x1); + omap_rng_read_reg(priv, RNG_REV_REG)); - rng_dev = pdev; + omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); return 0; err_register: - rng_base = NULL; + priv->base = NULL; + pm_runtime_disable(&pdev->dev); err_ioremap: - if (cpu_is_omap24xx()) { - clk_disable(rng_ick); - clk_put(rng_ick); - } + kfree(priv); + return ret; } static int __exit omap_rng_remove(struct platform_device *pdev) { + struct omap_rng_private_data *priv = dev_get_drvdata(&pdev->dev); + hwrng_unregister(&omap_rng_ops); - omap_rng_write_reg(RNG_MASK_REG, 0x0); + omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); - if (cpu_is_omap24xx()) { - clk_disable(rng_ick); - clk_put(rng_ick); - } + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + release_mem_region(priv->mem_res->start, resource_size(priv->mem_res)); - rng_base = NULL; + kfree(priv); return 0; } @@ -164,13 +178,21 @@ static int __exit omap_rng_remove(struct platform_device *pdev) static int omap_rng_suspend(struct device *dev) { - omap_rng_write_reg(RNG_MASK_REG, 0x0); + struct omap_rng_private_data *priv = dev_get_drvdata(dev); + + omap_rng_write_reg(priv, RNG_MASK_REG, 0x0); + pm_runtime_put_sync(dev); + return 0; } static int omap_rng_resume(struct device *dev) { - omap_rng_write_reg(RNG_MASK_REG, 0x1); + struct omap_rng_private_data *priv = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + omap_rng_write_reg(priv, RNG_MASK_REG, 0x1); + return 0; } @@ -198,9 +220,6 @@ static struct platform_driver omap_rng_driver = { static int __init omap_rng_init(void) { - if (!cpu_is_omap16xx() && !cpu_is_omap24xx()) - return -ENODEV; - return platform_driver_register(&omap_rng_driver); } diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 060a672ebb7b..8ab9c3d4bf13 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -24,6 +24,8 @@ #include <linux/err.h> #include <linux/freezer.h> #include <linux/fs.h> +#include <linux/splice.h> +#include <linux/pagemap.h> #include <linux/init.h> #include <linux/list.h> #include <linux/poll.h> @@ -474,26 +476,53 @@ static ssize_t send_control_msg(struct port *port, unsigned int event, return 0; } +struct buffer_token { + union { + void *buf; + struct scatterlist *sg; + } u; + /* If sgpages == 0 then buf is used, else sg is used */ + unsigned int sgpages; +}; + +static void reclaim_sg_pages(struct scatterlist *sg, unsigned int nrpages) +{ + int i; + struct page *page; + + for (i = 0; i < nrpages; i++) { + page = sg_page(&sg[i]); + if (!page) + break; + put_page(page); + } + kfree(sg); +} + /* Callers must take the port->outvq_lock */ static void reclaim_consumed_buffers(struct port *port) { - void *buf; + struct buffer_token *tok; unsigned int len; if (!port->portdev) { /* Device has been unplugged. vqs are already gone. 
*/ return; } - while ((buf = virtqueue_get_buf(port->out_vq, &len))) { - kfree(buf); + while ((tok = virtqueue_get_buf(port->out_vq, &len))) { + if (tok->sgpages) + reclaim_sg_pages(tok->u.sg, tok->sgpages); + else + kfree(tok->u.buf); + kfree(tok); port->outvq_full = false; } } -static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, - bool nonblock) +static ssize_t __send_to_port(struct port *port, struct scatterlist *sg, + int nents, size_t in_count, + struct buffer_token *tok, bool nonblock) { - struct scatterlist sg[1]; struct virtqueue *out_vq; ssize_t ret; unsigned long flags; @@ -505,8 +534,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, reclaim_consumed_buffers(port); - sg_init_one(sg, in_buf, in_count); - ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC); + ret = virtqueue_add_buf(out_vq, sg, nents, 0, tok, GFP_ATOMIC); /* Tell Host to go! */ virtqueue_kick(out_vq); @@ -544,6 +572,37 @@ done: return in_count; } +static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count, + bool nonblock) +{ + struct scatterlist sg[1]; + struct buffer_token *tok; + + tok = kmalloc(sizeof(*tok), GFP_ATOMIC); + if (!tok) + return -ENOMEM; + tok->sgpages = 0; + tok->u.buf = in_buf; + + sg_init_one(sg, in_buf, in_count); + + return __send_to_port(port, sg, 1, in_count, tok, nonblock); +} + +static ssize_t send_pages(struct port *port, struct scatterlist *sg, int nents, + size_t in_count, bool nonblock) +{ + struct buffer_token *tok; + + tok = kmalloc(sizeof(*tok), GFP_ATOMIC); + if (!tok) + return -ENOMEM; + tok->sgpages = nents; + tok->u.sg = sg; + + return __send_to_port(port, sg, nents, in_count, tok, nonblock); +} + /* * Give out the data that's requested from the buffer that we have * queued up. @@ -665,6 +724,26 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, return fill_readbuf(port, ubuf, count, true); } +static int wait_port_writable(struct port *port, bool nonblock) +{ + int ret; + + if (will_write_block(port)) { + if (nonblock) + return -EAGAIN; + + ret = wait_event_freezable(port->waitqueue, + !will_write_block(port)); + if (ret < 0) + return ret; + } + /* Port got hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; + + return 0; +} + static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, size_t count, loff_t *offp) { @@ -681,18 +760,9 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf, nonblock = filp->f_flags & O_NONBLOCK; - if (will_write_block(port)) { - if (nonblock) - return -EAGAIN; - - ret = wait_event_freezable(port->waitqueue, - !will_write_block(port)); - if (ret < 0) - return ret; - } - /* Port got hot-unplugged. 
*/ - if (!port->guest_connected) - return -ENODEV; + ret = wait_port_writable(port, nonblock); + if (ret < 0) + return ret; count = min((size_t)(32 * 1024), count); @@ -725,6 +795,93 @@ out: return ret; } +struct sg_list { + unsigned int n; + unsigned int size; + size_t len; + struct scatterlist *sg; +}; + +static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, + struct splice_desc *sd) +{ + struct sg_list *sgl = sd->u.data; + unsigned int offset, len; + + if (sgl->n == sgl->size) + return 0; + + /* Try lock this page */ + if (buf->ops->steal(pipe, buf) == 0) { + /* Get reference and unlock page for moving */ + get_page(buf->page); + unlock_page(buf->page); + + len = min(buf->len, sd->len); + sg_set_page(&(sgl->sg[sgl->n]), buf->page, len, buf->offset); + } else { + /* Failback to copying a page */ + struct page *page = alloc_page(GFP_KERNEL); + char *src = buf->ops->map(pipe, buf, 1); + char *dst; + + if (!page) + return -ENOMEM; + dst = kmap(page); + + offset = sd->pos & ~PAGE_MASK; + + len = sd->len; + if (len + offset > PAGE_SIZE) + len = PAGE_SIZE - offset; + + memcpy(dst + offset, src + buf->offset, len); + + kunmap(page); + buf->ops->unmap(pipe, buf, src); + + sg_set_page(&(sgl->sg[sgl->n]), page, len, offset); + } + sgl->n++; + sgl->len += len; + + return len; +} + +/* Faster zero-copy write by splicing */ +static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, + struct file *filp, loff_t *ppos, + size_t len, unsigned int flags) +{ + struct port *port = filp->private_data; + struct sg_list sgl; + ssize_t ret; + struct splice_desc sd = { + .total_len = len, + .flags = flags, + .pos = *ppos, + .u.data = &sgl, + }; + + ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); + if (ret < 0) + return ret; + + sgl.n = 0; + sgl.len = 0; + sgl.size = pipe->nrbufs; + sgl.sg = kmalloc(sizeof(struct scatterlist) * sgl.size, GFP_KERNEL); + if (unlikely(!sgl.sg)) + return -ENOMEM; + + sg_init_table(sgl.sg, sgl.size); + ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); + if (likely(ret > 0)) + ret = send_pages(port, sgl.sg, sgl.n, sgl.len, true); + + return ret; +} + static unsigned int port_fops_poll(struct file *filp, poll_table *wait) { struct port *port; @@ -856,6 +1013,7 @@ static const struct file_operations port_fops = { .open = port_fops_open, .read = port_fops_read, .write = port_fops_write, + .splice_write = port_fops_splice_write, .poll = port_fops_poll, .release = port_fops_release, .fasync = port_fops_fasync, diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index 21c1a87032b7..24ccae453e79 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -19,6 +19,9 @@ #include <linux/clk.h> #include <crypto/internal/hash.h> #include <crypto/sha.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> #include "mv_cesa.h" @@ -1062,7 +1065,10 @@ static int mv_probe(struct platform_device *pdev) goto err_unmap_reg; } - irq = platform_get_irq(pdev, 0); + if (pdev->dev.of_node) + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + else + irq = platform_get_irq(pdev, 0); if (irq < 0 || irq == NO_IRQ) { ret = irq; goto err_unmap_sram; @@ -1170,12 +1176,19 @@ static int mv_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id mv_cesa_of_match_table[] = { + { .compatible = "marvell,orion-crypto", }, + {} +}; +MODULE_DEVICE_TABLE(of, mv_cesa_of_match_table); + static struct platform_driver marvell_crypto = { .probe = mv_probe, - .remove = mv_remove, + .remove = 
__devexit_p(mv_remove), .driver = { .owner = THIS_MODULE, .name = "mv_crypto", + .of_match_table = of_match_ptr(mv_cesa_of_match_table), }, }; MODULE_ALIAS("platform:mv_crypto"); diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index aa73ef3233b8..d055cee36942 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -150,6 +150,12 @@ config GPIO_MSM_V2 Qualcomm MSM chips. Most of the pins on the MSM can be selected for GPIO, and are controlled by this driver. +config GPIO_MVEBU + def_bool y + depends on ARCH_MVEBU + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + config GPIO_MXC def_bool y depends on ARCH_MXC diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index b2c109d1303d..9aeed6707326 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o obj-$(CONFIG_GPIO_MSM_V1) += gpio-msm-v1.o obj-$(CONFIG_GPIO_MSM_V2) += gpio-msm-v2.o +obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o obj-$(CONFIG_GPIO_MXC) += gpio-mxc.o obj-$(CONFIG_GPIO_MXS) += gpio-mxs.o obj-$(CONFIG_ARCH_OMAP) += gpio-omap.o diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c new file mode 100644 index 000000000000..902af437eaf2 --- /dev/null +++ b/drivers/gpio/gpio-mvebu.c @@ -0,0 +1,679 @@ +/* + * GPIO driver for Marvell SoCs + * + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * Andrew Lunn <andrew@lunn.ch> + * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. + * + * This driver is a fairly straightforward GPIO driver for the + * complete family of Marvell EBU SoC platforms (Orion, Dove, + * Kirkwood, Discovery, Armada 370/XP). The only complexity of this + * driver is the different register layout that exists between the + * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP + * platforms (MV78200 from the Discovery family and the Armada + * XP). Therefore, this driver handles three variants of the GPIO + * block: + * - the basic variant, called "orion-gpio", with the simplest + * register set. Used on Orion, Dove, Kirkwoord, Armada 370 and + * non-SMP Discovery systems + * - the mv78200 variant for MV78200 Discovery systems. This variant + * turns the edge mask and level mask registers into CPU0 edge + * mask/level mask registers, and adds CPU1 edge mask/level mask + * registers. + * - the armadaxp variant for Armada XP systems. This variant keeps + * the normal cause/edge mask/level mask registers when the global + * interrupts are used, but adds per-CPU cause/edge mask/level mask + * registers n a separate memory area for the per-CPU GPIO + * interrupts. + */ + +#include <linux/module.h> +#include <linux/gpio.h> +#include <linux/irq.h> +#include <linux/slab.h> +#include <linux/irqdomain.h> +#include <linux/io.h> +#include <linux/of_irq.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pinctrl/consumer.h> + +/* + * GPIO unit register offsets. 
+ */ +#define GPIO_OUT_OFF 0x0000 +#define GPIO_IO_CONF_OFF 0x0004 +#define GPIO_BLINK_EN_OFF 0x0008 +#define GPIO_IN_POL_OFF 0x000c +#define GPIO_DATA_IN_OFF 0x0010 +#define GPIO_EDGE_CAUSE_OFF 0x0014 +#define GPIO_EDGE_MASK_OFF 0x0018 +#define GPIO_LEVEL_MASK_OFF 0x001c + +/* The MV78200 has per-CPU registers for edge mask and level mask */ +#define GPIO_EDGE_MASK_MV78200_OFF(cpu) ((cpu) ? 0x30 : 0x18) +#define GPIO_LEVEL_MASK_MV78200_OFF(cpu) ((cpu) ? 0x34 : 0x1C) + +/* The Armada XP has per-CPU registers for interrupt cause, interrupt + * mask and interrupt level mask. Those are relative to the + * percpu_membase. */ +#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4) +#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4) +#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4) + +#define MVEBU_GPIO_SOC_VARIANT_ORION 0x1 +#define MVEBU_GPIO_SOC_VARIANT_MV78200 0x2 +#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3 + +#define MVEBU_MAX_GPIO_PER_BANK 32 + +struct mvebu_gpio_chip { + struct gpio_chip chip; + spinlock_t lock; + void __iomem *membase; + void __iomem *percpu_membase; + unsigned int irqbase; + struct irq_domain *domain; + int soc_variant; +}; + +/* + * Functions returning addresses of individual registers for a given + * GPIO controller. + */ +static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_OUT_OFF; +} + +static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_IO_CONF_OFF; +} + +static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_IN_POL_OFF; +} + +static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip) +{ + return mvchip->membase + GPIO_DATA_IN_OFF; +} + +static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip) +{ + int cpu; + + switch(mvchip->soc_variant) { + case MVEBU_GPIO_SOC_VARIANT_ORION: + case MVEBU_GPIO_SOC_VARIANT_MV78200: + return mvchip->membase + GPIO_EDGE_CAUSE_OFF; + case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: + cpu = smp_processor_id(); + return mvchip->percpu_membase + GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu); + default: + BUG(); + } +} + +static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip) +{ + int cpu; + + switch(mvchip->soc_variant) { + case MVEBU_GPIO_SOC_VARIANT_ORION: + return mvchip->membase + GPIO_EDGE_MASK_OFF; + case MVEBU_GPIO_SOC_VARIANT_MV78200: + cpu = smp_processor_id(); + return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu); + case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: + cpu = smp_processor_id(); + return mvchip->percpu_membase + GPIO_EDGE_MASK_ARMADAXP_OFF(cpu); + default: + BUG(); + } +} + +static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip) +{ + int cpu; + + switch(mvchip->soc_variant) { + case MVEBU_GPIO_SOC_VARIANT_ORION: + return mvchip->membase + GPIO_LEVEL_MASK_OFF; + case MVEBU_GPIO_SOC_VARIANT_MV78200: + cpu = smp_processor_id(); + return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu); + case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: + cpu = smp_processor_id(); + return mvchip->percpu_membase + GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu); + default: + BUG(); + } +} + +/* + * Functions implementing the gpio_chip methods + */ + +int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin) +{ + return pinctrl_request_gpio(chip->base + pin); +} + +void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin) +{ + pinctrl_free_gpio(chip->base + pin); +} + 
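For reference, a minimal standalone sketch (not part of the patch) of how the Armada XP per-CPU register offsets defined above resolve for each CPU. The three macros are copied from this file; the host-side main() is purely illustrative and assumes the four-CPU layout the probe code clears below.

/* Illustrative only: evaluates the per-CPU offsets relative to percpu_membase. */
#include <stdio.h>

#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu)	((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)	(0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu)	(0x20 + (cpu) * 0x4)

int main(void)
{
	int cpu;

	/* One cause/edge-mask/level-mask set per CPU, in the separate per-CPU area. */
	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu%d: cause=0x%02x edge_mask=0x%02x level_mask=0x%02x\n",
		       cpu,
		       GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu),
		       GPIO_EDGE_MASK_ARMADAXP_OFF(cpu),
		       GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
	return 0;
}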
+static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + u32 u; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_out(mvchip)); + if (value) + u |= 1 << pin; + else + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_out(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); +} + +static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + u32 u; + + if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) { + u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^ + readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); + } else { + u = readl_relaxed(mvebu_gpioreg_out(mvchip)); + } + + return (u >> pin) & 1; +} + +static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + int ret; + u32 u; + + /* Check with the pinctrl driver whether this pin is usable as + * an input GPIO */ + ret = pinctrl_gpio_direction_input(chip->base + pin); + if (ret) + return ret; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)); + u |= 1 << pin; + writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); + + return 0; +} + +static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin, + int value) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + unsigned long flags; + int ret; + u32 u; + + /* Check with the pinctrl driver whether this pin is usable as + * an output GPIO */ + ret = pinctrl_gpio_direction_output(chip->base + pin); + if (ret) + return ret; + + spin_lock_irqsave(&mvchip->lock, flags); + u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)); + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip)); + spin_unlock_irqrestore(&mvchip->lock, flags); + + return 0; +} + +static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin) +{ + struct mvebu_gpio_chip *mvchip = + container_of(chip, struct mvebu_gpio_chip, chip); + return irq_create_mapping(mvchip->domain, pin); +} + +/* + * Functions implementing the irq_chip methods + */ +static void mvebu_gpio_irq_ack(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct mvebu_gpio_chip *mvchip = gc->private; + u32 mask = ~(1 << (d->irq - gc->irq_base)); + + irq_gc_lock(gc); + writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip)); + irq_gc_unlock(gc); +} + +static void mvebu_gpio_edge_irq_mask(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct mvebu_gpio_chip *mvchip = gc->private; + u32 mask = 1 << (d->irq - gc->irq_base); + + irq_gc_lock(gc); + gc->mask_cache &= ~mask; + writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); + irq_gc_unlock(gc); +} + +static void mvebu_gpio_edge_irq_unmask(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct mvebu_gpio_chip *mvchip = gc->private; + u32 mask = 1 << (d->irq - gc->irq_base); + + irq_gc_lock(gc); + gc->mask_cache |= mask; + writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip)); + irq_gc_unlock(gc); +} + +static void mvebu_gpio_level_irq_mask(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct 
mvebu_gpio_chip *mvchip = gc->private; + u32 mask = 1 << (d->irq - gc->irq_base); + + irq_gc_lock(gc); + gc->mask_cache &= ~mask; + writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip)); + irq_gc_unlock(gc); +} + +static void mvebu_gpio_level_irq_unmask(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct mvebu_gpio_chip *mvchip = gc->private; + u32 mask = 1 << (d->irq - gc->irq_base); + + irq_gc_lock(gc); + gc->mask_cache |= mask; + writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip)); + irq_gc_unlock(gc); +} + +/***************************************************************************** + * MVEBU GPIO IRQ + * + * GPIO_IN_POL register controls whether GPIO_DATA_IN will hold the same + * value of the line or the opposite value. + * + * Level IRQ handlers: DATA_IN is used directly as cause register. + * Interrupt are masked by LEVEL_MASK registers. + * Edge IRQ handlers: Change in DATA_IN are latched in EDGE_CAUSE. + * Interrupt are masked by EDGE_MASK registers. + * Both-edge handlers: Similar to regular Edge handlers, but also swaps + * the polarity to catch the next line transaction. + * This is a race condition that might not perfectly + * work on some use cases. + * + * Every eight GPIO lines are grouped (OR'ed) before going up to main + * cause register. + * + * EDGE cause mask + * data-in /--------| |-----| |----\ + * -----| |----- ---- to main cause reg + * X \----------------| |----/ + * polarity LEVEL mask + * + ****************************************************************************/ + +static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + struct mvebu_gpio_chip *mvchip = gc->private; + int pin; + u32 u; + + pin = d->hwirq; + + u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin); + if (!u) { + return -EINVAL; + } + + type &= IRQ_TYPE_SENSE_MASK; + if (type == IRQ_TYPE_NONE) + return -EINVAL; + + /* Check if we need to change chip and handler */ + if (!(ct->type & type)) + if (irq_setup_alt_chip(d, type)) + return -EINVAL; + + /* + * Configure interrupt polarity. 
+ */ + switch(type) { + case IRQ_TYPE_EDGE_RISING: + case IRQ_TYPE_LEVEL_HIGH: + u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); + u &= ~(1 << pin); + writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); + case IRQ_TYPE_EDGE_FALLING: + case IRQ_TYPE_LEVEL_LOW: + u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); + u |= 1 << pin; + writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); + case IRQ_TYPE_EDGE_BOTH: { + u32 v; + + v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^ + readl_relaxed(mvebu_gpioreg_data_in(mvchip)); + + /* + * set initial polarity based on current input level + */ + u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); + if (v & (1 << pin)) + u |= 1 << pin; /* falling */ + else + u &= ~(1 << pin); /* rising */ + writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip)); + } + } + return 0; +} + +static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) +{ + struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq); + u32 cause, type; + int i; + + if (mvchip == NULL) + return; + + cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) & + readl_relaxed(mvebu_gpioreg_level_mask(mvchip)); + cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) & + readl_relaxed(mvebu_gpioreg_edge_mask(mvchip)); + + for (i = 0; i < mvchip->chip.ngpio; i++) { + int irq; + + irq = mvchip->irqbase + i; + + if (!(cause & (1 << i))) + continue; + + type = irqd_get_trigger_type(irq_get_irq_data(irq)); + if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) { + /* Swap polarity (race with GPIO line) */ + u32 polarity; + + polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)); + polarity ^= 1 << i; + writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip)); + } + generic_handle_irq(irq); + } +} + +static struct platform_device_id mvebu_gpio_ids[] = { + { + .name = "orion-gpio", + }, { + .name = "mv78200-gpio", + }, { + .name = "armadaxp-gpio", + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids); + +static struct of_device_id mvebu_gpio_of_match[] __devinitdata = { + { + .compatible = "marvell,orion-gpio", + .data = (void*) MVEBU_GPIO_SOC_VARIANT_ORION, + }, + { + .compatible = "marvell,mv78200-gpio", + .data = (void*) MVEBU_GPIO_SOC_VARIANT_MV78200, + }, + { + .compatible = "marvell,armadaxp-gpio", + .data = (void*) MVEBU_GPIO_SOC_VARIANT_ARMADAXP, + }, + { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match); + +static int __devinit mvebu_gpio_probe(struct platform_device *pdev) +{ + struct mvebu_gpio_chip *mvchip; + const struct of_device_id *match; + struct device_node *np = pdev->dev.of_node; + struct resource *res; + struct irq_chip_generic *gc; + struct irq_chip_type *ct; + unsigned int ngpios; + int soc_variant; + int i, cpu, id; + + match = of_match_device(mvebu_gpio_of_match, &pdev->dev); + if (match) + soc_variant = (int) match->data; + else + soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (! res) { + dev_err(&pdev->dev, "Cannot get memory resource\n"); + return -ENODEV; + } + + mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip), GFP_KERNEL); + if (! 
mvchip){ + dev_err(&pdev->dev, "Cannot allocate memory\n"); + return -ENOMEM; + } + + if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) { + dev_err(&pdev->dev, "Missing ngpios OF property\n"); + return -ENODEV; + } + + id = of_alias_get_id(pdev->dev.of_node, "gpio"); + if (id < 0) { + dev_err(&pdev->dev, "Couldn't get OF id\n"); + return id; + } + + mvchip->soc_variant = soc_variant; + mvchip->chip.label = dev_name(&pdev->dev); + mvchip->chip.dev = &pdev->dev; + mvchip->chip.request = mvebu_gpio_request; + mvchip->chip.direction_input = mvebu_gpio_direction_input; + mvchip->chip.get = mvebu_gpio_get; + mvchip->chip.direction_output = mvebu_gpio_direction_output; + mvchip->chip.set = mvebu_gpio_set; + mvchip->chip.to_irq = mvebu_gpio_to_irq; + mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK; + mvchip->chip.ngpio = ngpios; + mvchip->chip.can_sleep = 0; +#ifdef CONFIG_OF + mvchip->chip.of_node = np; +#endif + + spin_lock_init(&mvchip->lock); + mvchip->membase = devm_request_and_ioremap(&pdev->dev, res); + if (! mvchip->membase) { + dev_err(&pdev->dev, "Cannot ioremap\n"); + kfree(mvchip->chip.label); + return -ENOMEM; + } + + /* The Armada XP has a second range of registers for the + * per-CPU registers */ + if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (! res) { + dev_err(&pdev->dev, "Cannot get memory resource\n"); + kfree(mvchip->chip.label); + return -ENODEV; + } + + mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res); + if (! mvchip->percpu_membase) { + dev_err(&pdev->dev, "Cannot ioremap\n"); + kfree(mvchip->chip.label); + return -ENOMEM; + } + } + + /* + * Mask and clear GPIO interrupts. + */ + switch(soc_variant) { + case MVEBU_GPIO_SOC_VARIANT_ORION: + writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF); + writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF); + writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF); + break; + case MVEBU_GPIO_SOC_VARIANT_MV78200: + writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF); + for (cpu = 0; cpu < 2; cpu++) { + writel_relaxed(0, mvchip->membase + + GPIO_EDGE_MASK_MV78200_OFF(cpu)); + writel_relaxed(0, mvchip->membase + + GPIO_LEVEL_MASK_MV78200_OFF(cpu)); + } + break; + case MVEBU_GPIO_SOC_VARIANT_ARMADAXP: + writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF); + writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF); + writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF); + for (cpu = 0; cpu < 4; cpu++) { + writel_relaxed(0, mvchip->percpu_membase + + GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu)); + writel_relaxed(0, mvchip->percpu_membase + + GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)); + writel_relaxed(0, mvchip->percpu_membase + + GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu)); + } + break; + default: + BUG(); + } + + gpiochip_add(&mvchip->chip); + + /* Some gpio controllers do not provide irq support */ + if (!of_irq_count(np)) + return 0; + + /* Setup the interrupt handlers. Each chip can have up to 4 + * interrupt handlers, with each handler dealing with 8 GPIO + * pins. 
*/ + for (i = 0; i < 4; i++) { + int irq; + irq = platform_get_irq(pdev, i); + if (irq < 0) + continue; + irq_set_handler_data(irq, mvchip); + irq_set_chained_handler(irq, mvebu_gpio_irq_handler); + } + + mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1); + if (mvchip->irqbase < 0) { + dev_err(&pdev->dev, "no irqs\n"); + kfree(mvchip->chip.label); + return -ENOMEM; + } + + gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase, + mvchip->membase, handle_level_irq); + if (! gc) { + dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n"); + kfree(mvchip->chip.label); + return -ENOMEM; + } + + gc->private = mvchip; + ct = &gc->chip_types[0]; + ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; + ct->chip.irq_mask = mvebu_gpio_level_irq_mask; + ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask; + ct->chip.irq_set_type = mvebu_gpio_irq_set_type; + ct->chip.name = mvchip->chip.label; + + ct = &gc->chip_types[1]; + ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; + ct->chip.irq_ack = mvebu_gpio_irq_ack; + ct->chip.irq_mask = mvebu_gpio_edge_irq_mask; + ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask; + ct->chip.irq_set_type = mvebu_gpio_irq_set_type; + ct->handler = handle_edge_irq; + ct->chip.name = mvchip->chip.label; + + irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE, + IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE); + + /* Setup irq domain on top of the generic chip. */ + mvchip->domain = irq_domain_add_legacy(np, mvchip->chip.ngpio, + mvchip->irqbase, 0, + &irq_domain_simple_ops, + mvchip); + if (!mvchip->domain) { + dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", + mvchip->chip.label); + irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST, + IRQ_LEVEL | IRQ_NOPROBE); + kfree(gc); + kfree(mvchip->chip.label); + return -ENODEV; + } + + return 0; +} + +static struct platform_driver mvebu_gpio_driver = { + .driver = { + .name = "mvebu-gpio", + .owner = THIS_MODULE, + .of_match_table = mvebu_gpio_of_match, + }, + .probe = mvebu_gpio_probe, + .id_table = mvebu_gpio_ids, +}; + +static int __init mvebu_gpio_init(void) +{ + return platform_driver_register(&mvebu_gpio_driver); +} +postcore_initcall(mvebu_gpio_init); diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 2f4b01bda87c..36509ae32083 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -31,6 +31,8 @@ #include <linux/hwmon.h> #include <linux/gpio.h> #include <linux/gpio-fan.h> +#include <linux/of_platform.h> +#include <linux/of_gpio.h> struct gpio_fan_data { struct platform_device *pdev; @@ -400,14 +402,131 @@ static ssize_t show_name(struct device *dev, static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); + +#ifdef CONFIG_OF_GPIO +/* + * Translate OpenFirmware node properties into platform_data + */ +static int gpio_fan_get_of_pdata(struct device *dev, + struct gpio_fan_platform_data *pdata) +{ + struct device_node *node; + struct gpio_fan_speed *speed; + unsigned *ctrl; + unsigned i; + u32 u; + struct property *prop; + const __be32 *p; + + node = dev->of_node; + + /* Fill GPIO pin array */ + pdata->num_ctrl = of_gpio_count(node); + if (!pdata->num_ctrl) { + dev_err(dev, "gpios DT property empty / missing"); + return -ENODEV; + } + ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned), + GFP_KERNEL); + if (!ctrl) + return -ENOMEM; + for (i = 0; i < pdata->num_ctrl; i++) { + int val; + + val = of_get_gpio(node, i); + if (val < 0) + return val; + ctrl[i] = val; + } + pdata->ctrl = ctrl; + + /* Get number of RPM/ctrl_val pairs in speed map 
*/ + prop = of_find_property(node, "gpio-fan,speed-map", &i); + if (!prop) { + dev_err(dev, "gpio-fan,speed-map DT property missing"); + return -ENODEV; + } + i = i / sizeof(u32); + if (i == 0 || i & 1) { + dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries"); + return -ENODEV; + } + pdata->num_speed = i / 2; + + /* + * Populate speed map + * Speed map is in the form <RPM ctrl_val RPM ctrl_val ...> + * this needs splitting into pairs to create gpio_fan_speed structs + */ + speed = devm_kzalloc(dev, + pdata->num_speed * sizeof(struct gpio_fan_speed), + GFP_KERNEL); + if (!speed) + return -ENOMEM; + p = NULL; + for (i = 0; i < pdata->num_speed; i++) { + p = of_prop_next_u32(prop, p, &u); + if (!p) + return -ENODEV; + speed[i].rpm = u; + p = of_prop_next_u32(prop, p, &u); + if (!p) + return -ENODEV; + speed[i].ctrl_val = u; + } + pdata->speed = speed; + + /* Alarm GPIO if one exists */ + if (of_gpio_named_count(node, "alarm-gpios")) { + struct gpio_fan_alarm *alarm; + int val; + enum of_gpio_flags flags; + + alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm), + GFP_KERNEL); + if (!alarm) + return -ENOMEM; + + val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags); + if (val < 0) + return val; + alarm->gpio = val; + alarm->active_low = flags & OF_GPIO_ACTIVE_LOW; + + pdata->alarm = alarm; + } + + return 0; +} + +static struct of_device_id of_gpio_fan_match[] __devinitdata = { + { .compatible = "gpio-fan", }, + {}, +}; +#endif /* CONFIG_OF_GPIO */ + static int __devinit gpio_fan_probe(struct platform_device *pdev) { int err; struct gpio_fan_data *fan_data; struct gpio_fan_platform_data *pdata = pdev->dev.platform_data; +#ifdef CONFIG_OF_GPIO + if (!pdata) { + pdata = devm_kzalloc(&pdev->dev, + sizeof(struct gpio_fan_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + err = gpio_fan_get_of_pdata(&pdev->dev, pdata); + if (err) + return err; + } +#else /* CONFIG_OF_GPIO */ if (!pdata) return -EINVAL; +#endif /* CONFIG_OF_GPIO */ fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data), GFP_KERNEL); @@ -511,6 +630,7 @@ static struct platform_driver gpio_fan_driver = { .driver = { .name = "gpio-fan", .pm = GPIO_FAN_PM, + .of_match_table = of_match_ptr(of_gpio_fan_match), }, }; diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 5a3bb3d738d8..2f8c76becc6b 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -4,7 +4,7 @@ menuconfig I2C tristate "I2C support" - depends on HAS_IOMEM + depends on !S390 select RT_MUTEXES ---help--- I2C (pronounce: I-squared-C) is a slow serial bus protocol used in @@ -49,6 +49,7 @@ config I2C_CHARDEV config I2C_MUX tristate "I2C bus multiplexing support" + depends on HAS_IOMEM help Say Y here if you want the I2C core to support the ability to handle multiplexed I2C bus topologies, by presenting each @@ -86,6 +87,19 @@ config I2C_SMBUS source drivers/i2c/algos/Kconfig source drivers/i2c/busses/Kconfig +config I2C_STUB + tristate "I2C/SMBus Test Stub" + depends on EXPERIMENTAL && m + default 'n' + help + This module may be useful to developers of SMBus client drivers, + especially for certain kinds of sensor chips. + + If you do build this module, be sure to read the notes and warnings + in <file:Documentation/i2c/i2c-stub>. + + If you don't know what to do here, definitely say N. 
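Referring back to the gpio-fan OF parsing above: the "gpio-fan,speed-map" property is a flat list of <RPM ctrl_val> cells that gpio_fan_get_of_pdata() splits into pairs. A rough standalone sketch of that splitting step follows; the sample values and the local struct are hypothetical stand-ins, not the driver's own data.

/* Illustrative only: split a flat <RPM ctrl_val ...> list into pairs. */
#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in for struct gpio_fan_speed. */
struct gpio_fan_speed {
	int rpm;
	int ctrl_val;
};

int main(void)
{
	/* Flat list as it would appear in the DT property (sample values). */
	static const uint32_t speed_map[] = { 0, 0, 3000, 1, 6000, 2 };
	size_t cells = sizeof(speed_map) / sizeof(speed_map[0]);
	struct gpio_fan_speed speed[sizeof(speed_map) / sizeof(speed_map[0]) / 2];
	size_t i;

	/* The driver rejects an empty or odd-length list. */
	if (cells == 0 || cells & 1)
		return 1;

	for (i = 0; i < cells / 2; i++) {
		speed[i].rpm = (int)speed_map[2 * i];
		speed[i].ctrl_val = (int)speed_map[2 * i + 1];
		printf("speed[%zu]: rpm=%d ctrl_val=%d\n",
		       i, speed[i].rpm, speed[i].ctrl_val);
	}
	return 0;
}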
+ config I2C_DEBUG_CORE bool "I2C Core debugging messages" help @@ -103,6 +117,7 @@ config I2C_DEBUG_ALGO config I2C_DEBUG_BUS bool "I2C Bus debugging messages" + depends on HAS_IOMEM help Say Y here if you want the I2C bus drivers to produce a bunch of debug messages to the system log. Select this if you are having diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 42d9fdd63de0..ff01c389e2da 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -3,6 +3,7 @@ # menu "I2C Hardware Bus support" + depends on HAS_IOMEM comment "PC SMBus host controller drivers" depends on PCI @@ -80,6 +81,7 @@ config I2C_I801 tristate "Intel 82801 (ICH/PCH)" depends on PCI select CHECK_SIGNATURE if X86 && DMI + select GPIOLIB if I2C_MUX help If you say yes to this option, support will be included for the Intel 801 family of mainboard I2C interfaces. Specifically, the following @@ -224,7 +226,7 @@ config I2C_VIA will be called i2c-via. config I2C_VIAPRO - tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx" + tristate "VIA VT82C596/82C686/82xx and CX700/VX8xx/VX900" depends on PCI help If you say yes to this option, support will be included for the VIA @@ -240,6 +242,7 @@ config I2C_VIAPRO CX700 VX800/VX820 VX855/VX875 + VX900 This driver can also be built as a module. If so, the module will be called i2c-viapro. @@ -849,19 +852,6 @@ config I2C_SIBYTE help Supports the SiByte SOC on-chip I2C interfaces (2 channels). -config I2C_STUB - tristate "I2C/SMBus Test Stub" - depends on EXPERIMENTAL && m - default 'n' - help - This module may be useful to developers of SMBus client drivers, - especially for certain kinds of sensor chips. - - If you do build this module, be sure to read the notes and warnings - in <file:Documentation/i2c/i2c-stub>. - - If you don't know what to do here, definitely say N. - config SCx200_I2C tristate "NatSemi SCx200 I2C using GPIO pins (DEPRECATED)" depends on SCx200_GPIO diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 7b8ebbefb581..cbba7db9ad59 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -370,7 +370,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) * messages into the tx buffer. Even if the size of i2c_msg data is * longer than the size of the tx buffer, it handles everything. 
*/ -void +static void i2c_dw_xfer_msg(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 33e9b0c09af2..37793156bd93 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -80,6 +80,13 @@ #include <linux/dmi.h> #include <linux/slab.h> #include <linux/wait.h> +#include <linux/err.h> + +#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE +#include <linux/gpio.h> +#include <linux/i2c-mux-gpio.h> +#include <linux/platform_device.h> +#endif /* I801 SMBus address offsets */ #define SMBHSTSTS(p) (0 + (p)->smba) @@ -158,6 +165,15 @@ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 +struct i801_mux_config { + char *gpio_chip; + unsigned values[3]; + int n_values; + unsigned classes[3]; + unsigned gpios[2]; /* Relative to gpio_chip->base */ + int n_gpios; +}; + struct i801_priv { struct i2c_adapter adapter; unsigned long smba; @@ -175,6 +191,11 @@ struct i801_priv { int count; int len; u8 *data; + +#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE + const struct i801_mux_config *mux_drvdata; + struct platform_device *mux_pdev; +#endif }; static struct pci_driver i801_driver; @@ -900,6 +921,165 @@ static void __init input_apanel_init(void) {} static void __devinit i801_probe_optional_slaves(struct i801_priv *priv) {} #endif /* CONFIG_X86 && CONFIG_DMI */ +#if defined CONFIG_I2C_MUX || defined CONFIG_I2C_MUX_MODULE +static struct i801_mux_config i801_mux_config_asus_z8_d12 = { + .gpio_chip = "gpio_ich", + .values = { 0x02, 0x03 }, + .n_values = 2, + .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD }, + .gpios = { 52, 53 }, + .n_gpios = 2, +}; + +static struct i801_mux_config i801_mux_config_asus_z8_d18 = { + .gpio_chip = "gpio_ich", + .values = { 0x02, 0x03, 0x01 }, + .n_values = 3, + .classes = { I2C_CLASS_SPD, I2C_CLASS_SPD, I2C_CLASS_SPD }, + .gpios = { 52, 53 }, + .n_gpios = 2, +}; + +static struct dmi_system_id __devinitdata mux_dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8NA-D6(C)"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)E-D12(X)"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8NH-D12"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8PH-D12/IFB"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8NR-D12"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8P(N)H-D12"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8PG-D18"), + }, + .driver_data = &i801_mux_config_asus_z8_d18, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "Z8PE-D18"), + }, + .driver_data = &i801_mux_config_asus_z8_d18, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, 
"Z8PS-D12"), + }, + .driver_data = &i801_mux_config_asus_z8_d12, + }, + { } +}; + +/* Setup multiplexing if needed */ +static int __devinit i801_add_mux(struct i801_priv *priv) +{ + struct device *dev = &priv->adapter.dev; + const struct i801_mux_config *mux_config; + struct i2c_mux_gpio_platform_data gpio_data; + int err; + + if (!priv->mux_drvdata) + return 0; + mux_config = priv->mux_drvdata; + + /* Prepare the platform data */ + memset(&gpio_data, 0, sizeof(struct i2c_mux_gpio_platform_data)); + gpio_data.parent = priv->adapter.nr; + gpio_data.values = mux_config->values; + gpio_data.n_values = mux_config->n_values; + gpio_data.classes = mux_config->classes; + gpio_data.gpio_chip = mux_config->gpio_chip; + gpio_data.gpios = mux_config->gpios; + gpio_data.n_gpios = mux_config->n_gpios; + gpio_data.idle = I2C_MUX_GPIO_NO_IDLE; + + /* Register the mux device */ + priv->mux_pdev = platform_device_register_data(dev, "i2c-mux-gpio", + PLATFORM_DEVID_AUTO, &gpio_data, + sizeof(struct i2c_mux_gpio_platform_data)); + if (IS_ERR(priv->mux_pdev)) { + err = PTR_ERR(priv->mux_pdev); + priv->mux_pdev = NULL; + dev_err(dev, "Failed to register i2c-mux-gpio device\n"); + return err; + } + + return 0; +} + +static void __devexit i801_del_mux(struct i801_priv *priv) +{ + if (priv->mux_pdev) + platform_device_unregister(priv->mux_pdev); +} + +static unsigned int __devinit i801_get_adapter_class(struct i801_priv *priv) +{ + const struct dmi_system_id *id; + const struct i801_mux_config *mux_config; + unsigned int class = I2C_CLASS_HWMON | I2C_CLASS_SPD; + int i; + + id = dmi_first_match(mux_dmi_table); + if (id) { + /* Remove from branch classes from trunk */ + mux_config = id->driver_data; + for (i = 0; i < mux_config->n_values; i++) + class &= ~mux_config->classes[i]; + + /* Remember for later */ + priv->mux_drvdata = mux_config; + } + + return class; +} +#else +static inline int i801_add_mux(struct i801_priv *priv) { return 0; } +static inline void i801_del_mux(struct i801_priv *priv) { } + +static inline unsigned int i801_get_adapter_class(struct i801_priv *priv) +{ + return I2C_CLASS_HWMON | I2C_CLASS_SPD; +} +#endif + static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id *id) { @@ -913,7 +1093,7 @@ static int __devinit i801_probe(struct pci_dev *dev, i2c_set_adapdata(&priv->adapter, priv); priv->adapter.owner = THIS_MODULE; - priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD; + priv->adapter.class = i801_get_adapter_class(priv); priv->adapter.algo = &smbus_algorithm; priv->pci_dev = dev; @@ -1033,6 +1213,8 @@ static int __devinit i801_probe(struct pci_dev *dev, } i801_probe_optional_slaves(priv); + /* We ignore errors - multiplexing is optional */ + i801_add_mux(priv); pci_set_drvdata(dev, priv); @@ -1052,6 +1234,7 @@ static void __devexit i801_remove(struct pci_dev *dev) { struct i801_priv *priv = pci_get_drvdata(dev); + i801_del_mux(priv); i2c_del_adapter(&priv->adapter); pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c index 24565687ac9b..81d887869620 100644 --- a/drivers/i2c/busses/i2c-parport.c +++ b/drivers/i2c/busses/i2c-parport.c @@ -151,7 +151,7 @@ static const struct i2c_algo_bit_data parport_algo_data = { /* ----- I2c and parallel port call-back functions and structures --------- */ -void i2c_parport_irq(void *data) +static void i2c_parport_irq(void *data) { struct i2c_par *adapter = data; struct i2c_client *ara = adapter->ara; diff --git 
a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index ef511df2c965..8bbd6ece7c41 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -37,6 +37,7 @@ #include <linux/stddef.h> #include <linux/ioport.h> #include <linux/i2c.h> +#include <linux/slab.h> #include <linux/init.h> #include <linux/dmi.h> #include <linux/acpi.h> diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c index 388cbdc96db7..6aafa3d88ff0 100644 --- a/drivers/i2c/busses/i2c-scmi.c +++ b/drivers/i2c/busses/i2c-scmi.c @@ -426,19 +426,7 @@ static struct acpi_driver acpi_smbus_cmi_driver = { .remove = acpi_smbus_cmi_remove, }, }; - -static int __init acpi_smbus_cmi_init(void) -{ - return acpi_bus_register_driver(&acpi_smbus_cmi_driver); -} - -static void __exit acpi_smbus_cmi_exit(void) -{ - acpi_bus_unregister_driver(&acpi_smbus_cmi_driver); -} - -module_init(acpi_smbus_cmi_init); -module_exit(acpi_smbus_cmi_exit); +module_acpi_driver(acpi_smbus_cmi_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Crane Cai <crane.cai@amd.com>"); diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 333011c83d52..271c9a2b0fd7 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -401,6 +401,7 @@ found: case PCI_DEVICE_ID_VIA_CX700: case PCI_DEVICE_ID_VIA_VX800: case PCI_DEVICE_ID_VIA_VX855: + case PCI_DEVICE_ID_VIA_VX900: case PCI_DEVICE_ID_VIA_8251: case PCI_DEVICE_ID_VIA_8237: case PCI_DEVICE_ID_VIA_8237A: @@ -470,6 +471,8 @@ static DEFINE_PCI_DEVICE_TABLE(vt596_ids) = { .driver_data = SMBBA3 }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855), .driver_data = SMBBA3 }, + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX900), + .driver_data = SMBBA3 }, { 0, } }; diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index 2eacb7784d56..08aab57337dd 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -23,6 +23,8 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -37,8 +39,6 @@ #include <linux/scx200.h> -#define NAME "scx200_acb" - MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 ACCESS.bus Driver"); MODULE_ALIAS("platform:cs5535-smb"); @@ -398,7 +398,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface) outb(0x70, ACBCTL2); if (inb(ACBCTL2) != 0x70) { - pr_debug(NAME ": ACBCTL2 readback failed\n"); + pr_debug("ACBCTL2 readback failed\n"); return -ENXIO; } @@ -406,8 +406,7 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface) val = inb(ACBCTL1); if (val) { - pr_debug(NAME ": disabled, but ACBCTL1=0x%02x\n", - val); + pr_debug("disabled, but ACBCTL1=0x%02x\n", val); return -ENXIO; } @@ -417,8 +416,8 @@ static __devinit int scx200_acb_probe(struct scx200_acb_iface *iface) val = inb(ACBCTL1); if ((val & ACBCTL1_NMINTE) != ACBCTL1_NMINTE) { - pr_debug(NAME ": enabled, but NMINTE won't be set, " - "ACBCTL1=0x%02x\n", val); + pr_debug("enabled, but NMINTE won't be set, ACBCTL1=0x%02x\n", + val); return -ENXIO; } @@ -433,7 +432,7 @@ static __devinit struct scx200_acb_iface *scx200_create_iface(const char *text, iface = kzalloc(sizeof(*iface), GFP_KERNEL); if (!iface) { - printk(KERN_ERR NAME ": can't allocate memory\n"); + pr_err("can't allocate memory\n"); return NULL; } @@ -459,14 +458,14 @@ static int __devinit scx200_acb_create(struct scx200_acb_iface *iface) rc = scx200_acb_probe(iface); if (rc) { - printk(KERN_WARNING NAME ": probe failed\n"); + pr_warn("probe failed\n"); return rc; } scx200_acb_reset(iface); if (i2c_add_adapter(adapter) < 0) { - printk(KERN_ERR NAME ": failed to register\n"); + pr_err("failed to register\n"); return -ENODEV; } @@ -493,8 +492,7 @@ static struct scx200_acb_iface * __devinit scx200_create_dev(const char *text, return NULL; if (!request_region(base, 8, iface->adapter.name)) { - printk(KERN_ERR NAME ": can't allocate io 0x%lx-0x%lx\n", - base, base + 8 - 1); + pr_err("can't allocate io 0x%lx-0x%lx\n", base, base + 8 - 1); goto errout_free; } @@ -583,7 +581,7 @@ static __init void scx200_scan_isa(void) static int __init scx200_acb_init(void) { - pr_debug(NAME ": NatSemi SCx200 ACCESS.bus Driver\n"); + pr_debug("NatSemi SCx200 ACCESS.bus Driver\n"); /* First scan for ISA-based devices */ scx200_scan_isa(); /* XXX: should we care about errors? */ diff --git a/drivers/i2c/busses/scx200_i2c.c b/drivers/i2c/busses/scx200_i2c.c index 7ee0d502ceab..ae1258b95d60 100644 --- a/drivers/i2c/busses/scx200_i2c.c +++ b/drivers/i2c/busses/scx200_i2c.c @@ -21,6 +21,8 @@ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> @@ -31,8 +33,6 @@ #include <linux/scx200_gpio.h> -#define NAME "scx200_i2c" - MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>"); MODULE_DESCRIPTION("NatSemi SCx200 I2C Driver"); MODULE_LICENSE("GPL"); @@ -88,17 +88,17 @@ static struct i2c_adapter scx200_i2c_ops = { static int scx200_i2c_init(void) { - pr_debug(NAME ": NatSemi SCx200 I2C Driver\n"); + pr_debug("NatSemi SCx200 I2C Driver\n"); if (!scx200_gpio_present()) { - printk(KERN_ERR NAME ": no SCx200 gpio pins available\n"); + pr_err("no SCx200 gpio pins available\n"); return -ENODEV; } - pr_debug(NAME ": SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); + pr_debug("SCL=GPIO%02u, SDA=GPIO%02u\n", scl, sda); if (scl == -1 || sda == -1 || scl == sda) { - printk(KERN_ERR NAME ": scl and sda must be specified\n"); + pr_err("scl and sda must be specified\n"); return -EINVAL; } @@ -107,8 +107,7 @@ static int scx200_i2c_init(void) scx200_gpio_configure(sda, ~2, 5); if (i2c_bit_add_bus(&scx200_i2c_ops) < 0) { - printk(KERN_ERR NAME ": adapter %s registration failed\n", - scx200_i2c_ops.name); + pr_err("adapter %s registration failed\n", scx200_i2c_ops.name); return -ENODEV; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 2421d95130d4..a7edf987a339 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1989,12 +1989,22 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; int num = read_write == I2C_SMBUS_READ ? 2 : 1; - struct i2c_msg msg[2] = { { addr, flags, 1, msgbuf0 }, - { addr, flags | I2C_M_RD, 0, msgbuf1 } - }; int i; u8 partial_pec = 0; int status; + struct i2c_msg msg[2] = { + { + .addr = addr, + .flags = flags, + .len = 1, + .buf = msgbuf0, + }, { + .addr = addr, + .flags = flags | I2C_M_RD, + .len = 0, + .buf = msgbuf1, + }, + }; msgbuf0[0] = command; switch (size) { diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c index 1038c381aea5..d94e0ce78277 100644 --- a/drivers/i2c/i2c-mux.c +++ b/drivers/i2c/i2c-mux.c @@ -88,9 +88,23 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap) return parent->algo->functionality(parent); } +/* Return all parent classes, merged */ +static unsigned int i2c_mux_parent_classes(struct i2c_adapter *parent) +{ + unsigned int class = 0; + + do { + class |= parent->class; + parent = i2c_parent_is_i2c_adapter(parent); + } while (parent); + + return class; +} + struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, struct device *mux_dev, void *mux_priv, u32 force_nr, u32 chan_id, + unsigned int class, int (*select) (struct i2c_adapter *, void *, u32), int (*deselect) (struct i2c_adapter *, @@ -127,6 +141,14 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, priv->adap.algo_data = priv; priv->adap.dev.parent = &parent->dev; + /* Sanity check on class */ + if (i2c_mux_parent_classes(parent) & class) + dev_err(&parent->dev, + "Segment %d behind mux can't share classes with ancestors\n", + chan_id); + else + priv->adap.class = class; + /* * Try to populate the mux adapter's of_node, expands to * nothing if !CONFIG_OF. 
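For illustration only: with the class argument added to i2c_add_mux_adapter() above, a mux driver can request probing classes (for example I2C_CLASS_SPD) on one child segment, and the core now rejects a class already claimed by an ancestor adapter. A minimal caller sketch with hypothetical my_select/my_deselect callbacks:

	struct i2c_adapter *child;

	child = i2c_add_mux_adapter(parent, &pdev->dev, mux,
				    0 /* dynamic adapter nr */, chan_id,
				    I2C_CLASS_SPD, my_select, my_deselect);
	if (!child)
		return -ENODEV;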
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c index df3e0bf31eb3..92cdd2323b03 100644 --- a/drivers/i2c/i2c-smbus.c +++ b/drivers/i2c/i2c-smbus.c @@ -142,7 +142,8 @@ static int smbalert_probe(struct i2c_client *ara, struct i2c_adapter *adapter = ara->adapter; int res; - alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL); + alert = devm_kzalloc(&ara->dev, sizeof(struct i2c_smbus_alert), + GFP_KERNEL); if (!alert) return -ENOMEM; @@ -154,10 +155,8 @@ static int smbalert_probe(struct i2c_client *ara, if (setup->irq > 0) { res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq, 0, "smbus_alert", alert); - if (res) { - kfree(alert); + if (res) return res; - } } i2c_set_clientdata(ara, alert); @@ -167,14 +166,12 @@ static int smbalert_probe(struct i2c_client *ara, return 0; } -/* IRQ resource is managed so it is freed automatically */ +/* IRQ and memory resources are managed so they are freed automatically */ static int smbalert_remove(struct i2c_client *ara) { struct i2c_smbus_alert *alert = i2c_get_clientdata(ara); cancel_work_sync(&alert->alert); - - kfree(alert); return 0; } diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c index 68b1f8ec3436..566a6757a33d 100644 --- a/drivers/i2c/muxes/i2c-mux-gpio.c +++ b/drivers/i2c/muxes/i2c-mux-gpio.c @@ -21,6 +21,7 @@ struct gpiomux { struct i2c_adapter *parent; struct i2c_adapter **adap; /* child busses */ struct i2c_mux_gpio_platform_data data; + unsigned gpio_base; }; static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val) @@ -28,7 +29,8 @@ static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val) int i; for (i = 0; i < mux->data.n_gpios; i++) - gpio_set_value(mux->data.gpios[i], val & (1 << i)); + gpio_set_value(mux->gpio_base + mux->data.gpios[i], + val & (1 << i)); } static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan) @@ -49,13 +51,19 @@ static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan) return 0; } +static int __devinit match_gpio_chip_by_label(struct gpio_chip *chip, + void *data) +{ + return !strcmp(chip->label, data); +} + static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) { struct gpiomux *mux; struct i2c_mux_gpio_platform_data *pdata; struct i2c_adapter *parent; int (*deselect) (struct i2c_adapter *, void *, u32); - unsigned initial_state; + unsigned initial_state, gpio_base; int i, ret; pdata = pdev->dev.platform_data; @@ -64,6 +72,23 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) return -ENODEV; } + /* + * If a GPIO chip name is provided, the GPIO pin numbers provided are + * relative to its base GPIO number. Otherwise they are absolute. 
+ */ + if (pdata->gpio_chip) { + struct gpio_chip *gpio; + + gpio = gpiochip_find(pdata->gpio_chip, + match_gpio_chip_by_label); + if (!gpio) + return -EPROBE_DEFER; + + gpio_base = gpio->base; + } else { + gpio_base = 0; + } + parent = i2c_get_adapter(pdata->parent); if (!parent) { dev_err(&pdev->dev, "Parent adapter (%d) not found\n", @@ -71,7 +96,7 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) return -ENODEV; } - mux = kzalloc(sizeof(*mux), GFP_KERNEL); + mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); if (!mux) { ret = -ENOMEM; goto alloc_failed; @@ -79,11 +104,13 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) mux->parent = parent; mux->data = *pdata; - mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values, - GFP_KERNEL); + mux->gpio_base = gpio_base; + mux->adap = devm_kzalloc(&pdev->dev, + sizeof(*mux->adap) * pdata->n_values, + GFP_KERNEL); if (!mux->adap) { ret = -ENOMEM; - goto alloc_failed2; + goto alloc_failed; } if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) { @@ -95,17 +122,19 @@ static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev) } for (i = 0; i < pdata->n_gpios; i++) { - ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio"); + ret = gpio_request(gpio_base + pdata->gpios[i], "i2c-mux-gpio"); if (ret) goto err_request_gpio; - gpio_direction_output(pdata->gpios[i], + gpio_direction_output(gpio_base + pdata->gpios[i], initial_state & (1 << i)); } for (i = 0; i < pdata->n_values; i++) { u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0; + unsigned int class = pdata->classes ? pdata->classes[i] : 0; - mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i, + mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, + i, class, i2c_mux_gpio_select, deselect); if (!mux->adap[i]) { ret = -ENODEV; @@ -127,10 +156,7 @@ add_adapter_failed: i = pdata->n_gpios; err_request_gpio: for (; i > 0; i--) - gpio_free(pdata->gpios[i - 1]); - kfree(mux->adap); -alloc_failed2: - kfree(mux); + gpio_free(gpio_base + pdata->gpios[i - 1]); alloc_failed: i2c_put_adapter(parent); @@ -146,12 +172,10 @@ static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev) i2c_del_mux_adapter(mux->adap[i]); for (i = 0; i < mux->data.n_gpios; i++) - gpio_free(mux->data.gpios[i]); + gpio_free(mux->gpio_base + mux->data.gpios[i]); platform_set_drvdata(pdev, NULL); i2c_put_adapter(mux->parent); - kfree(mux->adap); - kfree(mux); return 0; } diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c index f8f72f39e0b5..f3b8f9a6a89b 100644 --- a/drivers/i2c/muxes/i2c-mux-pca9541.c +++ b/drivers/i2c/muxes/i2c-mux-pca9541.c @@ -354,7 +354,7 @@ static int pca9541_probe(struct i2c_client *client, if (pdata) force = pdata->modes[0].adap_id; data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client, - force, 0, + force, 0, 0, pca9541_select_chan, pca9541_release_chan); diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index f2dfe0d8fcce..8e4387235b69 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -186,7 +186,7 @@ static int pca954x_probe(struct i2c_client *client, { struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); struct pca954x_platform_data *pdata = client->dev.platform_data; - int num, force; + int num, force, class; struct pca954x *data; int ret = -ENODEV; @@ -216,18 +216,20 @@ static int pca954x_probe(struct i2c_client *client, /* Now create an adapter for each channel */ for (num = 
0; num < chips[data->type].nchans; num++) { force = 0; /* dynamic adap number */ + class = 0; /* no class by default */ if (pdata) { - if (num < pdata->num_modes) + if (num < pdata->num_modes) { /* force static number */ force = pdata->modes[num].adap_id; - else + class = pdata->modes[num].class; + } else /* discard unconfigured channels */ break; } data->virt_adaps[num] = i2c_add_mux_adapter(adap, &client->dev, client, - force, num, pca954x_select_chan, + force, num, class, pca954x_select_chan, (pdata && pdata->modes[num].deselect_on_exit) ? pca954x_deselect_mux : NULL); diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c index 46a669763476..5f097f309b9f 100644 --- a/drivers/i2c/muxes/i2c-mux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c @@ -221,7 +221,7 @@ static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev) (mux->pdata->base_bus_num + i) : 0; mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev, - mux, bus, i, + mux, bus, i, 0, i2c_mux_pinctrl_select, deselect); if (!mux->busses[i]) { diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index e8726177d103..b0f6b4c8ee14 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -413,6 +413,7 @@ static const struct x86_cpu_id intel_idle_ids[] = { ICPU(0x2a, idle_cpu_snb), ICPU(0x2d, idle_cpu_snb), ICPU(0x3a, idle_cpu_ivb), + ICPU(0x3e, idle_cpu_ivb), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids); diff --git a/drivers/input/misc/atlas_btns.c b/drivers/input/misc/atlas_btns.c index 601f7372f9c4..26f13131639a 100644 --- a/drivers/input/misc/atlas_btns.c +++ b/drivers/input/misc/atlas_btns.c @@ -151,22 +151,7 @@ static struct acpi_driver atlas_acpi_driver = { .remove = atlas_acpi_button_remove, }, }; - -static int __init atlas_acpi_init(void) -{ - if (acpi_disabled) - return -ENODEV; - - return acpi_bus_register_driver(&atlas_acpi_driver); -} - -static void __exit atlas_acpi_exit(void) -{ - acpi_bus_unregister_driver(&atlas_acpi_driver); -} - -module_init(atlas_acpi_init); -module_exit(atlas_acpi_exit); +module_acpi_driver(atlas_acpi_driver); MODULE_AUTHOR("Jaya Kumar"); MODULE_LICENSE("GPL"); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 9f69b561f5db..e39f9dbf297b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -42,7 +42,7 @@ config AMD_IOMMU select PCI_PRI select PCI_PASID select IOMMU_API - depends on X86_64 && PCI && ACPI + depends on X86_64 && PCI && ACPI && X86_IO_APIC ---help--- With this option you can enable support for AMD IOMMU hardware in your system. 
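For illustration only: the pca954x change above takes the per-channel class from platform data, so a board file could tag only the SPD segment for probing. A sketch assuming the platform-data layout referenced by that code (adap_id, class, deselect_on_exit); the bus numbers are made up:

	static struct pca954x_platform_mode example_modes[] = {
		{ .adap_id = 10, .class = I2C_CLASS_SPD, .deselect_on_exit = 1 },
		{ .adap_id = 11, .class = 0 },
	};
	static struct pca954x_platform_data example_pdata = {
		.modes		= example_modes,
		.num_modes	= ARRAY_SIZE(example_modes),
	};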
An IOMMU is a hardware component which provides diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index e89daf1b21b4..55074cba20eb 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -31,6 +31,12 @@ #include <linux/amd-iommu.h> #include <linux/notifier.h> #include <linux/export.h> +#include <linux/irq.h> +#include <linux/msi.h> +#include <asm/irq_remapping.h> +#include <asm/io_apic.h> +#include <asm/apic.h> +#include <asm/hw_irq.h> #include <asm/msidef.h> #include <asm/proto.h> #include <asm/iommu.h> @@ -39,6 +45,7 @@ #include "amd_iommu_proto.h" #include "amd_iommu_types.h" +#include "irq_remapping.h" #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) @@ -72,6 +79,9 @@ static DEFINE_SPINLOCK(iommu_pd_list_lock); static LIST_HEAD(dev_data_list); static DEFINE_SPINLOCK(dev_data_list_lock); +LIST_HEAD(ioapic_map); +LIST_HEAD(hpet_map); + /* * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. @@ -92,6 +102,8 @@ struct iommu_cmd { u32 data[4]; }; +struct kmem_cache *amd_iommu_irq_cache; + static void update_domain(struct protection_domain *domain); static int __init alloc_passthrough_domain(void); @@ -686,7 +698,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu) /* * Release iommu->lock because ppr-handling might need to - * re-aquire it + * re-acquire it */ spin_unlock_irqrestore(&iommu->lock, flags); @@ -804,7 +816,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); if (s) /* size bit - we flush more than one 4kb page */ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ + if (pde) /* PDE bit - we want to flush everything, not only the PTEs */ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; } @@ -899,6 +911,13 @@ static void build_inv_all(struct iommu_cmd *cmd) CMD_SET_TYPE(cmd, CMD_INV_ALL); } +static void build_inv_irt(struct iommu_cmd *cmd, u16 devid) +{ + memset(cmd, 0, sizeof(*cmd)); + cmd->data[0] = devid; + CMD_SET_TYPE(cmd, CMD_INV_IRT); +} + /* * Writes the command to the IOMMUs command buffer and informs the * hardware about the new command. 
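For illustration only: the new IOMMU_CAP_INTR_REMAP capability reported above lets upper layers check whether device interrupts are remapped before, for example, allowing device assignment. A hypothetical caller, assuming the generic iommu_domain_has_cap() helper of this kernel generation:

	/* Illustrative policy check in an assignment path */
	if (!iommu_domain_has_cap(domain, IOMMU_CAP_INTR_REMAP))
		return -EPERM;	/* refuse: interrupts would be unfiltered */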
@@ -1020,12 +1039,32 @@ static void iommu_flush_all(struct amd_iommu *iommu) iommu_completion_wait(iommu); } +static void iommu_flush_irt(struct amd_iommu *iommu, u16 devid) +{ + struct iommu_cmd cmd; + + build_inv_irt(&cmd, devid); + + iommu_queue_command(iommu, &cmd); +} + +static void iommu_flush_irt_all(struct amd_iommu *iommu) +{ + u32 devid; + + for (devid = 0; devid <= MAX_DEV_TABLE_ENTRIES; devid++) + iommu_flush_irt(iommu, devid); + + iommu_completion_wait(iommu); +} + void iommu_flush_all_caches(struct amd_iommu *iommu) { if (iommu_feature(iommu, FEATURE_IA)) { iommu_flush_all(iommu); } else { iommu_flush_dte_all(iommu); + iommu_flush_irt_all(iommu); iommu_flush_tlb_all(iommu); } } @@ -2155,7 +2194,7 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev) } /* - * If a device is not yet associated with a domain, this function does + * If a device is not yet associated with a domain, this function * assigns it visible for the hardware */ static int attach_device(struct device *dev, @@ -2405,7 +2444,7 @@ static struct protection_domain *get_domain(struct device *dev) if (domain != NULL) return domain; - /* Device not bount yet - bind it */ + /* Device not bound yet - bind it */ dma_dom = find_protection_domain(devid); if (!dma_dom) dma_dom = amd_iommu_rlookup_table[devid]->default_dom; @@ -2944,7 +2983,7 @@ static void __init prealloc_protection_domains(void) alloc_passthrough_domain(); dev_data->passthrough = true; attach_device(&dev->dev, pt_domain); - pr_info("AMD-Vi: Using passthough domain for device %s\n", + pr_info("AMD-Vi: Using passthrough domain for device %s\n", dev_name(&dev->dev)); } @@ -3316,6 +3355,8 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain, switch (cap) { case IOMMU_CAP_CACHE_COHERENCY: return 1; + case IOMMU_CAP_INTR_REMAP: + return irq_remapping_enabled; } return 0; @@ -3743,3 +3784,466 @@ int amd_iommu_device_info(struct pci_dev *pdev, return 0; } EXPORT_SYMBOL(amd_iommu_device_info); + +#ifdef CONFIG_IRQ_REMAP + +/***************************************************************************** + * + * Interrupt Remapping Implementation + * + *****************************************************************************/ + +union irte { + u32 val; + struct { + u32 valid : 1, + no_fault : 1, + int_type : 3, + rq_eoi : 1, + dm : 1, + rsvd_1 : 1, + destination : 8, + vector : 8, + rsvd_2 : 8; + } fields; +}; + +#define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6) +#define DTE_IRQ_REMAP_INTCTL (2ULL << 60) +#define DTE_IRQ_TABLE_LEN (8ULL << 1) +#define DTE_IRQ_REMAP_ENABLE 1ULL + +static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table) +{ + u64 dte; + + dte = amd_iommu_dev_table[devid].data[2]; + dte &= ~DTE_IRQ_PHYS_ADDR_MASK; + dte |= virt_to_phys(table->table); + dte |= DTE_IRQ_REMAP_INTCTL; + dte |= DTE_IRQ_TABLE_LEN; + dte |= DTE_IRQ_REMAP_ENABLE; + + amd_iommu_dev_table[devid].data[2] = dte; +} + +#define IRTE_ALLOCATED (~1U) + +static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) +{ + struct irq_remap_table *table = NULL; + struct amd_iommu *iommu; + unsigned long flags; + u16 alias; + + write_lock_irqsave(&amd_iommu_devtable_lock, flags); + + iommu = amd_iommu_rlookup_table[devid]; + if (!iommu) + goto out_unlock; + + table = irq_lookup_table[devid]; + if (table) + goto out; + + alias = amd_iommu_alias_table[devid]; + table = irq_lookup_table[alias]; + if (table) { + irq_lookup_table[devid] = table; + set_dte_irq_entry(devid, table); + iommu_flush_dte(iommu, devid); + goto out; + } + + /* Nothing 
there yet, allocate new irq remapping table */ + table = kzalloc(sizeof(*table), GFP_ATOMIC); + if (!table) + goto out; + + if (ioapic) + /* Keep the first 32 indexes free for IOAPIC interrupts */ + table->min_index = 32; + + table->table = kmem_cache_alloc(amd_iommu_irq_cache, GFP_ATOMIC); + if (!table->table) { + kfree(table); + table = NULL; + goto out; + } + + memset(table->table, 0, MAX_IRQS_PER_TABLE * sizeof(u32)); + + if (ioapic) { + int i; + + for (i = 0; i < 32; ++i) + table->table[i] = IRTE_ALLOCATED; + } + + irq_lookup_table[devid] = table; + set_dte_irq_entry(devid, table); + iommu_flush_dte(iommu, devid); + if (devid != alias) { + irq_lookup_table[alias] = table; + set_dte_irq_entry(devid, table); + iommu_flush_dte(iommu, alias); + } + +out: + iommu_completion_wait(iommu); + +out_unlock: + write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); + + return table; +} + +static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count) +{ + struct irq_remap_table *table; + unsigned long flags; + int index, c; + + table = get_irq_table(devid, false); + if (!table) + return -ENODEV; + + spin_lock_irqsave(&table->lock, flags); + + /* Scan table for free entries */ + for (c = 0, index = table->min_index; + index < MAX_IRQS_PER_TABLE; + ++index) { + if (table->table[index] == 0) + c += 1; + else + c = 0; + + if (c == count) { + struct irq_2_iommu *irte_info; + + for (; c != 0; --c) + table->table[index - c + 1] = IRTE_ALLOCATED; + + index -= count - 1; + + irte_info = &cfg->irq_2_iommu; + irte_info->sub_handle = devid; + irte_info->irte_index = index; + irte_info->iommu = (void *)cfg; + + goto out; + } + } + + index = -ENOSPC; + +out: + spin_unlock_irqrestore(&table->lock, flags); + + return index; +} + +static int get_irte(u16 devid, int index, union irte *irte) +{ + struct irq_remap_table *table; + unsigned long flags; + + table = get_irq_table(devid, false); + if (!table) + return -ENOMEM; + + spin_lock_irqsave(&table->lock, flags); + irte->val = table->table[index]; + spin_unlock_irqrestore(&table->lock, flags); + + return 0; +} + +static int modify_irte(u16 devid, int index, union irte irte) +{ + struct irq_remap_table *table; + struct amd_iommu *iommu; + unsigned long flags; + + iommu = amd_iommu_rlookup_table[devid]; + if (iommu == NULL) + return -EINVAL; + + table = get_irq_table(devid, false); + if (!table) + return -ENOMEM; + + spin_lock_irqsave(&table->lock, flags); + table->table[index] = irte.val; + spin_unlock_irqrestore(&table->lock, flags); + + iommu_flush_irt(iommu, devid); + iommu_completion_wait(iommu); + + return 0; +} + +static void free_irte(u16 devid, int index) +{ + struct irq_remap_table *table; + struct amd_iommu *iommu; + unsigned long flags; + + iommu = amd_iommu_rlookup_table[devid]; + if (iommu == NULL) + return; + + table = get_irq_table(devid, false); + if (!table) + return; + + spin_lock_irqsave(&table->lock, flags); + table->table[index] = 0; + spin_unlock_irqrestore(&table->lock, flags); + + iommu_flush_irt(iommu, devid); + iommu_completion_wait(iommu); +} + +static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry, + unsigned int destination, int vector, + struct io_apic_irq_attr *attr) +{ + struct irq_remap_table *table; + struct irq_2_iommu *irte_info; + struct irq_cfg *cfg; + union irte irte; + int ioapic_id; + int index; + int devid; + int ret; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return -EINVAL; + + irte_info = &cfg->irq_2_iommu; + ioapic_id = mpc_ioapic_id(attr->ioapic); + devid = get_ioapic_devid(ioapic_id); 
+ + if (devid < 0) + return devid; + + table = get_irq_table(devid, true); + if (table == NULL) + return -ENOMEM; + + index = attr->ioapic_pin; + + /* Setup IRQ remapping info */ + irte_info->sub_handle = devid; + irte_info->irte_index = index; + irte_info->iommu = (void *)cfg; + + /* Setup IRTE for IOMMU */ + irte.val = 0; + irte.fields.vector = vector; + irte.fields.int_type = apic->irq_delivery_mode; + irte.fields.destination = destination; + irte.fields.dm = apic->irq_dest_mode; + irte.fields.valid = 1; + + ret = modify_irte(devid, index, irte); + if (ret) + return ret; + + /* Setup IOAPIC entry */ + memset(entry, 0, sizeof(*entry)); + + entry->vector = index; + entry->mask = 0; + entry->trigger = attr->trigger; + entry->polarity = attr->polarity; + + /* + * Mask level triggered irqs. + */ + if (attr->trigger) + entry->mask = 1; + + return 0; +} + +static int set_affinity(struct irq_data *data, const struct cpumask *mask, + bool force) +{ + struct irq_2_iommu *irte_info; + unsigned int dest, irq; + struct irq_cfg *cfg; + union irte irte; + int err; + + if (!config_enabled(CONFIG_SMP)) + return -1; + + cfg = data->chip_data; + irq = data->irq; + irte_info = &cfg->irq_2_iommu; + + if (!cpumask_intersects(mask, cpu_online_mask)) + return -EINVAL; + + if (get_irte(irte_info->sub_handle, irte_info->irte_index, &irte)) + return -EBUSY; + + if (assign_irq_vector(irq, cfg, mask)) + return -EBUSY; + + err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest); + if (err) { + if (assign_irq_vector(irq, cfg, data->affinity)) + pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq); + return err; + } + + irte.fields.vector = cfg->vector; + irte.fields.destination = dest; + + modify_irte(irte_info->sub_handle, irte_info->irte_index, irte); + + if (cfg->move_in_progress) + send_cleanup_vector(cfg); + + cpumask_copy(data->affinity, mask); + + return 0; +} + +static int free_irq(int irq) +{ + struct irq_2_iommu *irte_info; + struct irq_cfg *cfg; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return -EINVAL; + + irte_info = &cfg->irq_2_iommu; + + free_irte(irte_info->sub_handle, irte_info->irte_index); + + return 0; +} + +static void compose_msi_msg(struct pci_dev *pdev, + unsigned int irq, unsigned int dest, + struct msi_msg *msg, u8 hpet_id) +{ + struct irq_2_iommu *irte_info; + struct irq_cfg *cfg; + union irte irte; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return; + + irte_info = &cfg->irq_2_iommu; + + irte.val = 0; + irte.fields.vector = cfg->vector; + irte.fields.int_type = apic->irq_delivery_mode; + irte.fields.destination = dest; + irte.fields.dm = apic->irq_dest_mode; + irte.fields.valid = 1; + + modify_irte(irte_info->sub_handle, irte_info->irte_index, irte); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = irte_info->irte_index; +} + +static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec) +{ + struct irq_cfg *cfg; + int index; + u16 devid; + + if (!pdev) + return -EINVAL; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return -EINVAL; + + devid = get_device_id(&pdev->dev); + index = alloc_irq_index(cfg, devid, nvec); + + return index < 0 ? 
MAX_IRQS_PER_TABLE : index; +} + +static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq, + int index, int offset) +{ + struct irq_2_iommu *irte_info; + struct irq_cfg *cfg; + u16 devid; + + if (!pdev) + return -EINVAL; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return -EINVAL; + + if (index >= MAX_IRQS_PER_TABLE) + return 0; + + devid = get_device_id(&pdev->dev); + irte_info = &cfg->irq_2_iommu; + + irte_info->sub_handle = devid; + irte_info->irte_index = index + offset; + irte_info->iommu = (void *)cfg; + + return 0; +} + +static int setup_hpet_msi(unsigned int irq, unsigned int id) +{ + struct irq_2_iommu *irte_info; + struct irq_cfg *cfg; + int index, devid; + + cfg = irq_get_chip_data(irq); + if (!cfg) + return -EINVAL; + + irte_info = &cfg->irq_2_iommu; + devid = get_hpet_devid(id); + if (devid < 0) + return devid; + + index = alloc_irq_index(cfg, devid, 1); + if (index < 0) + return index; + + irte_info->sub_handle = devid; + irte_info->irte_index = index; + irte_info->iommu = (void *)cfg; + + return 0; +} + +struct irq_remap_ops amd_iommu_irq_ops = { + .supported = amd_iommu_supported, + .prepare = amd_iommu_prepare, + .enable = amd_iommu_enable, + .disable = amd_iommu_disable, + .reenable = amd_iommu_reenable, + .enable_faulting = amd_iommu_enable_faulting, + .setup_ioapic_entry = setup_ioapic_entry, + .set_affinity = set_affinity, + .free_irq = free_irq, + .compose_msi_msg = compose_msi_msg, + .msi_alloc_irq = msi_alloc_irq, + .msi_setup_irq = msi_setup_irq, + .setup_hpet_msi = setup_hpet_msi, +}; +#endif diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 18a89b760aaa..18b0d99bd4d6 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -26,16 +26,18 @@ #include <linux/msi.h> #include <linux/amd-iommu.h> #include <linux/export.h> -#include <linux/acpi.h> #include <acpi/acpi.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/x86_init.h> #include <asm/iommu_table.h> +#include <asm/io_apic.h> +#include <asm/irq_remapping.h> #include "amd_iommu_proto.h" #include "amd_iommu_types.h" +#include "irq_remapping.h" /* * definitions for the ACPI scanning code @@ -55,6 +57,10 @@ #define IVHD_DEV_ALIAS_RANGE 0x43 #define IVHD_DEV_EXT_SELECT 0x46 #define IVHD_DEV_EXT_SELECT_RANGE 0x47 +#define IVHD_DEV_SPECIAL 0x48 + +#define IVHD_SPECIAL_IOAPIC 1 +#define IVHD_SPECIAL_HPET 2 #define IVHD_FLAG_HT_TUN_EN_MASK 0x01 #define IVHD_FLAG_PASSPW_EN_MASK 0x02 @@ -123,6 +129,7 @@ struct ivmd_header { } __attribute__((packed)); bool amd_iommu_dump; +bool amd_iommu_irq_remap __read_mostly; static bool amd_iommu_detected; static bool __initdata amd_iommu_disabled; @@ -178,7 +185,13 @@ u16 *amd_iommu_alias_table; struct amd_iommu **amd_iommu_rlookup_table; /* - * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap + * This table is used to find the irq remapping table for a given device id + * quickly. + */ +struct irq_remap_table **irq_lookup_table; + +/* + * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap * to know which ones are already in use. */ unsigned long *amd_iommu_pd_alloc_bitmap; @@ -478,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table) /**************************************************************************** * - * The following functions belong the the code path which parses the ACPI table + * The following functions belong to the code path which parses the ACPI table * the second time. 
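For illustration only: the union irte introduced earlier in this amd_iommu.c hunk packs one remapped interrupt into a single 32-bit table entry. A sketch of filling it the way setup_ioapic_entry() and compose_msi_msg() do, with made-up vector and destination values:

	union irte e = { .val = 0 };

	e.fields.vector      = 0x41;	/* illustrative vector        */
	e.fields.int_type    = 0;	/* fixed delivery mode        */
	e.fields.destination = 0x02;	/* illustrative APIC ID       */
	e.fields.dm          = 0;	/* physical destination mode  */
	e.fields.valid       = 1;
	/* modify_irte(devid, index, e) then writes the entry and
	 * flushes that device's IRT on the IOMMU. */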
In this ACPI parsing iteration we allocate IOMMU specific * data structures, initialize the device/alias/rlookup table and also * basically initialize the hardware. @@ -690,8 +703,33 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu, set_iommu_for_device(iommu, devid); } +static int add_special_device(u8 type, u8 id, u16 devid) +{ + struct devid_map *entry; + struct list_head *list; + + if (type != IVHD_SPECIAL_IOAPIC && type != IVHD_SPECIAL_HPET) + return -EINVAL; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->id = id; + entry->devid = devid; + + if (type == IVHD_SPECIAL_IOAPIC) + list = &ioapic_map; + else + list = &hpet_map; + + list_add_tail(&entry->list, list); + + return 0; +} + /* - * Reads the device exclusion range from ACPI and initialize IOMMU with + * Reads the device exclusion range from ACPI and initializes the IOMMU with * it */ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) @@ -717,7 +755,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) * Takes a pointer to an AMD IOMMU entry in the ACPI table and * initializes the hardware and our data structures with it. */ -static void __init init_iommu_from_acpi(struct amd_iommu *iommu, +static int __init init_iommu_from_acpi(struct amd_iommu *iommu, struct ivhd_header *h) { u8 *p = (u8 *)h; @@ -867,12 +905,43 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu, flags, ext_flags); } break; + case IVHD_DEV_SPECIAL: { + u8 handle, type; + const char *var; + u16 devid; + int ret; + + handle = e->ext & 0xff; + devid = (e->ext >> 8) & 0xffff; + type = (e->ext >> 24) & 0xff; + + if (type == IVHD_SPECIAL_IOAPIC) + var = "IOAPIC"; + else if (type == IVHD_SPECIAL_HPET) + var = "HPET"; + else + var = "UNKNOWN"; + + DUMP_printk(" DEV_SPECIAL(%s[%d])\t\tdevid: %02x:%02x.%x\n", + var, (int)handle, + PCI_BUS(devid), + PCI_SLOT(devid), + PCI_FUNC(devid)); + + set_dev_entry_from_acpi(iommu, devid, e->flags, 0); + ret = add_special_device(type, handle, devid); + if (ret) + return ret; + break; + } default: break; } p += ivhd_entry_length(p); } + + return 0; } /* Initializes the device->iommu mapping for the driver */ @@ -912,6 +981,8 @@ static void __init free_iommu_all(void) */ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) { + int ret; + spin_lock_init(&iommu->lock); /* Add IOMMU to internal data structures */ @@ -947,7 +1018,16 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) iommu->int_enabled = false; - init_iommu_from_acpi(iommu, h); + ret = init_iommu_from_acpi(iommu, h); + if (ret) + return ret; + + /* + * Make sure IOMMU is not considered to translate itself. The IVRS + * table tells us so, but this is a lie! + */ + amd_iommu_rlookup_table[iommu->devid] = NULL; + init_iommu_devices(iommu); return 0; @@ -1115,9 +1195,11 @@ static void print_iommu_info(void) if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); } - } pr_cont("\n"); + } } + if (irq_remapping_enabled) + pr_info("AMD-Vi: Interrupt remapping enabled\n"); } static int __init amd_iommu_init_pci(void) @@ -1141,7 +1223,7 @@ static int __init amd_iommu_init_pci(void) /**************************************************************************** * * The following functions initialize the MSI interrupts for all IOMMUs - * in the system. Its a bit challenging because there could be multiple + * in the system. 
It's a bit challenging because there could be multiple * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per * pci_dev. * @@ -1199,7 +1281,7 @@ enable_faults: * * The next functions belong to the third pass of parsing the ACPI * table. In this last pass the memory mapping requirements are - * gathered (like exclusion and unity mapping reanges). + * gathered (like exclusion and unity mapping ranges). * ****************************************************************************/ @@ -1308,7 +1390,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table) * Init the device table to not allow DMA access for devices and * suppress all page faults */ -static void init_device_table(void) +static void init_device_table_dma(void) { u32 devid; @@ -1318,6 +1400,27 @@ static void init_device_table(void) } } +static void __init uninit_device_table_dma(void) +{ + u32 devid; + + for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { + amd_iommu_dev_table[devid].data[0] = 0ULL; + amd_iommu_dev_table[devid].data[1] = 0ULL; + } +} + +static void init_device_table(void) +{ + u32 devid; + + if (!amd_iommu_irq_remap) + return; + + for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) + set_dev_entry_bit(devid, DEV_ENTRY_IRQ_TBL_EN); +} + static void iommu_init_flags(struct amd_iommu *iommu) { iommu->acpi_flags & IVHD_FLAG_HT_TUN_EN_MASK ? @@ -1466,10 +1569,14 @@ static struct syscore_ops amd_iommu_syscore_ops = { static void __init free_on_init_error(void) { - amd_iommu_uninit_devices(); + free_pages((unsigned long)irq_lookup_table, + get_order(rlookup_table_size)); - free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, - get_order(MAX_DOMAIN_ID/8)); + if (amd_iommu_irq_cache) { + kmem_cache_destroy(amd_iommu_irq_cache); + amd_iommu_irq_cache = NULL; + + } free_pages((unsigned long)amd_iommu_rlookup_table, get_order(rlookup_table_size)); @@ -1482,8 +1589,6 @@ static void __init free_on_init_error(void) free_iommu_all(); - free_unity_maps(); - #ifdef CONFIG_GART_IOMMU /* * We failed to initialize the AMD IOMMU - try fallback to GART @@ -1494,6 +1599,33 @@ static void __init free_on_init_error(void) #endif } +static bool __init check_ioapic_information(void) +{ + int idx; + + for (idx = 0; idx < nr_ioapics; idx++) { + int id = mpc_ioapic_id(idx); + + if (get_ioapic_devid(id) < 0) { + pr_err(FW_BUG "AMD-Vi: IO-APIC[%d] not in IVRS table\n", id); + pr_err("AMD-Vi: Disabling interrupt remapping due to BIOS Bug\n"); + return false; + } + } + + return true; +} + +static void __init free_dma_resources(void) +{ + amd_iommu_uninit_devices(); + + free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, + get_order(MAX_DOMAIN_ID/8)); + + free_unity_maps(); +} + /* * This is the hardware init function for AMD IOMMU in the system. * This function is called either from amd_iommu_init or from the interrupt @@ -1580,9 +1712,6 @@ static int __init early_amd_iommu_init(void) if (amd_iommu_pd_alloc_bitmap == NULL) goto out; - /* init the device table */ - init_device_table(); - /* * let all alias entries point to itself */ @@ -1605,10 +1734,35 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + if (amd_iommu_irq_remap) + amd_iommu_irq_remap = check_ioapic_information(); + + if (amd_iommu_irq_remap) { + /* + * Interrupt remapping enabled, create kmem_cache for the + * remapping tables. 
+ */ + amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache", + MAX_IRQS_PER_TABLE * sizeof(u32), + IRQ_TABLE_ALIGNMENT, + 0, NULL); + if (!amd_iommu_irq_cache) + goto out; + + irq_lookup_table = (void *)__get_free_pages( + GFP_KERNEL | __GFP_ZERO, + get_order(rlookup_table_size)); + if (!irq_lookup_table) + goto out; + } + ret = init_memory_definitions(ivrs_base); if (ret) goto out; + /* init the device table */ + init_device_table(); + out: /* Don't leak any ACPI memory */ early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); @@ -1652,13 +1806,22 @@ static bool detect_ivrs(void) /* Make sure ACS will be enabled during PCI probe */ pci_request_acs(); + if (!disable_irq_remap) + amd_iommu_irq_remap = true; + return true; } static int amd_iommu_init_dma(void) { + struct amd_iommu *iommu; int ret; + init_device_table_dma(); + + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); + if (iommu_pass_through) ret = amd_iommu_init_passthrough(); else @@ -1749,7 +1912,48 @@ static int __init iommu_go_to_state(enum iommu_init_state state) return ret; } +#ifdef CONFIG_IRQ_REMAP +int __init amd_iommu_prepare(void) +{ + return iommu_go_to_state(IOMMU_ACPI_FINISHED); +} + +int __init amd_iommu_supported(void) +{ + return amd_iommu_irq_remap ? 1 : 0; +} + +int __init amd_iommu_enable(void) +{ + int ret; + + ret = iommu_go_to_state(IOMMU_ENABLED); + if (ret) + return ret; + + irq_remapping_enabled = 1; + + return 0; +} + +void amd_iommu_disable(void) +{ + amd_iommu_suspend(); +} + +int amd_iommu_reenable(int mode) +{ + amd_iommu_resume(); + + return 0; +} +int __init amd_iommu_enable_faulting(void) +{ + /* We enable MSI later when PCI is initialized */ + return 0; +} +#endif /* * This is the core init function for AMD IOMMU hardware in the system. 
@@ -1762,8 +1966,17 @@ static int __init amd_iommu_init(void) ret = iommu_go_to_state(IOMMU_INITIALIZED); if (ret) { - disable_iommus(); - free_on_init_error(); + free_dma_resources(); + if (!irq_remapping_enabled) { + disable_iommus(); + free_on_init_error(); + } else { + struct amd_iommu *iommu; + + uninit_device_table_dma(); + for_each_iommu(iommu) + iommu_flush_all_caches(iommu); + } } return ret; diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 1a7f41c6cc66..c294961bdd36 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h @@ -32,6 +32,14 @@ extern void amd_iommu_uninit_devices(void); extern void amd_iommu_init_notifier(void); extern void amd_iommu_init_api(void); +/* Needed for interrupt remapping */ +extern int amd_iommu_supported(void); +extern int amd_iommu_prepare(void); +extern int amd_iommu_enable(void); +extern void amd_iommu_disable(void); +extern int amd_iommu_reenable(int); +extern int amd_iommu_enable_faulting(void); + /* IOMMUv2 specific functions */ struct iommu_domain; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index d0dab865a8b8..c9aa3d079ff0 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -152,6 +152,7 @@ #define CMD_INV_DEV_ENTRY 0x02 #define CMD_INV_IOMMU_PAGES 0x03 #define CMD_INV_IOTLB_PAGES 0x04 +#define CMD_INV_IRT 0x05 #define CMD_COMPLETE_PPR 0x07 #define CMD_INV_ALL 0x08 @@ -175,6 +176,7 @@ #define DEV_ENTRY_EX 0x67 #define DEV_ENTRY_SYSMGT1 0x68 #define DEV_ENTRY_SYSMGT2 0x69 +#define DEV_ENTRY_IRQ_TBL_EN 0x80 #define DEV_ENTRY_INIT_PASS 0xb8 #define DEV_ENTRY_EINT_PASS 0xb9 #define DEV_ENTRY_NMI_PASS 0xba @@ -183,6 +185,8 @@ #define DEV_ENTRY_MODE_MASK 0x07 #define DEV_ENTRY_MODE_SHIFT 0x09 +#define MAX_DEV_TABLE_ENTRIES 0xffff + /* constants to configure the command buffer */ #define CMD_BUFFER_SIZE 8192 #define CMD_BUFFER_UNINITIALIZED 1 @@ -255,7 +259,7 @@ #define PAGE_SIZE_ALIGN(address, pagesize) \ ((address) & ~((pagesize) - 1)) /* - * Creates an IOMMU PTE for an address an a given pagesize + * Creates an IOMMU PTE for an address and a given pagesize * The PTE has no permission bits set * Pagesize is expected to be a power-of-two larger than 4096 */ @@ -334,6 +338,23 @@ extern bool amd_iommu_np_cache; /* Only true if all IOMMUs support device IOTLBs */ extern bool amd_iommu_iotlb_sup; +#define MAX_IRQS_PER_TABLE 256 +#define IRQ_TABLE_ALIGNMENT 128 + +struct irq_remap_table { + spinlock_t lock; + unsigned min_index; + u32 *table; +}; + +extern struct irq_remap_table **irq_lookup_table; + +/* Interrupt remapping feature used? 
*/ +extern bool amd_iommu_irq_remap; + +/* kmem_cache to get tables with 128 byte alignement */ +extern struct kmem_cache *amd_iommu_irq_cache; + /* * Make iterating over all IOMMUs easier */ @@ -404,7 +425,7 @@ struct iommu_dev_data { struct list_head dev_data_list; /* For global dev_data_list */ struct iommu_dev_data *alias_data;/* The alias dev_data */ struct protection_domain *domain; /* Domain the device is bound to */ - atomic_t bind; /* Domain attach reverent count */ + atomic_t bind; /* Domain attach reference count */ u16 devid; /* PCI Device ID */ bool iommu_v2; /* Device can make use of IOMMUv2 */ bool passthrough; /* Default for device is pt_domain */ @@ -565,6 +586,16 @@ struct amd_iommu { u32 stored_l2[0x83]; }; +struct devid_map { + struct list_head list; + u8 id; + u16 devid; +}; + +/* Map HPET and IOAPIC ids to the devid used by the IOMMU */ +extern struct list_head ioapic_map; +extern struct list_head hpet_map; + /* * List with all IOMMUs in the system. This list is not locked because it is * only written and read at driver initialization or suspend time @@ -678,6 +709,30 @@ static inline u16 calc_devid(u8 bus, u8 devfn) return (((u16)bus) << 8) | devfn; } +static inline int get_ioapic_devid(int id) +{ + struct devid_map *entry; + + list_for_each_entry(entry, &ioapic_map, list) { + if (entry->id == id) + return entry->devid; + } + + return -EINVAL; +} + +static inline int get_hpet_devid(int id) +{ + struct devid_map *entry; + + list_for_each_entry(entry, &hpet_map, list) { + if (entry->id == id) + return entry->devid; + } + + return -EINVAL; +} + #ifdef CONFIG_AMD_IOMMU_STATS struct __iommu_counter { diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 80bad32aa463..7fe44f83cc37 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -840,8 +840,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain, if (__exynos_sysmmu_disable(data)) { dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n", __func__, __pa(priv->pgtable)); - list_del(&data->node); - INIT_LIST_HEAD(&data->node); + list_del_init(&data->node); } else { dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed", diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index db820d7dd0bc..d4a4cd445cab 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -589,7 +589,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain) { int i; - domain->iommu_coherency = 1; + i = find_first_bit(domain->iommu_bmp, g_num_of_iommus); + + domain->iommu_coherency = i < g_num_of_iommus ? 
1 : 0; for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) { if (!ecap_coherent(g_iommus[i]->ecap)) { diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c index 151690db692c..faf85d6e33fe 100644 --- a/drivers/iommu/irq_remapping.c +++ b/drivers/iommu/irq_remapping.c @@ -51,6 +51,11 @@ early_param("intremap", setup_irqremap); void __init setup_irq_remapping_ops(void) { remap_ops = &intel_irq_remap_ops; + +#ifdef CONFIG_AMD_IOMMU + if (amd_iommu_irq_ops.prepare() == 0) + remap_ops = &amd_iommu_irq_ops; +#endif } int irq_remapping_supported(void) diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index b12974cc1dfe..95363acb583f 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h @@ -82,6 +82,12 @@ struct irq_remap_ops { }; extern struct irq_remap_ops intel_irq_remap_ops; +extern struct irq_remap_ops amd_iommu_irq_ops; + +#else /* CONFIG_IRQ_REMAP */ + +#define irq_remapping_enabled 0 +#define disable_irq_remap 1 #endif /* CONFIG_IRQ_REMAP */ diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 2a4bb36bc688..0b4d62e0c645 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -32,14 +32,55 @@ #include <linux/io.h> #include <linux/of.h> #include <linux/of_iommu.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> #include <asm/page.h> #include <asm/cacheflush.h> #include <mach/iomap.h> -#include <mach/smmu.h> #include <mach/tegra-ahb.h> +enum smmu_hwgrp { + HWGRP_AFI, + HWGRP_AVPC, + HWGRP_DC, + HWGRP_DCB, + HWGRP_EPP, + HWGRP_G2, + HWGRP_HC, + HWGRP_HDA, + HWGRP_ISP, + HWGRP_MPE, + HWGRP_NV, + HWGRP_NV2, + HWGRP_PPCS, + HWGRP_SATA, + HWGRP_VDE, + HWGRP_VI, + + HWGRP_COUNT, + + HWGRP_END = ~0, +}; + +#define HWG_AFI (1 << HWGRP_AFI) +#define HWG_AVPC (1 << HWGRP_AVPC) +#define HWG_DC (1 << HWGRP_DC) +#define HWG_DCB (1 << HWGRP_DCB) +#define HWG_EPP (1 << HWGRP_EPP) +#define HWG_G2 (1 << HWGRP_G2) +#define HWG_HC (1 << HWGRP_HC) +#define HWG_HDA (1 << HWGRP_HDA) +#define HWG_ISP (1 << HWGRP_ISP) +#define HWG_MPE (1 << HWGRP_MPE) +#define HWG_NV (1 << HWGRP_NV) +#define HWG_NV2 (1 << HWGRP_NV2) +#define HWG_PPCS (1 << HWGRP_PPCS) +#define HWG_SATA (1 << HWGRP_SATA) +#define HWG_VDE (1 << HWGRP_VDE) +#define HWG_VI (1 << HWGRP_VI) + /* bitmap of the page sizes currently supported */ #define SMMU_IOMMU_PGSIZES (SZ_4K) @@ -47,16 +88,29 @@ #define SMMU_CONFIG_DISABLE 0 #define SMMU_CONFIG_ENABLE 1 -#define SMMU_TLB_CONFIG 0x14 -#define SMMU_TLB_CONFIG_STATS__MASK (1 << 31) -#define SMMU_TLB_CONFIG_STATS__ENABLE (1 << 31) +/* REVISIT: To support multiple MCs */ +enum { + _MC = 0, +}; + +enum { + _TLB = 0, + _PTC, +}; + +#define SMMU_CACHE_CONFIG_BASE 0x14 +#define __SMMU_CACHE_CONFIG(mc, cache) (SMMU_CACHE_CONFIG_BASE + 4 * cache) +#define SMMU_CACHE_CONFIG(cache) __SMMU_CACHE_CONFIG(_MC, cache) + +#define SMMU_CACHE_CONFIG_STATS_SHIFT 31 +#define SMMU_CACHE_CONFIG_STATS_ENABLE (1 << SMMU_CACHE_CONFIG_STATS_SHIFT) +#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT 30 +#define SMMU_CACHE_CONFIG_STATS_TEST (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT) + #define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29) #define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE 0x10 #define SMMU_TLB_CONFIG_RESET_VAL 0x20000010 -#define SMMU_PTC_CONFIG 0x18 -#define SMMU_PTC_CONFIG_STATS__MASK (1 << 31) -#define SMMU_PTC_CONFIG_STATS__ENABLE (1 << 31) #define SMMU_PTC_CONFIG_CACHE__ENABLE (1 << 29) #define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN 0x3f #define SMMU_PTC_CONFIG_RESET_VAL 0x2000003f @@ -86,10 +140,10 @@ 
#define SMMU_ASID_SECURITY 0x38 -#define SMMU_STATS_TLB_HIT_COUNT 0x1f0 -#define SMMU_STATS_TLB_MISS_COUNT 0x1f4 -#define SMMU_STATS_PTC_HIT_COUNT 0x1f8 -#define SMMU_STATS_PTC_MISS_COUNT 0x1fc +#define SMMU_STATS_CACHE_COUNT_BASE 0x1f0 + +#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss) \ + (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss) #define SMMU_TRANSLATION_ENABLE_0 0x228 #define SMMU_TRANSLATION_ENABLE_1 0x22c @@ -231,6 +285,12 @@ struct smmu_as { spinlock_t client_lock; /* for client list */ }; +struct smmu_debugfs_info { + struct smmu_device *smmu; + int mc; + int cache; +}; + /* * Per SMMU device - IOMMU device */ @@ -251,6 +311,9 @@ struct smmu_device { unsigned long translation_enable_2; unsigned long asid_security; + struct dentry *debugfs_root; + struct smmu_debugfs_info *debugfs_info; + struct device_node *ahb; int num_as; @@ -412,8 +475,8 @@ static int smmu_setup_regs(struct smmu_device *smmu) smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1); smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2); smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY); - smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG); - smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG); + smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB)); + smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC)); smmu_flush_regs(smmu, 1); @@ -895,6 +958,175 @@ static struct iommu_ops smmu_iommu_ops = { .pgsize_bitmap = SMMU_IOMMU_PGSIZES, }; +/* Should be in the order of enum */ +static const char * const smmu_debugfs_mc[] = { "mc", }; +static const char * const smmu_debugfs_cache[] = { "tlb", "ptc", }; + +static ssize_t smmu_debugfs_stats_write(struct file *file, + const char __user *buffer, + size_t count, loff_t *pos) +{ + struct smmu_debugfs_info *info; + struct smmu_device *smmu; + struct dentry *dent; + int i; + enum { + _OFF = 0, + _ON, + _RESET, + }; + const char * const command[] = { + [_OFF] = "off", + [_ON] = "on", + [_RESET] = "reset", + }; + char str[] = "reset"; + u32 val; + size_t offs; + + count = min_t(size_t, count, sizeof(str)); + if (copy_from_user(str, buffer, count)) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(command); i++) + if (strncmp(str, command[i], + strlen(command[i])) == 0) + break; + + if (i == ARRAY_SIZE(command)) + return -EINVAL; + + dent = file->f_dentry; + info = dent->d_inode->i_private; + smmu = info->smmu; + + offs = SMMU_CACHE_CONFIG(info->cache); + val = smmu_read(smmu, offs); + switch (i) { + case _OFF: + val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE; + val &= ~SMMU_CACHE_CONFIG_STATS_TEST; + smmu_write(smmu, val, offs); + break; + case _ON: + val |= SMMU_CACHE_CONFIG_STATS_ENABLE; + val &= ~SMMU_CACHE_CONFIG_STATS_TEST; + smmu_write(smmu, val, offs); + break; + case _RESET: + val |= SMMU_CACHE_CONFIG_STATS_TEST; + smmu_write(smmu, val, offs); + val &= ~SMMU_CACHE_CONFIG_STATS_TEST; + smmu_write(smmu, val, offs); + break; + default: + BUG(); + break; + } + + dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__, + val, smmu_read(smmu, offs), offs); + + return count; +} + +static int smmu_debugfs_stats_show(struct seq_file *s, void *v) +{ + struct smmu_debugfs_info *info; + struct smmu_device *smmu; + struct dentry *dent; + int i; + const char * const stats[] = { "hit", "miss", }; + + dent = d_find_alias(s->private); + info = dent->d_inode->i_private; + smmu = info->smmu; + + for (i = 0; i < ARRAY_SIZE(stats); i++) { + u32 val; + size_t offs; + + offs = 
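/*
 * Hit/miss counters are laid out as SMMU_STATS_CACHE_COUNT_BASE +
 * 8 * cache + 4 * hitmiss, i.e. TLB hit 0x1f0, TLB miss 0x1f4,
 * PTC hit 0x1f8, PTC miss 0x1fc - the same offsets the removed
 * SMMU_STATS_TLB_*_COUNT / SMMU_STATS_PTC_*_COUNT defines used.
 */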
SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i); + val = smmu_read(smmu, offs); + seq_printf(s, "%s:%08x ", stats[i], val); + + dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__, + stats[i], val, offs); + } + seq_printf(s, "\n"); + + return 0; +} + +static int smmu_debugfs_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, smmu_debugfs_stats_show, inode); +} + +static const struct file_operations smmu_debugfs_stats_fops = { + .open = smmu_debugfs_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = smmu_debugfs_stats_write, +}; + +static void smmu_debugfs_delete(struct smmu_device *smmu) +{ + debugfs_remove_recursive(smmu->debugfs_root); + kfree(smmu->debugfs_info); +} + +static void smmu_debugfs_create(struct smmu_device *smmu) +{ + int i; + size_t bytes; + struct dentry *root; + + bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) * + sizeof(*smmu->debugfs_info); + smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL); + if (!smmu->debugfs_info) + return; + + root = debugfs_create_dir(dev_name(smmu->dev), NULL); + if (!root) + goto err_out; + smmu->debugfs_root = root; + + for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) { + int j; + struct dentry *mc; + + mc = debugfs_create_dir(smmu_debugfs_mc[i], root); + if (!mc) + goto err_out; + + for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) { + struct dentry *cache; + struct smmu_debugfs_info *info; + + info = smmu->debugfs_info; + info += i * ARRAY_SIZE(smmu_debugfs_mc) + j; + info->smmu = smmu; + info->mc = i; + info->cache = j; + + cache = debugfs_create_file(smmu_debugfs_cache[j], + S_IWUGO | S_IRUGO, mc, + (void *)info, + &smmu_debugfs_stats_fops); + if (!cache) + goto err_out; + } + } + + return; + +err_out: + smmu_debugfs_delete(smmu); +} + static int tegra_smmu_suspend(struct device *dev) { struct smmu_device *smmu = dev_get_drvdata(dev); @@ -999,6 +1231,7 @@ static int tegra_smmu_probe(struct platform_device *pdev) if (!smmu->avp_vector_page) return -ENOMEM; + smmu_debugfs_create(smmu); smmu_handle = smmu; return 0; } @@ -1008,6 +1241,8 @@ static int tegra_smmu_remove(struct platform_device *pdev) struct smmu_device *smmu = platform_get_drvdata(pdev); int i; + smmu_debugfs_delete(smmu); + smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG); for (i = 0; i < smmu->num_as; i++) free_pdir(&smmu->as[i]); diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 9e8388efd88e..fc92ccbd71dc 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c @@ -263,6 +263,9 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, struct virtqueue *vq; int err; + if (!name) + return NULL; + /* We must have this many virtqueues. */ if (index >= ldev->desc->num_vq) return ERR_PTR(-ENOENT); @@ -296,7 +299,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev, * to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu * barriers. 
*/ - vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev, + vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev, true, lvq->pages, lg_notify, callback, name); if (!vq) { err = -ENOMEM; diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 1ef6e1e8c6c6..33e3df9e39ca 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -175,6 +175,28 @@ config PINCTRL_EXYNOS4 bool "Pinctrl driver data for Exynos4 SoC" select PINCTRL_SAMSUNG +config PINCTRL_MVEBU + bool + depends on ARCH_MVEBU + select PINMUX + select PINCONF + +config PINCTRL_DOVE + bool + select PINCTRL_MVEBU + +config PINCTRL_KIRKWOOD + bool + select PINCTRL_MVEBU + +config PINCTRL_ARMADA_370 + bool + select PINCTRL_MVEBU + +config PINCTRL_ARMADA_XP + bool + select PINCTRL_MVEBU + source "drivers/pinctrl/spear/Kconfig" endmenu diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 698527dce29d..f162e0196300 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -35,5 +35,10 @@ obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o obj-$(CONFIG_PINCTRL_SAMSUNG) += pinctrl-samsung.o obj-$(CONFIG_PINCTRL_EXYNOS4) += pinctrl-exynos.o +obj-$(CONFIG_PINCTRL_MVEBU) += pinctrl-mvebu.o +obj-$(CONFIG_PINCTRL_DOVE) += pinctrl-dove.o +obj-$(CONFIG_PINCTRL_KIRKWOOD) += pinctrl-kirkwood.o +obj-$(CONFIG_PINCTRL_ARMADA_370) += pinctrl-armada-370.o +obj-$(CONFIG_PINCTRL_ARMADA_XP) += pinctrl-armada-xp.o obj-$(CONFIG_PLAT_SPEAR) += spear/ diff --git a/drivers/pinctrl/pinctrl-armada-370.c b/drivers/pinctrl/pinctrl-armada-370.c new file mode 100644 index 000000000000..c907647de6ad --- /dev/null +++ b/drivers/pinctrl/pinctrl-armada-370.c @@ -0,0 +1,421 @@ +/* + * Marvell Armada 370 pinctrl driver based on mvebu pinctrl core + * + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
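 *
 * This file describes the MPP functions of the MV88F6710 (Armada 370)
 * in the mv88f6710_mpp_modes table below; the SoC comes in a single
 * variant, so no per-variant masking is needed.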
+ */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-mvebu.h" + +static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = { + MPP_MODE(0, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "uart0", "rxd")), + MPP_MODE(1, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "uart0", "txd")), + MPP_MODE(2, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "i2c0", "sck"), + MPP_FUNCTION(0x2, "uart0", "txd")), + MPP_MODE(3, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "i2c0", "sda"), + MPP_FUNCTION(0x2, "uart0", "rxd")), + MPP_MODE(4, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "cpu_pd", "vdd")), + MPP_MODE(5, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txclko"), + MPP_FUNCTION(0x2, "uart1", "txd"), + MPP_FUNCTION(0x4, "spi1", "clk"), + MPP_FUNCTION(0x5, "audio", "mclk")), + MPP_MODE(6, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "txd0"), + MPP_FUNCTION(0x2, "sata0", "prsnt"), + MPP_FUNCTION(0x4, "tdm", "rst"), + MPP_FUNCTION(0x5, "audio", "sdo")), + MPP_MODE(7, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd1"), + MPP_FUNCTION(0x4, "tdm", "tdx"), + MPP_FUNCTION(0x5, "audio", "lrclk")), + MPP_MODE(8, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "txd2"), + MPP_FUNCTION(0x2, "uart0", "rts"), + MPP_FUNCTION(0x4, "tdm", "drx"), + MPP_FUNCTION(0x5, "audio", "bclk")), + MPP_MODE(9, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd3"), + MPP_FUNCTION(0x2, "uart1", "txd"), + MPP_FUNCTION(0x3, "sd0", "clk"), + MPP_FUNCTION(0x5, "audio", "spdifo")), + MPP_MODE(10, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "txctl"), + MPP_FUNCTION(0x2, "uart0", "cts"), + MPP_FUNCTION(0x4, "tdm", "fsync"), + MPP_FUNCTION(0x5, "audio", "sdi")), + MPP_MODE(11, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd0"), + MPP_FUNCTION(0x2, "uart1", "rxd"), + MPP_FUNCTION(0x3, "sd0", "cmd"), + MPP_FUNCTION(0x4, "spi0", "cs1"), + MPP_FUNCTION(0x5, "sata1", "prsnt"), + MPP_FUNCTION(0x6, "spi1", "cs1")), + MPP_MODE(12, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd1"), + MPP_FUNCTION(0x2, "i2c1", "sda"), + MPP_FUNCTION(0x3, "sd0", "d0"), + MPP_FUNCTION(0x4, "spi1", "cs0"), + MPP_FUNCTION(0x5, "audio", "spdifi")), + MPP_MODE(13, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd2"), + MPP_FUNCTION(0x2, "i2c1", "sck"), + MPP_FUNCTION(0x3, "sd0", "d1"), + MPP_FUNCTION(0x4, "tdm", "pclk"), + MPP_FUNCTION(0x5, "audio", "rmclk")), + MPP_MODE(14, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd3"), + MPP_FUNCTION(0x2, "pcie", "clkreq0"), + MPP_FUNCTION(0x3, "sd0", "d2"), + MPP_FUNCTION(0x4, "spi1", "mosi"), + MPP_FUNCTION(0x5, "spi0", "cs2")), + MPP_MODE(15, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxctl"), + MPP_FUNCTION(0x2, "pcie", "clkreq1"), + MPP_FUNCTION(0x3, "sd0", "d3"), + MPP_FUNCTION(0x4, "spi1", "miso"), + MPP_FUNCTION(0x5, "spi0", "cs3")), + MPP_MODE(16, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxclk"), + MPP_FUNCTION(0x2, "uart1", "rxd"), + MPP_FUNCTION(0x4, "tdm", "int"), + MPP_FUNCTION(0x5, "audio", "extclk")), + MPP_MODE(17, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge", "mdc")), + MPP_MODE(18, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, 
"ge", "mdio")), + MPP_MODE(19, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "txclk"), + MPP_FUNCTION(0x2, "ge1", "txclkout"), + MPP_FUNCTION(0x4, "tdm", "pclk")), + MPP_MODE(20, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd4"), + MPP_FUNCTION(0x2, "ge1", "txd0")), + MPP_MODE(21, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd5"), + MPP_FUNCTION(0x2, "ge1", "txd1"), + MPP_FUNCTION(0x4, "uart1", "txd")), + MPP_MODE(22, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd6"), + MPP_FUNCTION(0x2, "ge1", "txd2"), + MPP_FUNCTION(0x4, "uart0", "rts")), + MPP_MODE(23, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "ge0", "txd7"), + MPP_FUNCTION(0x2, "ge1", "txd3"), + MPP_FUNCTION(0x4, "spi1", "mosi")), + MPP_MODE(24, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "col"), + MPP_FUNCTION(0x2, "ge1", "txctl"), + MPP_FUNCTION(0x4, "spi1", "cs0")), + MPP_MODE(25, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxerr"), + MPP_FUNCTION(0x2, "ge1", "rxd0"), + MPP_FUNCTION(0x4, "uart1", "rxd")), + MPP_MODE(26, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "crs"), + MPP_FUNCTION(0x2, "ge1", "rxd1"), + MPP_FUNCTION(0x4, "spi1", "miso")), + MPP_MODE(27, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd4"), + MPP_FUNCTION(0x2, "ge1", "rxd2"), + MPP_FUNCTION(0x4, "uart0", "cts")), + MPP_MODE(28, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd5"), + MPP_FUNCTION(0x2, "ge1", "rxd3")), + MPP_MODE(29, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd6"), + MPP_FUNCTION(0x2, "ge1", "rxctl"), + MPP_FUNCTION(0x4, "i2c1", "sda")), + MPP_MODE(30, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "ge0", "rxd7"), + MPP_FUNCTION(0x2, "ge1", "rxclk"), + MPP_FUNCTION(0x4, "i2c1", "sck")), + MPP_MODE(31, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x3, "tclk", NULL), + MPP_FUNCTION(0x4, "ge0", "txerr")), + MPP_MODE(32, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "spi0", "cs0")), + MPP_MODE(33, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "bootcs"), + MPP_FUNCTION(0x2, "spi0", "cs0")), + MPP_MODE(34, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "wen0"), + MPP_FUNCTION(0x2, "spi0", "mosi")), + MPP_MODE(35, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "oen"), + MPP_FUNCTION(0x2, "spi0", "sck")), + MPP_MODE(36, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "a1"), + MPP_FUNCTION(0x2, "spi0", "miso")), + MPP_MODE(37, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "a0"), + MPP_FUNCTION(0x2, "sata0", "prsnt")), + MPP_MODE(38, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ready"), + MPP_FUNCTION(0x2, "uart1", "cts"), + MPP_FUNCTION(0x3, "uart0", "cts")), + MPP_MODE(39, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad0"), + MPP_FUNCTION(0x2, "audio", "spdifo")), + MPP_MODE(40, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad1"), + MPP_FUNCTION(0x2, "uart1", "rts"), + MPP_FUNCTION(0x3, "uart0", "rts")), + MPP_MODE(41, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad2"), + MPP_FUNCTION(0x2, "uart1", "rxd")), + MPP_MODE(42, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad3"), + MPP_FUNCTION(0x2, "uart1", "txd")), + MPP_MODE(43, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad4"), + MPP_FUNCTION(0x2, "audio", "bclk")), + MPP_MODE(44, + MPP_FUNCTION(0x0, "gpo", NULL), + 
MPP_FUNCTION(0x1, "dev", "ad5"), + MPP_FUNCTION(0x2, "audio", "mclk")), + MPP_MODE(45, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad6"), + MPP_FUNCTION(0x2, "audio", "lrclk")), + MPP_MODE(46, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad7"), + MPP_FUNCTION(0x2, "audio", "sdo")), + MPP_MODE(47, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad8"), + MPP_FUNCTION(0x3, "sd0", "clk"), + MPP_FUNCTION(0x5, "audio", "spdifo")), + MPP_MODE(48, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad9"), + MPP_FUNCTION(0x2, "uart0", "rts"), + MPP_FUNCTION(0x3, "sd0", "cmd"), + MPP_FUNCTION(0x4, "sata1", "prsnt"), + MPP_FUNCTION(0x5, "spi0", "cs1")), + MPP_MODE(49, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad10"), + MPP_FUNCTION(0x2, "pcie", "clkreq1"), + MPP_FUNCTION(0x3, "sd0", "d0"), + MPP_FUNCTION(0x4, "spi1", "cs0"), + MPP_FUNCTION(0x5, "audio", "spdifi")), + MPP_MODE(50, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad11"), + MPP_FUNCTION(0x2, "uart0", "cts"), + MPP_FUNCTION(0x3, "sd0", "d1"), + MPP_FUNCTION(0x4, "spi1", "miso"), + MPP_FUNCTION(0x5, "audio", "rmclk")), + MPP_MODE(51, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad12"), + MPP_FUNCTION(0x2, "i2c1", "sda"), + MPP_FUNCTION(0x3, "sd0", "d2"), + MPP_FUNCTION(0x4, "spi1", "mosi")), + MPP_MODE(52, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad13"), + MPP_FUNCTION(0x2, "i2c1", "sck"), + MPP_FUNCTION(0x3, "sd0", "d3"), + MPP_FUNCTION(0x4, "spi1", "sck")), + MPP_MODE(53, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ad14"), + MPP_FUNCTION(0x2, "sd0", "clk"), + MPP_FUNCTION(0x3, "tdm", "pclk"), + MPP_FUNCTION(0x4, "spi0", "cs2"), + MPP_FUNCTION(0x5, "pcie", "clkreq1")), + MPP_MODE(54, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ad15"), + MPP_FUNCTION(0x3, "tdm", "dtx")), + MPP_MODE(55, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "cs1"), + MPP_FUNCTION(0x2, "uart1", "txd"), + MPP_FUNCTION(0x3, "tdm", "rst"), + MPP_FUNCTION(0x4, "sata1", "prsnt"), + MPP_FUNCTION(0x5, "sata0", "prsnt")), + MPP_MODE(56, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "cs2"), + MPP_FUNCTION(0x2, "uart1", "cts"), + MPP_FUNCTION(0x3, "uart0", "cts"), + MPP_FUNCTION(0x4, "spi0", "cs3"), + MPP_FUNCTION(0x5, "pcie", "clkreq0"), + MPP_FUNCTION(0x6, "spi1", "cs1")), + MPP_MODE(57, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "cs3"), + MPP_FUNCTION(0x2, "uart1", "rxd"), + MPP_FUNCTION(0x3, "tdm", "fsync"), + MPP_FUNCTION(0x4, "sata0", "prsnt"), + MPP_FUNCTION(0x5, "audio", "sdo")), + MPP_MODE(58, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "cs0"), + MPP_FUNCTION(0x2, "uart1", "rts"), + MPP_FUNCTION(0x3, "tdm", "int"), + MPP_FUNCTION(0x5, "audio", "extclk"), + MPP_FUNCTION(0x6, "uart0", "rts")), + MPP_MODE(59, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "ale0"), + MPP_FUNCTION(0x2, "uart1", "rts"), + MPP_FUNCTION(0x3, "uart0", "rts"), + MPP_FUNCTION(0x5, "audio", "bclk")), + MPP_MODE(60, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "dev", "ale1"), + MPP_FUNCTION(0x2, "uart1", "rxd"), + MPP_FUNCTION(0x3, "sata0", "prsnt"), + MPP_FUNCTION(0x4, "pcie", "rst-out"), + MPP_FUNCTION(0x5, "audio", "sdi")), + MPP_MODE(61, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "dev", "wen1"), + MPP_FUNCTION(0x2, "uart1", "txd"), + MPP_FUNCTION(0x5, "audio", "rclk")), + MPP_MODE(62, + MPP_FUNCTION(0x0, "gpio", NULL), + 
MPP_FUNCTION(0x1, "dev", "a2"), + MPP_FUNCTION(0x2, "uart1", "cts"), + MPP_FUNCTION(0x3, "tdm", "drx"), + MPP_FUNCTION(0x4, "pcie", "clkreq0"), + MPP_FUNCTION(0x5, "audio", "mclk"), + MPP_FUNCTION(0x6, "uart0", "cts")), + MPP_MODE(63, + MPP_FUNCTION(0x0, "gpo", NULL), + MPP_FUNCTION(0x1, "spi0", "sck"), + MPP_FUNCTION(0x2, "tclk", NULL)), + MPP_MODE(64, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "spi0", "miso"), + MPP_FUNCTION(0x2, "spi0-1", "cs1")), + MPP_MODE(65, + MPP_FUNCTION(0x0, "gpio", NULL), + MPP_FUNCTION(0x1, "spi0", "mosi"), + MPP_FUNCTION(0x2, "spi0-1", "cs2")), +}; + +static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info; + +static struct of_device_id armada_370_pinctrl_of_match[] __devinitdata = { + { .compatible = "marvell,mv88f6710-pinctrl" }, + { }, +}; + +static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = { + MPP_REG_CTRL(0, 65), +}; + +static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 32), + MPP_GPIO_RANGE(2, 64, 64, 2), +}; + +static int __devinit armada_370_pinctrl_probe(struct platform_device *pdev) +{ + struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info; + + soc->variant = 0; /* no variants for Armada 370 */ + soc->controls = mv88f6710_mpp_controls; + soc->ncontrols = ARRAY_SIZE(mv88f6710_mpp_controls); + soc->modes = mv88f6710_mpp_modes; + soc->nmodes = ARRAY_SIZE(mv88f6710_mpp_modes); + soc->gpioranges = mv88f6710_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(mv88f6710_mpp_gpio_ranges); + + pdev->dev.platform_data = soc; + + return mvebu_pinctrl_probe(pdev); +} + +static int __devexit armada_370_pinctrl_remove(struct platform_device *pdev) +{ + return mvebu_pinctrl_remove(pdev); +} + +static struct platform_driver armada_370_pinctrl_driver = { + .driver = { + .name = "armada-370-pinctrl", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(armada_370_pinctrl_of_match), + }, + .probe = armada_370_pinctrl_probe, + .remove = __devexit_p(armada_370_pinctrl_remove), +}; + +module_platform_driver(armada_370_pinctrl_driver); + +MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_DESCRIPTION("Marvell Armada 370 pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-armada-xp.c b/drivers/pinctrl/pinctrl-armada-xp.c new file mode 100644 index 000000000000..40bd52a46b4e --- /dev/null +++ b/drivers/pinctrl/pinctrl-armada-xp.c @@ -0,0 +1,468 @@ +/* + * Marvell Armada XP pinctrl driver based on mvebu pinctrl core + * + * Copyright (C) 2012 Marvell + * + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This file supports the three variants of Armada XP SoCs that are + * available: mv78230, mv78260 and mv78460. From a pin muxing + * perspective, the mv78230 has 49 MPP pins. The mv78260 and mv78460 + * both have 67 MPP pins (more GPIOs and address lines for the memory + * bus mainly). The only difference between the mv78260 and the + * mv78460 in terms of pin muxing is the addition of two functions on + * pins 43 and 56 to access the VDD of the CPU2 and 3 (mv78260 has two + * cores, mv78460 has four cores). 
+ */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/bitops.h> + +#include "pinctrl-mvebu.h" + +enum armada_xp_variant { + V_MV78230 = BIT(0), + V_MV78260 = BIT(1), + V_MV78460 = BIT(2), + V_MV78230_PLUS = (V_MV78230 | V_MV78260 | V_MV78460), + V_MV78260_PLUS = (V_MV78260 | V_MV78460), +}; + +static struct mvebu_mpp_mode armada_xp_mpp_modes[] = { + MPP_MODE(0, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txclko", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d0", V_MV78230_PLUS)), + MPP_MODE(1, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d1", V_MV78230_PLUS)), + MPP_MODE(2, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d2", V_MV78230_PLUS)), + MPP_MODE(3, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d3", V_MV78230_PLUS)), + MPP_MODE(4, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d4", V_MV78230_PLUS)), + MPP_MODE(5, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txctl", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d5", V_MV78230_PLUS)), + MPP_MODE(6, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d6", V_MV78230_PLUS)), + MPP_MODE(7, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d7", V_MV78230_PLUS)), + MPP_MODE(8, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d8", V_MV78230_PLUS)), + MPP_MODE(9, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d9", V_MV78230_PLUS)), + MPP_MODE(10, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxctl", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d10", V_MV78230_PLUS)), + MPP_MODE(11, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxclk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d11", V_MV78230_PLUS)), + MPP_MODE(12, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd4", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "clkout", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d12", V_MV78230_PLUS)), + MPP_MODE(13, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd5", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "txd0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d13", V_MV78230_PLUS)), + MPP_MODE(14, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txd6", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "txd1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d14", V_MV78230_PLUS)), + MPP_MODE(15, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", 
"txd7", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "txd2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d15", V_MV78230_PLUS)), + MPP_MODE(16, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "txclk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "txd3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d16", V_MV78230_PLUS)), + MPP_MODE(17, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "col", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "txctl", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d17", V_MV78230_PLUS)), + MPP_MODE(18, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxerr", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxd0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "ptp", "trig", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d18", V_MV78230_PLUS)), + MPP_MODE(19, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "crs", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxd1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "ptp", "evreq", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d19", V_MV78230_PLUS)), + MPP_MODE(20, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd4", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxd2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "ptp", "clk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d20", V_MV78230_PLUS)), + MPP_MODE(21, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd5", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxd3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "mem", "bat", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d21", V_MV78230_PLUS)), + MPP_MODE(22, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd6", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxctl", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "sata0", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d22", V_MV78230_PLUS)), + MPP_MODE(23, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ge0", "rxd7", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "ge1", "rxclk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "sata1", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "d23", V_MV78230_PLUS)), + MPP_MODE(24, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sata1", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "nf", "bootcs-re", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "rst", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "hsync", V_MV78230_PLUS)), + MPP_MODE(25, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sata0", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "nf", "bootcs-we", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "pclk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "vsync", V_MV78230_PLUS)), + MPP_MODE(26, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "fsync", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "clk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)), + MPP_MODE(27, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ptp", "trig", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "dtx", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "e", V_MV78230_PLUS)), + MPP_MODE(28, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + 
MPP_VAR_FUNCTION(0x1, "ptp", "evreq", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "drx", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "pwm", V_MV78230_PLUS)), + MPP_MODE(29, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "ptp", "clk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "ref-clk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)), + MPP_MODE(30, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "clk", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int1", V_MV78230_PLUS)), + MPP_MODE(31, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "cmd", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)), + MPP_MODE(32, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "d0", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu1-pd", V_MV78230_PLUS)), + MPP_MODE(33, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "d1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int4", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "mem", "bat", V_MV78230_PLUS)), + MPP_MODE(34, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "d2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "sata0", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int5", V_MV78230_PLUS)), + MPP_MODE(35, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "sd0", "d3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "sata1", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int6", V_MV78230_PLUS)), + MPP_MODE(36, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "mosi", V_MV78230_PLUS)), + MPP_MODE(37, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "miso", V_MV78230_PLUS)), + MPP_MODE(38, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "sck", V_MV78230_PLUS)), + MPP_MODE(39, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "cs0", V_MV78230_PLUS)), + MPP_MODE(40, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "cs1", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart2", "cts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "vdd", "cpu1-pd", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "vga-hsync", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "pcie", "clkreq0", V_MV78230_PLUS)), + MPP_MODE(41, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "spi", "cs2", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart2", "rts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "sata1", "prsnt", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "lcd", "vga-vsync", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "pcie", "clkreq1", V_MV78230_PLUS)), + MPP_MODE(42, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart2", "rxd", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart0", "cts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "tdm", "int7", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "tdm-1", "timer", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu0-pd", V_MV78230_PLUS)), + MPP_MODE(43, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart2", "txd", 
V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart0", "rts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "spi", "cs3", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "pcie", "rstout", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "vdd", "cpu2-3-pd", V_MV78460)), + MPP_MODE(44, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart2", "cts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart3", "rxd", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "spi", "cs4", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "mem", "bat", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "pcie", "clkreq2", V_MV78230_PLUS)), + MPP_MODE(45, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart2", "rts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart3", "txd", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "spi", "cs5", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V_MV78230_PLUS)), + MPP_MODE(46, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart3", "rts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart1", "rts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "spi", "cs6", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V_MV78230_PLUS)), + MPP_MODE(47, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "uart3", "cts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "uart1", "cts", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x3, "spi", "cs7", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x4, "ref", "clkout", V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x5, "pcie", "clkreq3", V_MV78230_PLUS)), + MPP_MODE(48, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x1, "tclk", NULL, V_MV78230_PLUS), + MPP_VAR_FUNCTION(0x2, "dev", "burst/last", V_MV78230_PLUS)), + MPP_MODE(49, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "we3", V_MV78260_PLUS)), + MPP_MODE(50, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "we2", V_MV78260_PLUS)), + MPP_MODE(51, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad16", V_MV78260_PLUS)), + MPP_MODE(52, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad17", V_MV78260_PLUS)), + MPP_MODE(53, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad18", V_MV78260_PLUS)), + MPP_MODE(54, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad19", V_MV78260_PLUS)), + MPP_MODE(55, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad20", V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x2, "vdd", "cpu0-pd", V_MV78260_PLUS)), + MPP_MODE(56, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad21", V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x2, "vdd", "cpu1-pd", V_MV78260_PLUS)), + MPP_MODE(57, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad22", V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x2, "vdd", "cpu2-3-pd", V_MV78460)), + MPP_MODE(58, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad23", V_MV78260_PLUS)), + MPP_MODE(59, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad24", V_MV78260_PLUS)), + MPP_MODE(60, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad25", V_MV78260_PLUS)), + MPP_MODE(61, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad26", 
V_MV78260_PLUS)), + MPP_MODE(62, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad27", V_MV78260_PLUS)), + MPP_MODE(63, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad28", V_MV78260_PLUS)), + MPP_MODE(64, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad29", V_MV78260_PLUS)), + MPP_MODE(65, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad30", V_MV78260_PLUS)), + MPP_MODE(66, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_MV78260_PLUS), + MPP_VAR_FUNCTION(0x1, "dev", "ad31", V_MV78260_PLUS)), +}; + +static struct mvebu_pinctrl_soc_info armada_xp_pinctrl_info; + +static struct of_device_id armada_xp_pinctrl_of_match[] __devinitdata = { + { + .compatible = "marvell,mv78230-pinctrl", + .data = (void *) V_MV78230, + }, + { + .compatible = "marvell,mv78260-pinctrl", + .data = (void *) V_MV78260, + }, + { + .compatible = "marvell,mv78460-pinctrl", + .data = (void *) V_MV78460, + }, + { }, +}; + +static struct mvebu_mpp_ctrl mv78230_mpp_controls[] = { + MPP_REG_CTRL(0, 48), +}; + +static struct pinctrl_gpio_range mv78230_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 17), +}; + +static struct mvebu_mpp_ctrl mv78260_mpp_controls[] = { + MPP_REG_CTRL(0, 66), +}; + +static struct pinctrl_gpio_range mv78260_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 32), + MPP_GPIO_RANGE(2, 64, 64, 3), +}; + +static struct mvebu_mpp_ctrl mv78460_mpp_controls[] = { + MPP_REG_CTRL(0, 66), +}; + +static struct pinctrl_gpio_range mv78460_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 32), + MPP_GPIO_RANGE(2, 64, 64, 3), +}; + +static int __devinit armada_xp_pinctrl_probe(struct platform_device *pdev) +{ + struct mvebu_pinctrl_soc_info *soc = &armada_xp_pinctrl_info; + const struct of_device_id *match = + of_match_device(armada_xp_pinctrl_of_match, &pdev->dev); + + if (!match) + return -ENODEV; + + soc->variant = (unsigned) match->data & 0xff; + + switch (soc->variant) { + case V_MV78230: + soc->controls = mv78230_mpp_controls; + soc->ncontrols = ARRAY_SIZE(mv78230_mpp_controls); + soc->modes = armada_xp_mpp_modes; + /* We don't necessarily want the full list of the + * armada_xp_mpp_modes, but only the first 'n' ones + * that are available on this SoC */ + soc->nmodes = mv78230_mpp_controls[0].npins; + soc->gpioranges = mv78230_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(mv78230_mpp_gpio_ranges); + break; + case V_MV78260: + soc->controls = mv78260_mpp_controls; + soc->ncontrols = ARRAY_SIZE(mv78260_mpp_controls); + soc->modes = armada_xp_mpp_modes; + /* We don't necessarily want the full list of the + * armada_xp_mpp_modes, but only the first 'n' ones + * that are available on this SoC */ + soc->nmodes = mv78260_mpp_controls[0].npins; + soc->gpioranges = mv78260_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(mv78260_mpp_gpio_ranges); + break; + case V_MV78460: + soc->controls = mv78460_mpp_controls; + soc->ncontrols = ARRAY_SIZE(mv78460_mpp_controls); + soc->modes = armada_xp_mpp_modes; + /* We don't necessarily want the full list of the + * armada_xp_mpp_modes, but only the first 'n' ones + * that are available on this SoC */ + soc->nmodes = mv78460_mpp_controls[0].npins; + soc->gpioranges = mv78460_mpp_gpio_ranges; + soc->ngpioranges = ARRAY_SIZE(mv78460_mpp_gpio_ranges); + break; + } + + pdev->dev.platform_data = soc; + + return 
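/*
 * As in the Armada 370 and Dove drivers, the per-variant SoC
 * description is handed to the common mvebu pinctrl core through
 * pdev->dev.platform_data.
 */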
mvebu_pinctrl_probe(pdev); +} + +static int __devexit armada_xp_pinctrl_remove(struct platform_device *pdev) +{ + return mvebu_pinctrl_remove(pdev); +} + +static struct platform_driver armada_xp_pinctrl_driver = { + .driver = { + .name = "armada-xp-pinctrl", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(armada_xp_pinctrl_of_match), + }, + .probe = armada_xp_pinctrl_probe, + .remove = __devexit_p(armada_xp_pinctrl_remove), +}; + +module_platform_driver(armada_xp_pinctrl_driver); + +MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>"); +MODULE_DESCRIPTION("Marvell Armada XP pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-dove.c b/drivers/pinctrl/pinctrl-dove.c new file mode 100644 index 000000000000..ffe74b27d66d --- /dev/null +++ b/drivers/pinctrl/pinctrl-dove.c @@ -0,0 +1,620 @@ +/* + * Marvell Dove pinctrl driver based on mvebu pinctrl core + * + * Author: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/bitops.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-mvebu.h" + +#define DOVE_SB_REGS_VIRT_BASE 0xfde00000 +#define DOVE_MPP_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0200) +#define DOVE_PMU_MPP_GENERAL_CTRL (DOVE_MPP_VIRT_BASE + 0x10) +#define DOVE_AU0_AC97_SEL BIT(16) +#define DOVE_GLOBAL_CONFIG_1 (DOVE_SB_REGS_VIRT_BASE | 0xe802C) +#define DOVE_TWSI_ENABLE_OPTION1 BIT(7) +#define DOVE_GLOBAL_CONFIG_2 (DOVE_SB_REGS_VIRT_BASE | 0xe8030) +#define DOVE_TWSI_ENABLE_OPTION2 BIT(20) +#define DOVE_TWSI_ENABLE_OPTION3 BIT(21) +#define DOVE_TWSI_OPTION3_GPIO BIT(22) +#define DOVE_SSP_CTRL_STATUS_1 (DOVE_SB_REGS_VIRT_BASE | 0xe8034) +#define DOVE_SSP_ON_AU1 BIT(0) +#define DOVE_MPP_GENERAL_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xe803c) +#define DOVE_AU1_SPDIFO_GPIO_EN BIT(1) +#define DOVE_NAND_GPIO_EN BIT(0) +#define DOVE_GPIO_LO_VIRT_BASE (DOVE_SB_REGS_VIRT_BASE | 0xd0400) +#define DOVE_MPP_CTRL4_VIRT_BASE (DOVE_GPIO_LO_VIRT_BASE + 0x40) +#define DOVE_SPI_GPIO_SEL BIT(5) +#define DOVE_UART1_GPIO_SEL BIT(4) +#define DOVE_AU1_GPIO_SEL BIT(3) +#define DOVE_CAM_GPIO_SEL BIT(2) +#define DOVE_SD1_GPIO_SEL BIT(1) +#define DOVE_SD0_GPIO_SEL BIT(0) + +#define MPPS_PER_REG 8 +#define MPP_BITS 4 +#define MPP_MASK 0xf + +#define CONFIG_PMU BIT(4) + +static int dove_pmu_mpp_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS; + unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS; + unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL); + unsigned long mpp = readl(DOVE_MPP_VIRT_BASE + off); + + if (pmu & (1 << ctrl->pid)) + *config = CONFIG_PMU; + else + *config = (mpp >> shift) & MPP_MASK; + return 0; +} + +static int dove_pmu_mpp_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned off = (ctrl->pid / MPPS_PER_REG) * MPP_BITS; + unsigned shift = (ctrl->pid % MPPS_PER_REG) * MPP_BITS; + unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL); + unsigned long mpp = readl(DOVE_MPP_VIRT_BASE + off); + + if (config == CONFIG_PMU) + writel(pmu | (1 << 
ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL); + else { + writel(pmu & ~(1 << ctrl->pid), DOVE_PMU_MPP_GENERAL_CTRL); + mpp &= ~(MPP_MASK << shift); + mpp |= config << shift; + writel(mpp, DOVE_MPP_VIRT_BASE + off); + } + return 0; +} + +static int dove_mpp4_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE); + unsigned long mask; + + switch (ctrl->pid) { + case 24: /* mpp_camera */ + mask = DOVE_CAM_GPIO_SEL; + break; + case 40: /* mpp_sdio0 */ + mask = DOVE_SD0_GPIO_SEL; + break; + case 46: /* mpp_sdio1 */ + mask = DOVE_SD1_GPIO_SEL; + break; + case 58: /* mpp_spi0 */ + mask = DOVE_SPI_GPIO_SEL; + break; + case 62: /* mpp_uart1 */ + mask = DOVE_UART1_GPIO_SEL; + break; + default: + return -EINVAL; + } + + *config = ((mpp4 & mask) != 0); + + return 0; +} + +static int dove_mpp4_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE); + unsigned long mask; + + switch (ctrl->pid) { + case 24: /* mpp_camera */ + mask = DOVE_CAM_GPIO_SEL; + break; + case 40: /* mpp_sdio0 */ + mask = DOVE_SD0_GPIO_SEL; + break; + case 46: /* mpp_sdio1 */ + mask = DOVE_SD1_GPIO_SEL; + break; + case 58: /* mpp_spi0 */ + mask = DOVE_SPI_GPIO_SEL; + break; + case 62: /* mpp_uart1 */ + mask = DOVE_UART1_GPIO_SEL; + break; + default: + return -EINVAL; + } + + mpp4 &= ~mask; + if (config) + mpp4 |= mask; + + writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE); + + return 0; +} + +static int dove_nand_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE); + + *config = ((gmpp & DOVE_NAND_GPIO_EN) != 0); + + return 0; +} + +static int dove_nand_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE); + + gmpp &= ~DOVE_NAND_GPIO_EN; + if (config) + gmpp |= DOVE_NAND_GPIO_EN; + + writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE); + + return 0; +} + +static int dove_audio0_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL); + + *config = ((pmu & DOVE_AU0_AC97_SEL) != 0); + + return 0; +} + +static int dove_audio0_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned long pmu = readl(DOVE_PMU_MPP_GENERAL_CTRL); + + pmu &= ~DOVE_AU0_AC97_SEL; + if (config) + pmu |= DOVE_AU0_AC97_SEL; + writel(pmu, DOVE_PMU_MPP_GENERAL_CTRL); + + return 0; +} + +static int dove_audio1_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE); + unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1); + unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE); + unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2); + + *config = 0; + if (mpp4 & DOVE_AU1_GPIO_SEL) + *config |= BIT(3); + if (sspc1 & DOVE_SSP_ON_AU1) + *config |= BIT(2); + if (gmpp & DOVE_AU1_SPDIFO_GPIO_EN) + *config |= BIT(1); + if (gcfg2 & DOVE_TWSI_OPTION3_GPIO) + *config |= BIT(0); + + /* SSP/TWSI only if I2S1 not set*/ + if ((*config & BIT(3)) == 0) + *config &= ~(BIT(2) | BIT(0)); + /* TWSI only if SPDIFO not set*/ + if ((*config & BIT(1)) == 0) + *config &= ~BIT(0); + return 0; +} + +static int dove_audio1_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned long mpp4 = readl(DOVE_MPP_CTRL4_VIRT_BASE); + unsigned long sspc1 = readl(DOVE_SSP_CTRL_STATUS_1); + unsigned long gmpp = readl(DOVE_MPP_GENERAL_VIRT_BASE); + unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2); + + 
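/*
 * config is the 4-bit selection read back by dove_audio1_ctrl_get():
 * BIT(3) sets DOVE_AU1_GPIO_SEL, BIT(2) sets DOVE_SSP_ON_AU1,
 * BIT(1) sets DOVE_AU1_SPDIFO_GPIO_EN and BIT(0) sets
 * DOVE_TWSI_OPTION3_GPIO.
 */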
if (config & BIT(0)) + gcfg2 |= DOVE_TWSI_OPTION3_GPIO; + if (config & BIT(1)) + gmpp |= DOVE_AU1_SPDIFO_GPIO_EN; + if (config & BIT(2)) + sspc1 |= DOVE_SSP_ON_AU1; + if (config & BIT(3)) + mpp4 |= DOVE_AU1_GPIO_SEL; + + writel(mpp4, DOVE_MPP_CTRL4_VIRT_BASE); + writel(sspc1, DOVE_SSP_CTRL_STATUS_1); + writel(gmpp, DOVE_MPP_GENERAL_VIRT_BASE); + writel(gcfg2, DOVE_GLOBAL_CONFIG_2); + + return 0; +} + +/* mpp[52:57] gpio pins depend heavily on current config; + * gpio_req does not try to mux in gpio capabilities to not + * break other functions. If you require all mpps as gpio + * enforce gpio setting by pinctrl mapping. + */ +static int dove_audio1_ctrl_gpio_req(struct mvebu_mpp_ctrl *ctrl, u8 pid) +{ + unsigned long config; + + dove_audio1_ctrl_get(ctrl, &config); + + switch (config) { + case 0x02: /* i2s1 : gpio[56:57] */ + case 0x0e: /* ssp : gpio[56:57] */ + if (pid >= 56) + return 0; + return -ENOTSUPP; + case 0x08: /* spdifo : gpio[52:55] */ + case 0x0b: /* twsi : gpio[52:55] */ + if (pid <= 55) + return 0; + return -ENOTSUPP; + case 0x0a: /* all gpio */ + return 0; + /* 0x00 : i2s1/spdifo : no gpio */ + /* 0x0c : ssp/spdifo : no gpio */ + /* 0x0f : ssp/twsi : no gpio */ + } + return -ENOTSUPP; +} + +/* mpp[52:57] has gpio pins capable of in and out */ +static int dove_audio1_ctrl_gpio_dir(struct mvebu_mpp_ctrl *ctrl, u8 pid, + bool input) +{ + if (pid < 52 || pid > 57) + return -ENOTSUPP; + return 0; +} + +static int dove_twsi_ctrl_get(struct mvebu_mpp_ctrl *ctrl, + unsigned long *config) +{ + unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1); + unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2); + + *config = 0; + if (gcfg1 & DOVE_TWSI_ENABLE_OPTION1) + *config = 1; + else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION2) + *config = 2; + else if (gcfg2 & DOVE_TWSI_ENABLE_OPTION3) + *config = 3; + + return 0; +} + +static int dove_twsi_ctrl_set(struct mvebu_mpp_ctrl *ctrl, + unsigned long config) +{ + unsigned long gcfg1 = readl(DOVE_GLOBAL_CONFIG_1); + unsigned long gcfg2 = readl(DOVE_GLOBAL_CONFIG_2); + + gcfg1 &= ~DOVE_TWSI_ENABLE_OPTION1; + gcfg2 &= ~(DOVE_TWSI_ENABLE_OPTION2 | DOVE_TWSI_ENABLE_OPTION2); + + switch (config) { + case 1: + gcfg1 |= DOVE_TWSI_ENABLE_OPTION1; + break; + case 2: + gcfg2 |= DOVE_TWSI_ENABLE_OPTION2; + break; + case 3: + gcfg2 |= DOVE_TWSI_ENABLE_OPTION3; + break; + } + + writel(gcfg1, DOVE_GLOBAL_CONFIG_1); + writel(gcfg2, DOVE_GLOBAL_CONFIG_2); + + return 0; +} + +static struct mvebu_mpp_ctrl dove_mpp_controls[] = { + MPP_FUNC_CTRL(0, 0, "mpp0", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(1, 1, "mpp1", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(2, 2, "mpp2", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(3, 3, "mpp3", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(4, 4, "mpp4", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(5, 5, "mpp5", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(6, 6, "mpp6", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(7, 7, "mpp7", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(8, 8, "mpp8", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(9, 9, "mpp9", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(10, 10, "mpp10", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(11, 11, "mpp11", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(12, 12, "mpp12", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(13, 13, "mpp13", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(14, 14, "mpp14", dove_pmu_mpp_ctrl), + MPP_FUNC_CTRL(15, 15, "mpp15", dove_pmu_mpp_ctrl), + MPP_REG_CTRL(16, 23), + MPP_FUNC_CTRL(24, 39, "mpp_camera", dove_mpp4_ctrl), + MPP_FUNC_CTRL(40, 45, "mpp_sdio0", dove_mpp4_ctrl), + MPP_FUNC_CTRL(46, 51, "mpp_sdio1", dove_mpp4_ctrl), + MPP_FUNC_GPIO_CTRL(52, 57, "mpp_audio1", dove_audio1_ctrl), 
+ MPP_FUNC_CTRL(58, 61, "mpp_spi0", dove_mpp4_ctrl), + MPP_FUNC_CTRL(62, 63, "mpp_uart1", dove_mpp4_ctrl), + MPP_FUNC_CTRL(64, 71, "mpp_nand", dove_nand_ctrl), + MPP_FUNC_CTRL(72, 72, "audio0", dove_audio0_ctrl), + MPP_FUNC_CTRL(73, 73, "twsi", dove_twsi_ctrl), +}; + +static struct mvebu_mpp_mode dove_mpp_modes[] = { + MPP_MODE(0, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart2", "rts"), + MPP_FUNCTION(0x03, "sdio0", "cd"), + MPP_FUNCTION(0x0f, "lcd0", "pwm"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(1, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart2", "cts"), + MPP_FUNCTION(0x03, "sdio0", "wp"), + MPP_FUNCTION(0x0f, "lcd1", "pwm"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(2, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "sata", "prsnt"), + MPP_FUNCTION(0x02, "uart2", "txd"), + MPP_FUNCTION(0x03, "sdio0", "buspwr"), + MPP_FUNCTION(0x04, "uart1", "rts"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(3, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "sata", "act"), + MPP_FUNCTION(0x02, "uart2", "rxd"), + MPP_FUNCTION(0x03, "sdio0", "ledctrl"), + MPP_FUNCTION(0x04, "uart1", "cts"), + MPP_FUNCTION(0x0f, "lcd-spi", "cs1"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(4, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "rts"), + MPP_FUNCTION(0x03, "sdio1", "cd"), + MPP_FUNCTION(0x04, "spi1", "miso"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(5, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "cts"), + MPP_FUNCTION(0x03, "sdio1", "wp"), + MPP_FUNCTION(0x04, "spi1", "cs"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(6, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "txd"), + MPP_FUNCTION(0x03, "sdio1", "buspwr"), + MPP_FUNCTION(0x04, "spi1", "mosi"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(7, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "rxd"), + MPP_FUNCTION(0x03, "sdio1", "ledctrl"), + MPP_FUNCTION(0x04, "spi1", "sck"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(8, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "watchdog", "rstout"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(9, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x05, "pex1", "clkreq"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(10, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x05, "ssp", "sclk"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(11, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "sata", "prsnt"), + MPP_FUNCTION(0x02, "sata-1", "act"), + MPP_FUNCTION(0x03, "sdio0", "ledctrl"), + MPP_FUNCTION(0x04, "sdio1", "ledctrl"), + MPP_FUNCTION(0x05, "pex0", "clkreq"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(12, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "sata", "act"), + MPP_FUNCTION(0x02, "uart2", "rts"), + MPP_FUNCTION(0x03, "audio0", "extclk"), + MPP_FUNCTION(0x04, "sdio1", "cd"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(13, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart2", "cts"), + MPP_FUNCTION(0x03, "audio1", "extclk"), + MPP_FUNCTION(0x04, "sdio1", "wp"), + MPP_FUNCTION(0x05, "ssp", "extclk"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(14, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart2", "txd"), + MPP_FUNCTION(0x04, "sdio1", "buspwr"), + MPP_FUNCTION(0x05, "ssp", "rxd"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(15, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart2", "rxd"), + MPP_FUNCTION(0x04, "sdio1", "ledctrl"), + MPP_FUNCTION(0x05, "ssp", 
"sfrm"), + MPP_FUNCTION(0x10, "pmu", NULL)), + MPP_MODE(16, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "rts"), + MPP_FUNCTION(0x03, "sdio0", "cd"), + MPP_FUNCTION(0x04, "lcd-spi", "cs1"), + MPP_FUNCTION(0x05, "ac97", "sdi1")), + MPP_MODE(17, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "ac97-1", "sysclko"), + MPP_FUNCTION(0x02, "uart3", "cts"), + MPP_FUNCTION(0x03, "sdio0", "wp"), + MPP_FUNCTION(0x04, "twsi", "sda"), + MPP_FUNCTION(0x05, "ac97", "sdi2")), + MPP_MODE(18, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "txd"), + MPP_FUNCTION(0x03, "sdio0", "buspwr"), + MPP_FUNCTION(0x04, "lcd0", "pwm"), + MPP_FUNCTION(0x05, "ac97", "sdi3")), + MPP_MODE(19, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "uart3", "rxd"), + MPP_FUNCTION(0x03, "sdio0", "ledctrl"), + MPP_FUNCTION(0x04, "twsi", "sck")), + MPP_MODE(20, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "ac97", "sysclko"), + MPP_FUNCTION(0x02, "lcd-spi", "miso"), + MPP_FUNCTION(0x03, "sdio1", "cd"), + MPP_FUNCTION(0x05, "sdio0", "cd"), + MPP_FUNCTION(0x06, "spi1", "miso")), + MPP_MODE(21, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "uart1", "rts"), + MPP_FUNCTION(0x02, "lcd-spi", "cs0"), + MPP_FUNCTION(0x03, "sdio1", "wp"), + MPP_FUNCTION(0x04, "ssp", "sfrm"), + MPP_FUNCTION(0x05, "sdio0", "wp"), + MPP_FUNCTION(0x06, "spi1", "cs")), + MPP_MODE(22, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x01, "uart1", "cts"), + MPP_FUNCTION(0x02, "lcd-spi", "mosi"), + MPP_FUNCTION(0x03, "sdio1", "buspwr"), + MPP_FUNCTION(0x04, "ssp", "txd"), + MPP_FUNCTION(0x05, "sdio0", "buspwr"), + MPP_FUNCTION(0x06, "spi1", "mosi")), + MPP_MODE(23, + MPP_FUNCTION(0x00, "gpio", NULL), + MPP_FUNCTION(0x02, "lcd-spi", "sck"), + MPP_FUNCTION(0x03, "sdio1", "ledctrl"), + MPP_FUNCTION(0x04, "ssp", "sclk"), + MPP_FUNCTION(0x05, "sdio0", "ledctrl"), + MPP_FUNCTION(0x06, "spi1", "sck")), + MPP_MODE(24, + MPP_FUNCTION(0x00, "camera", NULL), + MPP_FUNCTION(0x01, "gpio", NULL)), + MPP_MODE(40, + MPP_FUNCTION(0x00, "sdio0", NULL), + MPP_FUNCTION(0x01, "gpio", NULL)), + MPP_MODE(46, + MPP_FUNCTION(0x00, "sdio1", NULL), + MPP_FUNCTION(0x01, "gpio", NULL)), + MPP_MODE(52, + MPP_FUNCTION(0x00, "i2s1/spdifo", NULL), + MPP_FUNCTION(0x02, "i2s1", NULL), + MPP_FUNCTION(0x08, "spdifo", NULL), + MPP_FUNCTION(0x0a, "gpio", NULL), + MPP_FUNCTION(0x0b, "twsi", NULL), + MPP_FUNCTION(0x0c, "ssp/spdifo", NULL), + MPP_FUNCTION(0x0e, "ssp", NULL), + MPP_FUNCTION(0x0f, "ssp/twsi", NULL)), + MPP_MODE(58, + MPP_FUNCTION(0x00, "spi0", NULL), + MPP_FUNCTION(0x01, "gpio", NULL)), + MPP_MODE(62, + MPP_FUNCTION(0x00, "uart1", NULL), + MPP_FUNCTION(0x01, "gpio", NULL)), + MPP_MODE(64, + MPP_FUNCTION(0x00, "nand", NULL), + MPP_FUNCTION(0x01, "gpo", NULL)), + MPP_MODE(72, + MPP_FUNCTION(0x00, "i2s", NULL), + MPP_FUNCTION(0x01, "ac97", NULL)), + MPP_MODE(73, + MPP_FUNCTION(0x00, "twsi-none", NULL), + MPP_FUNCTION(0x01, "twsi-opt1", NULL), + MPP_FUNCTION(0x02, "twsi-opt2", NULL), + MPP_FUNCTION(0x03, "twsi-opt3", NULL)), +}; + +static struct pinctrl_gpio_range dove_mpp_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 32), + MPP_GPIO_RANGE(2, 64, 64, 8), +}; + +static struct mvebu_pinctrl_soc_info dove_pinctrl_info = { + .controls = dove_mpp_controls, + .ncontrols = ARRAY_SIZE(dove_mpp_controls), + .modes = dove_mpp_modes, + .nmodes = ARRAY_SIZE(dove_mpp_modes), + .gpioranges = dove_mpp_gpio_ranges, + .ngpioranges = ARRAY_SIZE(dove_mpp_gpio_ranges), + .variant = 0, +}; + +static struct 
clk *clk; + +static struct of_device_id dove_pinctrl_of_match[] __devinitdata = { + { .compatible = "marvell,dove-pinctrl", .data = &dove_pinctrl_info }, + { } +}; + +static int __devinit dove_pinctrl_probe(struct platform_device *pdev) +{ + const struct of_device_id *match = + of_match_device(dove_pinctrl_of_match, &pdev->dev); + pdev->dev.platform_data = match->data; + + /* + * General MPP Configuration Register is part of pdma registers. + * grab clk to make sure it is ticking. + */ + clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(clk)) + clk_prepare_enable(clk); + + return mvebu_pinctrl_probe(pdev); +} + +static int __devexit dove_pinctrl_remove(struct platform_device *pdev) +{ + int ret; + + ret = mvebu_pinctrl_remove(pdev); + if (!IS_ERR(clk)) + clk_disable_unprepare(clk); + return ret; +} + +static struct platform_driver dove_pinctrl_driver = { + .driver = { + .name = "dove-pinctrl", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(dove_pinctrl_of_match), + }, + .probe = dove_pinctrl_probe, + .remove = __devexit_p(dove_pinctrl_remove), +}; + +module_platform_driver(dove_pinctrl_driver); + +MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>"); +MODULE_DESCRIPTION("Marvell Dove pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-kirkwood.c b/drivers/pinctrl/pinctrl-kirkwood.c new file mode 100644 index 000000000000..9a74ef674a0e --- /dev/null +++ b/drivers/pinctrl/pinctrl-kirkwood.c @@ -0,0 +1,472 @@ +/* + * Marvell Kirkwood pinctrl driver based on mvebu pinctrl core + * + * Author: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
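The Kirkwood driver that follows shares a single MPP mode table across five SoC variants. Each MPP_VAR_FUNCTION() entry carries a V(...) bitmask naming the variants on which that setting exists, and the core driver only offers a setting when that mask intersects the soc_info variant. A minimal sketch of the encoding and the availability test, mirroring the V() macro below and the check in mvebu_pinctrl_find_setting_by_name(); values are picked from the table purely for illustration:

#define V(f6180, f6190, f6192, f6281, f6282) \
	((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \
	 (f6281 << 3) | (f6282 << 4))

static inline int setting_available(unsigned char setting_variant,
				    unsigned char soc_variant)
{
	/* a soc variant of 0 disables variant filtering altogether */
	return !soc_variant || (setting_variant & soc_variant);
}

/*
 * Example: "sata1 act" on MPP 4 is declared V(0, 0, 1, 1, 1), so it is
 * offered on an MV88F6281 (variant bit 3) but not on an MV88F6180 (bit 0).
 */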
+ */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-mvebu.h" + +#define V(f6180, f6190, f6192, f6281, f6282) \ + ((f6180 << 0) | (f6190 << 1) | (f6192 << 2) | \ + (f6281 << 3) | (f6282 << 4)) + +enum kirkwood_variant { + VARIANT_MV88F6180 = V(1, 0, 0, 0, 0), + VARIANT_MV88F6190 = V(0, 1, 0, 0, 0), + VARIANT_MV88F6192 = V(0, 0, 1, 0, 0), + VARIANT_MV88F6281 = V(0, 0, 0, 1, 0), + VARIANT_MV88F6282 = V(0, 0, 0, 0, 1), +}; + +static struct mvebu_mpp_mode mv88f6xxx_mpp_modes[] = { + MPP_MODE(0, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io2", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1))), + MPP_MODE(1, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io3", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "mosi", V(1, 1, 1, 1, 1))), + MPP_MODE(2, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io4", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1))), + MPP_MODE(3, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io5", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1))), + MPP_MODE(4, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io6", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "rxd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0))), + MPP_MODE(5, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io7", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "txd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "ptp", "trig", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))), + MPP_MODE(6, + MPP_VAR_FUNCTION(0x0, "sysrst", "out", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "spi", "mosi", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "ptp", "trig", V(1, 1, 1, 1, 0))), + MPP_MODE(7, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "pex", "rsto", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x2, "spi", "cs", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ptp", "trig", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))), + MPP_MODE(8, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "twsi0", "sda", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "rts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "mii-1", "rxerr", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xc, "ptp", "clk", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))), + MPP_MODE(9, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "twsi0", "sck", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "cts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xc, "ptp", "evreq", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0xd, "mii", 
"crs", V(1, 1, 1, 1, 1))), + MPP_MODE(10, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "sck", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0X3, "uart0", "txd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xc, "ptp", "trig", V(1, 1, 1, 1, 0))), + MPP_MODE(11, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "spi", "miso", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart0", "rxd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "ptp-1", "evreq", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0xc, "ptp-2", "trig", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0xd, "ptp", "clk", V(1, 1, 1, 1, 0)), + MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1))), + MPP_MODE(12, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 0, 1)), + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)), + MPP_VAR_FUNCTION(0x1, "sdio", "clk", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xa, "audio", "spdifo", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xb, "spi", "mosi", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xd, "twsi1", "sda", V(0, 0, 0, 0, 1))), + MPP_MODE(13, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "sdio", "cmd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xa, "audio", "rmclk", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "pwm", V(0, 0, 0, 0, 1))), + MPP_MODE(14, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "sdio", "d0", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "sata1", "prsnt", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xa, "audio", "spdifi", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xb, "audio-1", "sdi", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xd, "mii", "col", V(1, 1, 1, 1, 1))), + MPP_MODE(15, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "sdio", "d1", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "rts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "txd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "sata0", "act", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "spi", "cs", V(0, 0, 0, 0, 1))), + MPP_MODE(16, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "sdio", "d2", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "uart0", "cts", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "uart1", "rxd", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "sata1", "act", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "extclk", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xd, "mii", "crs", V(1, 1, 1, 1, 1))), + MPP_MODE(17, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "sdio", "d3", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "sata0", "prsnt", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xa, "sata1", "act", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xd, "twsi1", "sck", V(0, 0, 0, 0, 1))), + MPP_MODE(18, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io0", V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "pex", "clkreq", V(0, 0, 0, 0, 1))), + MPP_MODE(19, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "nand", "io1", V(1, 1, 1, 1, 1))), + MPP_MODE(20, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txd0", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, 
"audio", "spdifi", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d0", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(1, 0, 0, 0, 0))), + MPP_MODE(21, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txd1", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d1", V(0, 0, 0, 0, 1))), + MPP_MODE(22, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txd2", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "prsnt", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d2", V(0, 0, 0, 0, 1))), + MPP_MODE(23, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txd3", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata0", "prsnt", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d3", V(0, 0, 0, 0, 1))), + MPP_MODE(24, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxd0", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d4", V(0, 0, 0, 0, 1))), + MPP_MODE(25, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxd1", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d5", V(0, 0, 0, 0, 1))), + MPP_MODE(26, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxd2", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d6", V(0, 0, 0, 0, 1))), + MPP_MODE(27, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxd3", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d7", V(0, 0, 0, 0, 1))), + MPP_MODE(28, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 1, 1, 1)), + 
MPP_VAR_FUNCTION(0x3, "ge1", "col", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d8", V(0, 0, 0, 0, 1))), + MPP_MODE(29, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(1, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txclk", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(1, 0, 0, 0, 0)), + MPP_VAR_FUNCTION(0xb, "lcd", "d9", V(0, 0, 0, 0, 1))), + MPP_MODE(30, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxctl", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d10", V(0, 0, 0, 0, 1))), + MPP_MODE(31, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxclk", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d11", V(0, 0, 0, 0, 1))), + MPP_MODE(32, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txclko", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d12", V(0, 0, 0, 0, 1))), + MPP_MODE(33, + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txctl", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d13", V(0, 0, 0, 0, 1))), + MPP_MODE(34, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "txen", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata1", "act", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d14", V(0, 0, 0, 0, 1))), + MPP_MODE(35, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "tx0ql", V(0, 0, 1, 1, 1)), + MPP_VAR_FUNCTION(0x3, "ge1", "rxerr", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0x5, "sata0", "act", V(0, 1, 1, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d15", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xc, "mii", "rxerr", V(0, 1, 1, 1, 1))), + MPP_MODE(36, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp0", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs1", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifi", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "twsi1", "sda", V(0, 0, 0, 0, 1))), + MPP_MODE(37, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp1", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "tx2ql", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "spdifo", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "twsi1", "sck", V(0, 0, 0, 0, 1))), + MPP_MODE(38, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp2", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "rx2ql", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "rmclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d18", V(0, 0, 0, 0, 1))), + MPP_MODE(39, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp3", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-cs0", V(0, 0, 0, 1, 1)), + 
MPP_VAR_FUNCTION(0x4, "audio", "bclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d19", V(0, 0, 0, 0, 1))), + MPP_MODE(40, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp4", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-sck", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "sdo", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d20", V(0, 0, 0, 0, 1))), + MPP_MODE(41, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp5", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-miso", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "lrclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d21", V(0, 0, 0, 0, 1))), + MPP_MODE(42, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp6", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "spi-mosi", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "mclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d22", V(0, 0, 0, 0, 1))), + MPP_MODE(43, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp7", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "int", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "sdi", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d23", V(0, 0, 0, 0, 1))), + MPP_MODE(44, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp8", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "rst", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x4, "audio", "extclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "clk", V(0, 0, 0, 0, 1))), + MPP_MODE(45, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "pclk", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "e", V(0, 0, 0, 0, 1))), + MPP_MODE(46, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp10", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "fs", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "hsync", V(0, 0, 0, 0, 1))), + MPP_MODE(47, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp11", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "drx", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "vsync", V(0, 0, 0, 0, 1))), + MPP_MODE(48, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp12", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x2, "tdm", "dtx", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d16", V(0, 0, 0, 0, 1))), + MPP_MODE(49, + MPP_VAR_FUNCTION(0x0, "gpio", NULL, V(0, 0, 0, 1, 0)), + MPP_VAR_FUNCTION(0x0, "gpo", NULL, V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0x1, "ts", "mp9", V(0, 0, 0, 1, 0)), + MPP_VAR_FUNCTION(0x2, "tdm", "rx0ql", V(0, 0, 0, 1, 1)), + MPP_VAR_FUNCTION(0x5, "ptp", "clk", V(0, 0, 0, 1, 0)), + MPP_VAR_FUNCTION(0xa, "pex", "clkreq", V(0, 0, 0, 0, 1)), + MPP_VAR_FUNCTION(0xb, "lcd", "d17", V(0, 0, 0, 0, 1))), +}; + +static struct mvebu_mpp_ctrl mv88f6180_mpp_controls[] = { + MPP_REG_CTRL(0, 29), +}; + +static struct pinctrl_gpio_range mv88f6180_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 30), +}; + +static struct mvebu_mpp_ctrl mv88f619x_mpp_controls[] = { + MPP_REG_CTRL(0, 35), +}; + +static struct pinctrl_gpio_range mv88f619x_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 4), +}; + +static struct mvebu_mpp_ctrl mv88f628x_mpp_controls[] = { + MPP_REG_CTRL(0, 49), +}; + +static 
struct pinctrl_gpio_range mv88f628x_gpio_ranges[] = { + MPP_GPIO_RANGE(0, 0, 0, 32), + MPP_GPIO_RANGE(1, 32, 32, 18), +}; + +static struct mvebu_pinctrl_soc_info mv88f6180_info = { + .variant = VARIANT_MV88F6180, + .controls = mv88f6180_mpp_controls, + .ncontrols = ARRAY_SIZE(mv88f6180_mpp_controls), + .modes = mv88f6xxx_mpp_modes, + .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes), + .gpioranges = mv88f6180_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f6180_gpio_ranges), +}; + +static struct mvebu_pinctrl_soc_info mv88f6190_info = { + .variant = VARIANT_MV88F6190, + .controls = mv88f619x_mpp_controls, + .ncontrols = ARRAY_SIZE(mv88f619x_mpp_controls), + .modes = mv88f6xxx_mpp_modes, + .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes), + .gpioranges = mv88f619x_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f619x_gpio_ranges), +}; + +static struct mvebu_pinctrl_soc_info mv88f6192_info = { + .variant = VARIANT_MV88F6192, + .controls = mv88f619x_mpp_controls, + .ncontrols = ARRAY_SIZE(mv88f619x_mpp_controls), + .modes = mv88f6xxx_mpp_modes, + .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes), + .gpioranges = mv88f619x_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f619x_gpio_ranges), +}; + +static struct mvebu_pinctrl_soc_info mv88f6281_info = { + .variant = VARIANT_MV88F6281, + .controls = mv88f628x_mpp_controls, + .ncontrols = ARRAY_SIZE(mv88f628x_mpp_controls), + .modes = mv88f6xxx_mpp_modes, + .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes), + .gpioranges = mv88f628x_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges), +}; + +static struct mvebu_pinctrl_soc_info mv88f6282_info = { + .variant = VARIANT_MV88F6282, + .controls = mv88f628x_mpp_controls, + .ncontrols = ARRAY_SIZE(mv88f628x_mpp_controls), + .modes = mv88f6xxx_mpp_modes, + .nmodes = ARRAY_SIZE(mv88f6xxx_mpp_modes), + .gpioranges = mv88f628x_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f628x_gpio_ranges), +}; + +static struct of_device_id kirkwood_pinctrl_of_match[] __devinitdata = { + { .compatible = "marvell,88f6180-pinctrl", .data = &mv88f6180_info }, + { .compatible = "marvell,88f6190-pinctrl", .data = &mv88f6190_info }, + { .compatible = "marvell,88f6192-pinctrl", .data = &mv88f6192_info }, + { .compatible = "marvell,88f6281-pinctrl", .data = &mv88f6281_info }, + { .compatible = "marvell,88f6282-pinctrl", .data = &mv88f6282_info }, + { } +}; + +static int __devinit kirkwood_pinctrl_probe(struct platform_device *pdev) +{ + const struct of_device_id *match = + of_match_device(kirkwood_pinctrl_of_match, &pdev->dev); + pdev->dev.platform_data = match->data; + return mvebu_pinctrl_probe(pdev); +} + +static int __devexit kirkwood_pinctrl_remove(struct platform_device *pdev) +{ + return mvebu_pinctrl_remove(pdev); +} + +static struct platform_driver kirkwood_pinctrl_driver = { + .driver = { + .name = "kirkwood-pinctrl", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(kirkwood_pinctrl_of_match), + }, + .probe = kirkwood_pinctrl_probe, + .remove = __devexit_p(kirkwood_pinctrl_remove), +}; + +module_platform_driver(kirkwood_pinctrl_driver); + +MODULE_AUTHOR("Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>"); +MODULE_DESCRIPTION("Marvell Kirkwood pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/pinctrl-mvebu.c b/drivers/pinctrl/pinctrl-mvebu.c new file mode 100644 index 000000000000..8e6266c6249a --- /dev/null +++ b/drivers/pinctrl/pinctrl-mvebu.c @@ -0,0 +1,754 @@ +/* + * Marvell MVEBU pinctrl core driver + * + * Authors: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Thomas Petazzoni 
<thomas.petazzoni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_platform.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/pinctrl/machine.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinmux.h> + +#include "core.h" +#include "pinctrl-mvebu.h" + +#define MPPS_PER_REG 8 +#define MPP_BITS 4 +#define MPP_MASK 0xf + +struct mvebu_pinctrl_function { + const char *name; + const char **groups; + unsigned num_groups; +}; + +struct mvebu_pinctrl_group { + const char *name; + struct mvebu_mpp_ctrl *ctrl; + struct mvebu_mpp_ctrl_setting *settings; + unsigned num_settings; + unsigned gid; + unsigned *pins; + unsigned npins; +}; + +struct mvebu_pinctrl { + struct device *dev; + struct pinctrl_dev *pctldev; + struct pinctrl_desc desc; + void __iomem *base; + struct mvebu_pinctrl_group *groups; + unsigned num_groups; + struct mvebu_pinctrl_function *functions; + unsigned num_functions; + u8 variant; +}; + +static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_pid( + struct mvebu_pinctrl *pctl, unsigned pid) +{ + unsigned n; + for (n = 0; n < pctl->num_groups; n++) { + if (pid >= pctl->groups[n].pins[0] && + pid < pctl->groups[n].pins[0] + + pctl->groups[n].npins) + return &pctl->groups[n]; + } + return NULL; +} + +static struct mvebu_pinctrl_group *mvebu_pinctrl_find_group_by_name( + struct mvebu_pinctrl *pctl, const char *name) +{ + unsigned n; + for (n = 0; n < pctl->num_groups; n++) { + if (strcmp(name, pctl->groups[n].name) == 0) + return &pctl->groups[n]; + } + return NULL; +} + +static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_val( + struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp, + unsigned long config) +{ + unsigned n; + for (n = 0; n < grp->num_settings; n++) { + if (config == grp->settings[n].val) { + if (!pctl->variant || (pctl->variant & + grp->settings[n].variant)) + return &grp->settings[n]; + } + } + return NULL; +} + +static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_setting_by_name( + struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp, + const char *name) +{ + unsigned n; + for (n = 0; n < grp->num_settings; n++) { + if (strcmp(name, grp->settings[n].name) == 0) { + if (!pctl->variant || (pctl->variant & + grp->settings[n].variant)) + return &grp->settings[n]; + } + } + return NULL; +} + +static struct mvebu_mpp_ctrl_setting *mvebu_pinctrl_find_gpio_setting( + struct mvebu_pinctrl *pctl, struct mvebu_pinctrl_group *grp) +{ + unsigned n; + for (n = 0; n < grp->num_settings; n++) { + if (grp->settings[n].flags & + (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) { + if (!pctl->variant || (pctl->variant & + grp->settings[n].variant)) + return &grp->settings[n]; + } + } + return NULL; +} + +static struct mvebu_pinctrl_function *mvebu_pinctrl_find_function_by_name( + struct mvebu_pinctrl *pctl, const char *name) +{ + unsigned n; + for (n = 0; n < pctl->num_functions; n++) { + if (strcmp(name, pctl->functions[n].name) == 0) + return &pctl->functions[n]; + } + return NULL; +} + +/* + * Common mpp pin configuration registers on MVEBU are + * registers of 
eight 4-bit values for each mpp setting. + * Register offset and bit mask are calculated accordingly below. + */ +static int mvebu_common_mpp_get(struct mvebu_pinctrl *pctl, + struct mvebu_pinctrl_group *grp, + unsigned long *config) +{ + unsigned pin = grp->gid; + unsigned off = (pin / MPPS_PER_REG) * MPP_BITS; + unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS; + + *config = readl(pctl->base + off); + *config >>= shift; + *config &= MPP_MASK; + + return 0; +} + +static int mvebu_common_mpp_set(struct mvebu_pinctrl *pctl, + struct mvebu_pinctrl_group *grp, + unsigned long config) +{ + unsigned pin = grp->gid; + unsigned off = (pin / MPPS_PER_REG) * MPP_BITS; + unsigned shift = (pin % MPPS_PER_REG) * MPP_BITS; + unsigned long reg; + + reg = readl(pctl->base + off); + reg &= ~(MPP_MASK << shift); + reg |= (config << shift); + writel(reg, pctl->base + off); + + return 0; +} + +static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev, + unsigned gid, unsigned long *config) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_group *grp = &pctl->groups[gid]; + + if (!grp->ctrl) + return -EINVAL; + + if (grp->ctrl->mpp_get) + return grp->ctrl->mpp_get(grp->ctrl, config); + + return mvebu_common_mpp_get(pctl, grp, config); +} + +static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev, + unsigned gid, unsigned long config) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_group *grp = &pctl->groups[gid]; + + if (!grp->ctrl) + return -EINVAL; + + if (grp->ctrl->mpp_set) + return grp->ctrl->mpp_set(grp->ctrl, config); + + return mvebu_common_mpp_set(pctl, grp, config); +} + +static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev, + struct seq_file *s, unsigned gid) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_group *grp = &pctl->groups[gid]; + struct mvebu_mpp_ctrl_setting *curr; + unsigned long config; + unsigned n; + + if (mvebu_pinconf_group_get(pctldev, gid, &config)) + return; + + curr = mvebu_pinctrl_find_setting_by_val(pctl, grp, config); + + if (curr) { + seq_printf(s, "current: %s", curr->name); + if (curr->subname) + seq_printf(s, "(%s)", curr->subname); + if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) { + seq_printf(s, "("); + if (curr->flags & MVEBU_SETTING_GPI) + seq_printf(s, "i"); + if (curr->flags & MVEBU_SETTING_GPO) + seq_printf(s, "o"); + seq_printf(s, ")"); + } + } else + seq_printf(s, "current: UNKNOWN"); + + if (grp->num_settings > 1) { + seq_printf(s, ", available = ["); + for (n = 0; n < grp->num_settings; n++) { + if (curr == &grp->settings[n]) + continue; + + /* skip unsupported settings for this variant */ + if (pctl->variant && + !(pctl->variant & grp->settings[n].variant)) + continue; + + seq_printf(s, " %s", grp->settings[n].name); + if (grp->settings[n].subname) + seq_printf(s, "(%s)", grp->settings[n].subname); + if (grp->settings[n].flags & + (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) { + seq_printf(s, "("); + if (grp->settings[n].flags & MVEBU_SETTING_GPI) + seq_printf(s, "i"); + if (grp->settings[n].flags & MVEBU_SETTING_GPO) + seq_printf(s, "o"); + seq_printf(s, ")"); + } + } + seq_printf(s, " ]"); + } + return; +} + +static struct pinconf_ops mvebu_pinconf_ops = { + .pin_config_group_get = mvebu_pinconf_group_get, + .pin_config_group_set = mvebu_pinconf_group_set, + .pin_config_group_dbg_show = mvebu_pinconf_group_dbg_show, +}; + +static int mvebu_pinmux_get_funcs_count(struct pinctrl_dev *pctldev) +{ + 
struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->num_functions; +} + +static const char *mvebu_pinmux_get_func_name(struct pinctrl_dev *pctldev, + unsigned fid) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + return pctl->functions[fid].name; +} + +static int mvebu_pinmux_get_groups(struct pinctrl_dev *pctldev, unsigned fid, + const char * const **groups, + unsigned * const num_groups) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctl->functions[fid].groups; + *num_groups = pctl->functions[fid].num_groups; + return 0; +} + +static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid, + unsigned gid) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_function *func = &pctl->functions[fid]; + struct mvebu_pinctrl_group *grp = &pctl->groups[gid]; + struct mvebu_mpp_ctrl_setting *setting; + int ret; + + setting = mvebu_pinctrl_find_setting_by_name(pctl, grp, + func->name); + if (!setting) { + dev_err(pctl->dev, + "unable to find setting %s in group %s\n", + func->name, func->groups[gid]); + return -EINVAL; + } + + ret = mvebu_pinconf_group_set(pctldev, grp->gid, setting->val); + if (ret) { + dev_err(pctl->dev, "cannot set group %s to %s\n", + func->groups[gid], func->name); + return ret; + } + + return 0; +} + +static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, unsigned offset) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_group *grp; + struct mvebu_mpp_ctrl_setting *setting; + + grp = mvebu_pinctrl_find_group_by_pid(pctl, offset); + if (!grp) + return -EINVAL; + + if (grp->ctrl->mpp_gpio_req) + return grp->ctrl->mpp_gpio_req(grp->ctrl, offset); + + setting = mvebu_pinctrl_find_gpio_setting(pctl, grp); + if (!setting) + return -ENOTSUPP; + + return mvebu_pinconf_group_set(pctldev, grp->gid, setting->val); +} + +static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, unsigned offset, bool input) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct mvebu_pinctrl_group *grp; + struct mvebu_mpp_ctrl_setting *setting; + + grp = mvebu_pinctrl_find_group_by_pid(pctl, offset); + if (!grp) + return -EINVAL; + + if (grp->ctrl->mpp_gpio_dir) + return grp->ctrl->mpp_gpio_dir(grp->ctrl, offset, input); + + setting = mvebu_pinctrl_find_gpio_setting(pctl, grp); + if (!setting) + return -ENOTSUPP; + + if ((input && (setting->flags & MVEBU_SETTING_GPI)) || + (!input && (setting->flags & MVEBU_SETTING_GPO))) + return 0; + + return -ENOTSUPP; +} + +static struct pinmux_ops mvebu_pinmux_ops = { + .get_functions_count = mvebu_pinmux_get_funcs_count, + .get_function_name = mvebu_pinmux_get_func_name, + .get_function_groups = mvebu_pinmux_get_groups, + .gpio_request_enable = mvebu_pinmux_gpio_request_enable, + .gpio_set_direction = mvebu_pinmux_gpio_set_direction, + .enable = mvebu_pinmux_enable, +}; + +static int mvebu_pinctrl_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + return pctl->num_groups; +} + +static const char *mvebu_pinctrl_get_group_name(struct pinctrl_dev *pctldev, + unsigned gid) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + return pctl->groups[gid].name; +} + +static int mvebu_pinctrl_get_group_pins(struct pinctrl_dev *pctldev, + unsigned gid, const unsigned **pins, + unsigned *num_pins) +{ 
+ struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + *pins = pctl->groups[gid].pins; + *num_pins = pctl->groups[gid].npins; + return 0; +} + +static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev, + struct device_node *np, + struct pinctrl_map **map, + unsigned *num_maps) +{ + struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); + struct property *prop; + const char *function; + const char *group; + int ret, nmaps, n; + + *map = NULL; + *num_maps = 0; + + ret = of_property_read_string(np, "marvell,function", &function); + if (ret) { + dev_err(pctl->dev, + "missing marvell,function in node %s\n", np->name); + return 0; + } + + nmaps = of_property_count_strings(np, "marvell,pins"); + if (nmaps < 0) { + dev_err(pctl->dev, + "missing marvell,pins in node %s\n", np->name); + return 0; + } + + *map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL); + if (map == NULL) { + dev_err(pctl->dev, + "cannot allocate pinctrl_map memory for %s\n", + np->name); + return -ENOMEM; + } + + n = 0; + of_property_for_each_string(np, "marvell,pins", prop, group) { + struct mvebu_pinctrl_group *grp = + mvebu_pinctrl_find_group_by_name(pctl, group); + + if (!grp) { + dev_err(pctl->dev, "unknown pin %s", group); + continue; + } + + if (!mvebu_pinctrl_find_setting_by_name(pctl, grp, function)) { + dev_err(pctl->dev, "unsupported function %s on pin %s", + function, group); + continue; + } + + (*map)[n].type = PIN_MAP_TYPE_MUX_GROUP; + (*map)[n].data.mux.group = group; + (*map)[n].data.mux.function = function; + n++; + } + + *num_maps = nmaps; + + return 0; +} + +static void mvebu_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, + struct pinctrl_map *map, unsigned num_maps) +{ + kfree(map); +} + +static struct pinctrl_ops mvebu_pinctrl_ops = { + .get_groups_count = mvebu_pinctrl_get_groups_count, + .get_group_name = mvebu_pinctrl_get_group_name, + .get_group_pins = mvebu_pinctrl_get_group_pins, + .dt_node_to_map = mvebu_pinctrl_dt_node_to_map, + .dt_free_map = mvebu_pinctrl_dt_free_map, +}; + +static int __devinit _add_function(struct mvebu_pinctrl_function *funcs, + const char *name) +{ + while (funcs->num_groups) { + /* function already there */ + if (strcmp(funcs->name, name) == 0) { + funcs->num_groups++; + return -EEXIST; + } + funcs++; + } + funcs->name = name; + funcs->num_groups = 1; + return 0; +} + +static int __devinit mvebu_pinctrl_build_functions(struct platform_device *pdev, + struct mvebu_pinctrl *pctl) +{ + struct mvebu_pinctrl_function *funcs; + int num = 0; + int n, s; + + /* we allocate functions for number of pins and hope + * there are less unique functions than pins available */ + funcs = devm_kzalloc(&pdev->dev, pctl->desc.npins * + sizeof(struct mvebu_pinctrl_function), GFP_KERNEL); + if (!funcs) + return -ENOMEM; + + for (n = 0; n < pctl->num_groups; n++) { + struct mvebu_pinctrl_group *grp = &pctl->groups[n]; + for (s = 0; s < grp->num_settings; s++) { + /* skip unsupported settings on this variant */ + if (pctl->variant && + !(pctl->variant & grp->settings[s].variant)) + continue; + + /* check for unique functions and count groups */ + if (_add_function(funcs, grp->settings[s].name)) + continue; + + num++; + } + } + + /* with the number of unique functions and it's groups known, + reallocate functions and assign group names */ + funcs = krealloc(funcs, num * sizeof(struct mvebu_pinctrl_function), + GFP_KERNEL); + if (!funcs) + return -ENOMEM; + + pctl->num_functions = num; + pctl->functions = funcs; + + for (n = 0; n < pctl->num_groups; n++) { + 
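+		/*
+		 * Second pass: for each setting supported on this variant,
+		 * look up its (already counted) function and append this
+		 * group's name to that function's group list.
+		 */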
struct mvebu_pinctrl_group *grp = &pctl->groups[n]; + for (s = 0; s < grp->num_settings; s++) { + struct mvebu_pinctrl_function *f; + const char **groups; + + /* skip unsupported settings on this variant */ + if (pctl->variant && + !(pctl->variant & grp->settings[s].variant)) + continue; + + f = mvebu_pinctrl_find_function_by_name(pctl, + grp->settings[s].name); + + /* allocate group name array if not done already */ + if (!f->groups) { + f->groups = devm_kzalloc(&pdev->dev, + f->num_groups * sizeof(char *), + GFP_KERNEL); + if (!f->groups) + return -ENOMEM; + } + + /* find next free group name and assign current name */ + groups = f->groups; + while (*groups) + groups++; + *groups = grp->name; + } + } + + return 0; +} + +int __devinit mvebu_pinctrl_probe(struct platform_device *pdev) +{ + struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev); + struct device_node *np = pdev->dev.of_node; + struct mvebu_pinctrl *pctl; + void __iomem *base; + struct pinctrl_pin_desc *pdesc; + unsigned gid, n, k; + int ret; + + if (!soc || !soc->controls || !soc->modes) { + dev_err(&pdev->dev, "wrong pinctrl soc info\n"); + return -EINVAL; + } + + base = of_iomap(np, 0); + if (!base) { + dev_err(&pdev->dev, "unable to get base address\n"); + return -ENODEV; + } + + pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl), + GFP_KERNEL); + if (!pctl) { + dev_err(&pdev->dev, "unable to alloc driver\n"); + return -ENOMEM; + } + + pctl->desc.name = dev_name(&pdev->dev); + pctl->desc.owner = THIS_MODULE; + pctl->desc.pctlops = &mvebu_pinctrl_ops; + pctl->desc.pmxops = &mvebu_pinmux_ops; + pctl->desc.confops = &mvebu_pinconf_ops; + pctl->variant = soc->variant; + pctl->base = base; + pctl->dev = &pdev->dev; + platform_set_drvdata(pdev, pctl); + + /* count controls and create names for mvebu generic + register controls; also does sanity checks */ + pctl->num_groups = 0; + pctl->desc.npins = 0; + for (n = 0; n < soc->ncontrols; n++) { + struct mvebu_mpp_ctrl *ctrl = &soc->controls[n]; + char *names; + + pctl->desc.npins += ctrl->npins; + /* initial control pins */ + for (k = 0; k < ctrl->npins; k++) + ctrl->pins[k] = ctrl->pid + k; + + /* special soc specific control */ + if (ctrl->mpp_get || ctrl->mpp_set) { + if (!ctrl->name || !ctrl->mpp_set || !ctrl->mpp_set) { + dev_err(&pdev->dev, "wrong soc control info\n"); + return -EINVAL; + } + pctl->num_groups += 1; + continue; + } + + /* generic mvebu register control */ + names = devm_kzalloc(&pdev->dev, ctrl->npins * 8, GFP_KERNEL); + if (!names) { + dev_err(&pdev->dev, "failed to alloc mpp names\n"); + return -ENOMEM; + } + for (k = 0; k < ctrl->npins; k++) + sprintf(names + 8*k, "mpp%d", ctrl->pid+k); + ctrl->name = names; + pctl->num_groups += ctrl->npins; + } + + pdesc = devm_kzalloc(&pdev->dev, pctl->desc.npins * + sizeof(struct pinctrl_pin_desc), GFP_KERNEL); + if (!pdesc) { + dev_err(&pdev->dev, "failed to alloc pinctrl pins\n"); + return -ENOMEM; + } + + for (n = 0; n < pctl->desc.npins; n++) + pdesc[n].number = n; + pctl->desc.pins = pdesc; + + pctl->groups = devm_kzalloc(&pdev->dev, pctl->num_groups * + sizeof(struct mvebu_pinctrl_group), GFP_KERNEL); + if (!pctl->groups) { + dev_err(&pdev->dev, "failed to alloc pinctrl groups\n"); + return -ENOMEM; + } + + /* assign mpp controls to groups */ + gid = 0; + for (n = 0; n < soc->ncontrols; n++) { + struct mvebu_mpp_ctrl *ctrl = &soc->controls[n]; + pctl->groups[gid].gid = gid; + pctl->groups[gid].ctrl = ctrl; + pctl->groups[gid].name = ctrl->name; + pctl->groups[gid].pins = ctrl->pins; + 
pctl->groups[gid].npins = ctrl->npins; + + /* generic mvebu register control maps to a number of groups */ + if (!ctrl->mpp_get && !ctrl->mpp_set) { + pctl->groups[gid].npins = 1; + + for (k = 1; k < ctrl->npins; k++) { + gid++; + pctl->groups[gid].gid = gid; + pctl->groups[gid].ctrl = ctrl; + pctl->groups[gid].name = &ctrl->name[8*k]; + pctl->groups[gid].pins = &ctrl->pins[k]; + pctl->groups[gid].npins = 1; + } + } + gid++; + } + + /* assign mpp modes to groups */ + for (n = 0; n < soc->nmodes; n++) { + struct mvebu_mpp_mode *mode = &soc->modes[n]; + struct mvebu_pinctrl_group *grp = + mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); + unsigned num_settings; + + if (!grp) { + dev_warn(&pdev->dev, "unknown pinctrl group %d\n", + mode->pid); + continue; + } + + for (num_settings = 0; ;) { + struct mvebu_mpp_ctrl_setting *set = + &mode->settings[num_settings]; + + if (!set->name) + break; + num_settings++; + + /* skip unsupported settings for this variant */ + if (pctl->variant && !(pctl->variant & set->variant)) + continue; + + /* find gpio/gpo/gpi settings */ + if (strcmp(set->name, "gpio") == 0) + set->flags = MVEBU_SETTING_GPI | + MVEBU_SETTING_GPO; + else if (strcmp(set->name, "gpo") == 0) + set->flags = MVEBU_SETTING_GPO; + else if (strcmp(set->name, "gpi") == 0) + set->flags = MVEBU_SETTING_GPI; + } + + grp->settings = mode->settings; + grp->num_settings = num_settings; + } + + ret = mvebu_pinctrl_build_functions(pdev, pctl); + if (ret) { + dev_err(&pdev->dev, "unable to build functions\n"); + return ret; + } + + pctl->pctldev = pinctrl_register(&pctl->desc, &pdev->dev, pctl); + if (!pctl->pctldev) { + dev_err(&pdev->dev, "unable to register pinctrl driver\n"); + return -EINVAL; + } + + dev_info(&pdev->dev, "registered pinctrl driver\n"); + + /* register gpio ranges */ + for (n = 0; n < soc->ngpioranges; n++) + pinctrl_add_gpio_range(pctl->pctldev, &soc->gpioranges[n]); + + return 0; +} + +int __devexit mvebu_pinctrl_remove(struct platform_device *pdev) +{ + struct mvebu_pinctrl *pctl = platform_get_drvdata(pdev); + pinctrl_unregister(pctl->pctldev); + return 0; +} diff --git a/drivers/pinctrl/pinctrl-mvebu.h b/drivers/pinctrl/pinctrl-mvebu.h new file mode 100644 index 000000000000..90bd3beee860 --- /dev/null +++ b/drivers/pinctrl/pinctrl-mvebu.h @@ -0,0 +1,192 @@ +/* + * Marvell MVEBU pinctrl driver + * + * Authors: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> + * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef __PINCTRL_MVEBU_H__ +#define __PINCTRL_MVEBU_H__ + +/** + * struct mvebu_mpp_ctrl - describe a mpp control + * @name: name of the control group + * @pid: first pin id handled by this control + * @npins: number of pins controlled by this control + * @mpp_get: (optional) special function to get mpp setting + * @mpp_set: (optional) special function to set mpp setting + * @mpp_gpio_req: (optional) special function to request gpio + * @mpp_gpio_dir: (optional) special function to set gpio direction + * + * A mpp_ctrl describes a muxable unit, e.g. pin, group of pins, or + * internal function, inside the SoC. Each muxable unit can be switched + * between two or more different settings, e.g. assign mpp pin 13 to + * uart1 or sata. 
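As a concrete illustration of the default register layout handled by mvebu_common_mpp_get()/_set() earlier in pinctrl-mvebu.c: each 32-bit MPP register packs eight 4-bit function selectors, so the selector for a pin such as MPP 13 is found with a simple divide/modulo. A sketch using the same constants and arithmetic as the driver:

#define MPPS_PER_REG	8
#define MPP_BITS	4
#define MPP_MASK	0xf

/* Which register (byte offset) and bit position hold a pin's selector. */
static void mpp_locate(unsigned pin, unsigned *off, unsigned *shift)
{
	*off   = (pin / MPPS_PER_REG) * MPP_BITS;	/* 4 bytes per 8-pin register */
	*shift = (pin % MPPS_PER_REG) * MPP_BITS;
}

/*
 * MPP 13: off = 4, shift = 20, so its current setting is
 * (readl(base + 4) >> 20) & MPP_MASK.
 */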
+ * + * If optional mpp_get/_set functions are set these are used to get/set + * a specific mode. Otherwise it is assumed that the mpp control is based + * on 4-bit groups in subsequent registers. The optional mpp_gpio_req/_dir + * functions can be used to allow pin settings with varying gpio pins. + */ +struct mvebu_mpp_ctrl { + const char *name; + u8 pid; + u8 npins; + unsigned *pins; + int (*mpp_get)(struct mvebu_mpp_ctrl *ctrl, unsigned long *config); + int (*mpp_set)(struct mvebu_mpp_ctrl *ctrl, unsigned long config); + int (*mpp_gpio_req)(struct mvebu_mpp_ctrl *ctrl, u8 pid); + int (*mpp_gpio_dir)(struct mvebu_mpp_ctrl *ctrl, u8 pid, bool input); +}; + +/** + * struct mvebu_mpp_ctrl_setting - describe a mpp ctrl setting + * @val: ctrl setting value + * @name: ctrl setting name, e.g. uart2, spi0 - unique per mpp_mode + * @subname: (optional) additional ctrl setting name, e.g. rts, cts + * @variant: (optional) variant identifier mask + * @flags: (private) flags to store gpi/gpo/gpio capabilities + * + * A ctrl_setting describes a specific internal mux function that a mpp pin + * can be switched to. The value (val) will be written in the corresponding + * register for common mpp pin configuration registers on MVEBU. SoC specific + * mpp_get/_set function may use val to distinguish between different settings. + * + * The name will be used to switch to this setting in DT description, e.g. + * marvell,function = "uart2". subname is only for debugging purposes. + * + * If name is one of "gpi", "gpo", "gpio" gpio capabilities are + * parsed during initialization and stored in flags. + * + * The variant can be used to combine different revisions of one SoC to a + * common pinctrl driver. It is matched (AND) with variant of soc_info to + * determine if a setting is available on the current SoC revision. + */ +struct mvebu_mpp_ctrl_setting { + u8 val; + const char *name; + const char *subname; + u8 variant; + u8 flags; +#define MVEBU_SETTING_GPO (1 << 0) +#define MVEBU_SETTING_GPI (1 << 1) +}; + +/** + * struct mvebu_mpp_mode - link ctrl and settings + * @pid: first pin id handled by this mode + * @settings: list of settings available for this mode + * + * A mode connects all available settings with the corresponding mpp_ctrl + * given by pid. + */ +struct mvebu_mpp_mode { + u8 pid; + struct mvebu_mpp_ctrl_setting *settings; +}; + +/** + * struct mvebu_pinctrl_soc_info - SoC specific info passed to pinctrl-mvebu + * @variant: variant mask of soc_info + * @controls: list of available mvebu_mpp_ctrls + * @ncontrols: number of available mvebu_mpp_ctrls + * @modes: list of available mvebu_mpp_modes + * @nmodes: number of available mvebu_mpp_modes + * @gpioranges: list of pinctrl_gpio_ranges + * @ngpioranges: number of available pinctrl_gpio_ranges + * + * This struct describes all pinctrl related information for a specific SoC. + * If variant is unequal 0 it will be matched (AND) with variant of each + * setting and allows to distinguish between different revisions of one SoC. 
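To make the relationship between these structures concrete, a hypothetical SoC driver (all names invented for illustration) wires them together with the macros defined further down, just as the Dove and Kirkwood drivers above do:

static struct mvebu_mpp_ctrl example_mpp_controls[] = {
	MPP_REG_CTRL(0, 15),		/* MPPs 0..15, generic 4-bit registers */
};

static struct mvebu_mpp_mode example_mpp_modes[] = {
	MPP_MODE(0,
		 MPP_FUNCTION(0x0, "gpio", NULL),
		 MPP_FUNCTION(0x2, "uart0", "rxd")),
};

static struct pinctrl_gpio_range example_gpio_ranges[] = {
	MPP_GPIO_RANGE(0, 0, 0, 16),
};

static struct mvebu_pinctrl_soc_info example_pinctrl_info = {
	.variant     = 0,		/* no variant filtering */
	.controls    = example_mpp_controls,
	.ncontrols   = ARRAY_SIZE(example_mpp_controls),
	.modes       = example_mpp_modes,
	.nmodes      = ARRAY_SIZE(example_mpp_modes),
	.gpioranges  = example_gpio_ranges,
	.ngpioranges = ARRAY_SIZE(example_gpio_ranges),
};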
+ */ +struct mvebu_pinctrl_soc_info { + u8 variant; + struct mvebu_mpp_ctrl *controls; + int ncontrols; + struct mvebu_mpp_mode *modes; + int nmodes; + struct pinctrl_gpio_range *gpioranges; + int ngpioranges; +}; + +#define MPP_REG_CTRL(_idl, _idh) \ + { \ + .name = NULL, \ + .pid = _idl, \ + .npins = _idh - _idl + 1, \ + .pins = (unsigned[_idh - _idl + 1]) { }, \ + .mpp_get = NULL, \ + .mpp_set = NULL, \ + .mpp_gpio_req = NULL, \ + .mpp_gpio_dir = NULL, \ + } + +#define MPP_FUNC_CTRL(_idl, _idh, _name, _func) \ + { \ + .name = _name, \ + .pid = _idl, \ + .npins = _idh - _idl + 1, \ + .pins = (unsigned[_idh - _idl + 1]) { }, \ + .mpp_get = _func ## _get, \ + .mpp_set = _func ## _set, \ + .mpp_gpio_req = NULL, \ + .mpp_gpio_dir = NULL, \ + } + +#define MPP_FUNC_GPIO_CTRL(_idl, _idh, _name, _func) \ + { \ + .name = _name, \ + .pid = _idl, \ + .npins = _idh - _idl + 1, \ + .pins = (unsigned[_idh - _idl + 1]) { }, \ + .mpp_get = _func ## _get, \ + .mpp_set = _func ## _set, \ + .mpp_gpio_req = _func ## _gpio_req, \ + .mpp_gpio_dir = _func ## _gpio_dir, \ + } + +#define _MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \ + { \ + .val = _val, \ + .name = _name, \ + .subname = _subname, \ + .variant = _mask, \ + .flags = 0, \ + } + +#if defined(CONFIG_DEBUG_FS) +#define MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \ + _MPP_VAR_FUNCTION(_val, _name, _subname, _mask) +#else +#define MPP_VAR_FUNCTION(_val, _name, _subname, _mask) \ + _MPP_VAR_FUNCTION(_val, _name, NULL, _mask) +#endif + +#define MPP_FUNCTION(_val, _name, _subname) \ + MPP_VAR_FUNCTION(_val, _name, _subname, (u8)-1) + +#define MPP_MODE(_id, ...) \ + { \ + .pid = _id, \ + .settings = (struct mvebu_mpp_ctrl_setting[]){ \ + __VA_ARGS__, { } }, \ + } + +#define MPP_GPIO_RANGE(_id, _pinbase, _gpiobase, _npins) \ + { \ + .name = "mvebu-gpio", \ + .id = _id, \ + .pin_base = _pinbase, \ + .base = _gpiobase, \ + .npins = _npins, \ + } + +int mvebu_pinctrl_probe(struct platform_device *pdev); +int mvebu_pinctrl_remove(struct platform_device *pdev); + +#endif diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c index 6b9af989632b..18d74f29dcb2 100644 --- a/drivers/platform/x86/hp_accel.c +++ b/drivers/platform/x86/hp_accel.c @@ -382,31 +382,8 @@ static struct acpi_driver lis3lv02d_driver = { }, .drv.pm = HP_ACCEL_PM, }; - -static int __init lis3lv02d_init_module(void) -{ - int ret; - - if (acpi_disabled) - return -ENODEV; - - ret = acpi_bus_register_driver(&lis3lv02d_driver); - if (ret < 0) - return ret; - - pr_info("driver loaded\n"); - - return 0; -} - -static void __exit lis3lv02d_exit_module(void) -{ - acpi_bus_unregister_driver(&lis3lv02d_driver); -} +module_acpi_driver(lis3lv02d_driver); MODULE_DESCRIPTION("Glue between LIS3LV02Dx and HP ACPI BIOS and support for disk protection LED."); MODULE_AUTHOR("Yan Burman, Eric Piel, Pavel Machek"); MODULE_LICENSE("GPL"); - -module_init(lis3lv02d_init_module); -module_exit(lis3lv02d_exit_module); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index dae7abe1d711..5ff4f2e314d2 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -917,20 +917,8 @@ static struct acpi_driver ideapad_acpi_driver = { .drv.pm = &ideapad_pm, .owner = THIS_MODULE, }; - -static int __init ideapad_acpi_module_init(void) -{ - return acpi_bus_register_driver(&ideapad_acpi_driver); -} - -static void __exit ideapad_acpi_module_exit(void) -{ - acpi_bus_unregister_driver(&ideapad_acpi_driver); -} 
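The platform/x86 hunks that follow all make the same mechanical conversion: module init/exit functions whose only job is to register and unregister an ACPI driver (plus, in a couple of drivers, a "driver loaded" message that is dropped) are collapsed into the module_acpi_driver() helper, which generates the equivalent boilerplate. A before/after sketch with a hypothetical driver:

#include <linux/module.h>
#include <linux/acpi.h>

static struct acpi_driver example_acpi_driver = {
	.name = "example",
	/* .class, .ids, .ops, ... */
};

/* Before: open-coded registration boilerplate */
static int __init example_init(void)
{
	return acpi_bus_register_driver(&example_acpi_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
	acpi_bus_unregister_driver(&example_acpi_driver);
}
module_exit(example_exit);

/* After: a single line with equivalent behaviour */
module_acpi_driver(example_acpi_driver);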
+module_acpi_driver(ideapad_acpi_driver); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("IdeaPad ACPI Extras"); MODULE_LICENSE("GPL"); - -module_init(ideapad_acpi_module_init); -module_exit(ideapad_acpi_module_exit); diff --git a/drivers/platform/x86/topstar-laptop.c b/drivers/platform/x86/topstar-laptop.c index d528daa0e81c..d727bfee89a6 100644 --- a/drivers/platform/x86/topstar-laptop.c +++ b/drivers/platform/x86/topstar-laptop.c @@ -186,27 +186,7 @@ static struct acpi_driver acpi_topstar_driver = { .notify = acpi_topstar_notify, }, }; - -static int __init topstar_laptop_init(void) -{ - int ret; - - ret = acpi_bus_register_driver(&acpi_topstar_driver); - if (ret < 0) - return ret; - - pr_info("ACPI extras driver loaded\n"); - - return 0; -} - -static void __exit topstar_laptop_exit(void) -{ - acpi_bus_unregister_driver(&acpi_topstar_driver); -} - -module_init(topstar_laptop_init); -module_exit(topstar_laptop_exit); +module_acpi_driver(acpi_topstar_driver); MODULE_AUTHOR("Herton Ronaldo Krzesinski"); MODULE_DESCRIPTION("Topstar Laptop ACPI Extras driver"); diff --git a/drivers/platform/x86/toshiba_bluetooth.c b/drivers/platform/x86/toshiba_bluetooth.c index 5e5d6317d690..e95be0b74859 100644 --- a/drivers/platform/x86/toshiba_bluetooth.c +++ b/drivers/platform/x86/toshiba_bluetooth.c @@ -122,30 +122,10 @@ static int toshiba_bt_rfkill_add(struct acpi_device *device) return result; } -static int __init toshiba_bt_rfkill_init(void) -{ - int result; - - result = acpi_bus_register_driver(&toshiba_bt_rfkill_driver); - if (result < 0) { - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, - "Error registering driver\n")); - return result; - } - - return 0; -} - static int toshiba_bt_rfkill_remove(struct acpi_device *device, int type) { /* clean up */ return 0; } -static void __exit toshiba_bt_rfkill_exit(void) -{ - acpi_bus_unregister_driver(&toshiba_bt_rfkill_driver); -} - -module_init(toshiba_bt_rfkill_init); -module_exit(toshiba_bt_rfkill_exit); +module_acpi_driver(toshiba_bt_rfkill_driver); diff --git a/drivers/platform/x86/xo15-ebook.c b/drivers/platform/x86/xo15-ebook.c index 38ba39d7ca7d..16d340c3b852 100644 --- a/drivers/platform/x86/xo15-ebook.c +++ b/drivers/platform/x86/xo15-ebook.c @@ -170,16 +170,4 @@ static struct acpi_driver xo15_ebook_driver = { }, .drv.pm = &ebook_switch_pm, }; - -static int __init xo15_ebook_init(void) -{ - return acpi_bus_register_driver(&xo15_ebook_driver); -} - -static void __exit xo15_ebook_exit(void) -{ - acpi_bus_unregister_driver(&xo15_ebook_driver); -} - -module_init(xo15_ebook_init); -module_exit(xo15_ebook_exit); +module_acpi_driver(xo15_ebook_driver); diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c index 507a8e2b9a4c..26b5d4b18dd7 100644 --- a/drivers/pnp/pnpacpi/core.c +++ b/drivers/pnp/pnpacpi/core.c @@ -321,14 +321,9 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp) { struct acpi_device *acpi = to_acpi_device(dev); struct pnp_dev *pnp = _pnp; - struct device *physical_device; - - physical_device = acpi_get_physical_device(acpi->handle); - if (physical_device) - put_device(physical_device); /* true means it matched */ - return !physical_device + return !acpi->physical_node_count && compare_pnp_id(pnp->id, acpi_device_hid(acpi)); } diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c index 3541b4492f64..e7a4780e93db 100644 --- a/drivers/remoteproc/remoteproc_virtio.c +++ b/drivers/remoteproc/remoteproc_virtio.c @@ -84,6 +84,9 @@ static struct virtqueue 
*rp_find_vq(struct virtio_device *vdev, if (id >= ARRAY_SIZE(rvdev->vring)) return ERR_PTR(-EINVAL); + if (!name) + return NULL; + ret = rproc_alloc_vring(rvdev, id); if (ret) return ERR_PTR(ret); @@ -103,7 +106,7 @@ static struct virtqueue *rp_find_vq(struct virtio_device *vdev, * Create the new vq, and tell virtio we're not interested in * the 'weak' smp barriers, since we're talking with a real device. */ - vq = vring_new_virtqueue(len, rvring->align, vdev, false, addr, + vq = vring_new_virtqueue(id, len, rvring->align, vdev, false, addr, rproc_virtio_notify, callback, name); if (!vq) { dev_err(dev, "vring_new_virtqueue %s failed\n", name); diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig index 32aead65735a..2bd911f12571 100644 --- a/drivers/rpmsg/Kconfig +++ b/drivers/rpmsg/Kconfig @@ -4,7 +4,6 @@ menu "Rpmsg drivers (EXPERIMENTAL)" config RPMSG tristate select VIRTIO - select VIRTIO_RING depends on EXPERIMENTAL endmenu diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 47cccd52aae8..7dabef624da3 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c @@ -190,6 +190,9 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, if (index >= kdev->desc->num_vq) return ERR_PTR(-ENOENT); + if (!name) + return NULL; + config = kvm_vq_config(kdev->desc)+index; err = vmem_add_mapping(config->address, @@ -198,7 +201,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, if (err) goto out; - vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN, + vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, vdev, true, (void *) config->address, kvm_notify, callback, name); if (!vq) { diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c index 9bd1c92ad96e..dfb4b7f448c5 100644 --- a/drivers/spi/spi-omap-100k.c +++ b/drivers/spi/spi-omap-100k.c @@ -37,8 +37,6 @@ #include <linux/spi/spi.h> -#include <plat/clock.h> - #define OMAP1_SPI100K_MAX_FREQ 48000000 #define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12) diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 474e2174e08a..3542fdc664b1 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -43,7 +43,6 @@ #include <linux/spi/spi.h> -#include <plat/clock.h> #include <linux/platform_data/spi-omap2-mcspi.h> #define OMAP2_MCSPI_MAX_FREQ 48000000 diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 0ef7d42d8abe..cef4252bb31a 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -87,7 +87,7 @@ static int usb_acpi_check_port_connect_type(struct usb_device *hdev, acpi_status status; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *upc; - struct acpi_pld pld; + struct acpi_pld_info *pld; int ret = 0; /* @@ -111,16 +111,17 @@ static int usb_acpi_check_port_connect_type(struct usb_device *hdev, } if (upc->package.elements[0].integer.value) - if (pld.user_visible) + if (pld->user_visible) usb_set_hub_port_connect_type(hdev, port1, USB_PORT_CONNECT_TYPE_HOT_PLUG); else usb_set_hub_port_connect_type(hdev, port1, USB_PORT_CONNECT_TYPE_HARD_WIRED); - else if (!pld.user_visible) + else if (!pld->user_visible) usb_set_hub_port_connect_type(hdev, port1, USB_PORT_NOT_USED); out: + ACPI_FREE(pld); kfree(upc); return ret; } diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index f38b17a86c35..8d5bddb56cb1 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -1,11 +1,9 @@ -# Virtio always gets 
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index f38b17a86c35..8d5bddb56cb1 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -1,11 +1,9 @@
-# Virtio always gets selected by whoever wants it.
 config VIRTIO
 	tristate
-
-# Similarly the virtio ring implementation.
-config VIRTIO_RING
-	tristate
-	depends on VIRTIO
+	---help---
+	  This option is selected by any driver which implements the virtio
+	  bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_LGUEST,
+	  CONFIG_RPMSG or CONFIG_S390_GUEST.
 
 menu "Virtio drivers"
 
@@ -13,7 +11,6 @@ config VIRTIO_PCI
 	tristate "PCI driver for virtio devices (EXPERIMENTAL)"
 	depends on PCI && EXPERIMENTAL
 	select VIRTIO
-	select VIRTIO_RING
 	---help---
 	  This drivers provides support for virtio based paravirtual device
 	  drivers over PCI.  This requires that your VMM has appropriate PCI
@@ -26,9 +23,8 @@ config VIRTIO_PCI
 	  If unsure, say M.
 
 config VIRTIO_BALLOON
-	tristate "Virtio balloon driver (EXPERIMENTAL)"
-	select VIRTIO
-	select VIRTIO_RING
+	tristate "Virtio balloon driver"
+	depends on VIRTIO
 	---help---
 	 This driver supports increasing and decreasing the amount
 	 of memory within a KVM guest.
@@ -39,7 +35,6 @@ config VIRTIO_BALLOON
 	tristate "Platform bus driver for memory mapped virtio devices (EXPERIMENTAL)"
 	depends on HAS_IOMEM && EXPERIMENTAL
 	select VIRTIO
-	select VIRTIO_RING
 	---help---
 	 This drivers provides support for memory mapped virtio
 	 platform device driver.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 5a4c63cfd380..9076635697bb 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,5 +1,4 @@
-obj-$(CONFIG_VIRTIO) += virtio.o
-obj-$(CONFIG_VIRTIO_RING) += virtio_ring.o
+obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
 obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
 obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
 obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index c3b3f7f0d9d1..1e8659ca27ef 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -159,7 +159,7 @@ static int virtio_dev_remove(struct device *_d)
 	drv->remove(dev);
 
 	/* Driver should have reset device. */
-	BUG_ON(dev->config->get_status(dev));
+	WARN_ON_ONCE(dev->config->get_status(dev));
 
 	/* Acknowledge the device's existence again. */
 	add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
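The virtio_dev_remove() change above swaps a fatal BUG_ON() for WARN_ON_ONCE(): a driver that forgets to reset its device on removal now produces a single backtrace and the core carries on (it re-acknowledges the device right after). A tiny, self-contained illustration of that pattern, with hypothetical names:

#include <linux/bug.h>
#include <linux/types.h>

struct example_dev {
	u8 status;	/* non-zero: driver forgot to reset the device */
};

static void example_after_driver_remove(struct example_dev *dev)
{
	/* Warn once with a backtrace, then recover instead of panicking. */
	if (WARN_ON_ONCE(dev->status != 0))
		dev->status = 0;
}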
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 453db0c403d8..6b1b7e184939 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -131,9 +131,6 @@ struct virtio_mmio_vq_info {
 	/* the number of entries in the queue */
 	unsigned int num;
 
-	/* the index of the queue */
-	int queue_index;
-
 	/* the virtual address of the ring queue */
 	void *queue;
 
@@ -225,11 +222,10 @@ static void vm_reset(struct virtio_device *vdev)
 static void vm_notify(struct virtqueue *vq)
 {
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
-	struct virtio_mmio_vq_info *info = vq->priv;
 
 	/* We write the queue's selector into the notification register to
 	 * signal the other end */
-	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
+	writel(virtqueue_get_queue_index(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
 }
 
 /* Notify all virtqueues on an interrupt. */
@@ -270,6 +266,7 @@ static void vm_del_vq(struct virtqueue *vq)
 	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
 	struct virtio_mmio_vq_info *info = vq->priv;
 	unsigned long flags, size;
+	unsigned int index = virtqueue_get_queue_index(vq);
 
 	spin_lock_irqsave(&vm_dev->lock, flags);
 	list_del(&info->node);
@@ -278,7 +275,7 @@ static void vm_del_vq(struct virtqueue *vq)
 	vring_del_virtqueue(vq);
 
 	/* Select and deactivate the queue */
-	writel(info->queue_index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
+	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
 	writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 
 	size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN));
@@ -309,6 +306,9 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 	unsigned long flags, size;
 	int err;
 
+	if (!name)
+		return NULL;
+
 	/* Select the queue we're interested in */
 	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
 
@@ -324,7 +324,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 		err = -ENOMEM;
 		goto error_kmalloc;
 	}
-	info->queue_index = index;
 
 	/* Allocate pages for the queue - start with a queue as big as
 	 * possible (limited by maximum size allowed by device), drop down
@@ -332,11 +331,21 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 	 * and two rings (which makes it "alignment_size * 2")
 	 */
 	info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
+
+	/* If the device reports a 0 entry queue, we won't be able to
+	 * use it to perform I/O, and vring_new_virtqueue() can't create
+	 * empty queues anyway, so don't bother to set up the device.
+	 */
+	if (info->num == 0) {
+		err = -ENOENT;
+		goto error_alloc_pages;
+	}
+
 	while (1) {
 		size = PAGE_ALIGN(vring_size(info->num,
 				VIRTIO_MMIO_VRING_ALIGN));
-		/* Already smallest possible allocation? */
-		if (size <= VIRTIO_MMIO_VRING_ALIGN * 2) {
+		/* Did the last iter shrink the queue below minimum size? */
+		if (size < VIRTIO_MMIO_VRING_ALIGN * 2) {
 			err = -ENOMEM;
 			goto error_alloc_pages;
 		}
@@ -356,7 +365,7 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
 			vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
 
 	/* Create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+	vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
 				 true, info->queue, vm_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
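vm_setup_vq() above, like the remoteproc, s390 and PCI transports elsewhere in this diff, now treats a NULL queue name as "don't instantiate this virtqueue" and returns NULL instead of an error. From the driver side that allows sparse queue sets; a hedged sketch (queue names and the callback are illustrative):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_cb(struct virtqueue *vq)
{
	/* a real driver would schedule its rx/tx work here */
}

/* Ask for three slots, but leave the middle one unused via a NULL name. */
static int example_find_vqs(struct virtio_device *vdev, struct virtqueue *vqs[3])
{
	vq_callback_t *callbacks[3] = { example_cb, NULL, example_cb };
	const char *names[3] = { "input", NULL, "output" };

	return vdev->config->find_vqs(vdev, 3, vqs, callbacks, names);
}

Transports that honour the convention leave the corresponding vqs[] slot NULL, as vp_try_to_find_vqs() does in the PCI hunks below.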
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 2e03d416b9af..c33aea36598a 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -48,6 +48,7 @@ struct virtio_pci_device
 	int msix_enabled;
 	int intx_enabled;
 	struct msix_entry *msix_entries;
+	cpumask_var_t *msix_affinity_masks;
 	/* Name strings for interrupts. This size should be enough,
 	 * and I'm too lazy to allocate each name separately. */
 	char (*msix_names)[256];
@@ -79,9 +80,6 @@ struct virtio_pci_vq_info
 	/* the number of entries in the queue */
 	int num;
 
-	/* the index of the queue */
-	int queue_index;
-
 	/* the virtual address of the ring queue */
 	void *queue;
 
@@ -202,11 +200,11 @@ static void vp_reset(struct virtio_device *vdev)
 static void vp_notify(struct virtqueue *vq)
 {
 	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
-	struct virtio_pci_vq_info *info = vq->priv;
 
 	/* we write the queue's selector into the notification register to
 	 * signal the other end */
-	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
+	iowrite16(virtqueue_get_queue_index(vq),
+		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY);
 }
 
 /* Handle a configuration change: Tell driver if it wants to know. */
@@ -279,6 +277,10 @@ static void vp_free_vectors(struct virtio_device *vdev)
 	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
 		free_irq(vp_dev->msix_entries[i].vector, vp_dev);
 
+	for (i = 0; i < vp_dev->msix_vectors; i++)
+		if (vp_dev->msix_affinity_masks[i])
+			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
+
 	if (vp_dev->msix_enabled) {
 		/* Disable the vector used for configuration */
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -296,6 +298,8 @@ static void vp_free_vectors(struct virtio_device *vdev)
 	vp_dev->msix_names = NULL;
 	kfree(vp_dev->msix_entries);
 	vp_dev->msix_entries = NULL;
+	kfree(vp_dev->msix_affinity_masks);
+	vp_dev->msix_affinity_masks = NULL;
 }
 
 static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
@@ -314,6 +318,15 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
 				     GFP_KERNEL);
 	if (!vp_dev->msix_names)
 		goto error;
+	vp_dev->msix_affinity_masks
+		= kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
+			  GFP_KERNEL);
+	if (!vp_dev->msix_affinity_masks)
+		goto error;
+	for (i = 0; i < nvectors; ++i)
+		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
+					GFP_KERNEL))
+			goto error;
 
 	for (i = 0; i < nvectors; ++i)
 		vp_dev->msix_entries[i].entry = i;
@@ -402,7 +415,6 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
 	if (!info)
 		return ERR_PTR(-ENOMEM);
 
-	info->queue_index = index;
 	info->num = num;
 	info->msix_vector = msix_vec;
 
@@ -418,7 +430,7 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
 		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
 
 	/* create the vring */
-	vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+	vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
 				 true, info->queue, vp_notify, callback, name);
 	if (!vq) {
 		err = -ENOMEM;
@@ -467,7 +479,8 @@ static void vp_del_vq(struct virtqueue *vq)
 	list_del(&info->node);
 	spin_unlock_irqrestore(&vp_dev->lock, flags);
 
-	iowrite16(info->queue_index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+	iowrite16(virtqueue_get_queue_index(vq),
+		  vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
 
 	if (vp_dev->msix_enabled) {
 		iowrite16(VIRTIO_MSI_NO_VECTOR,
@@ -542,7 +555,10 @@ static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 	vp_dev->per_vq_vectors = per_vq_vectors;
 	allocated_vectors = vp_dev->msix_used_vectors;
 	for (i = 0; i < nvqs; ++i) {
-		if (!callbacks[i] || !vp_dev->msix_enabled)
+		if (!names[i]) {
+			vqs[i] = NULL;
+			continue;
+		} else if (!callbacks[i] || !vp_dev->msix_enabled)
 			msix_vec = VIRTIO_MSI_NO_VECTOR;
 		else if (vp_dev->per_vq_vectors)
 			msix_vec = allocated_vectors++;
@@ -609,6 +625,35 @@ static const char *vp_bus_name(struct virtio_device *vdev)
 	return pci_name(vp_dev->pci_dev);
 }
 
+/* Setup the affinity for a virtqueue:
+ * - force the affinity for per vq vector
+ * - OR over all affinities for shared MSI
+ * - ignore the affinity request if we're using INTX
+ */
+static int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
+{
+	struct virtio_device *vdev = vq->vdev;
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+	struct virtio_pci_vq_info *info = vq->priv;
+	struct cpumask *mask;
+	unsigned int irq;
+
+	if (!vq->callback)
+		return -EINVAL;
+
+	if (vp_dev->msix_enabled) {
+		mask = vp_dev->msix_affinity_masks[info->msix_vector];
+		irq = vp_dev->msix_entries[info->msix_vector].vector;
+		if (cpu == -1)
+			irq_set_affinity_hint(irq, NULL);
+		else {
+			cpumask_set_cpu(cpu, mask);
+			irq_set_affinity_hint(irq, mask);
+		}
+	}
+	return 0;
+}
+
 static struct virtio_config_ops virtio_pci_config_ops = {
 	.get = vp_get,
 	.set = vp_set,
@@ -620,6 +665,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
 	.get_features = vp_get_features,
 	.finalize_features = vp_finalize_features,
 	.bus_name = vp_bus_name,
+	.set_vq_affinity = vp_set_vq_affinity,
 };
 
 static void virtio_pci_release_dev(struct device *_d)
@@ -673,8 +719,10 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
 		goto out_enable_device;
 
 	vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
-	if (vp_dev->ioaddr == NULL)
+	if (vp_dev->ioaddr == NULL) {
+		err = -ENOMEM;
 		goto out_req_regions;
+	}
 
 	pci_set_drvdata(pci_dev, vp_dev);
 	pci_set_master(pci_dev);
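The new set_vq_affinity hook wired into virtio_pci_config_ops above lets a multiqueue driver pin each virtqueue's MSI-X vector to a CPU. A sketch of a possible consumer (the function name and the simple round-robin policy are illustrative, not part of this patch; transports without MSI-X simply leave the hook unset):

#include <linux/cpumask.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_spread_vqs(struct virtio_device *vdev,
			       struct virtqueue *vqs[], int nvqs)
{
	int i, cpu = cpumask_first(cpu_online_mask);

	if (!vdev->config->set_vq_affinity)
		return;		/* transport does not support affinity hints */

	for (i = 0; i < nvqs; i++) {
		/* pin queue i's vector to one online CPU, round-robin */
		vdev->config->set_vq_affinity(vqs[i], cpu);
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}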
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 5aa43c3392a2..e639584b2dbd 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -106,6 +106,9 @@ struct vring_virtqueue
 	/* How to notify other side. FIXME: commonalize hcalls! */
 	void (*notify)(struct virtqueue *vq);
 
+	/* Index of the queue */
+	int queue_index;
+
 #ifdef DEBUG
 	/* They're supposed to lock for us. */
 	unsigned int in_use;
@@ -171,6 +174,13 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
 	return head;
 }
 
+int virtqueue_get_queue_index(struct virtqueue *_vq)
+{
+	struct vring_virtqueue *vq = to_vvq(_vq);
+	return vq->queue_index;
+}
+EXPORT_SYMBOL_GPL(virtqueue_get_queue_index);
+
 /**
  * virtqueue_add_buf - expose buffer to other end
  * @vq: the struct virtqueue we're talking about.
@@ -616,7 +626,8 @@ irqreturn_t vring_interrupt(int irq, void *_vq)
 }
 EXPORT_SYMBOL_GPL(vring_interrupt);
 
-struct virtqueue *vring_new_virtqueue(unsigned int num,
+struct virtqueue *vring_new_virtqueue(unsigned int index,
+				      unsigned int num,
 				      unsigned int vring_align,
 				      struct virtio_device *vdev,
 				      bool weak_barriers,
@@ -647,6 +658,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
 	vq->broken = false;
 	vq->last_used_idx = 0;
 	vq->num_added = 0;
+	vq->queue_index = index;
 	list_add_tail(&vq->vq.list, &vdev->vqs);
 #ifdef DEBUG
 	vq->in_use = false;
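With the two additions above, the queue index is handed to vring_new_virtqueue() once and later recovered through virtqueue_get_queue_index(), instead of every transport caching it in its own per-queue info structure. A minimal sketch of a transport notify callback built on that helper (the register offset and the mapped base are hypothetical assumptions, not from this patch):

#include <linux/io.h>
#include <linux/virtio.h>
#include <linux/virtio_ring.h>

#define EXAMPLE_QUEUE_NOTIFY	0x50	/* hypothetical doorbell register offset */

static void __iomem *example_base;	/* assume the transport mapped its registers here */

static void example_notify(struct virtqueue *vq)
{
	/* Tell the device which ring has new buffers, using the shared index. */
	writel(virtqueue_get_queue_index(vq), example_base + EXAMPLE_QUEUE_NOTIFY);
}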
diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c
index 663cad86c633..173494a681e6 100644
--- a/drivers/watchdog/m54xx_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -46,17 +46,17 @@ static void wdt_enable(void)
 	unsigned int gms0;
 
 	/* preserve GPIO usage, if any */
-	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 = __raw_readl(MCF_GPT_GMS0);
 	if (gms0 & MCF_GPT_GMS_TMS_GPIO)
 		gms0 &= (MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_GPIO_MASK |
 							MCF_GPT_GMS_OD);
 	else
 		gms0 = MCF_GPT_GMS_TMS_GPIO | MCF_GPT_GMS_OD;
-	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(gms0, MCF_GPT_GMS0);
 	__raw_writel(MCF_GPT_GCIR_PRE(heartbeat*(MCF_BUSCLK/0xffff)) |
-			MCF_GPT_GCIR_CNT(0xffff), MCF_MBAR + MCF_GPT_GCIR0);
+			MCF_GPT_GCIR_CNT(0xffff), MCF_GPT_GCIR0);
 	gms0 |= MCF_GPT_GMS_OCPW(0xA5) | MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE;
-	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(gms0, MCF_GPT_GMS0);
 }
 
 static void wdt_disable(void)
@@ -64,18 +64,18 @@ static void wdt_disable(void)
 	unsigned int gms0;
 
 	/* disable watchdog */
-	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 = __raw_readl(MCF_GPT_GMS0);
 	gms0 &= ~(MCF_GPT_GMS_WDEN | MCF_GPT_GMS_CE);
-	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(gms0, MCF_GPT_GMS0);
 }
 
 static void wdt_keepalive(void)
 {
 	unsigned int gms0;
 
-	gms0 = __raw_readl(MCF_MBAR + MCF_GPT_GMS0);
+	gms0 = __raw_readl(MCF_GPT_GMS0);
 	gms0 |= MCF_GPT_GMS_OCPW(0xA5);
-	__raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
+	__raw_writel(gms0, MCF_GPT_GMS0);
 }
 
 static int m54xx_wdt_open(struct inode *inode, struct file *file)
@@ -195,8 +195,7 @@ static struct miscdevice m54xx_wdt_miscdev = {
 
 static int __init m54xx_wdt_init(void)
 {
-	if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
-				"Coldfire M54xx Watchdog")) {
+	if (!request_mem_region(MCF_GPT_GCIR0, 4, "Coldfire M54xx Watchdog")) {
 		pr_warn("I/O region busy\n");
 		return -EBUSY;
 	}
@@ -208,7 +207,7 @@ static int __init m54xx_wdt_init(void)
 static void __exit m54xx_wdt_exit(void)
 {
 	misc_deregister(&m54xx_wdt_miscdev);
-	release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
+	release_mem_region(MCF_GPT_GCIR0, 4);
 }
 
 module_init(m54xx_wdt_init);
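The watchdog hunks above follow a ColdFire platform-header cleanup: the MCF_GPT_* register macros now include the peripheral base, so call sites stop adding MCF_MBAR by hand. An illustration of the convention with hypothetical names (the addresses below are made up):

/* All names and addresses here are hypothetical; they only show the convention. */
#define EXAMPLE_MBAR		0x40000000UL	/* peripheral module base */

/* Old style: the macro was a bare offset, every driver added the base itself:
 *	__raw_readl(EXAMPLE_MBAR + EXAMPLE_GPT_GMS0_OFF);
 */
#define EXAMPLE_GPT_GMS0_OFF	0x0800

/* New style, as assumed by the hunks above: the base is folded into the macro:
 *	__raw_readl(EXAMPLE_GPT_GMS0);
 */
#define EXAMPLE_GPT_GMS0	(EXAMPLE_MBAR + EXAMPLE_GPT_GMS0_OFF)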