Diffstat (limited to 'drivers/gpu/drm/amd/display/dmub/src')
 drivers/gpu/drm/amd/display/dmub/src/Makefile        |   2
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c    |  57
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h    |  35
 drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c    |   7
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c      | 182
 drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c | 105
 6 files changed, 329 insertions(+), 59 deletions(-)
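
The changes below split the CW4 mailbox window into an inbox1 and an outbox1 ring, add an outbox0 ring in the REGION5 window for DMCUB trace entries, and introduce a lock-free dmub_srv_stat interface for draining outbox1 notifications. A minimal sketch of how a caller might consume that interface follows; it is illustrative only and uses just the types and functions added by this series (the counting body stands in for a real consumer of notify.aux_reply).

#include "dmub/dmub_srv.h"
#include "dmub/dmub_srv_stat.h"

/* Illustrative sketch only: drain every pending DMUB outbox1 notification. */
static unsigned int drain_dmub_notifications(struct dmub_srv *dmub)
{
	struct dmub_notification notify;
	unsigned int aux_replies = 0;

	do {
		if (dmub_srv_stat_get_notification(dmub, &notify) != DMUB_STATUS_OK)
			break;

		if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
			aux_replies++;	/* a real caller would consume notify.aux_reply here */
	} while (notify.pending_notification);

	return aux_replies;
}
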
diff --git a/drivers/gpu/drm/amd/display/dmub/src/Makefile b/drivers/gpu/drm/amd/display/dmub/src/Makefile
index 945287164cf2..7495c23c73a9 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/Makefile
+++ b/drivers/gpu/drm/amd/display/dmub/src/Makefile
@@ -20,7 +20,7 @@
 # OTHER DEALINGS IN THE SOFTWARE.
 #
 
-DMUB = dmub_srv.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
+DMUB = dmub_srv.o dmub_srv_stat.o dmub_reg.o dmub_dcn20.o dmub_dcn21.o
 DMUB += dmub_dcn30.o dmub_dcn301.o
 DMUB += dmub_dcn302.o
 
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index 8e8e65fa83c0..6934906c665e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -135,6 +135,8 @@ void dmub_dcn20_reset(struct dmub_srv *dmub)
 	REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
 	REG_WRITE(DMCUB_INBOX1_RPTR, 0);
 	REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX1_RPTR, 0);
+	REG_WRITE(DMCUB_OUTBOX1_WPTR, 0);
 	REG_WRITE(DMCUB_SCRATCH0, 0);
 }
 
@@ -248,6 +250,13 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
 		  DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
 		  DMCUB_REGION3_CW5_ENABLE, 1);
 
+	REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
+	REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
+		  DMCUB_REGION5_TOP_ADDRESS,
+		  cw5->region.top - cw5->region.base - 1,
+		  DMCUB_REGION5_ENABLE, 1);
+
 	dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset);
 
 	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
@@ -280,6 +289,54 @@ void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset)
 	REG_WRITE(DMCUB_INBOX1_WPTR, wptr_offset);
 }
 
+void dmub_dcn20_setup_out_mailbox(struct dmub_srv *dmub,
+				  const struct dmub_region *outbox1)
+{
+	/* New firmware can support CW4 for the outbox. */
+	if (dmub_dcn20_use_cached_inbox(dmub))
+		REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, outbox1->base);
+	else
+		REG_WRITE(DMCUB_OUTBOX1_BASE_ADDRESS, 0x80002000);
+
+	REG_WRITE(DMCUB_OUTBOX1_SIZE, outbox1->top - outbox1->base);
+}
+
+uint32_t dmub_dcn20_get_outbox1_wptr(struct dmub_srv *dmub)
+{
+	/**
+	 * outbox1 wptr register is accessed without locks (dal & dc)
+	 * and to be called only by dmub_srv_stat_get_notification()
+	 */
+	return REG_READ(DMCUB_OUTBOX1_WPTR);
+}
+
+void dmub_dcn20_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
+{
+	/**
+	 * outbox1 rptr register is accessed without locks (dal & dc)
+	 * and to be called only by dmub_srv_stat_get_notification()
+	 */
+	REG_WRITE(DMCUB_OUTBOX1_RPTR, rptr_offset);
+}
+
+void dmub_dcn20_setup_outbox0(struct dmub_srv *dmub,
+			      const struct dmub_region *outbox0)
+{
+	REG_WRITE(DMCUB_OUTBOX0_BASE_ADDRESS, outbox0->base);
+
+	REG_WRITE(DMCUB_OUTBOX0_SIZE, outbox0->top - outbox0->base);
+}
+
+uint32_t dmub_dcn20_get_outbox0_wptr(struct dmub_srv *dmub)
+{
+	return REG_READ(DMCUB_OUTBOX0_WPTR);
+}
+
+void dmub_dcn20_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset)
+{
+	REG_WRITE(DMCUB_OUTBOX0_RPTR, rptr_offset);
+}
+
 bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub)
 {
 	uint32_t is_hw_init;
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index a62be9c0652e..de5351cd5abc 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -40,6 +40,14 @@ struct dmub_srv;
 	DMUB_SR(DMCUB_INBOX1_SIZE) \
 	DMUB_SR(DMCUB_INBOX1_RPTR) \
 	DMUB_SR(DMCUB_INBOX1_WPTR) \
+	DMUB_SR(DMCUB_OUTBOX0_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_OUTBOX0_SIZE) \
+	DMUB_SR(DMCUB_OUTBOX0_RPTR) \
+	DMUB_SR(DMCUB_OUTBOX0_WPTR) \
+	DMUB_SR(DMCUB_OUTBOX1_BASE_ADDRESS) \
+	DMUB_SR(DMCUB_OUTBOX1_SIZE) \
+	DMUB_SR(DMCUB_OUTBOX1_RPTR) \
+	DMUB_SR(DMCUB_OUTBOX1_WPTR) \
 	DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \
 	DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \
 	DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \
@@ -75,6 +83,9 @@ struct dmub_srv;
 	DMUB_SR(DMCUB_REGION4_OFFSET) \
 	DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \
 	DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \
+	DMUB_SR(DMCUB_REGION5_OFFSET) \
+	DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
+	DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
 	DMUB_SR(DMCUB_SCRATCH0) \
 	DMUB_SR(DMCUB_SCRATCH1) \
 	DMUB_SR(DMCUB_SCRATCH2) \
@@ -95,7 +106,8 @@ struct dmub_srv;
 	DMUB_SR(CC_DC_PIPE_DIS) \
 	DMUB_SR(MMHUBBUB_SOFT_RESET) \
 	DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
-	DMUB_SR(DCN_VM_FB_OFFSET)
+	DMUB_SR(DCN_VM_FB_OFFSET) \
+	DMUB_SR(DMCUB_INTERRUPT_ACK)
 
 #define DMUB_COMMON_FIELDS() \
 	DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \
@@ -123,10 +135,13 @@ struct dmub_srv;
 	DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \
 	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \
 	DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
+	DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
+	DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
 	DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
 	DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
 	DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
-	DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET)
+	DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \
+	DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_OUTBOX0_READY_INT_ACK)
 
 struct dmub_srv_common_reg_offset {
 #define DMUB_SR(reg) uint32_t reg;
@@ -180,6 +195,20 @@ uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub);
 
 void dmub_dcn20_set_inbox1_wptr(struct dmub_srv *dmub, uint32_t wptr_offset);
 
+void dmub_dcn20_setup_out_mailbox(struct dmub_srv *dmub,
+				  const struct dmub_region *outbox1);
+
+uint32_t dmub_dcn20_get_outbox1_wptr(struct dmub_srv *dmub);
+
+void dmub_dcn20_set_outbox1_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
+
+void dmub_dcn20_setup_outbox0(struct dmub_srv *dmub,
+			      const struct dmub_region *outbox0);
+
+uint32_t dmub_dcn20_get_outbox0_wptr(struct dmub_srv *dmub);
+
+void dmub_dcn20_set_outbox0_rptr(struct dmub_srv *dmub, uint32_t rptr_offset);
+
 bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
 
 bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
@@ -200,4 +229,6 @@ union dmub_fw_boot_status dmub_dcn20_get_fw_boot_status(struct dmub_srv *dmub);
 
 bool dmub_dcn20_use_cached_inbox(struct dmub_srv *dmub);
 
+bool dmub_dcn20_use_cached_trace_buffer(struct dmub_srv *dmub);
+
 #endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
index b4bc0df2f14a..fb11c8d39208 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn30.c
@@ -180,6 +180,13 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
 		  DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top,
 		  DMCUB_REGION3_CW5_ENABLE, 1);
 
+	REG_WRITE(DMCUB_REGION5_OFFSET, offset.u.low_part);
+	REG_WRITE(DMCUB_REGION5_OFFSET_HIGH, offset.u.high_part);
+	REG_SET_2(DMCUB_REGION5_TOP_ADDRESS, 0,
+		  DMCUB_REGION5_TOP_ADDRESS,
+		  cw5->region.top - cw5->region.base - 1,
+		  DMCUB_REGION5_ENABLE, 1);
+
 	offset = cw6->offset;
 
 	REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part);
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 61f64a295f06..1cbb125b4063 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -46,8 +46,8 @@
 /* Context size. */
 #define DMUB_CONTEXT_SIZE (512 * 1024)
 
-/* Mailbox size */
-#define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE)
+/* Mailbox size: ring buffers are required for both inbox and outbox */
+#define DMUB_MAILBOX_SIZE ((2 * DMUB_RB_SIZE))
 
 /* Default state size if meta is absent. */
 #define DMUB_FW_STATE_SIZE (64 * 1024)
@@ -55,6 +55,7 @@
 /* Default tracebuffer size if meta is absent. */
 #define DMUB_TRACE_BUFFER_SIZE (64 * 1024)
 
+
 /* Default scratch mem size. */
 #define DMUB_SCRATCH_MEM_SIZE (256)
 
@@ -69,6 +70,8 @@
 #define DMUB_CW5_BASE (0x65000000)
 #define DMUB_CW6_BASE (0x66000000)
 
+#define DMUB_REGION5_BASE (0xA0000000)
+
 static inline uint32_t dmub_align(uint32_t val, uint32_t factor)
 {
 	return (val + factor - 1) / factor * factor;
@@ -157,6 +160,16 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
 		funcs->enable_dmub_boot_options = dmub_dcn20_enable_dmub_boot_options;
 		funcs->skip_dmub_panel_power_sequence = dmub_dcn20_skip_dmub_panel_power_sequence;
 
+		// Out mailbox register access functions for RN and above
+		funcs->setup_out_mailbox = dmub_dcn20_setup_out_mailbox;
+		funcs->get_outbox1_wptr = dmub_dcn20_get_outbox1_wptr;
+		funcs->set_outbox1_rptr = dmub_dcn20_set_outbox1_rptr;
+
+		// Outbox0 register access functions
+		funcs->setup_outbox0 = dmub_dcn20_setup_outbox0;
+		funcs->get_outbox0_wptr = dmub_dcn20_get_outbox0_wptr;
+		funcs->set_outbox0_rptr = dmub_dcn20_set_outbox0_rptr;
+
 		if (asic == DMUB_ASIC_DCN21) {
 			dmub->regs = &dmub_srv_dcn21_regs;
 
@@ -395,13 +408,19 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
 	struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
 
-	struct dmub_rb_init_params rb_params;
+	struct dmub_rb_init_params rb_params, outbox0_rb_params;
 	struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
-	struct dmub_region inbox1;
+	struct dmub_region inbox1, outbox1, outbox0;
 
 	if (!dmub->sw_init)
 		return DMUB_STATUS_INVALID;
 
+	if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb ||
+	    !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) {
+		ASSERT(0);
+		return DMUB_STATUS_INVALID;
+	}
+
 	dmub->fb_base = params->fb_base;
 	dmub->fb_offset = params->fb_offset;
 	dmub->psp_version = params->psp_version;
@@ -409,72 +428,91 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
 	if (dmub->hw_funcs.reset)
 		dmub->hw_funcs.reset(dmub);
 
-	if (inst_fb && data_fb) {
-		cw0.offset.quad_part = inst_fb->gpu_addr;
-		cw0.region.base = DMUB_CW0_BASE;
-		cw0.region.top = cw0.region.base + inst_fb->size - 1;
-
-		cw1.offset.quad_part = stack_fb->gpu_addr;
-		cw1.region.base = DMUB_CW1_BASE;
-		cw1.region.top = cw1.region.base + stack_fb->size - 1;
-
-		if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
-			/**
-			 * Read back all the instruction memory so we don't hang the
-			 * DMCUB when backdoor loading if the write from x86 hasn't been
-			 * flushed yet. This only occurs in backdoor loading.
-			 */
-			dmub_flush_buffer_mem(inst_fb);
-			dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
-		}
+	cw0.offset.quad_part = inst_fb->gpu_addr;
+	cw0.region.base = DMUB_CW0_BASE;
+	cw0.region.top = cw0.region.base + inst_fb->size - 1;
+
+	cw1.offset.quad_part = stack_fb->gpu_addr;
+	cw1.region.base = DMUB_CW1_BASE;
+	cw1.region.top = cw1.region.base + stack_fb->size - 1;
+	if (params->load_inst_const && dmub->hw_funcs.backdoor_load) {
+		/**
+		 * Read back all the instruction memory so we don't hang the
+		 * DMCUB when backdoor loading if the write from x86 hasn't been
+		 * flushed yet. This only occurs in backdoor loading.
+		 */
+		dmub_flush_buffer_mem(inst_fb);
+		dmub->hw_funcs.backdoor_load(dmub, &cw0, &cw1);
 	}
 
-	if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
-	    fw_state_fb && scratch_mem_fb) {
-		cw2.offset.quad_part = data_fb->gpu_addr;
-		cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
-		cw2.region.top = cw2.region.base + data_fb->size;
+	cw2.offset.quad_part = data_fb->gpu_addr;
+	cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
+	cw2.region.top = cw2.region.base + data_fb->size;
+
+	cw3.offset.quad_part = bios_fb->gpu_addr;
+	cw3.region.base = DMUB_CW3_BASE;
+	cw3.region.top = cw3.region.base + bios_fb->size;
+
+	cw4.offset.quad_part = mail_fb->gpu_addr;
+	cw4.region.base = DMUB_CW4_BASE;
+	cw4.region.top = cw4.region.base + mail_fb->size;
 
-		cw3.offset.quad_part = bios_fb->gpu_addr;
-		cw3.region.base = DMUB_CW3_BASE;
-		cw3.region.top = cw3.region.base + bios_fb->size;
+	/**
+	 * Doubled the mailbox region to accommodate inbox and outbox.
+	 * Note: the total mailbox size is currently 16KB. It is split
+	 * equally into 8KB between inbox and outbox. If this config is
+	 * changed, then uncached base address configuration of outbox1
+	 * has to be updated in funcs->setup_out_mailbox.
+	 */
+	inbox1.base = cw4.region.base;
+	inbox1.top = cw4.region.base + DMUB_RB_SIZE;
+	outbox1.base = inbox1.top;
+	outbox1.top = cw4.region.top;
 
-		cw4.offset.quad_part = mail_fb->gpu_addr;
-		cw4.region.base = DMUB_CW4_BASE;
-		cw4.region.top = cw4.region.base + mail_fb->size;
+	cw5.offset.quad_part = tracebuff_fb->gpu_addr;
+	cw5.region.base = DMUB_CW5_BASE;
+	cw5.region.top = cw5.region.base + tracebuff_fb->size;
 
-		inbox1.base = cw4.region.base;
-		inbox1.top = cw4.region.top;
+	outbox0.base = DMUB_REGION5_BASE + TRACE_BUFFER_ENTRY_OFFSET;
+	outbox0.top = outbox0.base + tracebuff_fb->size - TRACE_BUFFER_ENTRY_OFFSET;
 
-		cw5.offset.quad_part = tracebuff_fb->gpu_addr;
-		cw5.region.base = DMUB_CW5_BASE;
-		cw5.region.top = cw5.region.base + tracebuff_fb->size;
+	cw6.offset.quad_part = fw_state_fb->gpu_addr;
+	cw6.region.base = DMUB_CW6_BASE;
+	cw6.region.top = cw6.region.base + fw_state_fb->size;
 
-		cw6.offset.quad_part = fw_state_fb->gpu_addr;
-		cw6.region.base = DMUB_CW6_BASE;
-		cw6.region.top = cw6.region.base + fw_state_fb->size;
+	dmub->fw_state = fw_state_fb->cpu_addr;
 
-		dmub->fw_state = fw_state_fb->cpu_addr;
+	dmub->scratch_mem_fb = *scratch_mem_fb;
 
-		dmub->scratch_mem_fb = *scratch_mem_fb;
+	if (dmub->hw_funcs.setup_windows)
+		dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
 
-		if (dmub->hw_funcs.setup_windows)
-			dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
-						     &cw5, &cw6);
+	if (dmub->hw_funcs.setup_outbox0)
+		dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
 
-		if (dmub->hw_funcs.setup_mailbox)
-			dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
-	}
+	if (dmub->hw_funcs.setup_mailbox)
+		dmub->hw_funcs.setup_mailbox(dmub, &inbox1);
+	if (dmub->hw_funcs.setup_out_mailbox)
+		dmub->hw_funcs.setup_out_mailbox(dmub, &outbox1);
 
-	if (mail_fb) {
-		dmub_memset(&rb_params, 0, sizeof(rb_params));
-		rb_params.ctx = dmub;
-		rb_params.base_address = mail_fb->cpu_addr;
-		rb_params.capacity = DMUB_RB_SIZE;
+	dmub_memset(&rb_params, 0, sizeof(rb_params));
+	rb_params.ctx = dmub;
+	rb_params.base_address = mail_fb->cpu_addr;
+	rb_params.capacity = DMUB_RB_SIZE;
+	dmub_rb_init(&dmub->inbox1_rb, &rb_params);
 
-		dmub_rb_init(&dmub->inbox1_rb, &rb_params);
-	}
+	// Initialize outbox1 ring buffer
+	rb_params.ctx = dmub;
+	rb_params.base_address = (void *) ((uint8_t *) (mail_fb->cpu_addr) + DMUB_RB_SIZE);
+	rb_params.capacity = DMUB_RB_SIZE;
+	dmub_rb_init(&dmub->outbox1_rb, &rb_params);
+
+	dmub_memset(&outbox0_rb_params, 0, sizeof(outbox0_rb_params));
+	outbox0_rb_params.ctx = dmub;
+	outbox0_rb_params.base_address = (void *)((uintptr_t)(tracebuff_fb->cpu_addr) + TRACE_BUFFER_ENTRY_OFFSET);
+	outbox0_rb_params.capacity = tracebuff_fb->size - dmub_align(TRACE_BUFFER_ENTRY_OFFSET, 64);
+	dmub_rb_init(&dmub->outbox0_rb, &outbox0_rb_params);
 
 	if (dmub->hw_funcs.reset_release)
 		dmub->hw_funcs.reset_release(dmub);
@@ -609,6 +647,8 @@ dmub_srv_send_gpint_command(struct dmub_srv *dmub,
 	dmub->hw_funcs.set_gpint(dmub, reg);
 
 	for (i = 0; i < timeout_us; ++i) {
+		udelay(1);
+
 		if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
 			return DMUB_STATUS_OK;
 	}
@@ -674,3 +714,33 @@ enum dmub_status dmub_srv_cmd_with_reply_data(struct dmub_srv *dmub,
 
 	return status;
 }
+
+static inline bool dmub_rb_out_trace_buffer_front(struct dmub_rb *rb,
+						  void *entry)
+{
+	const uint64_t *src = (const uint64_t *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+	uint64_t *dst = (uint64_t *)entry;
+	uint8_t i;
+	uint8_t loop_count;
+
+	if (rb->rptr == rb->wrpt)
+		return false;
+
+	loop_count = sizeof(struct dmcub_trace_buf_entry) / sizeof(uint64_t);
+	// Copy the entry out of the ring buffer
+	for (i = 0; i < loop_count; i++)
+		*dst++ = *src++;
+
+	rb->rptr += sizeof(struct dmcub_trace_buf_entry);
+
+	rb->rptr %= rb->capacity;
+
+	return true;
+}
+
+bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entry *entry)
+{
+	dmub->outbox0_rb.wrpt = dmub->hw_funcs.get_outbox0_wptr(dmub);
+
+	return dmub_rb_out_trace_buffer_front(&dmub->outbox0_rb, (void *)entry);
+}
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
new file mode 100644
index 000000000000..e6f3bfab33d3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dmub/dmub_srv_stat.h"
+#include "dmub/inc/dmub_cmd.h"
+
+/**
+ * DOC: DMUB_SRV STAT Interface
+ *
+ * These interfaces are called without acquiring DAL and DC locks.
+ * Hence, there are limitations on what these interfaces can access. Only
+ * variables exclusively defined for these interfaces can be modified.
+ */
+
+/**
+ *****************************************************************************
+ *  Function: dmub_srv_stat_get_notification
+ *
+ *  @brief
+ *	Retrieves a dmub outbox notification, sets up the dmub notification
+ *	structure with the message information, and sets a pending bit if the
+ *	queue has more notifications.
+ *
+ *  @param [in] dmub: dmub srv structure
+ *  @param [out] notify: dmub notification structure to be filled up
+ *
+ *  @return
+ *	dmub_status
+ *****************************************************************************
+ */
+enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
+						struct dmub_notification *notify)
+{
+	/**
+	 * This function is called without dal and dc locks, so
+	 * we shall not modify any dmub variables, only dmub->outbox1_rb
+	 * is exempted as it is exclusively accessed by this function
+	 */
+	union dmub_rb_out_cmd cmd = {0};
+
+	if (!dmub->hw_init) {
+		notify->type = DMUB_NOTIFICATION_NO_DATA;
+		notify->pending_notification = false;
+		return DMUB_STATUS_INVALID;
+	}
+
+	/* Get write pointer which is updated by dmub */
+	dmub->outbox1_rb.wrpt = dmub->hw_funcs.get_outbox1_wptr(dmub);
+
+	if (!dmub_rb_out_front(&dmub->outbox1_rb, &cmd)) {
+		notify->type = DMUB_NOTIFICATION_NO_DATA;
+		notify->pending_notification = false;
+		return DMUB_STATUS_OK;
+	}
+
+	switch (cmd.cmd_common.header.type) {
+	case DMUB_OUT_CMD__DP_AUX_REPLY:
+		notify->type = DMUB_NOTIFICATION_AUX_REPLY;
+		notify->link_index = cmd.dp_aux_reply.control.instance;
+		notify->result = cmd.dp_aux_reply.control.result;
+		dmub_memcpy((void *)&notify->aux_reply,
+			(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
+		break;
+	default:
+		notify->type = DMUB_NOTIFICATION_NO_DATA;
+		break;
+	}
+
+	/* Pop outbox1 ringbuffer and update read pointer */
+	dmub_rb_pop_front(&dmub->outbox1_rb);
+	dmub->hw_funcs.set_outbox1_rptr(dmub, dmub->outbox1_rb.rptr);
+
+	/**
+	 * Notify dc whether dmub has a pending outbox message,
+	 * this is to avoid one more call to dmub_srv_stat_get_notification
+	 */
+	if (dmub_rb_empty(&dmub->outbox1_rb))
+		notify->pending_notification = false;
+	else
+		notify->pending_notification = true;
+
+	return DMUB_STATUS_OK;
+}
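
The outbox0 path added above is meant to be drained entry by entry through dmub_srv_get_outbox0_msg(), which returns false once the read pointer catches up with the cached write pointer. A minimal sketch, assuming struct dmcub_trace_buf_entry comes from the firmware command headers and that the caller forwards each entry to whatever tracing backend it uses:

#include "dmub/dmub_srv.h"

/* Illustrative sketch only: pop DMCUB trace entries from the outbox0 ring. */
static void drain_dmub_trace_entries(struct dmub_srv *dmub)
{
	struct dmcub_trace_buf_entry entry;

	/* Returns false once the ring is empty (rptr == wrpt). */
	while (dmub_srv_get_outbox0_msg(dmub, &entry)) {
		/* Forward the entry to a tracing backend of the caller's choice. */
	}
}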