Diffstat (limited to 'drivers/gpu')
-rw-r--r--drivers/gpu/drm/Kconfig20
-rw-r--r--drivers/gpu/drm/Makefile18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c61
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c152
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c82
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c521
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c299
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c1482
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h294
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c941
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c127
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c270
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c109
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c205
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c652
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c78
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c95
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c238
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dma.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h108
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vega10_ih.c80
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c15
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_events.c18
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c16
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.h4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1820
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h18
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c53
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c194
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c22
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c126
-rw-r--r--drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c220
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c164
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_debug.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c196
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c121
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h134
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_ddc_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_helper.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c146
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c78
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c119
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c65
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c54
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c213
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_pp_smu.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_services_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/clock_source.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_status.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h30
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/reg_helper.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c3
-rw-r--r--drivers/gpu/drm/amd/display/include/fixed31_32.h2
-rw-r--r--drivers/gpu/drm/amd/display/include/signal_types.h5
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c174
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c15
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h98
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h16
-rw-r--r--drivers/gpu/drm/amd/include/linux/chash.h366
-rw-r--r--drivers/gpu/drm/amd/lib/Kconfig28
-rw-r--r--drivers/gpu/drm/amd/lib/Makefile32
-rw-r--r--drivers/gpu/drm/amd/lib/chash.c638
-rw-r--r--drivers/gpu/drm/amd/powerplay/Makefile2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/amdgpu_smu.c1253
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/Makefile3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c127
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c32
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c66
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h31
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c39
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c119
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c12
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c53
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h770
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu10.h14
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h89
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h128
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h147
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/smu_v11_0.c1977
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.c2413
-rw-r--r--drivers/gpu/drm/amd/powerplay/vega20_ppt.h129
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_product.h12
-rw-r--r--drivers/gpu/drm/arm/display/include/malidp_utils.h31
-rw-r--r--drivers/gpu/drm/arm/display/komeda/Makefile8
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_component.c685
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c431
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h50
-rw-r--r--drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h530
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_crtc.c407
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c118
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.h95
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_drv.c9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h9
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c77
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.h26
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c113
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h129
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c610
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_plane.c139
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c220
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c48
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h6
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c249
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h31
-rw-r--r--drivers/gpu/drm/arm/malidp_mw.c10
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c271
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h20
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c6
-rw-r--r--drivers/gpu/drm/aspeed/Kconfig14
-rw-r--r--drivers/gpu/drm/aspeed/Makefile3
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx.h104
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c241
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c269
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_out.c42
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h4
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c7
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c10
-rw-r--r--drivers/gpu/drm/bochs/bochs.h9
-rw-r--r--drivers/gpu/drm/bochs/bochs_kms.c194
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c10
-rw-r--r--drivers/gpu/drm/bridge/dumb-vga-dac.c6
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c4
-rw-r--r--drivers/gpu/drm/bridge/ti-tfp410.c140
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig2
-rw-r--r--drivers/gpu/drm/cirrus/Makefile3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus.c657
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c161
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c315
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c328
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c621
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c10
-rw-r--r--drivers/gpu/drm/drm_atomic.c45
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c19
-rw-r--r--drivers/gpu/drm/drm_atomic_state_helper.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c71
-rw-r--r--drivers/gpu/drm/drm_auth.c21
-rw-r--r--drivers/gpu/drm/drm_bufs.c8
-rw-r--r--drivers/gpu/drm/drm_client.c11
-rw-r--r--drivers/gpu/drm/drm_connector.c97
-rw-r--r--drivers/gpu/drm/drm_crtc.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h1
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_drv.c223
-rw-r--r--drivers/gpu/drm/drm_dsc.c269
-rw-r--r--drivers/gpu/drm/drm_edid.c105
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c302
-rw-r--r--drivers/gpu/drm/drm_file.c26
-rw-r--r--drivers/gpu/drm/drm_format_helper.c324
-rw-r--r--drivers/gpu/drm/drm_fourcc.c27
-rw-r--r--drivers/gpu/drm/drm_gem.c320
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c8
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c625
-rw-r--r--drivers/gpu/drm/drm_internal.h10
-rw-r--r--drivers/gpu/drm/drm_ioc32.c13
-rw-r--r--drivers/gpu/drm/drm_ioctl.c86
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c2
-rw-r--r--drivers/gpu/drm/drm_lease.c13
-rw-r--r--drivers/gpu/drm/drm_legacy.h87
-rw-r--r--drivers/gpu/drm/drm_legacy_misc.c82
-rw-r--r--drivers/gpu/drm/drm_lock.c19
-rw-r--r--drivers/gpu/drm/drm_memory.c26
-rw-r--r--drivers/gpu/drm/drm_mode_config.c5
-rw-r--r--drivers/gpu/drm/drm_mode_object.c5
-rw-r--r--drivers/gpu/drm/drm_modes.c12
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c13
-rw-r--r--drivers/gpu/drm/drm_plane.c8
-rw-r--r--drivers/gpu/drm/drm_prime.c1
-rw-r--r--drivers/gpu/drm/drm_print.c28
-rw-r--r--drivers/gpu/drm/drm_syncobj.c449
-rw-r--r--drivers/gpu/drm/drm_vm.c6
-rw-r--r--drivers/gpu/drm/drm_writeback.c73
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c7
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c22
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c26
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c30
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c97
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c48
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c72
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c71
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c29
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c49
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c75
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c43
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c2
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c7
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h2
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c7
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c12
-rw-r--r--drivers/gpu/drm/i915/.gitignore1
-rw-r--r--drivers/gpu/drm/i915/Makefile8
-rw-r--r--drivers/gpu/drm/i915/Makefile.header-test47
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c74
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c28
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c7
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h17
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c189
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c247
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h34
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c39
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c6
-rw-r--r--drivers/gpu/drm/i915/i915_active.c23
-rw-r--r--drivers/gpu/drm/i915/i915_active.h16
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c173
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c622
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h408
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c780
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c1097
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h260
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context_types.h175
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c156
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c141
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h26
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.c42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c6
-rw-r--r--drivers/gpu/drm/i915/i915_globals.c125
-rw-r--r--drivers/gpu/drm/i915/i915_globals.h35
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c183
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h51
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c665
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c262
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c114
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c67
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h42
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h2
-rw-r--r--drivers/gpu/drm/i915/i915_query.c39
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h571
-rw-r--r--drivers/gpu/drm/i915/i915_request.c498
-rw-r--r--drivers/gpu/drm/i915/i915_request.h87
-rw-r--r--drivers/gpu/drm/i915/i915_reset.c621
-rw-r--r--drivers/gpu/drm/i915/i915_reset.h16
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c112
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.h95
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler_types.h72
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c4
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c43
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h16
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.c301
-rw-r--r--drivers/gpu/drm/i915/i915_timeline.h89
-rw-r--r--drivers/gpu/drm/i915/i915_timeline_types.h70
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h106
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.c61
-rw-r--r--drivers/gpu/drm/i915/i915_user_extensions.h20
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h31
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c11
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c51
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h3
-rw-r--r--drivers/gpu/drm/i915/icl_dsi.c51
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c6
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c59
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.h40
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c95
-rw-r--r--drivers/gpu/drm/i915/intel_audio.h24
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c133
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c14
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c382
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.h46
-rw-r--r--drivers/gpu/drm/i915/intel_color.c1131
-rw-r--r--drivers/gpu/drm/i915/intel_color.h17
-rw-r--r--drivers/gpu/drm/i915/intel_combo_phy.c3
-rw-r--r--drivers/gpu/drm/i915/intel_connector.c19
-rw-r--r--drivers/gpu/drm/i915/intel_connector.h35
-rw-r--r--drivers/gpu/drm/i915/intel_context.c269
-rw-r--r--drivers/gpu/drm/i915/intel_context.h87
-rw-r--r--drivers/gpu/drm/i915/intel_context_types.h74
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c13
-rw-r--r--drivers/gpu/drm/i915/intel_crt.h21
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c5
-rw-r--r--drivers/gpu/drm/i915/intel_csr.h17
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c321
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.h53
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c136
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h46
-rw-r--r--drivers/gpu/drm/i915/intel_display.c804
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c583
-rw-r--r--drivers/gpu/drm/i915/intel_dp.h122
-rw-r--r--drivers/gpu/drm/i915/intel_dp_link_training.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c154
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c770
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h5
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h666
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c24
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c10
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.h13
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c491
-rw-r--r--drivers/gpu/drm/i915/intel_engine_types.h546
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c6
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.h42
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c243
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.h53
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h10
-rw-r--r--drivers/gpu/drm/i915/intel_gpu_commands.h9
-rw-r--r--drivers/gpu/drm/i915/intel_guc.c45
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ads.c3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.c99
-rw-r--r--drivers/gpu/drm/i915/intel_guc_ct.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/intel_guc_log.c5
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.c133
-rw-r--r--drivers/gpu/drm/i915/intel_guc_submission.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c26
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.c1179
-rw-r--r--drivers/gpu/drm/i915/intel_hdcp.h33
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c800
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.h51
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_huc_fw.c27
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c904
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c19
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.h38
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c101
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.h22
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c14
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c3
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c6
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c150
-rw-r--r--drivers/gpu/drm/i915/intel_panel.h65
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c232
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.h35
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c555
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h71
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c318
-rw-r--r--drivers/gpu/drm/i915/intel_psr.h40
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c435
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h650
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c99
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c169
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c12
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c260
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.h55
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c5
-rw-r--r--drivers/gpu/drm/i915/intel_tv.h13
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c25
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c996
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h286
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h3
-rw-r--r--drivers/gpu/drm/i915/intel_vdsc.c133
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.c187
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds.h19
-rw-r--r--drivers/gpu/drm/i915/intel_workarounds_types.h27
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_pages.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c457
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c21
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_request.c37
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_timeline.c120
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c16
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_flush_test.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c9
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c301
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_lrc.c446
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c166
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_workarounds.c423
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c145
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h7
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_timeline.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.c10
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_uncore.h2
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi.c84
-rw-r--r--drivers/gpu/drm/i915/vlv_dsi_pll.c4
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/lima/Kconfig13
-rw-r--r--drivers/gpu/drm/lima/Makefile21
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.c47
-rw-r--r--drivers/gpu/drm/lima/lima_bcast.h14
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.c98
-rw-r--r--drivers/gpu/drm/lima/lima_ctx.h30
-rw-r--r--drivers/gpu/drm/lima/lima_device.c385
-rw-r--r--drivers/gpu/drm/lima/lima_device.h131
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.c58
-rw-r--r--drivers/gpu/drm/lima/lima_dlbu.h18
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c376
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h45
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c349
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h25
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.c47
-rw-r--r--drivers/gpu/drm/lima/lima_gem_prime.h13
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c283
-rw-r--r--drivers/gpu/drm/lima/lima_gp.h16
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.c80
-rw-r--r--drivers/gpu/drm/lima/lima_l2_cache.h14
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c142
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h16
-rw-r--r--drivers/gpu/drm/lima/lima_object.c122
-rw-r--r--drivers/gpu/drm/lima/lima_object.h36
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.c60
-rw-r--r--drivers/gpu/drm/lima/lima_pmu.h12
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c427
-rw-r--r--drivers/gpu/drm/lima/lima_pp.h19
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h298
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c362
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h102
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c282
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h62
-rw-r--r--drivers/gpu/drm/meson/Makefile2
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.c73
-rw-r--r--drivers/gpu/drm/meson/meson_canvas.h51
-rw-r--r--drivers/gpu/drm/meson/meson_crtc.c353
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c83
-rw-r--r--drivers/gpu/drm/meson/meson_drv.h5
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c163
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.h32
-rw-r--r--drivers/gpu/drm/meson/meson_overlay.c18
-rw-r--r--drivers/gpu/drm/meson/meson_plane.c21
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h247
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c123
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c11
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c25
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c85
-rw-r--r--drivers/gpu/drm/meson/meson_vpp.c51
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c8
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig5
-rw-r--r--drivers/gpu/drm/msm/Makefile9
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c109
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c216
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.h9
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c63
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c69
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c119
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h15
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c5
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c177
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c4
-rw-r--r--drivers/gpu/drm/msm/msm_debugfs.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c15
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h8
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c69
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h8
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c52
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c13
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c41
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kbuild8
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig17
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c330
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c45
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c39
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c170
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c40
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c221
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c140
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c41
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c61
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c55
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c58
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c144
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c24
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c64
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c110
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c60
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c54
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c18
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h76
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c36
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c68
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c229
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c181
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c13
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c236
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c211
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig31
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-arm-versatile.c6
-rw-r--r--drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c272
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c6
-rw-r--r--drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c1
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c20
-rw-r--r--drivers/gpu/drm/panel/panel-raydium-rm68200.c3
-rw-r--r--drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c387
-rw-r--r--drivers/gpu/drm/panel/panel-ronbo-rb070d30.c258
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d16d0.c3
-rw-r--r--drivers/gpu/drm/panel/panel-seiko-43wvf1g.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c84
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-tpg110.c12
-rw-r--r--drivers/gpu/drm/panfrost/Kconfig14
-rw-r--r--drivers/gpu/drm/panfrost/Makefile12
-rw-r--r--drivers/gpu/drm/panfrost/TODO27
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.c219
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_devfreq.h14
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c253
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h125
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c474
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_features.h309
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c95
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h29
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c367
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.h19
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_issues.h176
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c564
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h51
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c386
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.h17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_regs.h298
-rw-r--r--drivers/gpu/drm/pl111/pl111_display.c2
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c8
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h3
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c17
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig4
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile3
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c64
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c54
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c37
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c122
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.h17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.c243
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_writeback.h39
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c19
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig8
-rw-r--r--drivers/gpu/drm/rockchip/Makefile1
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.c876
-rw-r--r--drivers/gpu/drm/rockchip/rk3066_hdmi.h229
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c6
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c11
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c20
-rw-r--r--drivers/gpu/drm/selftests/test-drm_mm.c12
-rw-r--r--drivers/gpu/drm/stm/Kconfig2
-rw-r--r--drivers/gpu/drm/stm/drv.c35
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c28
-rw-r--r--drivers/gpu/drm/stm/ltdc.c24
-rw-r--r--drivers/gpu/drm/stm/ltdc.h3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c63
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi.h1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c40
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c29
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c74
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c179
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c49
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c54
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.h11
-rw-r--r--drivers/gpu/drm/tegra/fb.c4
-rw-r--r--drivers/gpu/drm/tegra/gem.c4
-rw-r--r--drivers/gpu/drm/tegra/sor.c21
-rw-r--r--drivers/gpu/drm/tinydrm/core/Makefile2
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-core.c183
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c160
-rw-r--r--drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c24
-rw-r--r--drivers/gpu/drm/tinydrm/hx8357d.c59
-rw-r--r--drivers/gpu/drm/tinydrm/ili9225.c87
-rw-r--r--drivers/gpu/drm/tinydrm/ili9341.c59
-rw-r--r--drivers/gpu/drm/tinydrm/mi0283qt.c67
-rw-r--r--drivers/gpu/drm/tinydrm/mipi-dbi.c185
-rw-r--r--drivers/gpu/drm/tinydrm/repaper.c147
-rw-r--r--drivers/gpu/drm/tinydrm/st7586.c148
-rw-r--r--drivers/gpu/drm/tinydrm/st7735r.c59
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_execbuf_util.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c10
-rw-r--r--drivers/gpu/drm/tve200/tve200_display.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c57
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h9
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c20
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/udl/udl_main.c35
-rw-r--r--drivers/gpu/drm/v3d/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c314
-rw-r--r--drivers/gpu/drm/v3d/v3d_debugfs.c8
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c65
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h37
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c110
-rw-r--r--drivers/gpu/drm/v3d/v3d_irq.c67
-rw-r--r--drivers/gpu/drm/v3d/v3d_mmu.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_regs.h2
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c25
-rw-r--r--drivers/gpu/drm/vboxvideo/Kconfig16
-rw-r--r--drivers/gpu/drm/vboxvideo/Makefile6
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_base.c207
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h32
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_channels.h34
-rw-r--r--drivers/gpu/drm/vboxvideo/hgsmi_defs.h73
-rw-r--r--drivers/gpu/drm/vboxvideo/modesetting.c123
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c258
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h271
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_fb.c149
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_hgsmi.c95
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_irq.c183
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_main.c361
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c939
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_prime.c56
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c388
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h442
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_guest.h61
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h54
-rw-r--r--drivers/gpu/drm/vboxvideo/vbva_base.c214
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c69
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c105
-rw-r--r--drivers/gpu/drm/vc4/vc4_debugfs.c90
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c39
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c42
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h77
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c175
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c162
-rw-r--r--drivers/gpu/drm/vc4/vc4_hvs.c180
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c9
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c123
-rw-r--r--drivers/gpu/drm/vc4/vc4_perfmon.c18
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c59
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h51
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c49
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c240
-rw-r--r--drivers/gpu/drm/vc4/vc4_vec.c83
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h45
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c35
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c107
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c74
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c25
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c102
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c36
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.c98
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_binding.h2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c24
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c59
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c1505
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c27
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c44
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.c45
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_so.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c47
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c80
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c11
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_validation.h7
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front.c1
899 files changed, 64805 insertions(+), 23290 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index bd943a71756c..2267e84d5cb4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -173,6 +173,12 @@ config DRM_KMS_CMA_HELPER
help
Choose this if you need the KMS CMA helper functions
+config DRM_GEM_SHMEM_HELPER
+ bool
+ depends on DRM
+ help
+ Choose this if you need the GEM shmem helper functions
+
config DRM_VM
bool
depends on DRM && MMU
@@ -225,8 +231,6 @@ config DRM_AMDGPU
source "drivers/gpu/drm/amd/amdgpu/Kconfig"
-source "drivers/gpu/drm/amd/lib/Kconfig"
-
source "drivers/gpu/drm/nouveau/Kconfig"
source "drivers/gpu/drm/i915/Kconfig"
@@ -251,6 +255,9 @@ config DRM_VKMS
If M is selected the module will be called vkms.
+config DRM_ATI_PCIGART
+ bool
+
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/rockchip/Kconfig"
@@ -329,12 +336,21 @@ source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
+source "drivers/gpu/drm/vboxvideo/Kconfig"
+
+source "drivers/gpu/drm/lima/Kconfig"
+
+source "drivers/gpu/drm/panfrost/Kconfig"
+
+source "drivers/gpu/drm/aspeed/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
bool "Enable legacy drivers (DANGEROUS)"
depends on DRM && MMU
select DRM_VM
+ select DRM_ATI_PCIGART if PCI
help
Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous
APIs to user-space, which can be used to circumvent access
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 1ac55c65eac0..72f5036d9bfa 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,11 +3,9 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
- drm_context.o drm_dma.o \
+drm-y := drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_drv.o \
- drm_scatter.o drm_pci.o \
+ drm_memory.o drm_drv.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_encoder_slave.o \
@@ -21,11 +19,13 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_atomic_uapi.o
+drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_DRM_VM) += drm_vm.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
-drm-$(CONFIG_PCI) += ati_pcigart.o
+drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
+drm-$(CONFIG_DRM_ATI_PCIGART) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
@@ -37,7 +37,8 @@ drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
- drm_atomic_state_helper.o drm_damage_helper.o
+ drm_atomic_state_helper.o drm_damage_helper.o \
+ drm_format_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
@@ -56,7 +57,6 @@ obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_SCHED) += scheduler/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
-obj-y += amd/lib/
obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
@@ -109,3 +109,7 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
+obj-$(CONFIG_DRM_LIMA) += lima/
+obj-$(CONFIG_DRM_PANFROST) += panfrost/
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 466da5954a68..fdd0ca4b0f0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -23,7 +23,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-FULL_AMD_PATH=$(src)/..
+FULL_AMD_PATH=$(srctree)/$(src)/..
DISPLAY_FOLDER_NAME=display
FULL_AMD_DISPLAY_PATH = $(FULL_AMD_PATH)/$(DISPLAY_FOLDER_NAME)
@@ -53,7 +53,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
- amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o
+ amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+ amdgpu_vm_sdma.o
# add asic specific block
amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 8d0d7f3dd5fb..14398f55f602 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -83,6 +83,7 @@
#include "amdgpu_gem.h"
#include "amdgpu_doorbell.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_smu.h"
#define MAX_GPU_INSTANCE 16
@@ -156,6 +157,8 @@ extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
+extern int amdgpu_ras_enable;
+extern uint amdgpu_ras_mask;
#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
@@ -433,6 +436,12 @@ struct amdgpu_cs_chunk {
void *kdata;
};
+struct amdgpu_cs_post_dep {
+ struct drm_syncobj *syncobj;
+ struct dma_fence_chain *chain;
+ u64 point;
+};
+
struct amdgpu_cs_parser {
struct amdgpu_device *adev;
struct drm_file *filp;
@@ -462,8 +471,8 @@ struct amdgpu_cs_parser {
/* user fence */
struct amdgpu_bo_list_entry uf_entry;
- unsigned num_post_dep_syncobjs;
- struct drm_syncobj **post_dep_syncobjs;
+ unsigned num_post_deps;
+ struct amdgpu_cs_post_dep *post_deps;
};
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
@@ -702,7 +711,6 @@ enum amd_hw_ip_block_type {
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
- uint32_t pp_feature;
};
#define AMDGPU_RESET_MAGIC_NUM 64
@@ -825,6 +833,7 @@ struct amdgpu_device {
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
struct amdgpu_irq_src crtc_irq;
+ struct amdgpu_irq_src vupdate_irq;
struct amdgpu_irq_src pageflip_irq;
struct amdgpu_irq_src hpd_irq;
@@ -842,6 +851,9 @@ struct amdgpu_device {
struct amd_powerplay powerplay;
bool pp_force_state_enabled;
+ /* smu */
+ struct smu_context smu;
+
/* dpm */
struct amdgpu_pm pm;
u32 cg_flags;
@@ -922,6 +934,8 @@ struct amdgpu_device {
int asic_reset_res;
struct work_struct xgmi_reset_work;
+
+ bool in_baco_reset;
};
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index fe1d7368c1e6..aeead072fa79 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -335,6 +335,43 @@ void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
amdgpu_bo_unref(&(bo));
}
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ return adev->gfx.pfp_fw_version;
+
+ case KGD_ENGINE_ME:
+ return adev->gfx.me_fw_version;
+
+ case KGD_ENGINE_CE:
+ return adev->gfx.ce_fw_version;
+
+ case KGD_ENGINE_MEC1:
+ return adev->gfx.mec_fw_version;
+
+ case KGD_ENGINE_MEC2:
+ return adev->gfx.mec2_fw_version;
+
+ case KGD_ENGINE_RLC:
+ return adev->gfx.rlc_fw_version;
+
+ case KGD_ENGINE_SDMA1:
+ return adev->sdma.instance[0].fw_version;
+
+ case KGD_ENGINE_SDMA2:
+ return adev->sdma.instance[1].fw_version;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info)
{
@@ -640,4 +677,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}
+
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 0b31a1859023..4e37fa7e85b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -81,6 +81,18 @@ struct amdgpu_kfd_dev {
uint64_t vram_used;
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA1,
+ KGD_ENGINE_SDMA2,
+ KGD_ENGINE_MAX
+};
+
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
@@ -142,6 +154,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
+uint32_t amdgpu_amdkfd_get_fw_version(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
struct kfd_local_mem_info *mem_info);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct kgd_dev *kgd);
@@ -230,5 +244,6 @@ int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence);
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
#endif /* AMDGPU_AMDKFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ff7fac7df34b..fa09e11a600c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -22,14 +22,12 @@
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
-#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
@@ -139,7 +137,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -191,7 +188,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -792,63 +788,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 56ea929f524b..fec3a6aa1de6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -23,12 +23,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
@@ -95,7 +93,6 @@ static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
@@ -148,7 +145,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -751,63 +747,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
unlock_srbm(kgd);
}
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)
- adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)
- adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 5c51d4910650..ef3d93b995b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -25,12 +25,10 @@
#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
-#include <linux/firmware.h>
#include <linux/mmu_context.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
-#include "amdgpu_ucode.h"
#include "soc15_hw_ip.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
@@ -111,7 +109,6 @@ static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
uint8_t vmid);
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base);
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
@@ -158,7 +155,6 @@ static const struct kfd2kgd_calls kfd2kgd = {
get_atc_vmid_pasid_mapping_pasid,
.get_atc_vmid_pasid_mapping_valid =
get_atc_vmid_pasid_mapping_valid,
- .get_fw_version = get_fw_version,
.set_scratch_backing_va = set_scratch_backing_va,
.get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
@@ -874,56 +870,6 @@ static void set_scratch_backing_va(struct kgd_dev *kgd,
*/
}
-/* FIXME: Does this need to be ASIC-specific code? */
-static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
- const union amdgpu_firmware_header *hdr;
-
- switch (type) {
- case KGD_ENGINE_PFP:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.pfp_fw->data;
- break;
-
- case KGD_ENGINE_ME:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.me_fw->data;
- break;
-
- case KGD_ENGINE_CE:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.ce_fw->data;
- break;
-
- case KGD_ENGINE_MEC1:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec_fw->data;
- break;
-
- case KGD_ENGINE_MEC2:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.mec2_fw->data;
- break;
-
- case KGD_ENGINE_RLC:
- hdr = (const union amdgpu_firmware_header *)adev->gfx.rlc_fw->data;
- break;
-
- case KGD_ENGINE_SDMA1:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[0].fw->data;
- break;
-
- case KGD_ENGINE_SDMA2:
- hdr = (const union amdgpu_firmware_header *)adev->sdma.instance[1].fw->data;
- break;
-
- default:
- return 0;
- }
-
- if (hdr == NULL)
- return 0;
-
- /* Only 12 bit in use*/
- return hdr->common.ucode_version;
-}
-
static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint64_t page_table_base)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 1921dec3df7a..a6e5184d436c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -410,15 +410,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem,
if (p_bo_va_entry)
*p_bo_va_entry = bo_va_entry;
- /* Allocate new page tables if needed and validate
- * them.
- */
- ret = amdgpu_vm_alloc_pts(adev, vm, va, amdgpu_bo_size(bo));
- if (ret) {
- pr_err("Failed to allocate pts, err=%d\n", ret);
- goto err_alloc_pts;
- }
-
+ /* Allocate and validate page tables if needed */
ret = vm_validate_pt_pd_bos(vm);
if (ret) {
pr_err("validate_pt_pd_bos() failed\n");
@@ -741,13 +733,7 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
struct amdgpu_sync *sync)
{
int ret;
- struct amdgpu_vm *vm;
- struct amdgpu_bo_va *bo_va;
- struct amdgpu_bo *bo;
-
- bo_va = entry->bo_va;
- vm = bo_va->base.vm;
- bo = bo_va->base.bo;
+ struct amdgpu_bo_va *bo_va = entry->bo_va;
/* Update the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -906,7 +892,8 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
pr_err("validate_pt_pd_bos() failed\n");
goto validate_pd_fail;
}
- amdgpu_bo_sync_wait(vm->root.base.bo, AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_bo_sync_wait(vm->root.base.bo,
+ AMDGPU_FENCE_OWNER_KFD, false);
if (ret)
goto wait_pd_fail;
amdgpu_bo_fence(vm->root.base.bo,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index b61e1dc61b4c..f96d75c6e099 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -28,8 +28,6 @@
#include "atom.h"
#include "atombios.h"
-#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
-
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
@@ -238,10 +236,71 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
return 0;
}
+/*
+ * Return true if the VBIOS enabled ECC by default and the umc info table is
+ * available, or false if ECC is not enabled or the umc info table is missing.
+ */
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union umc_info *umc_info;
+ u8 frev, crev;
+ bool ecc_default_enabled = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ umc_info);
+
+ if (amdgpu_atom_parse_data_header(mode_info->atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support umc_info 3.1+ */
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ umc_info = (union umc_info *)
+ (mode_info->atom_context->bios + data_offset);
+ ecc_default_enabled =
+ (le32_to_cpu(umc_info->v31.umc_config) &
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE) ? true : false;
+ }
+ }
+
+ return ecc_default_enabled;
+}
+
union firmware_info {
struct atom_firmware_info_v3_1 v31;
};
+/*
+ * Return true if the VBIOS supports SRAM ECC, false otherwise.
+ */
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ int index;
+ u16 data_offset, size;
+ union firmware_info *firmware_info;
+ u8 frev, crev;
+ bool sram_ecc_supported = false;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context,
+ index, &size, &frev, &crev, &data_offset)) {
+ /* support firmware_info 3.1+ */
+ if ((frev == 3 && crev >= 1) || (frev > 3)) {
+ firmware_info = (union firmware_info *)
+ (mode_info->atom_context->bios + data_offset);
+ sram_ecc_supported =
+ (le32_to_cpu(firmware_info->v31.firmware_capability) &
+ ATOM_FIRMWARE_CAP_SRAM_ECC) ? true : false;
+ }
+ }
+
+ return sram_ecc_supported;
+}
+
union smu_info {
struct atom_smu_info_v3_1 v31;
};
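
For illustration only, a sketch of how an init path could consume the two new queries; the wrapper name and message are hypothetical, only the two amdgpu_atomfirmware_*_ecc_supported() helpers come from this change:

static void example_report_ecc_caps(struct amdgpu_device *adev)
{
	bool vram_ecc = amdgpu_atomfirmware_mem_ecc_supported(adev);
	bool sram_ecc = amdgpu_atomfirmware_sram_ecc_supported(adev);

	dev_info(adev->dev, "ECC: VRAM ECC %s by VBIOS, SRAM ECC %s\n",
		 vram_ecc ? "enabled" : "not enabled",
		 sram_ecc ? "supported" : "not supported");
}
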
@@ -346,11 +405,11 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 4:
- adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
- adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
- adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
- adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
- adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
+ adev->gfx.config.max_shader_engines = gfx_info->v24.max_shader_engines;
+ adev->gfx.config.max_cu_per_sh = gfx_info->v24.max_cu_per_sh;
+ adev->gfx.config.max_sh_per_se = gfx_info->v24.max_sh_per_se;
+ adev->gfx.config.max_backends_per_se = gfx_info->v24.max_backends_per_se;
+ adev->gfx.config.max_texture_channel_caches = gfx_info->v24.max_texture_channel_caches;
adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
index 20f158fd3b76..5ec6f92f353c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
@@ -24,6 +24,8 @@
#ifndef __AMDGPU_ATOMFIRMWARE_H__
#define __AMDGPU_ATOMFIRMWARE_H__
+#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))
+
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev);
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev);
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev);
@@ -31,5 +33,7 @@ int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev);
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
+bool amdgpu_atomfirmware_sram_ecc_supported(struct amdgpu_device *adev);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 52a5e4fdc95b..2f6239b6be6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -215,6 +215,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
break;
default:
@@ -804,9 +806,11 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
- for (i = 0; i < parser->num_post_dep_syncobjs; i++)
- drm_syncobj_put(parser->post_dep_syncobjs[i]);
- kfree(parser->post_dep_syncobjs);
+ for (i = 0; i < parser->num_post_deps; i++) {
+ drm_syncobj_put(parser->post_deps[i].syncobj);
+ kfree(parser->post_deps[i].chain);
+ }
+ kfree(parser->post_deps);
dma_fence_put(parser->fence);
@@ -1117,13 +1121,18 @@ static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
}
static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
- uint32_t handle)
+ uint32_t handle, u64 point,
+ u64 flags)
{
- int r;
struct dma_fence *fence;
- r = drm_syncobj_find_fence(p->filp, handle, 0, 0, &fence);
- if (r)
+ int r;
+
+ r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
+ if (r) {
+ DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
+ handle, point, r);
return r;
+ }
r = amdgpu_sync_fence(p->adev, &p->job->sync, fence, true);
dma_fence_put(fence);
@@ -1134,46 +1143,118 @@ static int amdgpu_syncobj_lookup_and_add_to_sync(struct amdgpu_cs_parser *p,
static int amdgpu_cs_process_syncobj_in_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i, r;
- struct drm_amdgpu_cs_chunk_sem *deps;
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
+ for (i = 0; i < num_deps; ++i) {
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle,
+ 0, 0);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+static int amdgpu_cs_process_syncobj_timeline_in_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i, r;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
for (i = 0; i < num_deps; ++i) {
- r = amdgpu_syncobj_lookup_and_add_to_sync(p, deps[i].handle);
+ r = amdgpu_syncobj_lookup_and_add_to_sync(p,
+ syncobj_deps[i].handle,
+ syncobj_deps[i].point,
+ syncobj_deps[i].flags);
if (r)
return r;
}
+
return 0;
}
static int amdgpu_cs_process_syncobj_out_dep(struct amdgpu_cs_parser *p,
struct amdgpu_cs_chunk *chunk)
{
+ struct drm_amdgpu_cs_chunk_sem *deps;
unsigned num_deps;
int i;
- struct drm_amdgpu_cs_chunk_sem *deps;
+
deps = (struct drm_amdgpu_cs_chunk_sem *)chunk->kdata;
num_deps = chunk->length_dw * 4 /
sizeof(struct drm_amdgpu_cs_chunk_sem);
- p->post_dep_syncobjs = kmalloc_array(num_deps,
- sizeof(struct drm_syncobj *),
- GFP_KERNEL);
- p->num_post_dep_syncobjs = 0;
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
- if (!p->post_dep_syncobjs)
+ if (!p->post_deps)
return -ENOMEM;
+
for (i = 0; i < num_deps; ++i) {
- p->post_dep_syncobjs[i] = drm_syncobj_find(p->filp, deps[i].handle);
- if (!p->post_dep_syncobjs[i])
+ p->post_deps[i].syncobj =
+ drm_syncobj_find(p->filp, deps[i].handle);
+ if (!p->post_deps[i].syncobj)
return -EINVAL;
- p->num_post_dep_syncobjs++;
+ p->post_deps[i].chain = NULL;
+ p->post_deps[i].point = 0;
+ p->num_post_deps++;
}
+
+ return 0;
+}
+
+
+static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p,
+ struct amdgpu_cs_chunk *chunk)
+{
+ struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps;
+ unsigned num_deps;
+ int i;
+
+ syncobj_deps = (struct drm_amdgpu_cs_chunk_syncobj *)chunk->kdata;
+ num_deps = chunk->length_dw * 4 /
+ sizeof(struct drm_amdgpu_cs_chunk_syncobj);
+
+ p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
+ GFP_KERNEL);
+ p->num_post_deps = 0;
+
+ if (!p->post_deps)
+ return -ENOMEM;
+
+ for (i = 0; i < num_deps; ++i) {
+ struct amdgpu_cs_post_dep *dep = &p->post_deps[i];
+
+ dep->chain = NULL;
+ if (syncobj_deps[i].point) {
+ dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
+ if (!dep->chain)
+ return -ENOMEM;
+ }
+
+ dep->syncobj = drm_syncobj_find(p->filp,
+ syncobj_deps[i].handle);
+ if (!dep->syncobj) {
+ kfree(dep->chain);
+ return -EINVAL;
+ }
+ dep->point = syncobj_deps[i].point;
+ p->num_post_deps++;
+ }
+
return 0;
}
@@ -1187,19 +1268,33 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
chunk = &p->chunks[i];
- if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES ||
- chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
+ switch (chunk->chunk_id) {
+ case AMDGPU_CHUNK_ID_DEPENDENCIES:
+ case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
r = amdgpu_cs_process_fence_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_IN) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
r = amdgpu_cs_process_syncobj_in_dep(p, chunk);
if (r)
return r;
- } else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SYNCOBJ_OUT) {
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
r = amdgpu_cs_process_syncobj_out_dep(p, chunk);
if (r)
return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
+ r = amdgpu_cs_process_syncobj_timeline_in_dep(p, chunk);
+ if (r)
+ return r;
+ break;
+ case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
+ r = amdgpu_cs_process_syncobj_timeline_out_dep(p, chunk);
+ if (r)
+ return r;
+ break;
}
}
@@ -1210,8 +1305,17 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
int i;
- for (i = 0; i < p->num_post_dep_syncobjs; ++i)
- drm_syncobj_replace_fence(p->post_dep_syncobjs[i], p->fence);
+ for (i = 0; i < p->num_post_deps; ++i) {
+ if (p->post_deps[i].chain && p->post_deps[i].point) {
+ drm_syncobj_add_point(p->post_deps[i].syncobj,
+ p->post_deps[i].chain,
+ p->fence, p->post_deps[i].point);
+ p->post_deps[i].chain = NULL;
+ } else {
+ drm_syncobj_replace_fence(p->post_deps[i].syncobj,
+ p->fence);
+ }
+ }
}
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
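
For illustration only, what one entry of an AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL chunk might look like from userspace; the field names mirror the drm_amdgpu_cs_chunk_syncobj accesses above, while the helper, the handle source and the point value are hypothetical:

#include <stdint.h>
#include <drm/amdgpu_drm.h>

/* One timeline signal entry: syncobj "handle" advances to "point" once
 * the command submission's fence completes.
 */
static struct drm_amdgpu_cs_chunk_syncobj example_signal_entry(uint32_t handle)
{
	struct drm_amdgpu_cs_chunk_syncobj entry = {
		.handle = handle,	/* e.g. from drmSyncobjCreate() */
		.flags = 0,
		.point = 42,		/* timeline point to attach the fence to */
	};

	return entry;
}
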
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
index 7e22be7ca68a..54dd02a898b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
@@ -92,15 +92,6 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return -ENOMEM;
}
- r = amdgpu_vm_alloc_pts(adev, (*bo_va)->base.vm, csa_addr,
- size);
- if (r) {
- DRM_ERROR("failed to allocate pts for static CSA, err=%d\n", r);
- amdgpu_vm_bo_rmv(adev, *bo_va);
- ttm_eu_backoff_reservation(&ticket, &list);
- return r;
- }
-
r = amdgpu_vm_bo_map(adev, *bo_va, csa_addr, 0, size,
AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE |
AMDGPU_PTE_EXECUTABLE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 7b526593eb77..a28a3d722ba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -26,6 +26,7 @@
#include <drm/drm_auth.h>
#include "amdgpu.h"
#include "amdgpu_sched.h"
+#include "amdgpu_ras.h"
#define to_amdgpu_ctx_entity(e) \
container_of((e), struct amdgpu_ctx_entity, entity)
@@ -344,6 +345,7 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
{
struct amdgpu_ctx *ctx;
struct amdgpu_ctx_mgr *mgr;
+ uint32_t ras_counter;
if (!fpriv)
return -EINVAL;
@@ -368,6 +370,21 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
if (atomic_read(&ctx->guilty))
out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ /* query ue count */
+ ras_counter = amdgpu_ras_query_error_count(adev, false);
+ /* the ras counter is monotonically increasing */
+ if (ras_counter != ctx->ras_counter_ue) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+ ctx->ras_counter_ue = ras_counter;
+ }
+
+ /* query ce count */
+ ras_counter = amdgpu_ras_query_error_count(adev, true);
+ if (ras_counter != ctx->ras_counter_ce) {
+ out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+ ctx->ras_counter_ce = ras_counter;
+ }
+
mutex_unlock(&mgr->lock);
return 0;
}
@@ -541,32 +558,26 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
idr_init(&mgr->ctx_handles);
}
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout)
{
unsigned num_entities = amdgput_ctx_total_num_entities();
struct amdgpu_ctx *ctx;
struct idr *idp;
uint32_t id, i;
- long max_wait = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
idp = &mgr->ctx_handles;
mutex_lock(&mgr->lock);
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev) {
- mutex_unlock(&mgr->lock);
- return;
- }
-
for (i = 0; i < num_entities; i++) {
struct drm_sched_entity *entity;
entity = &ctx->entities[0][i].entity;
- max_wait = drm_sched_entity_flush(entity, max_wait);
+ timeout = drm_sched_entity_flush(entity, timeout);
}
}
mutex_unlock(&mgr->lock);
+ return timeout;
}
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
@@ -579,10 +590,6 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id) {
-
- if (!ctx->adev)
- return;
-
if (kref_read(&ctx->refcount) != 1) {
DRM_ERROR("ctx %p is still alive\n", ctx);
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index b3b012c0a7da..5f1b54c9bcdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -49,6 +49,8 @@ struct amdgpu_ctx {
enum drm_sched_priority override_priority;
struct mutex lock;
atomic_t guilty;
+ uint32_t ras_counter_ce;
+ uint32_t ras_counter_ue;
};
struct amdgpu_ctx_mgr {
@@ -82,7 +84,7 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx,
void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
-void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
+long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 4ae3ff9a1d4c..8930d66f2204 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -568,10 +568,9 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
idx = *pos >> 2;
valuesize = sizeof(values);
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
- r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
- else
- return -EINVAL;
+ r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
+ if (r)
+ return r;
if (size > valuesize)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 79fb302fb954..cc8ad3831982 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -60,6 +60,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_ras.h"
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@ -1506,7 +1507,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
return -EAGAIN;
}
- adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+ adev->pm.pp_feature = amdgpu_pp_feature_mask;
+ if (amdgpu_sriov_vf(adev))
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
for (i = 0; i < adev->num_ip_blocks; i++) {
if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@ -1638,6 +1641,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
int i, r;
+ r = amdgpu_ras_init(adev);
+ if (r)
+ return r;
+
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.valid)
continue;
@@ -1681,6 +1688,13 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
}
+ r = amdgpu_ib_pool_init(adev);
+ if (r) {
+ dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+ amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+ goto init_failed;
+ }
+
r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
if (r)
goto init_failed;
@@ -1869,6 +1883,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
int i, r;
+ amdgpu_ras_pre_fini(adev);
+
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
@@ -1917,6 +1933,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_free_static_csa(&adev->virt.csa_obj);
amdgpu_device_wb_fini(adev);
amdgpu_device_vram_scratch_fini(adev);
+ amdgpu_ib_pool_fini(adev);
}
r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
@@ -1937,6 +1954,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = false;
}
+ amdgpu_ras_fini(adev);
+
if (amdgpu_sriov_vf(adev))
if (amdgpu_virt_release_full_gpu(adev, false))
DRM_ERROR("failed to release exclusive mode on fini\n");
@@ -1999,6 +2018,10 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
r = amdgpu_device_enable_mgpu_fan_boost();
if (r)
DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+
+ /* set to low pstate by default */
+ amdgpu_xgmi_set_pstate(adev, 0);
+
}
static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -2369,7 +2392,7 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
adev->asic_reset_res = amdgpu_asic_reset(adev);
if (adev->asic_reset_res)
- DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
adev->asic_reset_res, adev->ddev->unique);
}
@@ -2448,6 +2471,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->virt.vf_errors.lock);
hash_init(adev->mn_hash);
mutex_init(&adev->lock_reset);
+ mutex_init(&adev->virt.dpm_mutex);
amdgpu_device_check_arguments(adev);
@@ -2642,13 +2666,6 @@ fence_driver_init:
/* Get a log2 for easy divisions. */
adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
- r = amdgpu_ib_pool_init(adev);
- if (r) {
- dev_err(adev->dev, "IB initialization failed (%d).\n", r);
- amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
- goto failed;
- }
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -2694,6 +2711,9 @@ fence_driver_init:
goto failed;
}
+ /* must succeed. */
+ amdgpu_ras_post_init(adev);
+
return 0;
failed:
@@ -2726,7 +2746,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev->ddev);
}
- amdgpu_ib_pool_fini(adev);
amdgpu_fence_driver_fini(adev);
amdgpu_pm_sysfs_fini(adev);
amdgpu_fbdev_fini(adev);
@@ -3225,6 +3244,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
if (r)
return r;
+ amdgpu_amdkfd_pre_reset(adev);
+
/* Resume IP prior to SMC */
r = amdgpu_device_ip_reinit_early_sriov(adev);
if (r)
@@ -3244,6 +3265,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
+ amdgpu_amdkfd_post_reset(adev);
error:
amdgpu_virt_init_data_exchange(adev);
@@ -3376,7 +3398,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
r = amdgpu_asic_reset(tmp_adev);
if (r) {
- DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+ DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
r, tmp_adev->ddev->unique);
break;
}
@@ -3393,6 +3415,11 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
break;
}
}
+
+ list_for_each_entry(tmp_adev, device_list_handle,
+ gmc.xgmi.head) {
+ amdgpu_ras_reserve_bad_pages(tmp_adev);
+ }
}
}
@@ -3411,7 +3438,7 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (vram_lost) {
- DRM_ERROR("VRAM is lost!\n");
+ DRM_INFO("VRAM is lost due to GPU reset!\n");
atomic_inc(&tmp_adev->vram_lost_counter);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index 344967df3137..523b8ab6b04e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -904,3 +904,19 @@ amdgpu_get_vce_clock_state(void *handle, u32 idx)
return NULL;
}
+
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_sclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
+}
+
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+{
+ if (is_support_sw_smu(adev))
+ return smu_get_mclk(&adev->smu, low);
+ else
+ return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index e871e022c129..dca35407879d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -260,9 +260,6 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_enable_bapm(adev, e) \
((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-#define amdgpu_dpm_read_sensor(adev, idx, value, size) \
- ((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
-
#define amdgpu_dpm_set_fan_control_mode(adev, m) \
((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
@@ -281,18 +278,18 @@ enum amdgpu_pcie_gen {
#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-#define amdgpu_dpm_get_sclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_mclk(adev, l) \
- ((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
-
#define amdgpu_dpm_force_performance_level(adev, l) \
((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
#define amdgpu_dpm_get_current_power_state(adev) \
((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
+#define amdgpu_smu_get_current_power_state(adev) \
+ ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu)))
+
+#define amdgpu_smu_set_power_state(adev) \
+ ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu)))
+
#define amdgpu_dpm_get_pp_num_states(adev, data) \
((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
@@ -448,6 +445,9 @@ struct amdgpu_pm {
uint32_t smu_prv_buffer_size;
struct amdgpu_bo *smu_prv_buffer;
bool ac_power;
+ /* powerplay feature */
+ uint32_t pp_feature;
+
};
#define R600_SSTU_DFLT 0
@@ -486,6 +486,8 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
@@ -504,4 +506,8 @@ enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx);
+extern int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low);
+
+extern int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 7419ea8a388b..1e2cc9d68a05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -74,9 +74,11 @@
* - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
* - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
* - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
+ * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
+ * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 30
+#define KMS_DRIVER_MINOR 32
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -117,8 +119,8 @@ uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;
-/* OverDrive(bit 14),gfxoff(bit 15),stutter mode(bit 17) disabled by default*/
-uint amdgpu_pp_feature_mask = 0xfffd3fff;
+/* OverDrive (bit 14) disabled by default */
+uint amdgpu_pp_feature_mask = 0xffffbfff;
int amdgpu_ngg = 0;
int amdgpu_prim_buf_per_se = 0;
int amdgpu_pos_buf_per_se = 0;
@@ -136,6 +138,8 @@ uint amdgpu_dc_feature_mask = 0;
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
+int amdgpu_ras_enable = -1;
+uint amdgpu_ras_mask = 0xffffffff;
/**
* DOC: vramlimit (int)
@@ -495,6 +499,21 @@ MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
/**
+ * DOC: ras_enable (int)
+ * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
+ */
+MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
+module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
+
+/**
+ * DOC: ras_mask (uint)
+ * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
+ * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+ */
+MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
+module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
+
+/**
* DOC: si_support (int)
* Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
* set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
@@ -974,6 +993,7 @@ amdgpu_pci_remove(struct pci_dev *pdev)
DRM_ERROR("Device removal is currently not supported outside of fbcon\n");
drm_dev_unplug(dev);
+ drm_dev_put(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -1158,13 +1178,14 @@ static int amdgpu_flush(struct file *f, fl_owner_t id)
{
struct drm_file *file_priv = f->private_data;
struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
+ long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
- amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr);
+ timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
+ timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
- return 0;
+ return timeout >= 0 ? 0 : timeout;
}
-
static const struct file_operations amdgpu_driver_kms_fops = {
.owner = THIS_MODULE,
.open = drm_open,
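
For illustration only, the two new parameters can be set at module load time or on the kernel command line; the values below are just an example and assume RAS-capable hardware:

	modprobe amdgpu ras_enable=1 ras_mask=0xffffffff
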
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 5cbde74b97dd..e47609218839 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -49,12 +49,11 @@
static int
amdgpufb_open(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
- int ret = pm_runtime_get_sync(adev->ddev->dev);
+ struct drm_fb_helper *fb_helper = info->par;
+ int ret = pm_runtime_get_sync(fb_helper->dev->dev);
if (ret < 0 && ret != -EACCES) {
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return ret;
}
return 0;
@@ -63,11 +62,10 @@ amdgpufb_open(struct fb_info *info, int user)
static int
amdgpufb_release(struct fb_info *info, int user)
{
- struct amdgpu_fbdev *rfbdev = info->par;
- struct amdgpu_device *adev = rfbdev->adev;
+ struct drm_fb_helper *fb_helper = info->par;
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ pm_runtime_mark_last_busy(fb_helper->dev->dev);
+ pm_runtime_put_autosuspend(fb_helper->dev->dev);
return 0;
}
@@ -233,9 +231,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
- info->skip_vt_switch = true;
-
ret = amdgpu_display_framebuffer_init(adev->ddev, &rfbdev->rfb,
&mode_cmd, gobj);
if (ret) {
@@ -248,10 +243,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
/* setup helper */
rfbdev->helper.fb = fb;
- strcpy(info->fix.id, "amdgpudrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &amdgpufb_ops;
tmp = amdgpu_bo_gpu_offset(abo) - adev->gmc.vram_start;
@@ -260,7 +251,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
info->screen_base = amdgpu_bo_kptr(abo);
info->screen_size = amdgpu_bo_size(abo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = adev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ee47c11e92ce..4dee2326b29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
- struct dma_fence *old, **ptr;
+ struct dma_fence __rcu **ptr;
uint32_t seq;
+ int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
@@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
seq, flags | AMDGPU_FENCE_FLAG_INT);
ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
+ if (unlikely(rcu_dereference_protected(*ptr, 1))) {
+ struct dma_fence *old;
+
+ rcu_read_lock();
+ old = dma_fence_get_rcu_safe(ptr);
+ rcu_read_unlock();
+
+ if (old) {
+ r = dma_fence_wait(old, false);
+ dma_fence_put(old);
+ if (r)
+ return r;
+ }
+ }
+
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
- old = rcu_dereference_protected(*ptr, 1);
- if (old && !dma_fence_is_signaled(old)) {
- DRM_INFO("rcu slot is busy\n");
- dma_fence_wait(old, false);
- }
-
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
*f = &fence->base;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d21dd2f369da..d4fcf5475464 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -31,6 +31,7 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_display.h"
+#include "amdgpu_xgmi.h"
void amdgpu_gem_object_free(struct drm_gem_object *gobj)
{
@@ -627,11 +628,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
switch (args->operation) {
case AMDGPU_VA_OP_MAP:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -647,11 +643,6 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
args->map_size);
break;
case AMDGPU_VA_OP_REPLACE:
- r = amdgpu_vm_alloc_pts(adev, bo_va->base.vm, args->va_address,
- args->map_size);
- if (r)
- goto error_backoff;
-
va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags);
r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address,
args->offset_in_bo, args->map_size,
@@ -678,6 +669,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_op *args = data;
struct drm_gem_object *gobj;
+ struct amdgpu_vm_bo_base *base;
struct amdgpu_bo *robj;
int r;
@@ -716,6 +708,15 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
amdgpu_bo_unreserve(robj);
break;
}
+ for (base = robj->vm_bo; base; base = base->next)
+ if (amdgpu_xgmi_same_hive(amdgpu_ttm_adev(robj->tbo.bdev),
+ amdgpu_ttm_adev(base->vm->root.base.bo->tbo.bdev))) {
+ r = -EINVAL;
+ amdgpu_bo_unreserve(robj);
+ goto out;
+ }
+
+
robj->preferred_domains = args->value & (AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
@@ -745,17 +746,25 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
struct amdgpu_device *adev = dev->dev_private;
struct drm_gem_object *gobj;
uint32_t handle;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
u32 domain;
int r;
+ /*
+ * The buffer returned from this function should be cleared, but
+ * it can only be done if the ring is enabled or we'll fail to
+ * create the buffer.
+ */
+ if (adev->mman.buffer_funcs_enabled)
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
args->pitch = amdgpu_align_pitch(adev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_pin_domain(adev,
amdgpu_display_supported_domains(adev));
- r = amdgpu_gem_object_create(adev, args->size, 0, domain,
- AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
+ r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);
if (r)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 97a60da62004..997932ebbb83 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -390,7 +390,7 @@ void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev)
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
- if (!(adev->powerplay.pp_feature & PP_GFXOFF_MASK))
+ if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;
if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->set_powergating_by_smu)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index f790e15bcd08..09fc53af3d35 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -258,6 +258,9 @@ struct amdgpu_gfx {
/* pipe reservation */
struct mutex pipe_reserve_mutex;
DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
+
+ /*ras */
+ struct ras_common_if *ras_if;
};
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index d73367cab4f3..250d9212cc38 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -80,6 +80,33 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
}
/**
+ * amdgpu_gmc_set_pte_pde - update the page tables using CPU
+ *
+ * @adev: amdgpu_device pointer
+ * @cpu_pt_addr: cpu address of the page table
+ * @gpu_page_idx: entry in the page table to update
+ * @addr: dst addr to write into pte/pde
+ * @flags: access flags
+ *
+ * Update the page tables using CPU.
+ */
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags)
+{
+ void __iomem *ptr = (void *)cpu_pt_addr;
+ uint64_t value;
+
+ /*
+ * The following is for PTE only. GART does not have PDEs.
+ */
+ value = addr & 0x0000FFFFFFFFF000ULL;
+ value |= flags;
+ writeq(value, ptr + (gpu_page_idx * 8));
+ return 0;
+}
+
+/**
* amdgpu_gmc_agp_addr - return the address in the AGP address space
*
* @tbo: TTM BO which needs the address, must be in GTT domain
@@ -213,3 +240,58 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc)
dev_info(adev->dev, "AGP: %lluM 0x%016llX - 0x%016llX\n",
mc->agp_size >> 20, mc->agp_start, mc->agp_end);
}
+
+/**
+ * amdgpu_gmc_filter_faults - filter VM faults
+ *
+ * @adev: amdgpu device structure
+ * @addr: address of the VM fault
+ * @pasid: PASID of the process causing the fault
+ * @timestamp: timestamp of the fault
+ *
+ * Returns:
+ * True if the fault was filtered and should not be processed further.
+ * False if the fault is a new one and needs to be handled.
+ */
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp)
+{
+ struct amdgpu_gmc *gmc = &adev->gmc;
+
+ uint64_t stamp, key = addr << 4 | pasid;
+ struct amdgpu_gmc_fault *fault;
+ uint32_t hash;
+
+ /* If we don't have space left in the ring buffer return immediately */
+ stamp = max(timestamp, AMDGPU_GMC_FAULT_TIMEOUT + 1) -
+ AMDGPU_GMC_FAULT_TIMEOUT;
+ if (gmc->fault_ring[gmc->last_fault].timestamp >= stamp)
+ return true;
+
+ /* Try to find the fault in the hash */
+ hash = hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
+ fault = &gmc->fault_ring[gmc->fault_hash[hash].idx];
+ while (fault->timestamp >= stamp) {
+ uint64_t tmp;
+
+ if (fault->key == key)
+ return true;
+
+ tmp = fault->timestamp;
+ fault = &gmc->fault_ring[fault->next];
+
+ /* Check if the entry was reused */
+ if (fault->timestamp >= tmp)
+ break;
+ }
+
+ /* Add the fault to the ring */
+ fault = &gmc->fault_ring[gmc->last_fault];
+ fault->key = key;
+ fault->timestamp = timestamp;
+
+ /* And update the hash */
+ fault->next = gmc->fault_hash[hash].idx;
+ gmc->fault_hash[hash].idx = gmc->last_fault++;
+ return false;
+}
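
For illustration only, the key/bucket step used by amdgpu_gmc_filter_faults() above, pulled out into a hypothetical helper; it simply mirrors the computation in the function:

#include <linux/hash.h>

/* Fold the faulting address and PASID into one 64-bit key, then hash it
 * into the AMDGPU_GMC_FAULT_HASH_SIZE-entry fault_hash table.
 */
static uint32_t example_fault_bucket(uint64_t addr, uint16_t pasid)
{
	uint64_t key = addr << 4 | pasid;

	return hash_64(key, AMDGPU_GMC_FAULT_HASH_ORDER);
}
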
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 81e6070d255b..071145ac67b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -43,9 +43,35 @@
*/
#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+/*
+ * Ring size as power of two for the log of recent faults.
+ */
+#define AMDGPU_GMC_FAULT_RING_ORDER 8
+#define AMDGPU_GMC_FAULT_RING_SIZE (1 << AMDGPU_GMC_FAULT_RING_ORDER)
+
+/*
+ * Hash size as power of two for the log of recent faults
+ */
+#define AMDGPU_GMC_FAULT_HASH_ORDER 8
+#define AMDGPU_GMC_FAULT_HASH_SIZE (1 << AMDGPU_GMC_FAULT_HASH_ORDER)
+
+/*
+ * Number of IH timestamp ticks until a fault is considered handled
+ */
+#define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL
+
struct firmware;
/*
+ * GMC page fault information
+ */
+struct amdgpu_gmc_fault {
+ uint64_t timestamp;
+ uint64_t next:AMDGPU_GMC_FAULT_RING_ORDER;
+ uint64_t key:52;
+};
+
+/*
* VMHUB structures, functions & helpers
*/
struct amdgpu_vmhub {
@@ -71,12 +97,6 @@ struct amdgpu_gmc_funcs {
/* Change the VMID -> PASID mapping */
void (*emit_pasid_mapping)(struct amdgpu_ring *ring, unsigned vmid,
unsigned pasid);
- /* write pte/pde updates using the cpu */
- int (*set_pte_pde)(struct amdgpu_device *adev,
- void *cpu_pt_addr, /* cpu addr of page table */
- uint32_t gpu_page_idx, /* pte/pde to update */
- uint64_t addr, /* addr to write into pte/pde */
- uint64_t flags); /* access flags */
/* enable/disable PRT support */
void (*set_prt)(struct amdgpu_device *adev, bool enable);
/* set pte flags based per asic */
@@ -147,15 +167,22 @@ struct amdgpu_gmc {
struct kfd_vm_fault_info *vm_fault_info;
atomic_t vm_fault_info_updated;
+ struct amdgpu_gmc_fault fault_ring[AMDGPU_GMC_FAULT_RING_SIZE];
+ struct {
+ uint64_t idx:AMDGPU_GMC_FAULT_RING_ORDER;
+ } fault_hash[AMDGPU_GMC_FAULT_HASH_SIZE];
+ uint64_t last_fault:AMDGPU_GMC_FAULT_RING_ORDER;
+
const struct amdgpu_gmc_funcs *gmc_funcs;
struct amdgpu_xgmi xgmi;
+ struct amdgpu_irq_src ecc_irq;
+ struct ras_common_if *ras_if;
};
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid, type) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (type))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
-#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags))
@@ -189,6 +216,9 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
+int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
+ uint32_t gpu_page_idx, uint64_t addr,
+ uint64_t flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
@@ -197,5 +227,7 @@ void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc);
+bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr,
+ uint16_t pasid, uint64_t timestamp);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index da7b1b92d9cf..62591d081856 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -37,6 +37,47 @@ struct amdgpu_gtt_node {
};
/**
+ * DOC: mem_info_gtt_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current total size
+ * of the GTT.
+ * The file mem_info_gtt_total is used for this and returns the total size of
+ * the GTT block, in bytes.
+ */
+static ssize_t amdgpu_mem_info_gtt_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ (adev->mman.bdev.man[TTM_PL_TT].size) * PAGE_SIZE);
+}
+
+/**
+ * DOC: mem_info_gtt_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the current amount of
+ * used GTT.
+ * The file mem_info_gtt_used is used for this and returns the currently used
+ * size of the GTT block, in bytes.
+ */
+static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_gtt_mgr_usage(&adev->mman.bdev.man[TTM_PL_TT]));
+}
+
+static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
+ amdgpu_mem_info_gtt_total_show, NULL);
+static DEVICE_ATTR(mem_info_gtt_used, S_IRUGO,
+ amdgpu_mem_info_gtt_used_show, NULL);
+
+/**
* amdgpu_gtt_mgr_init - init GTT manager and DRM MM
*
* @man: TTM memory type manager
@@ -50,6 +91,7 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr;
uint64_t start, size;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -61,6 +103,18 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, p_size);
man->priv = mgr;
+
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_gtt_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_gtt_used\n");
+ return ret;
+ }
+
return 0;
}
@@ -74,12 +128,17 @@ static int amdgpu_gtt_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_gtt_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_gtt_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_gtt_used);
+
return 0;
}
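
For illustration only, a minimal userspace reader for the two new files; the card0 path is an assumption and varies per system:

#include <stdio.h>

static int read_u64(const char *path, unsigned long long *val)
{
	FILE *f = fopen(path, "r");
	int ret;

	if (!f)
		return -1;
	ret = (fscanf(f, "%llu", val) == 1) ? 0 : -1;
	fclose(f);
	return ret;
}

int main(void)
{
	unsigned long long total, used;

	if (read_u64("/sys/class/drm/card0/device/mem_info_gtt_total", &total) ||
	    read_u64("/sys/class/drm/card0/device/mem_info_gtt_used", &used))
		return 1;

	printf("GTT: %llu / %llu bytes used\n", used, total);
	return 0;
}
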
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 1c50be3ab8a9..934dfdcb4e73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -142,6 +142,7 @@ void amdgpu_ih_ring_fini(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
*/
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
{
+ unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
u32 wptr;
if (!ih->enabled || adev->shutdown)
@@ -159,7 +160,7 @@ restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
- while (ih->rptr != wptr) {
+ while (ih->rptr != wptr && --count) {
amdgpu_irq_dispatch(adev, ih);
ih->rptr &= ih->ptr_mask;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
index 113a1ba13d4a..4e0bb645176d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,9 @@
#ifndef __AMDGPU_IH_H__
#define __AMDGPU_IH_H__
+/* Maximum number of IVs processed at once */
+#define AMDGPU_IH_MAX_NUM_IVS 32
+
struct amdgpu_device;
struct amdgpu_iv_entry;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index e860412043bb..b17d0545728e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -39,6 +39,7 @@
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"
#include "amdgpu_display.h"
+#include "amdgpu_ras.h"
static void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev)
{
@@ -296,6 +297,17 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->ver = adev->pm.fw_version;
fw_info->feature = 0;
break;
+ case AMDGPU_INFO_FW_TA:
+ if (query_fw->index > 1)
+ return -EINVAL;
+ if (query_fw->index == 0) {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_xgmi_ucode_version;
+ } else {
+ fw_info->ver = adev->psp.ta_fw_version;
+ fw_info->feature = adev->psp.ta_ras_ucode_version;
+ }
+ break;
case AMDGPU_INFO_FW_SDMA:
if (query_fw->index >= adev->sdma.num_instances)
return -EINVAL;
@@ -684,6 +696,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (adev->pm.dpm_enabled) {
dev_info.max_engine_clock = amdgpu_dpm_get_sclk(adev, false) * 10;
dev_info.max_memory_clock = amdgpu_dpm_get_mclk(adev, false) * 10;
+ } else if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk) {
+ dev_info.max_engine_clock = amdgpu_virt_get_sclk(adev, false) * 10;
+ dev_info.max_memory_clock = amdgpu_virt_get_mclk(adev, false) * 10;
} else {
dev_info.max_engine_clock = adev->clock.default_sclk * 10;
dev_info.max_memory_clock = adev->clock.default_mclk * 10;
@@ -909,6 +925,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_INFO_VRAM_LOST_COUNTER:
ui32 = atomic_read(&adev->vram_lost_counter);
return copy_to_user(out, &ui32, min(size, 4u)) ? -EFAULT : 0;
+ case AMDGPU_INFO_RAS_ENABLED_FEATURES: {
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+ uint64_t ras_mask;
+
+ if (!ras)
+ return -EINVAL;
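+ /* pack the mask: supported RAS blocks in the upper 32 bits, enabled features in the lower 32 */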
+ ras_mask = (uint64_t)ras->supported << 32 | ras->features;
+
+ return copy_to_user(out, &ras_mask,
+ min_t(u64, size, sizeof(ras_mask))) ?
+ -EFAULT : 0;
+ }
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->query);
return -EINVAL;
@@ -1328,6 +1356,16 @@ static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
seq_printf(m, "ASD feature version: %u, firmware version: 0x%08x\n",
fw_info.feature, fw_info.ver);
+ query_fw.fw_type = AMDGPU_INFO_FW_TA;
+ for (i = 0; i < 2; i++) {
+ query_fw.index = i;
+ ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+ if (ret)
+ continue;
+ seq_printf(m, "TA %s feature version: %u, firmware version: 0x%08x\n",
+ i ? "RAS" : "XGMI", fw_info.feature, fw_info.ver);
+ }
+
/* SMC */
query_fw.fw_type = AMDGPU_INFO_FW_SMC;
ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 889e443eeee7..2e9e3db778c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -58,7 +58,7 @@ struct amdgpu_hpd;
#define to_amdgpu_encoder(x) container_of(x, struct amdgpu_encoder, base)
#define to_amdgpu_framebuffer(x) container_of(x, struct amdgpu_framebuffer, base)
-#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base);
+#define to_dm_plane_state(x) container_of(x, struct dm_plane_state, base)
#define AMDGPU_MAX_HPD_PINS 6
#define AMDGPU_MAX_CRTCS 6
@@ -406,7 +406,7 @@ struct amdgpu_crtc {
struct amdgpu_flip_work *pflip_works;
enum amdgpu_flip_status pflip_status;
int deferred_flip_completion;
- u64 last_flip_vblank;
+ u32 last_flip_vblank;
/* pll sharing */
struct amdgpu_atom_ss ss;
bool ss_enabled;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index ec9e45004bff..93b2c5a48a71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -88,12 +88,14 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
if (bo->gem_base.import_attach)
drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
drm_gem_object_release(&bo->gem_base);
- amdgpu_bo_unref(&bo->parent);
+ /* remove from the shadow list first, in case amdgpu_device_recover_vram sees bo->parent as NULL */
if (!list_empty(&bo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
list_del_init(&bo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
+ amdgpu_bo_unref(&bo->parent);
+
kfree(bo->metadata);
kfree(bo);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 220a6a7b1bc1..c430e8259038 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -72,6 +72,8 @@ struct amdgpu_bo_va {
/* If the mappings are cleared or filled */
bool cleared;
+
+ bool is_xgmi;
};
struct amdgpu_bo {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index a7adb7b6bd98..95144e49c7f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_display.h"
+#include "amdgpu_smu.h"
#include "atom.h"
#include <linux/power_supply.h>
#include <linux/hwmon.h>
@@ -80,6 +81,27 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
}
}
+int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
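+ /* single entry point for sensor reads: dispatch to the SW SMU when it is in use, otherwise to the powerplay read_sensor callback */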
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_read_sensor(&adev->smu, sensor, data, size);
+ else {
+ if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
+ ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle,
+ sensor, data, size);
+ else
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
/**
* DOC: power_dpm_state
*
@@ -122,7 +144,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type pm;
- if (adev->powerplay.pp_funcs->get_current_power_state)
+ if (is_support_sw_smu(adev) && adev->smu.ppt_funcs->get_current_power_state)
+ pm = amdgpu_smu_get_current_power_state(adev);
+ else if (adev->powerplay.pp_funcs->get_current_power_state)
pm = amdgpu_dpm_get_current_power_state(adev);
else
pm = adev->pm.dpm.user_state;
@@ -240,7 +264,9 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return snprintf(buf, PAGE_SIZE, "off\n");
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
level = amdgpu_dpm_get_performance_level(adev);
else
level = adev->pm.dpm.forced_level;
@@ -273,7 +299,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_performance_level)
+ if (is_support_sw_smu(adev))
+ current_level = smu_get_performance_level(&adev->smu);
+ else if (adev->powerplay.pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
if (strncmp("low", buf, strlen("low")) == 0) {
@@ -299,10 +327,35 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
goto fail;
}
+ if (amdgpu_sriov_vf(adev)) {
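+ /* under SR-IOV the request is forwarded to the host through the virt ops; there is no local DPM path to drive */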
+ if (amdgim_is_hwperf(adev) &&
+ adev->virt.ops->force_dpm_level) {
+ mutex_lock(&adev->pm.mutex);
+ adev->virt.ops->force_dpm_level(adev, level);
+ mutex_unlock(&adev->pm.mutex);
+ return count;
+ } else {
+ return -EINVAL;
+ }
+ }
+
if (current_level == level)
return count;
- if (adev->powerplay.pp_funcs->force_performance_level) {
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm.thermal_active) {
+ count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
+ goto fail;
+ }
+ ret = smu_force_performance_level(&adev->smu, level);
+ if (ret)
+ count = -EINVAL;
+ else
+ adev->pm.dpm.forced_level = level;
+ mutex_unlock(&adev->pm.mutex);
+ } else if (adev->powerplay.pp_funcs->force_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
@@ -328,9 +381,13 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
- int i, buf_len;
+ int i, buf_len, ret;
- if (adev->powerplay.pp_funcs->get_pp_num_states)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_get_power_num_states(&adev->smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_pp_num_states)
amdgpu_dpm_get_pp_num_states(adev, &data);
buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -351,23 +408,29 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
struct pp_states_info data;
+ struct smu_context *smu = &adev->smu;
enum amd_pm_state_type pm = 0;
- int i = 0;
+ int i = 0, ret = 0;
- if (adev->powerplay.pp_funcs->get_current_power_state
+ if (is_support_sw_smu(adev)) {
+ pm = smu_get_current_power_state(smu);
+ ret = smu_get_power_num_states(smu, &data);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->get_current_power_state
&& adev->powerplay.pp_funcs->get_pp_num_states) {
pm = amdgpu_dpm_get_current_power_state(adev);
amdgpu_dpm_get_pp_num_states(adev, &data);
+ }
- for (i = 0; i < data.nums; i++) {
- if (pm == data.states[i])
- break;
- }
-
- if (i == data.nums)
- i = -EINVAL;
+ for (i = 0; i < data.nums; i++) {
+ if (pm == data.states[i])
+ break;
}
+ if (i == data.nums)
+ i = -EINVAL;
+
return snprintf(buf, PAGE_SIZE, "%d\n", i);
}
@@ -397,6 +460,8 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (strlen(buf) == 1)
adev->pp_force_state_enabled = false;
+ else if (is_support_sw_smu(adev))
+ adev->pp_force_state_enabled = false;
else if (adev->powerplay.pp_funcs->dispatch_tasks &&
adev->powerplay.pp_funcs->get_pp_num_states) {
struct pp_states_info data;
@@ -442,7 +507,12 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
char *table = NULL;
int size;
- if (adev->powerplay.pp_funcs->get_pp_table)
+ if (is_support_sw_smu(adev)) {
+ size = smu_sys_get_pp_table(&adev->smu, (void **)&table);
+ if (size < 0)
+ return size;
+ }
+ else if (adev->powerplay.pp_funcs->get_pp_table)
size = amdgpu_dpm_get_pp_table(adev, &table);
else
return 0;
@@ -462,8 +532,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->set_pp_table)
+ if (is_support_sw_smu(adev)) {
+ ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count);
+ if (ret)
+ return ret;
+ } else if (adev->powerplay.pp_funcs->set_pp_table)
amdgpu_dpm_set_pp_table(adev, buf, count);
return count;
@@ -586,19 +661,29 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
}
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
+ if (is_support_sw_smu(adev)) {
+ ret = smu_od_edit_dpm_table(&adev->smu, type,
+ parameter, parameter_size);
- if (ret)
- return -EINVAL;
+ if (ret)
+ return -EINVAL;
+ } else {
+ if (adev->powerplay.pp_funcs->odn_edit_dpm_table)
+ ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size);
- if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- return count;
- } else {
+ if (ret)
return -EINVAL;
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL);
+ return count;
+ } else {
+ return -EINVAL;
+ }
}
}
@@ -613,7 +698,13 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t size = 0;
- if (adev->powerplay.pp_funcs->print_clock_levels) {
+ if (is_support_sw_smu(adev)) {
+ size = smu_print_clk_levels(&adev->smu, OD_SCLK, buf);
+ size += smu_print_clk_levels(&adev->smu, OD_MCLK, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_VDDC_CURVE, buf+size);
+ size += smu_print_clk_levels(&adev->smu, OD_RANGE, buf+size);
+ return size;
+ } else if (adev->powerplay.pp_funcs->print_clock_levels) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
@@ -711,7 +802,13 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (amdgpu_sriov_vf(adev) && amdgim_is_hwperf(adev) &&
+ adev->virt.ops->get_pp_clk)
+ return adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -767,7 +864,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
if (ret)
@@ -783,7 +882,9 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_MCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -803,7 +904,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_MCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
if (ret)
@@ -819,7 +922,9 @@ static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_SOCCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -839,7 +944,9 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_SOCCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask);
if (ret)
@@ -855,7 +962,9 @@ static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_FCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -875,7 +984,9 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_FCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask);
if (ret)
@@ -891,7 +1002,9 @@ static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_DCEFCLK, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -911,7 +1024,9 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_DCEFCLK, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask);
if (ret)
@@ -927,7 +1042,9 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ if (is_support_sw_smu(adev))
+ return smu_print_clk_levels(&adev->smu, PP_PCIE, buf);
+ else if (adev->powerplay.pp_funcs->print_clock_levels)
return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
else
return snprintf(buf, PAGE_SIZE, "\n");
@@ -947,7 +1064,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
if (ret)
return ret;
- if (adev->powerplay.pp_funcs->force_clock_level)
+ if (is_support_sw_smu(adev))
+ ret = smu_force_clk_levels(&adev->smu, PP_PCIE, mask);
+ else if (adev->powerplay.pp_funcs->force_clock_level)
ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
if (ret)
@@ -964,7 +1083,9 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_sclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_SCLK);
+ else if (adev->powerplay.pp_funcs->get_sclk_od)
value = amdgpu_dpm_get_sclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -986,14 +1107,19 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_SCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_sclk_od)
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1008,7 +1134,9 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
uint32_t value = 0;
- if (adev->powerplay.pp_funcs->get_mclk_od)
+ if (is_support_sw_smu(adev))
+ value = smu_get_od_percentage(&(adev->smu), OD_MCLK);
+ else if (adev->powerplay.pp_funcs->get_mclk_od)
value = amdgpu_dpm_get_mclk_od(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", value);
@@ -1030,14 +1158,19 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
count = -EINVAL;
goto fail;
}
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ if (is_support_sw_smu(adev)) {
+ value = smu_set_od_percentage(&(adev->smu), OD_MCLK, (uint32_t)value);
} else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
+ if (adev->powerplay.pp_funcs->set_mclk_od)
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
+
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
+ } else {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_pm_compute_clocks(adev);
+ }
}
fail:
@@ -1071,7 +1204,9 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
- if (adev->powerplay.pp_funcs->get_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ return smu_get_power_profile_mode(&adev->smu, buf);
+ else if (adev->powerplay.pp_funcs->get_power_profile_mode)
return amdgpu_dpm_get_power_profile_mode(adev, buf);
return snprintf(buf, PAGE_SIZE, "\n");
@@ -1121,9 +1256,10 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
}
parameter[parameter_size] = profile_mode;
- if (adev->powerplay.pp_funcs->set_power_profile_mode)
+ if (is_support_sw_smu(adev))
+ ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size);
+ else if (adev->powerplay.pp_funcs->set_power_profile_mode)
ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
-
if (!ret)
return count;
fail:
@@ -1146,14 +1282,10 @@ static ssize_t amdgpu_get_busy_percent(struct device *dev,
struct amdgpu_device *adev = ddev->dev_private;
int r, value, size = sizeof(value);
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* read the IP busy sensor */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD,
(void *)&value, &size);
+
if (r)
return r;
@@ -1247,11 +1379,6 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the temperature */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size);
@@ -1283,11 +1410,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode);
}
@@ -1306,14 +1436,22 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
+ if (is_support_sw_smu(adev)) {
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
- err = kstrtoint(buf, 10, &value);
- if (err)
- return err;
+ smu_set_fan_control_mode(&adev->smu, value);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
+
+ err = kstrtoint(buf, 10, &value);
+ if (err)
+ return err;
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ amdgpu_dpm_set_fan_control_mode(adev, value);
+ }
return count;
}
@@ -1345,8 +1483,10 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if ((adev->flags & AMD_IS_PX) &&
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
return -EINVAL;
@@ -1358,7 +1498,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
value = (value * 100) / 255;
- if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_percent(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
err = amdgpu_dpm_set_fan_speed_percent(adev, value);
if (err)
return err;
@@ -1380,7 +1524,11 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_fan_speed_percent(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
if (err)
return err;
@@ -1404,7 +1552,11 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &speed);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
if (err)
return err;
@@ -1422,9 +1574,6 @@ static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev,
u32 size = sizeof(min_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM,
(void *)&min_rpm, &size);
if (r)
@@ -1442,9 +1591,6 @@ static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev,
u32 size = sizeof(max_rpm);
int r;
- if (!adev->powerplay.pp_funcs->read_sensor)
- return -EINVAL;
-
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM,
(void *)&max_rpm, &size);
if (r)
@@ -1466,7 +1612,11 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_get_current_rpm(&adev->smu, &rpm);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
if (err)
return err;
@@ -1484,7 +1634,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
u32 value;
u32 pwm_mode;
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev))
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ else
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL)
return -ENODATA;
@@ -1497,7 +1651,11 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (err)
return err;
- if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
+ if (is_support_sw_smu(adev)) {
+ err = smu_set_fan_speed_rpm(&adev->smu, value);
+ if (err)
+ return err;
+ } else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) {
err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
if (err)
return err;
@@ -1513,11 +1671,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0;
- if (!adev->powerplay.pp_funcs->get_fan_control_mode)
- return -EINVAL;
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ if (is_support_sw_smu(adev)) {
+ pwm_mode = smu_get_fan_control_mode(&adev->smu);
+ } else {
+ if (!adev->powerplay.pp_funcs->get_fan_control_mode)
+ return -EINVAL;
+ pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ }
return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -1536,8 +1697,6 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
(adev->ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- if (!adev->powerplay.pp_funcs->set_fan_control_mode)
- return -EINVAL;
err = kstrtoint(buf, 10, &value);
if (err)
@@ -1550,7 +1709,13 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
else
return -EINVAL;
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ if (is_support_sw_smu(adev)) {
+ smu_set_fan_control_mode(&adev->smu, pwm_mode);
+ } else {
+ if (!adev->powerplay.pp_funcs->set_fan_control_mode)
+ return -EINVAL;
+ amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ }
return count;
}
@@ -1569,11 +1734,6 @@ static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX,
(void *)&vddgfx, &size);
@@ -1608,11 +1768,6 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB,
(void *)&vddnb, &size);
@@ -1644,11 +1799,6 @@ static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev,
(ddev->switch_power_state != DRM_SWITCH_POWER_ON))
return -EINVAL;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* get the voltage */
r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER,
(void *)&query, &size);
@@ -1675,7 +1825,10 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, true);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1690,7 +1843,10 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev);
uint32_t limit = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ smu_get_power_limit(&adev->smu, &limit, false);
+ return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) {
adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false);
return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000);
} else {
@@ -1713,7 +1869,9 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
value = value / 1000000; /* convert to Watt */
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
+ if (is_support_sw_smu(adev)) {
+ adev->smu.funcs->set_power_limit(&adev->smu, value);
+ } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) {
err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
if (err)
return err;
@@ -1967,18 +2125,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
+ if (!is_support_sw_smu(adev)) {
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ (!adev->powerplay.pp_funcs->get_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ (!adev->powerplay.pp_funcs->set_fan_control_mode &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
+ }
if ((adev->flags & AMD_IS_APU) &&
(attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
@@ -1987,20 +2147,22 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
- !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
+ if (!is_support_sw_smu(adev)) {
+ /* hide max/min values if we can't both query and manage the fan */
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+ !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
+ (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
+ if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
+ !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
+ }
/* only APUs have vddnb */
if (!(adev->flags & AMD_IS_APU) &&
@@ -2039,9 +2201,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
if (!adev->pm.dpm_enabled)
return;
- if (adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor &&
- !amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
+ if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp, &size)) {
if (temp < adev->pm.dpm.thermal.min_temp)
/* switch back the user state */
@@ -2267,7 +2427,13 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable UVD */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
@@ -2288,7 +2454,13 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
+ int ret = 0;
+ if (is_support_sw_smu(adev)) {
+ ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable);
+ if (ret)
+ DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n",
+ enable ? "true" : "false", ret);
+ } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) {
/* enable/disable VCE */
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
@@ -2413,7 +2585,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
"pp_power_profile_mode\n");
return ret;
}
- if (hwmgr->od_enabled) {
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled)) {
ret = device_create_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
if (ret) {
@@ -2489,7 +2662,8 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
device_remove_file(adev->dev,
&dev_attr_pp_power_profile_mode);
- if (hwmgr->od_enabled)
+ if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
+ (!is_support_sw_smu(adev) && hwmgr->od_enabled))
device_remove_file(adev->dev,
&dev_attr_pp_od_clk_voltage);
device_remove_file(adev->dev, &dev_attr_gpu_busy_percent);
@@ -2516,28 +2690,38 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
+ if (is_support_sw_smu(adev)) {
+ struct smu_context *smu = &adev->smu;
+ struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm;
+ mutex_lock(&(smu->mutex));
+ smu_handle_task(&adev->smu,
+ smu_dpm->dpm_level,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE);
+ mutex_unlock(&(smu->mutex));
+ } else {
+ if (adev->powerplay.pp_funcs->dispatch_tasks) {
+ if (!amdgpu_device_has_dc_support(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+ if (adev->powerplay.pp_funcs->display_configuration_change)
+ adev->powerplay.pp_funcs->display_configuration_change(
+ adev->powerplay.pp_handle,
+ &adev->pm.pm_display_cfg);
+ mutex_unlock(&adev->pm.mutex);
+ }
+ amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
+ } else {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
+ amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex);
}
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
- mutex_unlock(&adev->pm.mutex);
}
}
@@ -2553,11 +2737,6 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
uint32_t query = 0;
int size;
- /* sanity check PP is enabled */
- if (!(adev->powerplay.pp_funcs &&
- adev->powerplay.pp_funcs->read_sensor))
- return -EINVAL;
-
/* GPU Clocks */
size = sizeof(value);
seq_printf(m, "GFX Clocks and Power:\n");
@@ -2649,7 +2828,7 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
if ((adev->flags & AMD_IS_PX) &&
(ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
seq_printf(m, "PX asic powered off\n");
- } else if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
+ } else if (!is_support_sw_smu(adev) && adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
mutex_lock(&adev->pm.mutex);
if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3091488cd8cc..905cce1814f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -120,6 +120,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
{
int ret;
int index;
+ int timeout = 2000;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -133,8 +134,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
return ret;
}
- while (*((unsigned int *)psp->fence_buf) != index)
+ while (*((unsigned int *)psp->fence_buf) != index) {
+ if (--timeout == 0)
+ break;
msleep(1);
+ }
/* In some cases, psp response status is not 0 even there is no
* problem while the command is submitted. Some version of PSP FW
@@ -143,12 +147,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status) {
+ if (psp->cmd_buf_mem->resp.status || !timeout) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command failed and response status is (%d)\n",
psp->cmd_buf_mem->resp.status);
+ if (!timeout)
+ return -EINVAL;
}
/* get xGMI session id from response buffer */
@@ -181,13 +187,13 @@ static int psp_tmr_init(struct psp_context *psp)
int ret;
/*
- * Allocate 3M memory aligned to 1M from Frame Buffer (local
- * physical).
+ * According to the HW engineers, the TMR address should be "naturally
+ * aligned", e.g. the start address should be an integer multiple of the TMR size.
*
* Note: this memory need be reserved till the driver
* uninitializes.
*/
- ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, 0x100000,
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_TMR_SIZE, PSP_TMR_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -466,6 +472,206 @@ static int psp_xgmi_initialize(struct psp_context *psp)
return ret;
}
+// ras begin
+static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint64_t ras_ta_mc, uint64_t ras_mc_shared,
+ uint32_t ras_ta_size, uint32_t shared_size)
+{
+ cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc);
+ cmd->cmd.cmd_load_ta.app_len = ras_ta_size;
+
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size;
+}
+
+static int psp_ras_init_shared_buf(struct psp_context *psp)
+{
+ int ret;
+
+ /*
+ * Allocate 16k memory aligned to 4k from Frame Buffer (local
+ * physical) for ras ta <-> Driver
+ */
+ ret = amdgpu_bo_create_kernel(psp->adev, PSP_RAS_SHARED_MEM_SIZE,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
+ &psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return ret;
+}
+
+static int psp_ras_load(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ memset(psp->fw_pri_buf, 0, PSP_1_MEG);
+ memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size);
+
+ psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
+ psp->ras.ras_shared_mc_addr,
+ psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ if (!ret) {
+ psp->ras.ras_initialized = 1;
+ psp->ras.session_id = cmd->resp.session_id;
+ }
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA;
+ cmd->cmd.cmd_unload_ta.session_id = ras_session_id;
+}
+
+static int psp_ras_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
+ uint32_t ta_cmd_id,
+ uint32_t ras_session_id)
+{
+ cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD;
+ cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id;
+ cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id;
+ /* Note: cmd_invoke_cmd.buf is not used for now */
+}
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the loading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id,
+ psp->ras.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (enable)
+ ras_cmd->cmd_id = TA_RAS_COMMAND__ENABLE_FEATURES;
+ else
+ ras_cmd->cmd_id = TA_RAS_COMMAND__DISABLE_FEATURES;
+
+ ras_cmd->ras_in_message = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_ras_terminate(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return 0;
+
+ ret = psp_ras_unload(psp);
+ if (ret)
+ return ret;
+
+ psp->ras.ras_initialized = 0;
+
+ /* free ras shared memory */
+ amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo,
+ &psp->ras.ras_shared_mc_addr,
+ &psp->ras.ras_shared_buf);
+
+ return 0;
+}
+
+static int psp_ras_initialize(struct psp_context *psp)
+{
+ int ret;
+
+ if (!psp->ras.ras_initialized) {
+ ret = psp_ras_init_shared_buf(psp);
+ if (ret)
+ return ret;
+ }
+
+ ret = psp_ras_load(psp);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+// ras end
+
static int psp_hw_start(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -473,25 +679,35 @@ static int psp_hw_start(struct psp_context *psp)
if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
ret = psp_bootloader_load_sysdrv(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sysdrv failed!\n");
return ret;
+ }
ret = psp_bootloader_load_sos(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load sos failed!\n");
return ret;
+ }
}
ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP create ring failed!\n");
return ret;
+ }
ret = psp_tmr_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
return ret;
+ }
ret = psp_asd_load(psp);
- if (ret)
+ if (ret) {
+ DRM_ERROR("PSP load asd failed!\n");
return ret;
+ }
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp);
@@ -502,6 +718,15 @@ static int psp_hw_start(struct psp_context *psp)
dev_err(psp->adev->dev,
"XGMI: Failed to initialize XGMI session\n");
}
+
+
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_initialize(psp);
+ if (ret)
+ dev_err(psp->adev->dev,
+ "RAS: Failed to initialize RAS\n");
+ }
+
return 0;
}
@@ -665,53 +890,52 @@ static int psp_load_fw(struct amdgpu_device *adev)
&psp->fence_buf_mc_addr,
&psp->fence_buf);
if (ret)
- goto failed_mem2;
+ goto failed;
ret = amdgpu_bo_create_kernel(adev, PSP_CMD_BUFFER_SIZE, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&psp->cmd_buf_bo, &psp->cmd_buf_mc_addr,
(void **)&psp->cmd_buf_mem);
if (ret)
- goto failed_mem1;
+ goto failed;
memset(psp->fence_buf, 0, PSP_FENCE_BUFFER_SIZE);
ret = psp_ring_init(psp, PSP_RING_TYPE__KM);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP ring init failed!\n");
+ goto failed;
+ }
ret = psp_tmr_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP tmr init failed!\n");
+ goto failed;
+ }
ret = psp_asd_init(psp);
- if (ret)
- goto failed_mem;
+ if (ret) {
+ DRM_ERROR("PSP asd init failed!\n");
+ goto failed;
+ }
skip_memalloc:
ret = psp_hw_start(psp);
if (ret)
- goto failed_mem;
+ goto failed;
ret = psp_np_fw_load(psp);
if (ret)
- goto failed_mem;
+ goto failed;
return 0;
-failed_mem:
- amdgpu_bo_free_kernel(&psp->cmd_buf_bo,
- &psp->cmd_buf_mc_addr,
- (void **)&psp->cmd_buf_mem);
-failed_mem1:
- amdgpu_bo_free_kernel(&psp->fence_buf_bo,
- &psp->fence_buf_mc_addr, &psp->fence_buf);
-failed_mem2:
- amdgpu_bo_free_kernel(&psp->fw_pri_bo,
- &psp->fw_pri_mc_addr, &psp->fw_pri_buf);
failed:
- kfree(psp->cmd);
- psp->cmd = NULL;
+ /*
+ * all cleanup jobs (xgmi terminate, ras terminate,
+ * ring destroy, cmd/fence/fw buffers destroy,
+ * psp->cmd destroy) are delayed to psp_hw_fini
+ */
return ret;
}
@@ -753,6 +977,9 @@ static int psp_hw_fini(void *handle)
psp->xgmi_context.initialized == 1)
psp_xgmi_terminate(psp);
+ if (psp->adev->psp.ta_fw)
+ psp_ras_terminate(psp);
+
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
@@ -786,6 +1013,14 @@ static int psp_suspend(void *handle)
}
}
+ if (psp->adev->psp.ta_fw) {
+ ret = psp_ras_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Failed to terminate ras ta\n");
+ return ret;
+ }
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 2ef98cc755d6..cde113f07c96 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -28,11 +28,13 @@
#include "amdgpu.h"
#include "psp_gfx_if.h"
#include "ta_xgmi_if.h"
+#include "ta_ras_if.h"
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
#define PSP_ASD_SHARED_MEM_SIZE 0x4000
#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
+#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE 0x400000
@@ -88,6 +90,9 @@ struct psp_funcs
int (*xgmi_set_topology_info)(struct psp_context *psp, int number_devices,
struct psp_xgmi_topology_info *topology);
bool (*support_vmr_ring)(struct psp_context *psp);
+ int (*ras_trigger_error)(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info);
+ int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr);
};
struct psp_xgmi_context {
@@ -98,6 +103,16 @@ struct psp_xgmi_context {
void *xgmi_shared_buf;
};
+struct psp_ras_context {
+ /*ras fw*/
+ bool ras_initialized;
+ uint32_t session_id;
+ struct amdgpu_bo *ras_shared_bo;
+ uint64_t ras_shared_mc_addr;
+ void *ras_shared_buf;
+ struct amdgpu_ras *ras;
+};
+
struct psp_context
{
struct amdgpu_device *adev;
@@ -150,10 +165,15 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
+ uint32_t ta_fw_version;
uint32_t ta_xgmi_ucode_version;
uint32_t ta_xgmi_ucode_size;
uint8_t *ta_xgmi_start_addr;
+ uint32_t ta_ras_ucode_version;
+ uint32_t ta_ras_ucode_size;
+ uint8_t *ta_ras_start_addr;
struct psp_xgmi_context xgmi_context;
+ struct psp_ras_context ras;
};
struct amdgpu_psp_funcs {
@@ -207,6 +227,13 @@ struct psp_xgmi_topology_info {
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
+#define psp_ras_trigger_error(psp, info) \
+ ((psp)->funcs->ras_trigger_error ? \
+ (psp)->funcs->ras_trigger_error((psp), (info)) : -EINVAL)
+#define psp_ras_cure_posion(psp, addr) \
+ ((psp)->funcs->ras_cure_posion ? \
+ (psp)->funcs->ras_cure_posion(psp, (addr)) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -217,6 +244,11 @@ extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
int psp_gpu_reset(struct amdgpu_device *adev);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+
+int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
+int psp_ras_enable_features(struct psp_context *psp,
+ union ta_ras_cmd_input *info, bool enable);
+
extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
new file mode 100644
index 000000000000..22bd21efe6b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -0,0 +1,1482 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include "amdgpu.h"
+#include "amdgpu_ras.h"
+#include "amdgpu_atomfirmware.h"
+
+struct ras_ih_data {
+ /* interrupt bottom half */
+ struct work_struct ih_work;
+ int inuse;
+ /* IP callback */
+ ras_ih_cb cb;
+ /* ring buffer holding the queued entries */
+ unsigned char *ring;
+ unsigned int ring_size;
+ unsigned int element_size;
+ unsigned int aligned_element_size;
+ unsigned int rptr;
+ unsigned int wptr;
+};
+
+struct ras_fs_data {
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_err_data {
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_err_handler_data {
+ /* point to bad pages array */
+ struct {
+ unsigned long bp;
+ struct amdgpu_bo *bo;
+ } *bps;
+ /* the count of entries */
+ int count;
+ /* the space left to place new entries */
+ int space_left;
+ /* last reserved entry's index + 1 */
+ int last_reserved;
+};
+
+struct ras_manager {
+ struct ras_common_if head;
+ /* reference count */
+ int use;
+ /* ras block link */
+ struct list_head node;
+ /* the device */
+ struct amdgpu_device *adev;
+ /* debugfs */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute sysfs_attr;
+ int attr_inuse;
+
+ /* fs node name */
+ struct ras_fs_data fs_data;
+
+ /* IH data */
+ struct ras_ih_data ih_data;
+
+ struct ras_err_data err_data;
+};
+
+const char *ras_error_string[] = {
+ "none",
+ "parity",
+ "single_correctable",
+ "multi_uncorrectable",
+ "poison",
+};
+
+const char *ras_block_string[] = {
+ "umc",
+ "sdma",
+ "gfx",
+ "mmhub",
+ "athub",
+ "pcie_bif",
+ "hdp",
+ "xgmi_wafl",
+ "df",
+ "smn",
+ "sem",
+ "mp0",
+ "mp1",
+ "fuse",
+};
+
+#define ras_err_str(i) (ras_error_string[ffs(i)])
+#define ras_block_str(i) (ras_block_string[i])
+
+#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS 1
+#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
+
+static void amdgpu_ras_self_test(struct amdgpu_device *adev)
+{
+ /* TODO */
+}
+
+static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+ ssize_t s;
+ char val[128];
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+ if (*pos >= s)
+ return 0;
+
+ s -= *pos;
+ s = min_t(u64, s, size);
+
+
+ if (copy_to_user(buf, &val[*pos], s))
+ return -EINVAL;
+
+ *pos += s;
+
+ return s;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .read = amdgpu_ras_debugfs_read,
+ .write = NULL,
+ .llseek = default_llseek
+};
+
+static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
+ *block_id = i;
+ if (strcmp(name, ras_block_str(i)) == 0)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
+ const char __user *buf, size_t size,
+ loff_t *pos, struct ras_debug_if *data)
+{
+ ssize_t s = min_t(u64, 64, size);
+ char str[65];
+ char block_name[33];
+ char err[9] = "ue";
+ int op = -1;
+ int block_id;
+ u64 address, value;
+
+ if (*pos)
+ return -EINVAL;
+ *pos = size;
+
+ memset(str, 0, sizeof(str));
+ memset(data, 0, sizeof(*data));
+
+ if (copy_from_user(str, buf, s))
+ return -EINVAL;
+
+ if (sscanf(str, "disable %32s", block_name) == 1)
+ op = 0;
+ else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
+ op = 1;
+ else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
+ op = 2;
+ else if (str[0] && str[1] && str[2] && str[3])
+ /* ascii string, but commands are not matched. */
+ return -EINVAL;
+
+ if (op != -1) {
+ if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
+ return -EINVAL;
+
+ data->head.block = block_id;
+ data->head.type = memcmp("ue", err, 2) == 0 ?
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE :
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
+ data->op = op;
+
+ if (op == 2) {
+ if (sscanf(str, "%*s %*s %*s %llu %llu",
+ &address, &value) != 2)
+ if (sscanf(str, "%*s %*s %*s 0x%llx 0x%llx",
+ &address, &value) != 2)
+ return -EINVAL;
+ data->inject.address = address;
+ data->inject.value = value;
+ }
+ } else {
+ if (size < sizeof(*data))
+ return -EINVAL;
+
+ if (copy_from_user(data, buf, sizeof(*data)))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+/*
+ * DOC: ras debugfs control interface
+ *
+ * It accepts struct ras_debug_if which has two members.
+ *
+ * First member: ras_debug_if::head or ras_debug_if::inject.
+ *
+ * head is used to indicate which IP block will be under control.
+ *
+ * head has four members: block, type, sub_block_index and name.
+ * block: which IP will be under control.
+ * type: what kind of error will be enabled/disabled/injected.
+ * sub_block_index: some IPs have subcomponents, for example GFX and SDMA.
+ * name: the name of the IP.
+ *
+ * inject has two more members than head: address and value.
+ * As their names indicate, the inject operation will write the
+ * value to the address.
+ *
+ * Second member: struct ras_debug_if::op.
+ * It has three kinds of operations.
+ * 0: disable RAS on the block. Take ::head as its data.
+ * 1: enable RAS on the block. Take ::head as its data.
+ * 2: inject errors on the block. Take ::inject as its data.
+ *
+ * How to use the interface?
+ * programs:
+ * copy struct ras_debug_if into your code and initialize it,
+ * then write the struct to the control node, for example as sketched below.
+ *
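+ * A minimal sketch of the program path (the struct layout is the one from
+ * amdgpu_ras.h copied into userspace, op 1 means enable as listed above,
+ * and open()/write() are the usual libc calls):
+ *
+ *	struct ras_debug_if data;
+ *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
+ *
+ *	memset(&data, 0, sizeof(data));
+ *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
+ *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
+ *	data.op = 1;
+ *	if (write(fd, &data, sizeof(data)) != sizeof(data))
+ *		perror("ras_ctrl");
+ *	close(fd);
+ *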
+ * bash:
+ * echo op block [error [address value]] > .../ras/ras_ctrl
+ * op: disable, enable, inject
+ * disable: only block is needed
+ * enable: block and error are needed
+ * inject: block, error, address and value are needed
+ * block: umc, sdma, gfx, .........
+ * see ras_block_string[] for details
+ * error: ue, ce
+ * ue: multi_uncorrectable
+ * ce: single_correctable
+ *
+ * here are some examples for bash commands,
+ * echo inject umc ue 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo inject umc ce 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
+ *
+ * How to check the result?
+ *
+ * For disable/enable, please check ras features at
+ * /sys/class/drm/card[0/1/2...]/device/ras/features
+ *
+ * For inject, please check corresponding err count at
+ * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
+ *
+ * NOTE: operation is only allowed on blocks which are supported.
+ * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask
+ */
+static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
+ struct ras_debug_if data;
+ int ret = 0;
+
+ ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
+ if (ret)
+ return -EINVAL;
+
+ if (!amdgpu_ras_is_supported(adev, data.head.block))
+ return -EINVAL;
+
+ switch (data.op) {
+ case 0:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
+ break;
+ case 1:
+ ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
+ break;
+ case 2:
+ ret = amdgpu_ras_error_inject(adev, &data.inject);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ return -EINVAL;
+
+ return size;
+}
+
+static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = {
+ .owner = THIS_MODULE,
+ .read = NULL,
+ .write = amdgpu_ras_debugfs_ctrl_write,
+ .llseek = default_llseek
+};
+
+static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr);
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(obj->adev, &info))
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%s: %lu\n%s: %lu\n",
+ "ue", info.ue_count,
+ "ce", info.ce_count);
+}
+
+/* obj begin */
+
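+/* obj->use counts the references taken by feature enable, sysfs, debugfs
+ * and the IH registration; put_obj() drops the obj from the list when the
+ * count reaches zero.
+ */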
+#define get_obj(obj) do { (obj)->use++; } while (0)
+#define alive_obj(obj) ((obj)->use)
+
+static inline void put_obj(struct ras_manager *obj)
+{
+ if (obj && --obj->use == 0)
+ list_del(&obj->node);
+ if (obj && obj->use < 0) {
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj->head.name);
+ }
+}
+
+/* make one obj and return it. */
+static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return NULL;
+
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+ /* already exists, do not create it again */
+ if (alive_obj(obj))
+ return NULL;
+
+ obj->head = *head;
+ obj->adev = adev;
+ list_add(&obj->node, &con->head);
+ get_obj(obj);
+
+ return obj;
+}
+
+/* return an obj equal to head, or the first when head is NULL */
+static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ int i;
+
+ if (!con)
+ return NULL;
+
+ if (head) {
+ if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return NULL;
+
+ obj = &con->objs[head->block];
+
+ if (alive_obj(obj)) {
+ WARN_ON(head->block != obj->head.block);
+ return obj;
+ }
+ } else {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ obj = &con->objs[i];
+ if (alive_obj(obj)) {
+ WARN_ON(i != obj->head.block);
+ return obj;
+ }
+ }
+ }
+
+ return NULL;
+}
+/* obj end */
+
+/* feature ctl begin */
+static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->hw_supported & BIT(head->block);
+}
+
+static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ return con->features & BIT(head->block);
+}
+
+/*
+ * if obj is not created, then create one.
+ * set feature enable flag.
+ */
+static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, int enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ /* If the hardware does not support ras, do not create the obj.
+ * But if the hardware does support ras, we can create the obj.
+ * The ras framework checks con->hw_supported to see if it needs to do
+ * the corresponding initialization.
+ * IPs check con->supported to see if they need to disable ras.
+ */
+ if (!amdgpu_ras_is_feature_allowed(adev, head))
+ return 0;
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ if (enable) {
+ if (!obj) {
+ obj = amdgpu_ras_create_obj(adev, head);
+ if (!obj)
+ return -EINVAL;
+ } else {
+ /* In case we create obj somewhere else */
+ get_obj(obj);
+ }
+ con->features |= BIT(head->block);
+ } else {
+ if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
+ con->features &= ~BIT(head->block);
+ put_obj(obj);
+ }
+ }
+
+ return 0;
+}
+
+/* wrapper of psp_ras_enable_features */
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ union ta_ras_cmd_input info;
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (!enable) {
+ info.disable_features = (struct ta_ras_disable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ } else {
+ info.enable_features = (struct ta_ras_enable_features_input) {
+ .block_id = amdgpu_ras_block_to_ta(head->block),
+ .error_type = amdgpu_ras_error_to_ta(head->type),
+ };
+ }
+
+ /* Do not enable if it is not allowed. */
+ WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
+ /* Are we already in the state we are going to set? */
+ if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
+ return 0;
+
+ ret = psp_ras_enable_features(&adev->psp, &info, enable);
+ if (ret) {
+ DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n",
+ enable ? "enable":"disable",
+ ras_block_str(head->block),
+ ret);
+ return -EINVAL;
+ }
+
+ /* setup the obj */
+ __amdgpu_ras_feature_enable(adev, head, enable);
+
+ return 0;
+}
+
+/* Only used in device probe stage and called only once. */
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ret;
+
+ if (!con)
+ return -EINVAL;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* If ras is enabled by the vbios, we set up the ras obj first in
+ * both cases. For enable, that is all we need to do. For
+ * disable, we need to perform a ras TA disable cmd after that.
+ */
+ ret = __amdgpu_ras_feature_enable(adev, head, 1);
+ if (ret)
+ return ret;
+
+ if (!enable)
+ ret = amdgpu_ras_feature_enable(adev, head, 0);
+ } else
+ ret = amdgpu_ras_feature_enable(adev, head, enable);
+
+ return ret;
+}
+
+static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ /* bypass psp.
+ * aka just release the obj and corresponding flags
+ */
+ if (bypass) {
+ if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
+ break;
+ }
+ }
+
+ return con->features;
+}
+
+static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
+ bool bypass)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ const enum amdgpu_ras_error_type default_ras_type =
+ AMDGPU_RAS_ERROR__NONE;
+
+ for (i = 0; i < ras_block_count; i++) {
+ struct ras_common_if head = {
+ .block = i,
+ .type = default_ras_type,
+ .sub_block_index = 0,
+ };
+ strcpy(head.name, ras_block_str(i));
+ if (bypass) {
+ /*
+ * bypass psp; the vbios has enabled ras for us,
+ * so just create the obj
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ return con->features;
+}
+/* feature ctl end */
+
+/* query/inject/cure begin */
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+
+ if (!obj)
+ return -EINVAL;
+ /* TODO might read the register to read the count */
+
+ info->ue_count = obj->err_data.ue_count;
+ info->ce_count = obj->err_data.ce_count;
+
+ return 0;
+}
+
+/* wrapper of psp_ras_trigger_error */
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ta_ras_trigger_error_input block_info = {
+ .block_id = amdgpu_ras_block_to_ta(info->head.block),
+ .inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
+ .sub_block_index = info->head.sub_block_index,
+ .address = info->address,
+ .value = info->value,
+ };
+ int ret = 0;
+
+ if (!obj)
+ return -EINVAL;
+
+ ret = psp_ras_trigger_error(&adev->psp, &block_info);
+ if (ret)
+ DRM_ERROR("RAS ERROR: inject %s error failed ret %d\n",
+ ras_block_str(info->head.block),
+ ret);
+
+ return ret;
+}
+
+int amdgpu_ras_error_cure(struct amdgpu_device *adev,
+ struct ras_cure_if *info)
+{
+ /* psp fw has no cure interface for now. */
+ return 0;
+}
+
+/* get the total error counts on all IPs */
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_err_data data = {0, 0};
+
+ if (!con)
+ return -EINVAL;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ if (amdgpu_ras_error_query(adev, &info))
+ return -EINVAL;
+
+ data.ce_count += info.ce_count;
+ data.ue_count += info.ue_count;
+ }
+
+ return is_ce ? data.ce_count : data.ue_count;
+}
+/* query/inject/cure end */
+
+
+/* sysfs begin */
+
+static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_ras *con =
+ container_of(attr, struct amdgpu_ras, features_attr);
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ struct ras_common_if head;
+ int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
+ int i;
+ ssize_t s;
+ struct ras_manager *obj;
+
+ s = scnprintf(buf, PAGE_SIZE, "feature mask: 0x%x\n", con->features);
+
+ for (i = 0; i < ras_block_count; i++) {
+ head.block = i;
+
+ if (amdgpu_ras_is_feature_enabled(adev, &head)) {
+ obj = amdgpu_ras_find_obj(adev, &head);
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: %s\n",
+ ras_block_str(i),
+ ras_err_str(obj->head.type));
+ } else
+ s += scnprintf(&buf[s], PAGE_SIZE - s,
+ "%s: disabled\n",
+ ras_block_str(i));
+ }
+
+ return s;
+}
+
+static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ con->features_attr = (struct device_attribute) {
+ .attr = {
+ .name = "features",
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_features_read,
+ };
+ sysfs_attr_init(attrs[0]);
+
+ return sysfs_create_group(&adev->dev->kobj, &group);
+}
+
+static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct attribute *attrs[] = {
+ &con->features_attr.attr,
+ NULL
+ };
+ struct attribute_group group = {
+ .name = "ras",
+ .attrs = attrs,
+ };
+
+ sysfs_remove_group(&adev->dev->kobj, &group);
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+
+ if (!obj || obj->attr_inuse)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.sysfs_name,
+ head->sysfs_name,
+ sizeof(obj->fs_data.sysfs_name));
+
+ obj->sysfs_attr = (struct device_attribute){
+ .attr = {
+ .name = obj->fs_data.sysfs_name,
+ .mode = S_IRUGO,
+ },
+ .show = amdgpu_ras_sysfs_read,
+ };
+ sysfs_attr_init(&obj->sysfs_attr.attr);
+
+ if (sysfs_add_file_to_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras")) {
+ put_obj(obj);
+ return -EINVAL;
+ }
+
+ obj->attr_inuse = 1;
+
+ return 0;
+}
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->attr_inuse)
+ return -EINVAL;
+
+ sysfs_remove_file_from_group(&adev->dev->kobj,
+ &obj->sysfs_attr.attr,
+ "ras");
+ obj->attr_inuse = 0;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_sysfs_remove(adev, &obj->head);
+ }
+
+ amdgpu_ras_sysfs_remove_feature_node(adev);
+
+ return 0;
+}
+/* sysfs end */
+
+/* debugfs begin */
+static int amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct drm_minor *minor = adev->ddev->primary;
+ struct dentry *root = minor->debugfs_root, *dir;
+ struct dentry *ent;
+
+ dir = debugfs_create_dir("ras", root);
+ if (IS_ERR(dir))
+ return -EINVAL;
+
+ con->dir = dir;
+
+ ent = debugfs_create_file("ras_ctrl",
+ S_IWUGO | S_IRUGO, con->dir,
+ adev, &amdgpu_ras_debugfs_ctrl_ops);
+ if (IS_ERR(ent)) {
+ debugfs_remove(con->dir);
+ return -EINVAL;
+ }
+
+ con->ent = ent;
+ return 0;
+}
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
+ struct dentry *ent;
+
+ if (!obj || obj->ent)
+ return -EINVAL;
+
+ get_obj(obj);
+
+ memcpy(obj->fs_data.debugfs_name,
+ head->debugfs_name,
+ sizeof(obj->fs_data.debugfs_name));
+
+ ent = debugfs_create_file(obj->fs_data.debugfs_name,
+ S_IWUGO | S_IRUGO, con->dir,
+ obj, &amdgpu_ras_debugfs_ops);
+
+ if (IS_ERR(ent))
+ return -EINVAL;
+
+ obj->ent = ent;
+
+ return 0;
+}
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);
+
+ if (!obj || !obj->ent)
+ return 0;
+
+ debugfs_remove(obj->ent);
+ obj->ent = NULL;
+ put_obj(obj);
+
+ return 0;
+}
+
+static int amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ amdgpu_ras_debugfs_remove(adev, &obj->head);
+ }
+
+ debugfs_remove(con->ent);
+ debugfs_remove(con->dir);
+ con->dir = NULL;
+ con->ent = NULL;
+
+ return 0;
+}
+/* debugfs end */
+
+/* ras fs */
+
+static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
+{
+ amdgpu_ras_sysfs_create_feature_node(adev);
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_fs_fini(struct amdgpu_device *adev)
+{
+ amdgpu_ras_debugfs_remove_all(adev);
+ amdgpu_ras_sysfs_remove_all(adev);
+ return 0;
+}
+/* ras fs end */
+
+/* ih begin */
+static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
+{
+ struct ras_ih_data *data = &obj->ih_data;
+ struct amdgpu_iv_entry entry;
+ int ret;
+
+ while (data->rptr != data->wptr) {
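+ /* the rmb() pairs with the wmb() in amdgpu_ras_interrupt_dispatch()
+ * so the entry is only read after the producer finished writing it
+ */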
+ rmb();
+ memcpy(&entry, &data->ring[data->rptr],
+ data->element_size);
+
+ wmb();
+ data->rptr = (data->aligned_element_size +
+ data->rptr) % data->ring_size;
+
+ /* Let the IP handle its data; maybe we need to get the output
+ * from the callback to update the error type/count, etc.
+ */
+ if (data->cb) {
+ ret = data->cb(obj->adev, &entry);
+ /* an UE will trigger an interrupt, and in that case
+ * we need to do a reset to recover the whole system.
+ * But leave that recovery to the IP; here we just dispatch
+ * the error.
+ */
+ if (ret == AMDGPU_RAS_UE) {
+ obj->err_data.ue_count++;
+ }
+ /* We might need to get the ce count from a register, but not all
+ * IPs save a ce count; some just use one or two bits
+ * to indicate that a ce happened.
+ */
+ }
+ }
+}
+
+static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
+{
+ struct ras_ih_data *data =
+ container_of(work, struct ras_ih_data, ih_work);
+ struct ras_manager *obj =
+ container_of(data, struct ras_manager, ih_data);
+
+ amdgpu_ras_interrupt_handler(obj);
+}
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+
+ if (data->inuse == 0)
+ return 0;
+
+ /* Might overflow: the ring has no overrun protection. */
+ memcpy(&data->ring[data->wptr], info->entry,
+ data->element_size);
+
+ wmb();
+ data->wptr = (data->aligned_element_size +
+ data->wptr) % data->ring_size;
+
+ schedule_work(&data->ih_work);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj)
+ return -EINVAL;
+
+ data = &obj->ih_data;
+ if (data->inuse == 0)
+ return 0;
+
+ cancel_work_sync(&data->ih_work);
+
+ kfree(data->ring);
+ memset(data, 0, sizeof(*data));
+ put_obj(obj);
+
+ return 0;
+}
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info)
+{
+ struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
+ struct ras_ih_data *data;
+
+ if (!obj) {
+ /* in case we register the IH before enabling the ras feature */
+ obj = amdgpu_ras_create_obj(adev, &info->head);
+ if (!obj)
+ return -EINVAL;
+ } else
+ get_obj(obj);
+
+ data = &obj->ih_data;
+ /* add the callback, etc. */
+ *data = (struct ras_ih_data) {
+ .inuse = 0,
+ .cb = info->cb,
+ .element_size = sizeof(struct amdgpu_iv_entry),
+ .rptr = 0,
+ .wptr = 0,
+ };
+
+ INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler);
+
+ data->aligned_element_size = ALIGN(data->element_size, 8);
+ /* the ring can store 64 iv entries. */
+ data->ring_size = 64 * data->aligned_element_size;
+ data->ring = kmalloc(data->ring_size, GFP_KERNEL);
+ if (!data->ring) {
+ put_obj(obj);
+ return -ENOMEM;
+ }
+
+ /* IH is ready */
+ data->inuse = 1;
+
+ return 0;
+}
+
+static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ struct ras_ih_if info = {
+ .head = obj->head,
+ };
+ amdgpu_ras_interrupt_remove_handler(adev, &info);
+ }
+
+ return 0;
+}
+/* ih end */
+
+/* recovery begin */
+static void amdgpu_ras_do_recovery(struct work_struct *work)
+{
+ struct amdgpu_ras *ras =
+ container_of(work, struct amdgpu_ras, recovery_work);
+
+ amdgpu_device_gpu_recover(ras->adev, 0);
+ atomic_set(&ras->in_recovery, 0);
+}
+
+static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+ struct amdgpu_bo **bo_ptr)
+{
+ /* no need to free it actually. */
+ amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
+ return 0;
+}
+
+/* reserve vram with size@offset */
+static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+ uint64_t offset, uint64_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_bo_param bp;
+ int r = 0;
+ int i;
+ struct amdgpu_bo *bo;
+
+ if (bo_ptr)
+ *bo_ptr = NULL;
+ memset(&bp, 0, sizeof(bp));
+ bp.size = size;
+ bp.byte_align = PAGE_SIZE;
+ bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+ bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+ bp.type = ttm_bo_type_kernel;
+ bp.resv = NULL;
+
+ r = amdgpu_bo_create(adev, &bp, &bo);
+ if (r)
+ return -EINVAL;
+
+ r = amdgpu_bo_reserve(bo, false);
+ if (r)
+ goto error_reserve;
+
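+ /* Restrict the placement window to exactly [offset, offset + size) so
+ * the pinned BO covers the bad page and it cannot be handed out again.
+ */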
+ offset = ALIGN(offset, PAGE_SIZE);
+ for (i = 0; i < bo->placement.num_placement; ++i) {
+ bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+ bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+ }
+
+ ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+ r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
+ if (r)
+ goto error_pin;
+
+ r = amdgpu_bo_pin_restricted(bo,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ offset,
+ offset + size);
+ if (r)
+ goto error_pin;
+
+ if (bo_ptr)
+ *bo_ptr = bo;
+
+ amdgpu_bo_unreserve(bo);
+ return r;
+
+error_pin:
+ amdgpu_bo_unreserve(bo);
+error_reserve:
+ amdgpu_bo_unref(&bo);
+ return r;
+}
+
+/* alloc/realloc bps array */
+static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
+ struct ras_err_handler_data *data, int pages)
+{
+ unsigned int old_space = data->count + data->space_left;
+ unsigned int new_space = old_space + pages;
+ unsigned int align_space = ALIGN(new_space, 1024);
+ void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);
+
+ if (!tmp)
+ return -ENOMEM;
+
+ if (data->bps) {
+ memcpy(tmp, data->bps,
+ data->count * sizeof(*data->bps));
+ kfree(data->bps);
+ }
+
+ data->bps = tmp;
+ data->space_left += align_space - old_space;
+ return 0;
+}
+
+/* it deals with vram only. */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ int i = pages;
+ int ret = 0;
+
+ if (!con || !con->eh_data || !bps || pages <= 0)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
+ if (data->space_left <= pages)
+ if (amdgpu_ras_realloc_eh_data_space(adev, data, pages)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ while (i--)
+ data->bps[data->count++].bp = bps[i];
+
+ data->space_left -= pages;
+out:
+ mutex_unlock(&con->recovery_lock);
+
+ return ret;
+}
+
+/* called in gpu recovery/init */
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ uint64_t bp;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+ /* reserve vram at driver post stage. */
+ for (i = data->last_reserved; i < data->count; i++) {
+ bp = data->bps[i].bp;
+
+ if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
+ PAGE_SIZE, &bo))
+ DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i + 1;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+/* called when the driver unloads */
+static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ struct amdgpu_bo *bo;
+ int i;
+
+ if (!con || !con->eh_data)
+ return 0;
+
+ mutex_lock(&con->recovery_lock);
+ data = con->eh_data;
+ if (!data)
+ goto out;
+
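+ /* free the reserved BOs in reverse order and keep last_reserved in sync */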
+ for (i = data->last_reserved - 1; i >= 0; i--) {
+ bo = data->bps[i].bo;
+
+ amdgpu_ras_release_vram(adev, &bo);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i;
+ }
+out:
+ mutex_unlock(&con->recovery_lock);
+ return 0;
+}
+
+static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * write the array to eeprom when the SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev)
+{
+ /* TODO
+ * read the array from eeprom when the SMU is disabled.
+ */
+ return 0;
+}
+
+static int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data **data = &con->eh_data;
+
+ *data = kmalloc(sizeof(**data),
+ GFP_KERNEL|__GFP_ZERO);
+ if (!*data)
+ return -ENOMEM;
+
+ mutex_init(&con->recovery_lock);
+ INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery);
+ atomic_set(&con->in_recovery, 0);
+ con->adev = adev;
+
+ amdgpu_ras_load_bad_pages(adev);
+ amdgpu_ras_reserve_bad_pages(adev);
+
+ return 0;
+}
+
+static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data = con->eh_data;
+
+ cancel_work_sync(&con->recovery_work);
+ amdgpu_ras_save_bad_pages(adev);
+ amdgpu_ras_release_bad_pages(adev);
+
+ mutex_lock(&con->recovery_lock);
+ con->eh_data = NULL;
+ kfree(data->bps);
+ kfree(data);
+ mutex_unlock(&con->recovery_lock);
+
+ return 0;
+}
+/* recovery end */
+
+/*
+ * check the hardware's ras ability, which will be saved in hw_supported.
+ * if the hardware does not support ras, we can skip some ras initialization
+ * and forbid some ras operations from the IPs.
+ * if the software itself, say a boot parameter, limits the ras ability, we
+ * still need to allow the IPs to do some limited operations, like disable.
+ * In such a case, we have to initialize ras as normal, but need to check
+ * in each function whether the operation is allowed.
+ */
+static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
+ uint32_t *hw_supported, uint32_t *supported)
+{
+ *hw_supported = 0;
+ *supported = 0;
+
+ if (amdgpu_sriov_vf(adev) ||
+ adev->asic_type != CHIP_VEGA20)
+ return;
+
+ if (adev->is_atom_fw &&
+ (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
+ amdgpu_atomfirmware_sram_ecc_supported(adev)))
+ *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+
+ *supported = amdgpu_ras_enable == 0 ?
+ 0 : *hw_supported & amdgpu_ras_mask;
+}
+
+int amdgpu_ras_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (con)
+ return 0;
+
+ con = kmalloc(sizeof(struct amdgpu_ras) +
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+ GFP_KERNEL|__GFP_ZERO);
+ if (!con)
+ return -ENOMEM;
+
+ con->objs = (struct ras_manager *)(con + 1);
+
+ amdgpu_ras_set_context(adev, con);
+
+ amdgpu_ras_check_supported(adev, &con->hw_supported,
+ &con->supported);
+ con->features = 0;
+ INIT_LIST_HEAD(&con->head);
+ /* We might need to get this flag from the vbios. */
+ con->flags = RAS_DEFAULT_FLAGS;
+
+ if (amdgpu_ras_recovery_init(adev))
+ goto recovery_out;
+
+ amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK;
+
+ if (amdgpu_ras_fs_init(adev))
+ goto fs_out;
+
+ amdgpu_ras_self_test(adev);
+
+ DRM_INFO("RAS INFO: ras initialized successfully, "
+ "hardware ability[%x] ras_mask[%x]\n",
+ con->hw_supported, con->supported);
+ return 0;
+fs_out:
+ amdgpu_ras_recovery_fini(adev);
+recovery_out:
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return -EINVAL;
+}
+
+/* do some init work after IP late init, since it depends on IP late init */
+void amdgpu_ras_post_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj, *tmp;
+
+ if (!con)
+ return;
+
+ if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
+ /* Set up all the other IPs which have not implemented ras yet.
+ * One tricky point: the IP's actual ras error type should be
+ * MULTI_UNCORRECTABLE, but since the driver does not handle it,
+ * ERROR_NONE makes sense anyway.
+ */
+ amdgpu_ras_enable_all_features(adev, 1);
+
+ /* We enable ras on all hw_supported blocks, but the boot
+ * parameter might disable some of them, and one or more IPs may
+ * not have implemented ras yet. So we disable those on their behalf.
+ */
+ list_for_each_entry_safe(obj, tmp, &con->head, node) {
+ if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
+ amdgpu_ras_feature_enable(adev, &obj->head, 0);
+ /* there should not be any reference left. */
+ WARN_ON(alive_obj(obj));
+ }
+ }
+ }
+}
+
+/* do some fini work before IP fini, since IP fini depends on it */
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+ /* Need to disable ras on all IPs here before ip [hw/sw]fini */
+ amdgpu_ras_disable_all_features(adev, 0);
+ amdgpu_ras_recovery_fini(adev);
+ return 0;
+}
+
+int amdgpu_ras_fini(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return 0;
+
+ amdgpu_ras_fs_fini(adev);
+ amdgpu_ras_interrupt_remove_all(adev);
+
+ WARN(con->features, "Feature mask is not cleared");
+
+ if (con->features)
+ amdgpu_ras_disable_all_features(adev, 1);
+
+ amdgpu_ras_set_context(adev, NULL);
+ kfree(con);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
new file mode 100644
index 000000000000..eaef5edefc34
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ */
+#ifndef _AMDGPU_RAS_H
+#define _AMDGPU_RAS_H
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include "amdgpu.h"
+#include "amdgpu_psp.h"
+#include "ta_ras_if.h"
+
+enum amdgpu_ras_block {
+ AMDGPU_RAS_BLOCK__UMC = 0,
+ AMDGPU_RAS_BLOCK__SDMA,
+ AMDGPU_RAS_BLOCK__GFX,
+ AMDGPU_RAS_BLOCK__MMHUB,
+ AMDGPU_RAS_BLOCK__ATHUB,
+ AMDGPU_RAS_BLOCK__PCIE_BIF,
+ AMDGPU_RAS_BLOCK__HDP,
+ AMDGPU_RAS_BLOCK__XGMI_WAFL,
+ AMDGPU_RAS_BLOCK__DF,
+ AMDGPU_RAS_BLOCK__SMN,
+ AMDGPU_RAS_BLOCK__SEM,
+ AMDGPU_RAS_BLOCK__MP0,
+ AMDGPU_RAS_BLOCK__MP1,
+ AMDGPU_RAS_BLOCK__FUSE,
+
+ AMDGPU_RAS_BLOCK__LAST
+};
+
+#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
+
+enum amdgpu_ras_error_type {
+ AMDGPU_RAS_ERROR__NONE = 0,
+ AMDGPU_RAS_ERROR__PARITY = 1,
+ AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ AMDGPU_RAS_ERROR__POISON = 8,
+};
+
+enum amdgpu_ras_ret {
+ AMDGPU_RAS_SUCCESS = 0,
+ AMDGPU_RAS_FAIL,
+ AMDGPU_RAS_UE,
+ AMDGPU_RAS_CE,
+ AMDGPU_RAS_PT,
+};
+
+struct ras_common_if {
+ enum amdgpu_ras_block block;
+ enum amdgpu_ras_error_type type;
+ uint32_t sub_block_index;
+ /* block name */
+ char name[32];
+};
+
+typedef int (*ras_ih_cb)(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+struct amdgpu_ras {
+ /* ras infrastructure */
+ /* for ras itself. */
+ uint32_t hw_supported;
+ /* for IP to check its ras ability. */
+ uint32_t supported;
+ uint32_t features;
+ struct list_head head;
+ /* debugfs */
+ struct dentry *dir;
+ /* debugfs ctrl */
+ struct dentry *ent;
+ /* sysfs */
+ struct device_attribute features_attr;
+ /* block array */
+ struct ras_manager *objs;
+
+ /* gpu recovery */
+ struct work_struct recovery_work;
+ atomic_t in_recovery;
+ struct amdgpu_device *adev;
+ /* error handler data */
+ struct ras_err_handler_data *eh_data;
+ struct mutex recovery_lock;
+
+ uint32_t flags;
+};
+
+/* interfaces for IP */
+
+struct ras_fs_if {
+ struct ras_common_if head;
+ char sysfs_name[32];
+ char debugfs_name[32];
+};
+
+struct ras_query_if {
+ struct ras_common_if head;
+ unsigned long ue_count;
+ unsigned long ce_count;
+};
+
+struct ras_inject_if {
+ struct ras_common_if head;
+ uint64_t address;
+ uint64_t value;
+};
+
+struct ras_cure_if {
+ struct ras_common_if head;
+ uint64_t address;
+};
+
+struct ras_ih_if {
+ struct ras_common_if head;
+ ras_ih_cb cb;
+};
+
+struct ras_dispatch_if {
+ struct ras_common_if head;
+ struct amdgpu_iv_entry *entry;
+};
+
+struct ras_debug_if {
+ union {
+ struct ras_common_if head;
+ struct ras_inject_if inject;
+ };
+ int op;
+};
+/* work flow
+ * vbios
+ * 1: ras feature enable (enabled by default)
+ * psp
+ * 2: ras framework init (in ip_init)
+ * IP
+ * 3: IH add
+ * 4: debugfs/sysfs create
+ * 5: query/inject
+ * 6: debugfs/sysfs remove
+ * 7: IH remove
+ * 8: feature disable
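+ *
+ * A rough sketch of the IP side (everything other than the ras interfaces
+ * declared in this header, e.g. the my_ip_ras_cb callback, is illustrative
+ * only):
+ *
+ *	struct ras_common_if head = {
+ *		.block = AMDGPU_RAS_BLOCK__GFX,
+ *		.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ *		.sub_block_index = 0,
+ *		.name = "gfx",
+ *	};
+ *	struct ras_fs_if fs_info = {
+ *		.head = head,
+ *		.sysfs_name = "gfx_err_count",
+ *		.debugfs_name = "gfx_err_inject",
+ *	};
+ *	struct ras_ih_if ih_info = { .head = head, .cb = my_ip_ras_cb };
+ *
+ *	amdgpu_ras_feature_enable_on_boot(adev, &head, true);
+ *	amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ *	amdgpu_ras_sysfs_create(adev, &fs_info);
+ *	amdgpu_ras_debugfs_create(adev, &fs_info);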
+ */
+
+#define amdgpu_ras_get_context(adev) ((adev)->psp.ras.ras)
+#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras.ras = (ras_con))
+
+/* check if ras is supported on block, say, sdma, gfx */
+static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
+ unsigned int block)
+{
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ return ras && (ras->supported & (1 << block));
+}
+
+int amdgpu_ras_query_error_count(struct amdgpu_device *adev,
+ bool is_ce);
+
+/* error handling functions */
+int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+ unsigned long *bps, int pages);
+
+int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev);
+
+static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev,
+ bool is_baco)
+{
+ /* remove me when gpu reset works on vega20 A1. */
+#if 0
+ struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
+
+ if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
+ schedule_work(&ras->recovery_work);
+#endif
+ return 0;
+}
+
+static inline enum ta_ras_block
+amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
+ switch (block) {
+ case AMDGPU_RAS_BLOCK__UMC:
+ return TA_RAS_BLOCK__UMC;
+ case AMDGPU_RAS_BLOCK__SDMA:
+ return TA_RAS_BLOCK__SDMA;
+ case AMDGPU_RAS_BLOCK__GFX:
+ return TA_RAS_BLOCK__GFX;
+ case AMDGPU_RAS_BLOCK__MMHUB:
+ return TA_RAS_BLOCK__MMHUB;
+ case AMDGPU_RAS_BLOCK__ATHUB:
+ return TA_RAS_BLOCK__ATHUB;
+ case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ return TA_RAS_BLOCK__PCIE_BIF;
+ case AMDGPU_RAS_BLOCK__HDP:
+ return TA_RAS_BLOCK__HDP;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ return TA_RAS_BLOCK__XGMI_WAFL;
+ case AMDGPU_RAS_BLOCK__DF:
+ return TA_RAS_BLOCK__DF;
+ case AMDGPU_RAS_BLOCK__SMN:
+ return TA_RAS_BLOCK__SMN;
+ case AMDGPU_RAS_BLOCK__SEM:
+ return TA_RAS_BLOCK__SEM;
+ case AMDGPU_RAS_BLOCK__MP0:
+ return TA_RAS_BLOCK__MP0;
+ case AMDGPU_RAS_BLOCK__MP1:
+ return TA_RAS_BLOCK__MP1;
+ case AMDGPU_RAS_BLOCK__FUSE:
+ return TA_RAS_BLOCK__FUSE;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
+ return TA_RAS_BLOCK__UMC;
+ }
+}
+
+static inline enum ta_ras_error_type
+amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) {
+ switch (error) {
+ case AMDGPU_RAS_ERROR__NONE:
+ return TA_RAS_ERROR__NONE;
+ case AMDGPU_RAS_ERROR__PARITY:
+ return TA_RAS_ERROR__PARITY;
+ case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE:
+ return TA_RAS_ERROR__SINGLE_CORRECTABLE;
+ case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
+ return TA_RAS_ERROR__MULTI_UNCORRECTABLE;
+ case AMDGPU_RAS_ERROR__POISON:
+ return TA_RAS_ERROR__POISON;
+ default:
+ WARN_ONCE(1, "RAS ERROR: unexpected error type %d\n", error);
+ return TA_RAS_ERROR__NONE;
+ }
+}
+
+/* called in ip_init and ip_fini */
+int amdgpu_ras_init(struct amdgpu_device *adev);
+void amdgpu_ras_post_init(struct amdgpu_device *adev);
+int amdgpu_ras_fini(struct amdgpu_device *adev);
+int amdgpu_ras_pre_fini(struct amdgpu_device *adev);
+
+int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
+ struct ras_common_if *head, bool enable);
+
+int amdgpu_ras_sysfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
+ struct ras_fs_if *head);
+
+int amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
+ struct ras_common_if *head);
+
+int amdgpu_ras_error_query(struct amdgpu_device *adev,
+ struct ras_query_if *info);
+
+int amdgpu_ras_error_inject(struct amdgpu_device *adev,
+ struct ras_inject_if *info);
+
+int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev,
+ struct ras_ih_if *info);
+
+int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
+ struct ras_dispatch_if *info);
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 335a0edf114b..8f5026c123ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -248,6 +248,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
sched_hw_submission = max(sched_hw_submission, 256);
+ else if (ring == &adev->sdma.instance[0].page)
+ sched_hw_submission = 256;
if (ring->adev == NULL) {
if (adev->num_rings >= AMDGPU_MAX_RINGS)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 16b1a6ae5ba6..1ba9ba3b54f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -28,9 +28,8 @@
#define AMDGPU_MAX_SDMA_INSTANCES 2
enum amdgpu_sdma_irq {
- AMDGPU_SDMA_IRQ_TRAP0 = 0,
- AMDGPU_SDMA_IRQ_TRAP1,
-
+ AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
+ AMDGPU_SDMA_IRQ_INSTANCE1,
AMDGPU_SDMA_IRQ_LAST
};
@@ -49,9 +48,11 @@ struct amdgpu_sdma {
struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
struct amdgpu_irq_src trap_irq;
struct amdgpu_irq_src illegal_inst_irq;
+ struct amdgpu_irq_src ecc_irq;
int num_instances;
uint32_t srbm_soft_reset;
bool has_page_queue;
+ struct ras_common_if *ras_if;
};
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 73e71e61dc99..0c52d1f9fe0f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -50,8 +50,6 @@
#include "amdgpu_sdma.h"
#include "bif/bif_4_1_d.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
@@ -1424,6 +1422,13 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
struct dma_fence *f;
int i;
+ /* Don't evict VM page tables while they are busy, otherwise we can't
+ * cleanly handle page faults.
+ */
+ if (bo->type == ttm_bo_type_kernel &&
+ !reservation_object_test_signaled_rcu(bo->resv, true))
+ return false;
+
/* If bo is a KFD BO, check if the bo belongs to the current process.
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
@@ -1671,7 +1676,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = ttm_bo_device_init(&adev->mman.bdev,
&amdgpu_bo_driver,
adev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
adev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1877,14 +1881,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct amdgpu_device *adev;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct amdgpu_device *adev = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- adev = file_priv->minor->dev->dev_private;
if (adev == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 462a04e0f5e6..7d484fad3909 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -36,6 +36,7 @@ void amdgpu_virt_init_setting(struct amdgpu_device *adev)
/* enable virtual display */
adev->mode_info.num_crtc = 1;
adev->enable_virtual_display = true;
+ adev->ddev->driver->driver_features &= ~DRIVER_ATOMIC;
adev->cg_flags = 0;
adev->pg_flags = 0;
}
@@ -375,4 +376,53 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
}
}
+static uint32_t parse_clk(char *buf, bool min)
+{
+ char *ptr = buf;
+ uint32_t clk = 0;
+
+ do {
+ ptr = strchr(ptr, ':');
+ if (!ptr)
+ break;
+ ptr+=2;
+ clk = simple_strtoul(ptr, NULL, 10);
+ } while (!min);
+
+ return clk * 100;
+}
+
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
+
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest)
+{
+ char *buf = NULL;
+ uint32_t clk = 0;
+
+ buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ adev->virt.ops->get_pp_clk(adev, PP_MCLK, buf);
+ clk = parse_clk(buf, lowest);
+
+ kfree(buf);
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 722deefc0a7e..584947b7ccf3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -57,6 +57,8 @@ struct amdgpu_virt_ops {
int (*reset_gpu)(struct amdgpu_device *adev);
int (*wait_reset)(struct amdgpu_device *adev);
void (*trans_msg)(struct amdgpu_device *adev, u32 req, u32 data1, u32 data2, u32 data3);
+ int (*get_pp_clk)(struct amdgpu_device *adev, u32 type, char *buf);
+ int (*force_dpm_level)(struct amdgpu_device *adev, u32 level);
};
/*
@@ -83,6 +85,8 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_GIM_LOAD_UCODES = 0x2,
/* VRAM LOST by GIM */
AMDGIM_FEATURE_GIM_FLR_VRAMLOST = 0x4,
+ /* HW PERF SIM in GIM */
+ AMDGIM_FEATURE_HW_PERF_SIMULATION = (1 << 3),
};
struct amd_sriov_msg_pf2vf_info_header {
@@ -252,6 +256,8 @@ struct amdgpu_virt {
struct amdgpu_vf_error_buffer vf_errors;
struct amdgpu_virt_fw_reserve fw_reserve;
uint32_t gim_feature;
+ /* protect DPM events to GIM */
+ struct mutex dpm_mutex;
};
#define amdgpu_sriov_enabled(adev) \
@@ -278,6 +284,9 @@ static inline bool is_virtual_machine(void)
#endif
}
+#define amdgim_is_hwperf(adev) \
+ ((adev)->virt.gim_feature & AMDGIM_FEATURE_HW_PERF_SIMULATION)
+
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
void amdgpu_virt_init_setting(struct amdgpu_device *adev);
uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
@@ -295,5 +304,7 @@ int amdgpu_virt_fw_reserve_get_checksum(void *obj, unsigned long obj_size,
unsigned int key,
unsigned int chksum);
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev);
+uint32_t amdgpu_virt_get_sclk(struct amdgpu_device *adev, bool lowest);
+uint32_t amdgpu_virt_get_mclk(struct amdgpu_device *adev, bool lowest);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 16fcb56c232b..a07c85815b7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -34,6 +34,7 @@
#include "amdgpu_trace.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gmc.h"
+#include "amdgpu_xgmi.h"
/**
* DOC: GPUVM
@@ -66,50 +67,6 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef LAST
/**
- * struct amdgpu_pte_update_params - Local structure
- *
- * Encapsulate some VM table update parameters to reduce
- * the number of function parameters
- *
- */
-struct amdgpu_pte_update_params {
-
- /**
- * @adev: amdgpu device we do this update for
- */
- struct amdgpu_device *adev;
-
- /**
- * @vm: optional amdgpu_vm we do this update for
- */
- struct amdgpu_vm *vm;
-
- /**
- * @src: address where to copy page table entries from
- */
- uint64_t src;
-
- /**
- * @ib: indirect buffer to fill with commands
- */
- struct amdgpu_ib *ib;
-
- /**
- * @func: Function which actually does the update
- */
- void (*func)(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo, uint64_t pe,
- uint64_t addr, unsigned count, uint32_t incr,
- uint64_t flags);
- /**
- * @pages_addr:
- *
- * DMA addresses to use for mapping, used during VM update by CPU
- */
- dma_addr_t *pages_addr;
-};
-
-/**
* struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
*/
struct amdgpu_prt_cb {
@@ -183,6 +140,22 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_num_ats_entries - return the number of ATS entries in the root PD
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns:
+ * The number of entries in the root page directory which need the ATS setting.
+ */
+static unsigned amdgpu_vm_num_ats_entries(struct amdgpu_device *adev)
+{
+ unsigned shift;
+
+ shift = amdgpu_vm_level_shift(adev, adev->vm_manager.root_level);
+ return AMDGPU_GMC_HOLE_START >> (shift + AMDGPU_GPU_PAGE_SHIFT);
+}
+
+/**
* amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
*
* @adev: amdgpu_device pointer
@@ -333,7 +306,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
vm->bulk_moveable = false;
- if (bo->tbo.type == ttm_bo_type_kernel)
+ if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
amdgpu_vm_bo_relocated(base);
else
amdgpu_vm_bo_idle(base);
@@ -505,61 +478,39 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
+ * amdgpu_vm_pt_first_dfs - start a deep first search
*
- * @adev: amdgpu_device pointer
+ * @adev: amdgpu_device structure
* @vm: amdgpu_vm structure
- * @start: start addr of the walk
* @cursor: state to initialize
*
- * Start a walk and go directly to the leaf node.
- */
-static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, uint64_t start,
- struct amdgpu_vm_pt_cursor *cursor)
-{
- amdgpu_vm_pt_start(adev, vm, start, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
-}
-
-/**
- * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
- *
- * @adev: amdgpu_device pointer
- * @cursor: current state
- *
- * Walk the PD/PT tree to the next leaf node.
+ * Starts a deep first traversal of the PD/PT tree.
*/
-static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start,
struct amdgpu_vm_pt_cursor *cursor)
{
- amdgpu_vm_pt_next(adev, cursor);
- if (cursor->pfn != ~0ll)
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ if (start)
+ *cursor = *start;
+ else
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+ while (amdgpu_vm_pt_descendant(adev, cursor));
}
/**
- * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
- */
-#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
- for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
- (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
-
-/**
- * amdgpu_vm_pt_first_dfs - start a deep first search
+ * amdgpu_vm_pt_continue_dfs - check if the deep first search should continue
*
- * @adev: amdgpu_device structure
- * @vm: amdgpu_vm structure
- * @cursor: state to initialize
+ * @start: starting point for the search
+ * @entry: current entry
*
- * Starts a deep first traversal of the PD/PT tree.
+ * Returns:
+ * True when the search should continue, false otherwise.
*/
-static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt_cursor *cursor)
+static bool amdgpu_vm_pt_continue_dfs(struct amdgpu_vm_pt_cursor *start,
+ struct amdgpu_vm_pt *entry)
{
- amdgpu_vm_pt_start(adev, vm, 0, cursor);
- while (amdgpu_vm_pt_descendant(adev, cursor));
+ return entry && (!start || entry != start->entry);
}
/**
@@ -587,11 +538,11 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
/**
* for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs
*/
-#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
- for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), (start), &(cursor)), \
(entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
- (entry); (entry) = (cursor).entry, \
- amdgpu_vm_pt_next_dfs((adev), &(cursor)))
+ amdgpu_vm_pt_continue_dfs((start), (entry)); \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor)))
/**
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
@@ -712,18 +663,11 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
if (bo->tbo.type != ttm_bo_type_kernel) {
amdgpu_vm_bo_moved(bo_base);
} else {
- if (vm->use_cpu_for_update)
- r = amdgpu_bo_kmap(bo, NULL);
+ vm->update_funcs->map_table(bo);
+ if (bo->parent)
+ amdgpu_vm_bo_relocated(bo_base);
else
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
- if (r)
- break;
- if (bo->shadow) {
- r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
- if (r)
- break;
- }
- amdgpu_vm_bo_relocated(bo_base);
+ amdgpu_vm_bo_idle(bo_base);
}
}
@@ -751,8 +695,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @vm: VM to clear BO from
* @bo: BO to clear
- * @level: level this BO is at
- * @pte_support_ats: indicate ATS support from PTE
*
* Root PD needs to be reserved when calling this.
*
@@ -760,99 +702,112 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
* 0 on success, errno otherwise.
*/
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
- struct amdgpu_vm *vm, struct amdgpu_bo *bo,
- unsigned level, bool pte_support_ats)
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
{
struct ttm_operation_ctx ctx = { true, false };
- struct dma_fence *fence = NULL;
+ unsigned level = adev->vm_manager.root_level;
+ struct amdgpu_vm_update_params params;
+ struct amdgpu_bo *ancestor = bo;
unsigned entries, ats_entries;
- struct amdgpu_ring *ring;
- struct amdgpu_job *job;
uint64_t addr;
int r;
+ /* Figure out our place in the hierarchy */
+ if (ancestor->parent) {
+ ++level;
+ while (ancestor->parent->parent) {
+ ++level;
+ ancestor = ancestor->parent;
+ }
+ }
+
entries = amdgpu_bo_size(bo) / 8;
+ if (!vm->pte_support_ats) {
+ ats_entries = 0;
+
+ } else if (!bo->parent) {
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+
+ } else {
+ struct amdgpu_vm_pt *pt;
- if (pte_support_ats) {
- if (level == adev->vm_manager.root_level) {
- ats_entries = amdgpu_vm_level_shift(adev, level);
- ats_entries += AMDGPU_GPU_PAGE_SHIFT;
- ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
- ats_entries = min(ats_entries, entries);
- entries -= ats_entries;
+ pt = container_of(ancestor->vm_bo, struct amdgpu_vm_pt, base);
+ ats_entries = amdgpu_vm_num_ats_entries(adev);
+ if ((pt - vm->root.entries) >= ats_entries) {
+ ats_entries = 0;
} else {
ats_entries = entries;
entries = 0;
}
- } else {
- ats_entries = 0;
}
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
- goto error;
+ return r;
+
+ if (bo->shadow) {
+ r = ttm_bo_validate(&bo->shadow->tbo, &bo->shadow->placement,
+ &ctx);
+ if (r)
+ return r;
+ }
- r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ r = vm->update_funcs->map_table(bo);
if (r)
return r;
- r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ memset(&params, 0, sizeof(params));
+ params.adev = adev;
+ params.vm = vm;
+
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
if (r)
- goto error;
+ return r;
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = 0;
if (ats_entries) {
- uint64_t ats_value;
+ uint64_t value = 0, flags;
- ats_value = AMDGPU_PTE_DEFAULT_ATC;
- if (level != AMDGPU_VM_PTB)
- ats_value |= AMDGPU_PDE_PTE;
+ flags = AMDGPU_PTE_DEFAULT_ATC;
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level, &value, &flags);
+ }
+
+ r = vm->update_funcs->update(&params, bo, addr, 0, ats_entries,
+ value, flags);
+ if (r)
+ return r;
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- ats_entries, 0, ats_value);
addr += ats_entries * 8;
}
if (entries) {
- uint64_t value = 0;
-
- /* Workaround for fault priority problem on GMC9 */
- if (level == AMDGPU_VM_PTB && adev->asic_type >= CHIP_VEGA10)
- value = AMDGPU_PTE_EXECUTABLE;
+ uint64_t value = 0, flags = 0;
+
+ if (adev->asic_type >= CHIP_VEGA10) {
+ if (level != AMDGPU_VM_PTB) {
+ /* Handle leaf PDEs as PTEs */
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(adev, level,
+ &value, &flags);
+ } else {
+ /* Workaround for fault priority problem on GMC9 */
+ flags = AMDGPU_PTE_EXECUTABLE;
+ }
+ }
- amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
- entries, 0, value);
+ r = vm->update_funcs->update(&params, bo, addr, 0, entries,
+ value, flags);
+ if (r)
+ return r;
}
- amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-
- WARN_ON(job->ibs[0].length_dw > 64);
- r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
- if (r)
- goto error_free;
-
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
- &fence);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(bo, fence, true);
- dma_fence_put(fence);
-
- if (bo->shadow)
- return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
- level, pte_support_ats);
-
- return 0;
-
-error_free:
- amdgpu_job_free(job);
-
-error:
- return r;
+ return vm->update_funcs->commit(&params, NULL);
}
/**
@@ -883,89 +838,56 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_alloc_pts - Allocate page tables.
+ * amdgpu_vm_alloc_pts - Allocate a specific page table
*
* @adev: amdgpu_device pointer
* @vm: VM to allocate page tables for
- * @saddr: Start address which needs to be allocated
- * @size: Size from start address we need.
+ * @cursor: Which page table to allocate
*
- * Make sure the page directories and page tables are allocated
+ * Make sure a specific page table or directory is allocated.
*
* Returns:
- * 0 on success, errno otherwise.
+ * 1 if page table needed to be allocated, 0 if page table was already
+ * allocated, negative errno if an error occurred.
*/
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size)
+static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor)
{
- struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_pt *entry = cursor->entry;
+ struct amdgpu_bo_param bp;
struct amdgpu_bo *pt;
- bool ats = false;
- uint64_t eaddr;
int r;
- /* validate the parameters */
- if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
- return -EINVAL;
-
- eaddr = saddr + size - 1;
-
- if (vm->pte_support_ats)
- ats = saddr < AMDGPU_GMC_HOLE_START;
-
- saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ if (cursor->level < AMDGPU_VM_PTB && !entry->entries) {
+ unsigned num_entries;
- if (eaddr >= adev->vm_manager.max_pfn) {
- dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- eaddr, adev->vm_manager.max_pfn);
- return -EINVAL;
+ num_entries = amdgpu_vm_num_entries(adev, cursor->level);
+ entry->entries = kvmalloc_array(num_entries,
+ sizeof(*entry->entries),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!entry->entries)
+ return -ENOMEM;
}
- for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
- struct amdgpu_vm_pt *entry = cursor.entry;
- struct amdgpu_bo_param bp;
-
- if (cursor.level < AMDGPU_VM_PTB) {
- unsigned num_entries;
-
- num_entries = amdgpu_vm_num_entries(adev, cursor.level);
- entry->entries = kvmalloc_array(num_entries,
- sizeof(*entry->entries),
- GFP_KERNEL |
- __GFP_ZERO);
- if (!entry->entries)
- return -ENOMEM;
- }
-
-
- if (entry->base.bo)
- continue;
-
- amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
+ if (entry->base.bo)
+ return 0;
- r = amdgpu_bo_create(adev, &bp, &pt);
- if (r)
- return r;
+ amdgpu_vm_bo_param(adev, vm, cursor->level, &bp);
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(pt, NULL);
- if (r)
- goto error_free_pt;
- }
-
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
+ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
- amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ /* Keep a reference to the root directory to avoid
+ * freeing them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(cursor->parent->base.bo);
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
- if (r)
- goto error_free_pt;
- }
+ r = amdgpu_vm_clear_bo(adev, vm, pt);
+ if (r)
+ goto error_free_pt;
return 0;
@@ -976,31 +898,45 @@ error_free_pt:
}
/**
+ * amdgpu_vm_free_table - free one PD/PT
+ *
+ * @entry: PDE to free
+ */
+static void amdgpu_vm_free_table(struct amdgpu_vm_pt *entry)
+{
+ if (entry->base.bo) {
+ entry->base.bo->vm_bo = NULL;
+ list_del(&entry->base.vm_status);
+ amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&entry->base.bo);
+ }
+ kvfree(entry->entries);
+ entry->entries = NULL;
+}
+
+/**
* amdgpu_vm_free_pts - free PD/PT levels
*
* @adev: amdgpu device structure
* @vm: amdgpu vm structure
+ * @start: optional cursor to start freeing PDs/PTs from
*
* Free the page directory or page table level and all sub levels.
*/
static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *start)
{
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
+ vm->bulk_moveable = false;
- if (entry->base.bo) {
- entry->base.bo->vm_bo = NULL;
- list_del(&entry->base.vm_status);
- amdgpu_bo_unref(&entry->base.bo->shadow);
- amdgpu_bo_unref(&entry->base.bo);
- }
- kvfree(entry->entries);
- }
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry)
+ amdgpu_vm_free_table(entry);
- BUG_ON(vm->root.base.bo);
+ if (start)
+ amdgpu_vm_free_table(start->entry);
}
/**
@@ -1212,66 +1148,6 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
}
/**
- * amdgpu_vm_do_set_ptes - helper to call the right asic function
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the right asic functions
- * to setup the page table using the DMA.
- */
-static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- if (count < 3) {
- amdgpu_vm_write_pte(params->adev, params->ib, pe,
- addr | flags, count, incr);
-
- } else {
- amdgpu_vm_set_pte_pde(params->adev, params->ib, pe, addr,
- count, incr, flags);
- }
-}
-
-/**
- * amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Traces the parameters and calls the DMA function to copy the PTEs.
- */
-static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- uint64_t src = (params->src + (addr >> 12) * 8);
-
- pe += amdgpu_bo_gpu_offset(bo);
- trace_amdgpu_vm_copy_ptes(pe, src, count);
-
- amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
-}
-
-/**
* amdgpu_vm_map_gart - Resolve gart mapping of addr
*
* @pages_addr: optional DMA address to use for lookup
@@ -1283,7 +1159,7 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* Returns:
* The pointer for the page table entry.
*/
-static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
uint64_t result;
@@ -1298,88 +1174,31 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
return result;
}
-/**
- * amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
- *
- * @params: see amdgpu_pte_update_params definition
- * @bo: PD/PT to update
- * @pe: kmap addr of the page entry
- * @addr: dst addr to write into pe
- * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
- *
- * Write count number of PT/PD entries directly.
- */
-static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- unsigned int i;
- uint64_t value;
-
- pe += (unsigned long)amdgpu_bo_kptr(bo);
-
- trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
-
- for (i = 0; i < count; i++) {
- value = params->pages_addr ?
- amdgpu_vm_map_gart(params->pages_addr, addr) :
- addr;
- amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
- i, value, flags);
- addr += incr;
- }
-}
-
-/**
- * amdgpu_vm_update_func - helper to call update function
- *
- * Calls the update function for both the given BO as well as its shadow.
- */
-static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
- struct amdgpu_bo *bo,
- uint64_t pe, uint64_t addr,
- unsigned count, uint32_t incr,
- uint64_t flags)
-{
- if (bo->shadow)
- params->func(params, bo->shadow, pe, addr, count, incr, flags);
- params->func(params, bo, pe, addr, count, incr, flags);
-}
-
/*
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
* @param: parameters for the update
* @vm: requested vm
- * @parent: parent directory
* @entry: entry to update
*
* Makes sure the requested entry in parent is up to date.
*/
-static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- struct amdgpu_vm_pt *entry)
+static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt *entry)
{
+ struct amdgpu_vm_pt *parent = amdgpu_vm_pt_parent(entry);
struct amdgpu_bo *bo = parent->base.bo, *pbo;
uint64_t pde, pt, flags;
unsigned level;
- /* Don't update huge pages here */
- if (entry->huge)
- return;
-
for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
level += params->adev->vm_manager.root_level;
amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
pde = (entry - parent->entries) * 8;
- amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
+ return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags);
}
/*
@@ -1396,7 +1215,7 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
struct amdgpu_vm_pt_cursor cursor;
struct amdgpu_vm_pt *entry;
- for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, NULL, cursor, entry)
if (entry->base.bo && !entry->base.moved)
amdgpu_vm_bo_relocated(&entry->base);
}
@@ -1415,89 +1234,39 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
- struct amdgpu_pte_update_params params;
- struct amdgpu_job *job;
- unsigned ndw = 0;
- int r = 0;
+ struct amdgpu_vm_update_params params;
+ int r;
if (list_empty(&vm->relocated))
return 0;
-restart:
memset(&params, 0, sizeof(params));
params.adev = adev;
+ params.vm = vm;
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_sync_wait(vm->root.base.bo,
- AMDGPU_FENCE_OWNER_VM, true);
- if (unlikely(r))
- return r;
-
- params.func = amdgpu_vm_cpu_set_ptes;
- } else {
- ndw = 512 * 8;
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
- if (r)
- return r;
-
- params.ib = &job->ibs[0];
- params.func = amdgpu_vm_do_set_ptes;
- }
+ r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ if (r)
+ return r;
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_pt *pt, *entry;
+ struct amdgpu_vm_pt *entry;
entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
base.vm_status);
amdgpu_vm_bo_idle(&entry->base);
- pt = amdgpu_vm_pt_parent(entry);
- if (!pt)
- continue;
-
- amdgpu_vm_update_pde(&params, vm, pt, entry);
-
- if (!vm->use_cpu_for_update &&
- (ndw - params.ib->length_dw) < 32)
- break;
- }
-
- if (vm->use_cpu_for_update) {
- /* Flush HDP */
- mb();
- amdgpu_asic_flush_hdp(adev, NULL);
- } else if (params.ib->length_dw == 0) {
- amdgpu_job_free(job);
- } else {
- struct amdgpu_bo *root = vm->root.base.bo;
- struct amdgpu_ring *ring;
- struct dma_fence *fence;
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
- sched);
-
- amdgpu_ring_pad_ib(ring, params.ib);
- amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
- &fence);
+ r = amdgpu_vm_update_pde(&params, vm, entry);
if (r)
goto error;
-
- amdgpu_bo_fence(root, fence, true);
- dma_fence_put(vm->last_update);
- vm->last_update = fence;
}
- if (!list_empty(&vm->relocated))
- goto restart;
-
+ r = vm->update_funcs->commit(&params, &vm->last_update);
+ if (r)
+ goto error;
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
- amdgpu_job_free(job);
return r;
}
@@ -1506,7 +1275,7 @@ error:
*
* Make sure to set the right flags for the PTEs at the desired level.
*/
-static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_update_flags(struct amdgpu_vm_update_params *params,
struct amdgpu_bo *bo, unsigned level,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
@@ -1525,13 +1294,14 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
flags |= AMDGPU_PTE_EXECUTABLE;
}
- amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
+ params->vm->update_funcs->update(params, bo, pe, addr, count, incr,
+ flags);
}
/**
* amdgpu_vm_fragment - get fragment for PTEs
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: first PTE to handle
* @end: last PTE to handle
* @flags: hw mapping flags
@@ -1540,7 +1310,7 @@ static void amdgpu_vm_update_flags(struct amdgpu_pte_update_params *params,
*
* Returns the first possible fragment for the start and end address.
*/
-static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
+static void amdgpu_vm_fragment(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end, uint64_t flags,
unsigned int *frag, uint64_t *frag_end)
{
@@ -1573,7 +1343,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
max_frag = 31;
/* system pages are non continuously */
- if (params->src) {
+ if (params->pages_addr) {
*frag = 0;
*frag_end = end;
return;
@@ -1592,7 +1362,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
- * @params: see amdgpu_pte_update_params definition
+ * @params: see amdgpu_vm_update_params definition
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
@@ -1603,7 +1373,7 @@ static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
* Returns:
* 0 for success, -EINVAL for failure.
*/
-static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
+static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t start, uint64_t end,
uint64_t dst, uint64_t flags)
{
@@ -1611,6 +1381,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
struct amdgpu_vm_pt_cursor cursor;
uint64_t frag_start = start, frag_end;
unsigned int frag;
+ int r;
/* figure out the initial fragment */
amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
@@ -1618,12 +1389,15 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
/* walk over the address space and update the PTs */
amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
while (cursor.pfn < end) {
- struct amdgpu_bo *pt = cursor.entry->base.bo;
unsigned shift, parent_shift, mask;
uint64_t incr, entry_end, pe_start;
+ struct amdgpu_bo *pt;
- if (!pt)
- return -ENOENT;
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor);
+ if (r)
+ return r;
+
+ pt = cursor.entry->base.bo;
/* The root level can't be a huge page */
if (cursor.level == adev->vm_manager.root_level) {
@@ -1632,16 +1406,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
continue;
}
- /* If it isn't already handled it can't be a huge page */
- if (cursor.entry->huge) {
- /* Add the entry to the relocated list to update it. */
- cursor.entry->huge = false;
- amdgpu_vm_bo_relocated(&cursor.entry->base);
- }
-
shift = amdgpu_vm_level_shift(adev, cursor.level);
parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
- if (adev->asic_type < CHIP_VEGA10) {
+ if (adev->asic_type < CHIP_VEGA10 &&
+ (flags & AMDGPU_PTE_VALID)) {
/* No huge page support before GMC v9 */
if (cursor.level != AMDGPU_VM_PTB) {
if (!amdgpu_vm_pt_descendant(adev, &cursor))
@@ -1697,9 +1465,9 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
} while (frag_start < entry_end);
if (amdgpu_vm_pt_descendant(adev, &cursor)) {
- /* Mark all child entries as huge */
+ /* Free all child entries */
while (cursor.pfn < frag_start) {
- cursor.entry->huge = true;
+ amdgpu_vm_free_pts(adev, params->vm, &cursor);
amdgpu_vm_pt_next(adev, &cursor);
}
@@ -1738,137 +1506,28 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
uint64_t flags, uint64_t addr,
struct dma_fence **fence)
{
- struct amdgpu_ring *ring;
+ struct amdgpu_vm_update_params params;
void *owner = AMDGPU_FENCE_OWNER_VM;
- unsigned nptes, ncmds, ndw;
- struct amdgpu_job *job;
- struct amdgpu_pte_update_params params;
- struct dma_fence *f = NULL;
int r;
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
+ params.pages_addr = pages_addr;
/* sync to everything except eviction fences on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
owner = AMDGPU_FENCE_OWNER_KFD;
- if (vm->use_cpu_for_update) {
- /* params.src is used as flag to indicate system Memory */
- if (pages_addr)
- params.src = ~0;
-
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- r = amdgpu_bo_sync_wait(vm->root.base.bo, owner, true);
- if (unlikely(r))
- return r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- params.func = amdgpu_vm_cpu_set_ptes;
- params.pages_addr = pages_addr;
- return amdgpu_vm_update_ptes(&params, start, last + 1,
- addr, flags);
- }
-
- ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
-
- nptes = last - start + 1;
-
- /*
- * reserve space for two commands every (1 << BLOCK_SIZE)
- * entries or 2k dwords (whatever is smaller)
- */
- ncmds = ((nptes >> min(adev->vm_manager.block_size, 11u)) + 1);
-
- /* The second command is for the shadow pagetables. */
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- /* padding, etc. */
- ndw = 64;
-
- if (pages_addr) {
- /* copy commands needed */
- ndw += ncmds * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
-
- /* and also PTEs */
- ndw += nptes * 2;
-
- params.func = amdgpu_vm_do_copy_ptes;
-
- } else {
- /* set page commands needed */
- ndw += ncmds * 10;
-
- /* extra commands for begin/end fragments */
- ncmds = 2 * adev->vm_manager.fragment_size;
- if (vm->root.base.bo->shadow)
- ncmds *= 2;
-
- ndw += 10 * ncmds;
-
- params.func = amdgpu_vm_do_set_ptes;
- }
-
- r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);
+ r = vm->update_funcs->prepare(&params, owner, exclusive);
if (r)
return r;
- params.ib = &job->ibs[0];
-
- if (pages_addr) {
- uint64_t *pte;
- unsigned i;
-
- /* Put the PTEs at the end of the IB. */
- i = ndw - nptes * 2;
- pte= (uint64_t *)&(job->ibs->ptr[i]);
- params.src = job->ibs->gpu_addr + i * 4;
-
- for (i = 0; i < nptes; ++i) {
- pte[i] = amdgpu_vm_map_gart(pages_addr, addr + i *
- AMDGPU_GPU_PAGE_SIZE);
- pte[i] |= flags;
- }
- addr = 0;
- }
-
- r = amdgpu_sync_fence(adev, &job->sync, exclusive, false);
- if (r)
- goto error_free;
-
- r = amdgpu_sync_resv(adev, &job->sync, vm->root.base.bo->tbo.resv,
- owner, false);
- if (r)
- goto error_free;
-
r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
- goto error_free;
-
- amdgpu_ring_pad_ib(ring, params.ib);
- WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
- if (r)
- goto error_free;
-
- amdgpu_bo_fence(vm->root.base.bo, f, true);
- dma_fence_put(*fence);
- *fence = f;
- return 0;
+ return r;
-error_free:
- amdgpu_job_free(job);
- return r;
+ return vm->update_funcs->commit(&params, fence);
}
/**
@@ -1880,6 +1539,7 @@ error_free:
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
* @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer of the device the BO was actually allocated on
* @nodes: array of drm_mm_nodes with the MC addresses
* @fence: optional resulting fence
*
@@ -1895,6 +1555,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
uint64_t flags,
+ struct amdgpu_device *bo_adev,
struct drm_mm_node *nodes,
struct dma_fence **fence)
{
@@ -1949,7 +1610,6 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (pages_addr) {
uint64_t count;
- max_entries = min(max_entries, 16ull * 1024ull);
for (count = 1;
count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
++count) {
@@ -1969,7 +1629,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
}
} else if (flags & AMDGPU_PTE_VALID) {
- addr += adev->vm_manager.vram_base_offset;
+ addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
@@ -2016,6 +1676,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct drm_mm_node *nodes;
struct dma_fence *exclusive, **last_update;
uint64_t flags;
+ struct amdgpu_device *bo_adev = adev;
int r;
if (clear || !bo) {
@@ -2034,10 +1695,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
exclusive = reservation_object_get_excl(bo->tbo.resv);
}
- if (bo)
+ if (bo) {
flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
- else
+ bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ } else {
flags = 0x0;
+ }
if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
last_update = &vm->last_update;
@@ -2054,7 +1717,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
list_for_each_entry(mapping, &bo_va->invalids, list) {
r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
- mapping, flags, nodes,
+ mapping, flags, bo_adev, nodes,
last_update);
if (r)
return r;
@@ -2374,6 +2037,16 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
+ if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
+ (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
+ bo_va->is_xgmi = true;
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ /* Power up XGMI if it can be potentially used */
+ if (++adev->vm_manager.xgmi_map_counter == 1)
+ amdgpu_xgmi_set_pstate(adev, 1);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
return bo_va;
}
@@ -2792,6 +2465,14 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
}
dma_fence_put(bo_va->last_pt_update);
+
+ if (bo && bo_va->is_xgmi) {
+ mutex_lock(&adev->vm_manager.lock_pstate);
+ if (--adev->vm_manager.xgmi_map_counter == 0)
+ amdgpu_xgmi_set_pstate(adev, 0);
+ mutex_unlock(&adev->vm_manager.lock_pstate);
+ }
+
kfree(bo_va);
}
@@ -2949,20 +2630,16 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
adev->vm_manager.fragment_size);
}
-static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
+/**
+ * amdgpu_vm_wait_idle - wait for the VM to become idle
+ *
+ * @vm: VM object to wait for
+ * @timeout: timeout to wait for VM to become idle
+ */
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
- struct amdgpu_retryfault_hashtable *fault_hash;
-
- fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
- if (!fault_hash)
- return fault_hash;
-
- INIT_CHASH_TABLE(fault_hash->hash,
- AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spin_lock_init(&fault_hash->lock);
- fault_hash->count = 0;
-
- return fault_hash;
+ return reservation_object_wait_timeout_rcu(vm->root.base.bo->tbo.resv,
+ true, true, timeout);
}
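amdgpu_vm_wait_idle() only waits on the reservation object of the root PD, which all page
table BOs share, so a single call covers every pending table update. A hypothetical caller
(the timeout value and debug message below are illustrative, not part of this patch) might
look like:

	long timeout = amdgpu_vm_wait_idle(vm, msecs_to_jiffies(100));

	if (timeout <= 0)
		DRM_DEBUG("VM not idle yet (%ld)\n", timeout);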
/**
@@ -3018,6 +2695,11 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->use_cpu_for_update)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
@@ -3037,9 +2719,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
- r = amdgpu_vm_clear_bo(adev, vm, root,
- adev->vm_manager.root_level,
- vm->pte_support_ats);
+ r = amdgpu_vm_clear_bo(adev, vm, root);
if (r)
goto error_unreserve;
@@ -3058,12 +2738,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->pasid = pasid;
}
- vm->fault_hash = init_fault_hash();
- if (!vm->fault_hash) {
- r = -ENOMEM;
- goto error_free_root;
- }
-
INIT_KFIFO(vm->faults);
return 0;
@@ -3134,9 +2808,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
* changing any other state, in case it fails.
*/
if (pte_support_ats != vm->pte_support_ats) {
- r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
- adev->vm_manager.root_level,
- pte_support_ats);
+ vm->pte_support_ats = pte_support_ats;
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo);
if (r)
goto free_idr;
}
@@ -3144,7 +2817,6 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns
/* Update VM state */
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- vm->pte_support_ats = pte_support_ats;
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)),
@@ -3219,15 +2891,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
struct amdgpu_bo_va_mapping *mapping, *tmp;
bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
- u64 fault;
int i, r;
amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
- /* Clear pending page faults from IH when the VM is destroyed */
- while (kfifo_get(&vm->faults, &fault))
- amdgpu_vm_clear_fault(vm->fault_hash, fault);
-
if (vm->pasid) {
unsigned long flags;
@@ -3236,9 +2903,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- kfree(vm->fault_hash);
- vm->fault_hash = NULL;
-
drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
@@ -3267,10 +2931,11 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_pts(adev, vm);
+ amdgpu_vm_free_pts(adev, vm, NULL);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
+ WARN_ON(vm->root.base.bo);
dma_fence_put(vm->last_update);
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
amdgpu_vmid_free_reserved(adev, vm, i);
@@ -3315,6 +2980,9 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
idr_init(&adev->vm_manager.pasid_idr);
spin_lock_init(&adev->vm_manager.pasid_lock);
+
+ adev->vm_manager.xgmi_map_counter = 0;
+ mutex_init(&adev->vm_manager.lock_pstate);
}
/**
@@ -3405,78 +3073,3 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
}
}
}
-
-/**
- * amdgpu_vm_add_fault - Add a page fault record to fault hash table
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a retry page fault interrupt is
- * received. If this is a new page fault, it will be added to a hash
- * table. The return value indicates whether this is a new fault, or
- * a fault that was already known and is already being handled.
- *
- * If there are too many pending page faults, this will fail. Retry
- * interrupts should be ignored in this case until there is enough
- * free space.
- *
- * Returns 0 if the fault was added, 1 if the fault was already known,
- * -ENOSPC if there are too many pending faults.
- */
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r = -ENOSPC;
-
- if (WARN_ON_ONCE(!fault_hash))
- /* Should be allocated in amdgpu_vm_init
- */
- return r;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- /* Only let the hash table fill up to 50% for best performance */
- if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
- goto unlock_out;
-
- r = chash_table_copy_in(&fault_hash->hash, key, NULL);
- if (!r)
- fault_hash->count++;
-
- /* chash_table_copy_in should never fail unless we're losing count */
- WARN_ON_ONCE(r < 0);
-
-unlock_out:
- spin_unlock_irqrestore(&fault_hash->lock, flags);
- return r;
-}
-
-/**
- * amdgpu_vm_clear_fault - Remove a page fault record
- *
- * @fault_hash: fault hash table
- * @key: 64-bit encoding of PASID and address
- *
- * This should be called when a page fault has been handled. Any
- * future interrupt with this key will be processed as a new
- * page fault.
- */
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
-{
- unsigned long flags;
- int r;
-
- if (!fault_hash)
- return;
-
- spin_lock_irqsave(&fault_hash->lock, flags);
-
- r = chash_table_remove(&fault_hash->hash, key, NULL);
- if (!WARN_ON_ONCE(r < 0)) {
- fault_hash->count--;
- WARN_ON_ONCE(fault_hash->count < 0);
- }
-
- spin_unlock_irqrestore(&fault_hash->lock, flags);
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 81ff8177f092..91baf95212a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -30,7 +30,6 @@
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
-#include <linux/chash.h>
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
@@ -140,7 +139,6 @@ struct amdgpu_vm_bo_base {
struct amdgpu_vm_pt {
struct amdgpu_vm_bo_base base;
- bool huge;
/* array of page tables, one for each directory entry */
struct amdgpu_vm_pt *entries;
@@ -167,11 +165,6 @@ struct amdgpu_vm_pte_funcs {
uint32_t incr, uint64_t flags);
};
-#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
-#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
-#define AMDGPU_VM_FAULT_ADDR(fault) ((u64)(fault) & 0xfffffffff000ULL)
-
-
struct amdgpu_task_info {
char process_name[TASK_COMM_LEN];
char task_name[TASK_COMM_LEN];
@@ -179,11 +172,52 @@ struct amdgpu_task_info {
pid_t tgid;
};
-#define AMDGPU_PAGEFAULT_HASH_BITS 8
-struct amdgpu_retryfault_hashtable {
- DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
- spinlock_t lock;
- int count;
+/**
+ * struct amdgpu_vm_update_params
+ *
+ * Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ *
+ */
+struct amdgpu_vm_update_params {
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
+ struct amdgpu_device *adev;
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
+ struct amdgpu_vm *vm;
+
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping
+ */
+ dma_addr_t *pages_addr;
+
+ /**
+ * @job: job used for hw submission
+ */
+ struct amdgpu_job *job;
+
+ /**
+ * @num_dw_left: number of dw left for the IB
+ */
+ unsigned int num_dw_left;
+};
+
+struct amdgpu_vm_update_funcs {
+ int (*map_table)(struct amdgpu_bo *bo);
+ int (*prepare)(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive);
+ int (*update)(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr, uint64_t flags);
+ int (*commit)(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence);
};
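The callers added in amdgpu_vm.c drive these hooks in a fixed order; a minimal sketch of
that sequence, condensed from amdgpu_vm_clear_bo() and amdgpu_vm_update_directories()
above (the real code checks the return value of every hook, omitted here):

	struct amdgpu_vm_update_params params = { .adev = adev, .vm = vm };

	vm->update_funcs->map_table(bo);	/* kmap (CPU) or GART map (SDMA) the PD/PT */
	vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
	vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
	vm->update_funcs->commit(&params, &vm->last_update);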
struct amdgpu_vm {
@@ -221,7 +255,10 @@ struct amdgpu_vm {
struct amdgpu_vmid *reserved_vmid[AMDGPU_MAX_VMHUBS];
/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
- bool use_cpu_for_update;
+ bool use_cpu_for_update;
+
+ /* Functions to use for VM table updates */
+ const struct amdgpu_vm_update_funcs *update_funcs;
/* Flag to indicate ATS support from PTE for GFX9 */
bool pte_support_ats;
@@ -245,7 +282,6 @@ struct amdgpu_vm {
struct ttm_lru_bulk_move lru_bulk_move;
/* mark whether can do the bulk move */
bool bulk_moveable;
- struct amdgpu_retryfault_hashtable *fault_hash;
};
struct amdgpu_vm_manager {
@@ -267,6 +303,7 @@ struct amdgpu_vm_manager {
const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS];
unsigned vm_pte_num_rqs;
+ struct amdgpu_ring *page_fault;
/* partial resident texture handling */
spinlock_t prt_lock;
@@ -283,14 +320,23 @@ struct amdgpu_vm_manager {
*/
struct idr pasid_idr;
spinlock_t pasid_lock;
+
+ /* counter of mapped memory through xgmi */
+ uint32_t xgmi_map_counter;
+ struct mutex lock_pstate;
};
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
+extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;
+
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+
+long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid);
@@ -303,9 +349,6 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*callback)(void *p, struct amdgpu_bo *bo),
void *param);
-int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- uint64_t saddr, uint64_t size);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
@@ -319,6 +362,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
struct amdgpu_bo *bo, bool evicted);
+uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -358,11 +402,6 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
-
-int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
-void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key);
-
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
new file mode 100644
index 000000000000..5222d165abfc
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+/**
+ * amdgpu_vm_cpu_map_table - make sure new PDs/PTs are kmapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
+{
+ return amdgpu_bo_kmap(table, NULL);
+}
+
+/**
+ * amdgpu_vm_cpu_prepare - prepare page table update with the CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
+ struct dma_fence *exclusive)
+{
+ int r;
+
+ /* Wait for PT BOs to be idle. PTs share the same resv. object
+ * as the root PD BO
+ */
+ r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ if (unlikely(r))
+ return r;
+
+ /* Wait for any BO move to be completed */
+ if (exclusive) {
+ r = dma_fence_wait(exclusive, true);
+ if (unlikely(r))
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_update - helper to update page tables via CPU
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: kmap addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Write count number of PT/PD entries directly.
+ */
+static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i;
+ uint64_t value;
+
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+
+ for (i = 0; i < count; i++) {
+ value = p->pages_addr ?
+ amdgpu_vm_map_gart(p->pages_addr, addr) :
+ addr;
+ amdgpu_gmc_set_pte_pde(p->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
+ addr += incr;
+ }
+ return 0;
+}
+
+/**
+ * amdgpu_vm_cpu_commit - commit page table update to the HW
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: unused
+ *
+ * Make sure that the hardware sees the page table updates.
+ */
+static int amdgpu_vm_cpu_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ /* Flush HDP */
+ mb();
+ amdgpu_asic_flush_hdp(p->adev, NULL);
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs = {
+ .map_table = amdgpu_vm_cpu_map_table,
+ .prepare = amdgpu_vm_cpu_prepare,
+ .update = amdgpu_vm_cpu_update,
+ .commit = amdgpu_vm_cpu_commit
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
new file mode 100644
index 000000000000..ddd181f5ed37
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -0,0 +1,270 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu_vm.h"
+#include "amdgpu_job.h"
+#include "amdgpu_object.h"
+#include "amdgpu_trace.h"
+
+#define AMDGPU_VM_SDMA_MIN_NUM_DW 256u
+#define AMDGPU_VM_SDMA_MAX_NUM_DW (16u * 1024u)
+
+/**
+ * amdgpu_vm_sdma_map_table - make sure new PDs/PTs are GTT mapped
+ *
+ * @table: newly allocated or validated PD/PT
+ */
+static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
+{
+ int r;
+
+ r = amdgpu_ttm_alloc_gart(&table->tbo);
+ if (r)
+ return r;
+
+ if (table->shadow)
+ r = amdgpu_ttm_alloc_gart(&table->shadow->tbo);
+
+ return r;
+}
+
+/**
+ * amdgpu_vm_sdma_prepare - prepare SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @owner: owner we need to sync to
+ * @exclusive: exclusive move fence we need to sync to
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
+ void *owner, struct dma_fence *exclusive)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+ int r;
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+ if (r)
+ return r;
+
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.resv,
+ owner, false);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ return 0;
+}
+
+/**
+ * amdgpu_vm_sdma_commit - commit SDMA command submission
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @fence: resulting fence
+ *
+ * Returns:
+ * Negative errno, 0 for success.
+ */
+static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
+ struct dma_fence **fence)
+{
+ struct amdgpu_bo *root = p->vm->root.base.bo;
+ struct amdgpu_ib *ib = p->job->ibs;
+ struct amdgpu_ring *ring;
+ struct dma_fence *f;
+ int r;
+
+ ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+
+ WARN_ON(ib->length_dw == 0);
+ amdgpu_ring_pad_ib(ring, ib);
+ WARN_ON(ib->length_dw > p->num_dw_left);
+ r = amdgpu_job_submit(p->job, &p->vm->entity,
+ AMDGPU_FENCE_OWNER_VM, &f);
+ if (r)
+ goto error;
+
+ amdgpu_bo_fence(root, f, true);
+ if (fence)
+ swap(*fence, f);
+ dma_fence_put(f);
+ return 0;
+
+error:
+ amdgpu_job_free(p->job);
+ return r;
+}
+
+
+/**
+ * amdgpu_vm_sdma_copy_ptes - copy the PTEs from the mapping
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @count: number of page entries to copy
+ *
+ * Traces the parameters and calls the DMA function to copy the PTEs.
+ */
+static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ unsigned count)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+ uint64_t src = ib->gpu_addr;
+
+ src += p->num_dw_left * 4;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_copy_ptes(pe, src, count);
+
+ amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
+}
+
+/**
+ * amdgpu_vm_sdma_set_ptes - helper to call the right asic function
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Traces the parameters and calls the right asic functions
+ * to setup the page table using the DMA.
+ */
+static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint64_t flags)
+{
+ struct amdgpu_ib *ib = p->job->ibs;
+
+ pe += amdgpu_bo_gpu_offset(bo);
+ trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
+ if (count < 3) {
+ amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
+ count, incr);
+ } else {
+ amdgpu_vm_set_pte_pde(p->adev, ib, pe, addr,
+ count, incr, flags);
+ }
+}
+
+/**
+ * amdgpu_vm_sdma_update - execute VM update
+ *
+ * @p: see amdgpu_vm_update_params definition
+ * @bo: PD/PT to update
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Reserve space in the IB, set up the mapping buffer on demand and write the
+ * commands to the IB.
+ */
+static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
+ struct amdgpu_bo *bo, uint64_t pe,
+ uint64_t addr, unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ unsigned int i, ndw, nptes;
+ uint64_t *pte;
+ int r;
+
+ do {
+ ndw = p->num_dw_left;
+ ndw -= p->job->ibs->length_dw;
+
+ if (ndw < 32) {
+ r = amdgpu_vm_sdma_commit(p, NULL);
+ if (r)
+ return r;
+
+ /* estimate how many dw we need */
+ ndw = 32;
+ if (p->pages_addr)
+ ndw += count * 2;
+ ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
+ ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
+
+ r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, &p->job);
+ if (r)
+ return r;
+
+ p->num_dw_left = ndw;
+ }
+
+ if (!p->pages_addr) {
+ /* set page commands needed */
+ if (bo->shadow)
+ amdgpu_vm_sdma_set_ptes(p, bo->shadow, pe, addr,
+ count, incr, flags);
+ amdgpu_vm_sdma_set_ptes(p, bo, pe, addr, count,
+ incr, flags);
+ return 0;
+ }
+
+ /* copy commands needed */
+ ndw -= p->adev->vm_manager.vm_pte_funcs->copy_pte_num_dw *
+ (bo->shadow ? 2 : 1);
+
+ /* for padding */
+ ndw -= 7;
+
+ nptes = min(count, ndw / 2);
+
+ /* Put the PTEs at the end of the IB. */
+ p->num_dw_left -= nptes * 2;
+ pte = (uint64_t *)&(p->job->ibs->ptr[p->num_dw_left]);
+ for (i = 0; i < nptes; ++i, addr += incr) {
+ pte[i] = amdgpu_vm_map_gart(p->pages_addr, addr);
+ pte[i] |= flags;
+ }
+
+ if (bo->shadow)
+ amdgpu_vm_sdma_copy_ptes(p, bo->shadow, pe, nptes);
+ amdgpu_vm_sdma_copy_ptes(p, bo, pe, nptes);
+
+ pe += nptes * 8;
+ count -= nptes;
+ } while (count);
+
+ return 0;
+}
+
+const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs = {
+ .map_table = amdgpu_vm_sdma_map_table,
+ .prepare = amdgpu_vm_sdma_prepare,
+ .update = amdgpu_vm_sdma_update,
+ .commit = amdgpu_vm_sdma_commit
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 3f9d5d00c9b3..ec9ea3fdbb4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -33,6 +33,85 @@ struct amdgpu_vram_mgr {
};
/**
+ * DOC: mem_info_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the total amount of
+ * VRAM available on the device.
+ * The file mem_info_vram_total is used for this and returns the total
+ * amount of VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.real_vram_size);
+}
+
+/**
+ * DOC: mem_info_vis_vram_total
+ *
+ * The amdgpu driver provides a sysfs API for reporting the total amount of
+ * visible VRAM available on the device.
+ * The file mem_info_vis_vram_total is used for this and returns the total
+ * amount of visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_total_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.visible_vram_size);
+}
+
+/**
+ * DOC: mem_info_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of VRAM
+ * currently in use on the device.
+ * The file mem_info_vram_used is used for this and returns the total
+ * amount of currently used VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+/**
+ * DOC: mem_info_vis_vram_used
+ *
+ * The amdgpu driver provides a sysfs API for reporting the amount of visible
+ * VRAM currently in use on the device.
+ * The file mem_info_vis_vram_used is used for this and returns the total
+ * amount of currently used visible VRAM in bytes.
+ */
+static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]));
+}
+
+static DEVICE_ATTR(mem_info_vram_total, S_IRUGO,
+ amdgpu_mem_info_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO,
+ amdgpu_mem_info_vis_vram_total_show, NULL);
+static DEVICE_ATTR(mem_info_vram_used, S_IRUGO,
+ amdgpu_mem_info_vram_used_show, NULL);
+static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO,
+ amdgpu_mem_info_vis_vram_used_show, NULL);
+
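The four attributes above are plain read-only sysfs files. A hedged userspace sketch
reading one of them follows; the /sys/class/drm/card0 path is an assumption about the
test system, and the other three files are read the same way:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bytes;
		FILE *f = fopen("/sys/class/drm/card0/device/mem_info_vram_used", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%llu", &bytes) == 1)
			printf("VRAM in use: %llu bytes\n", bytes);
		fclose(f);
		return 0;
	}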
+/**
* amdgpu_vram_mgr_init - init VRAM manager and DRM MM
*
* @man: TTM memory type manager
@@ -43,7 +122,9 @@ struct amdgpu_vram_mgr {
static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
unsigned long p_size)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr;
+ int ret;
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
@@ -52,6 +133,29 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
drm_mm_init(&mgr->mm, 0, p_size);
spin_lock_init(&mgr->lock);
man->priv = mgr;
+
+ /* Add the two VRAM-related sysfs files */
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_total\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vram_used\n");
+ return ret;
+ }
+ ret = device_create_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
+ if (ret) {
+ DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n");
+ return ret;
+ }
+
return 0;
}
@@ -65,6 +169,7 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man,
*/
static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
@@ -72,6 +177,10 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man)
spin_unlock(&mgr->lock);
kfree(mgr);
man->priv = NULL;
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vram_used);
+ device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index 407dd16cc35c..a48c84c51775 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -24,6 +24,7 @@
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
+#include "amdgpu_smu.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -34,12 +35,132 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
-
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
}
+static ssize_t amdgpu_xgmi_show_hive_id(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct amdgpu_hive_info *hive =
+ container_of(attr, struct amdgpu_hive_info, dev_attr);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);
+}
+
+static int amdgpu_xgmi_sysfs_create(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+
+ if (WARN_ON(hive->kobj))
+ return -EINVAL;
+
+ hive->kobj = kobject_create_and_add("xgmi_hive_info", &adev->dev->kobj);
+ if (!hive->kobj) {
+ dev_err(adev->dev, "XGMI: Failed to allocate sysfs entry!\n");
+ return -EINVAL;
+ }
+
+ hive->dev_attr = (struct device_attribute) {
+ .attr = {
+ .name = "xgmi_hive_id",
+ .mode = S_IRUGO,
+
+ },
+ .show = amdgpu_xgmi_show_hive_id,
+ };
+
+ ret = sysfs_create_file(hive->kobj, &hive->dev_attr.attr);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_hive_id\n");
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+ }
+
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_destroy(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ sysfs_remove_file(hive->kobj, &hive->dev_attr.attr);
+ kobject_del(hive->kobj);
+ kobject_put(hive->kobj);
+ hive->kobj = NULL;
+}
+
+static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", adev->gmc.xgmi.node_id);
+
+}
+
+
+static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
+
+
+static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ int ret = 0;
+ char node[10] = { 0 };
+
+ /* Create xgmi device id file */
+ ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
+ return ret;
+ }
+
+ /* On all but the first device, link to the hive info folder on the first device */
+ if (adev != hive->adev) {
+ ret = sysfs_create_link(&adev->dev->kobj, hive->kobj,
+ "xgmi_hive_info");
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link to hive info");
+ goto remove_file;
+ }
+ }
+
+ sprintf(node, "node%d", hive->number_devices);
+ /* Create a sysfs link from the hive folder back to this device */
+ ret = sysfs_create_link(hive->kobj, &adev->dev->kobj, node);
+ if (ret) {
+ dev_err(adev->dev, "XGMI: Failed to create link from hive info");
+ goto remove_link;
+ }
+
+ goto success;
+
+
+remove_link:
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+
+remove_file:
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+
+success:
+ return ret;
+}
+
+static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
+ struct amdgpu_hive_info *hive)
+{
+ device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
+ sysfs_remove_link(&adev->dev->kobj, adev->ddev->unique);
+ sysfs_remove_link(hive->kobj, adev->ddev->unique);
+}
+
+
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock)
{
int i;
@@ -66,18 +187,50 @@ struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lo
/* initialize new hive if not exist */
tmp = &xgmi_hives[hive_count++];
+
+ if (amdgpu_xgmi_sysfs_create(adev, tmp)) {
+ mutex_unlock(&xgmi_mutex);
+ return NULL;
+ }
+
+ tmp->adev = adev;
tmp->hive_id = adev->gmc.xgmi.hive_id;
INIT_LIST_HEAD(&tmp->device_list);
mutex_init(&tmp->hive_lock);
mutex_init(&tmp->reset_lock);
+
if (lock)
mutex_lock(&tmp->hive_lock);
-
+ tmp->pstate = -1;
mutex_unlock(&xgmi_mutex);
return tmp;
}
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
+{
+ int ret = 0;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0);
+
+ if (!hive)
+ return 0;
+
+ if (hive->pstate == pstate)
+ return 0;
+
+ dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate);
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_xgmi_pstate(&adev->smu, pstate);
+ if (ret)
+ dev_err(adev->dev,
+ "XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
+ adev->gmc.xgmi.node_id,
+ adev->gmc.xgmi.hive_id, ret);
+
+ return ret;
+}
+
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
int ret = -EINVAL;
@@ -156,8 +309,17 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
break;
}
- dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
- adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ if (!ret)
+ ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);
+
+ if (!ret)
+ dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
+ else
+ dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
+ adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
+ ret);
+
mutex_unlock(&hive->hive_lock);
exit:
@@ -176,9 +338,11 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
return;
if (!(hive->number_devices--)) {
+ amdgpu_xgmi_sysfs_destroy(adev, hive);
mutex_destroy(&hive->hive_lock);
mutex_destroy(&hive->reset_lock);
} else {
+ amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 14bc60664159..3e9c91e9a4bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -29,13 +29,25 @@ struct amdgpu_hive_info {
struct list_head device_list;
struct psp_xgmi_topology_info topology_info;
int number_devices;
- struct mutex hive_lock,
- reset_lock;
+ struct mutex hive_lock, reset_lock;
+ struct kobject *kobj;
+ struct device_attribute dev_attr;
+ struct amdgpu_device *adev;
+ int pstate; /*0 -- low , 1 -- high , -1 unknown*/
};
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
+
+static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
+ struct amdgpu_device *bo_adev)
+{
+ return (adev != bo_adev &&
+ adev->gmc.xgmi.hive_id &&
+ adev->gmc.xgmi.hive_id == bo_adev->gmc.xgmi.hive_id);
+}
#endif
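For context, amdgpu_vm_bo_add() and amdgpu_vm_bo_rmv() in the amdgpu_vm.c hunks above are
the users of this helper: they keep xgmi_map_counter balanced so the XGMI pstate is raised
only while at least one cross-device VRAM mapping exists. Condensed from those hunks
(locking shown, error handling omitted):

	/* map side, amdgpu_vm_bo_add() */
	if (bo && amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM)) {
		bo_va->is_xgmi = true;
		mutex_lock(&adev->vm_manager.lock_pstate);
		if (++adev->vm_manager.xgmi_map_counter == 1)
			amdgpu_xgmi_set_pstate(adev, 1);
		mutex_unlock(&adev->vm_manager.lock_pstate);
	}

	/* unmap side, amdgpu_vm_bo_rmv() */
	if (bo && bo_va->is_xgmi) {
		mutex_lock(&adev->vm_manager.lock_pstate);
		if (--adev->vm_manager.xgmi_map_counter == 0)
			amdgpu_xgmi_set_pstate(adev, 0);
		mutex_unlock(&adev->vm_manager.lock_pstate);
	}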
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 189599b694e8..d42808b05971 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -977,8 +977,8 @@ static int cik_sdma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1114,7 +1114,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1130,7 +1130,7 @@ static int cik_sdma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index 305276c7e4bf..c0cb244f58cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -782,6 +782,25 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
+ tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16);
+ tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
+ tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
+ ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+ PIPE_CONFIG(ADDR_SURF_P4_8x16) |
+ BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+ BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+ MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+ NUM_BANKS(ADDR_SURF_16_BANK) |
+ TILE_SPLIT(split_equal_to_row_size);
tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index b8e50a34bdb3..02955e6e9dd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3236,6 +3236,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
dev_warn(adev->dev,
"Unknown chip type (%d) in function gfx_v8_0_tiling_mode_table_init() falling through to CHIP_CARRIZO\n",
adev->asic_type);
+ /* fall through */
case CHIP_CARRIZO:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index a11db2b1a63f..ba67d1023264 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -40,6 +40,8 @@
#include "ivsrcid/gfx/irqsrcs_gfx_9_0.h"
+#include "amdgpu_ras.h"
+
#define GFX9_NUM_GFX_RINGS 1
#define GFX9_MEC_HPD_SIZE 4096
#define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L
@@ -576,6 +578,27 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
}
}
+static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+{
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA12:
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+ if (adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+ break;
+ if ((adev->gfx.rlc_fw_version < 531) ||
+ (adev->gfx.rlc_fw_version == 53815) ||
+ (adev->gfx.rlc_feature_version < 1) ||
+ !adev->gfx.rlc.is_rlc_v2_1)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ break;
+ default:
+ break;
+ }
+}
+
static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
{
const char *chip_name;
@@ -828,6 +851,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
out:
+ gfx_v9_0_check_if_need_gfxoff(adev);
gfx_v9_0_check_fw_write_wait(adev);
if (err) {
dev_err(adev->dev,
@@ -1639,6 +1663,18 @@ static int gfx_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* ECC error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_ECC_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
+ /* FUE error */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_GRBM_CP, GFX_9_0__SRCID__CP_FUE_ERROR,
+ &adev->gfx.cp_ecc_error_irq);
+ if (r)
+ return r;
+
adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
gfx_v9_0_scratch_init(adev);
@@ -1731,6 +1767,20 @@ static int gfx_v9_0_sw_fini(void *handle)
int i;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
+ adev->gfx.ras_if) {
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);
@@ -3303,6 +3353,7 @@ static int gfx_v9_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
@@ -3492,6 +3543,80 @@ static int gfx_v9_0_early_init(void *handle)
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int gfx_v9_0_ecc_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gfx.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gfx_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "gfx_err_count",
+ .debugfs_name = "gfx_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__GFX,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "gfx",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
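The late-init helper above (and the UMC and SDMA variants later in this patch) all follow the same goto-based unwind idiom: each label is named after the step that failed, and jumping to it releases every previously acquired resource in reverse order. A stripped-down sketch of the idiom, with hypothetical step_*/undo_* names:

	static int example_late_init(void)
	{
		int r;

		r = step_a();
		if (r)
			goto a_failed;
		r = step_b();
		if (r)
			goto b_failed;
		r = step_c();
		if (r)
			goto c_failed;
		return 0;

	c_failed:
		undo_b();	/* fall through to also undo a */
	b_failed:
		undo_a();
	a_failed:
		return r;
	}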
static int gfx_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -3505,6 +3630,10 @@ static int gfx_v9_0_late_init(void *handle)
if (r)
return r;
+ r = gfx_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return 0;
}
@@ -4541,6 +4670,45 @@ static int gfx_v9_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
return 0;
}
+#define ENABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 1)
+
+#define DISABLE_ECC_ON_ME_PIPE(me, pipe) \
+ WREG32_FIELD15(GC, 0, CP_ME##me##_PIPE##pipe##_INT_CNTL,\
+ CP_ECC_ERROR_INT_ENABLE, 0)
+
+static int gfx_v9_0_set_cp_ecc_error_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 0);
+ DISABLE_ECC_ON_ME_PIPE(1, 1);
+ DISABLE_ECC_ON_ME_PIPE(1, 2);
+ DISABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+
+ case AMDGPU_IRQ_STATE_ENABLE:
+ WREG32_FIELD15(GC, 0, CP_INT_CNTL_RING0,
+ CP_ECC_ERROR_INT_ENABLE, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 0);
+ ENABLE_ECC_ON_ME_PIPE(1, 1);
+ ENABLE_ECC_ON_ME_PIPE(1, 2);
+ ENABLE_ECC_ON_ME_PIPE(1, 3);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+
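The ENABLE_/DISABLE_ECC_ON_ME_PIPE macros rely on token pasting to build the per-pipe register name, so the loop-like calls above keep the switch compact. For example, ENABLE_ECC_ON_ME_PIPE(1, 0) expands to:

	WREG32_FIELD15(GC, 0, CP_ME1_PIPE0_INT_CNTL,
		       CP_ECC_ERROR_INT_ENABLE, 1);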
static int gfx_v9_0_set_eop_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -4657,6 +4825,34 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ /* TODO: a UE (uncorrectable error) will trigger an interrupt. */
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gfx.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ DRM_ERROR("CP ECC ERROR IRQ\n");
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -4818,6 +5014,12 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = {
.process = gfx_v9_0_priv_inst_irq,
};
+static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = {
+ .set = gfx_v9_0_set_cp_ecc_error_state,
+ .process = gfx_v9_0_cp_ecc_error_irq,
+};
+
static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
@@ -4828,6 +5030,9 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
adev->gfx.priv_inst_irq.num_types = 1;
adev->gfx.priv_inst_irq.funcs = &gfx_v9_0_priv_inst_irq_funcs;
+
+ adev->gfx.cp_ecc_error_irq.num_types = 2; /* C5 ECC error and C9 FUE error */
+ adev->gfx.cp_ecc_error_irq.funcs = &gfx_v9_0_cp_ecc_error_irq_funcs;
}
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
index f5edddf3b29d..7bb5359d0bbd 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
@@ -143,7 +143,7 @@ static void gfxhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(GC, 0, mmVM_L2_CNTL, tmp);
@@ -236,7 +236,7 @@ static void gfxhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(GC, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 9fc3296592fe..b06d876da2d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -225,7 +225,7 @@ static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -383,20 +383,6 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
return pd_addr;
}
-static int gmc_v6_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -886,7 +872,7 @@ static int gmc_v6_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
dev_warn(adev->dev, "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v6_0_init_microcode(adev);
if (r) {
@@ -1169,7 +1155,6 @@ static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
- .set_pte_pde = gmc_v6_0_set_pte_pde,
.set_prt = gmc_v6_0_set_prt,
.get_vm_pde = gmc_v6_0_get_vm_pde,
.get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 761dcfb2fec0..75aa3332aee2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -242,7 +242,7 @@ static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -460,31 +460,6 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v7_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
- *
- * Update the page tables using the CPU.
- */
-static int gmc_v7_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- value = addr & 0xFFFFFFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
-
static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
{
@@ -1030,7 +1005,7 @@ static int gmc_v7_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v7_0_init_microcode(adev);
if (r) {
@@ -1376,7 +1351,6 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v7_0_set_pte_pde,
.set_prt = gmc_v7_0_set_prt,
.get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags,
.get_vm_pde = gmc_v7_0_get_vm_pde
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 34440672f938..8a3b5e6fc6c9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -433,7 +433,7 @@ static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;
base <<= 24;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
}
@@ -662,50 +662,26 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}
-/**
- * gmc_v8_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VI:
+ * 63:40 reserved
+ * 39:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 reserved
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VI:
+ * 63:59 block fragment size
+ * 58:40 reserved
+ * 39:1 physical base address of PTE
+ * bits 5:1 must be 0.
+ * 0 valid
*/
-static int gmc_v8_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VI:
- * 63:40 reserved
- * 39:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 reserved
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VI:
- * 63:59 block fragment size
- * 58:40 reserved
- * 39:1 physical base address of PTE
- * bits 5:1 must be 0.
- * 0 valid
- */
- value = addr & 0x000000FFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
-
- return 0;
-}
static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -1155,7 +1131,7 @@ static int gmc_v8_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
pr_warn("amdgpu: No coherent DMA available\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
r = gmc_v8_0_init_microcode(adev);
if (r) {
@@ -1743,7 +1719,6 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v8_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v8_0_set_pte_pde,
.set_prt = gmc_v8_0_set_prt,
.get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags,
.get_vm_pde = gmc_v8_0_get_vm_pde
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 2fe8397241ea..3fd79e07944d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -47,6 +47,8 @@
#include "ivsrcid/vmc/irqsrcs_vmc_1_0.h"
+#include "amdgpu_ras.h"
+
/* add these here since we already include dce12 headers and these are for DCN */
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d
#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
@@ -84,121 +86,182 @@ static const struct soc15_reg_golden golden_settings_athub_1_0_0[] =
SOC15_REG_GOLDEN_VALUE(ATHUB, 0, mmRPB_ARB_CNTL2, 0x00ff00ff, 0x00080008)
};
-/* Ecc related register addresses, (BASE + reg offset) */
-/* Universal Memory Controller caps (may be fused). */
-/* UMCCH:UmcLocalCap */
-#define UMCLOCALCAPS_ADDR0 (0x00014306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR1 (0x00014306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR2 (0x00014306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR3 (0x00014306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR4 (0x00054306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR5 (0x00054306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR6 (0x00054306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR7 (0x00054306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR8 (0x00094306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR9 (0x00094306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR10 (0x00094306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR11 (0x00094306 + 0x00001800)
-#define UMCLOCALCAPS_ADDR12 (0x000d4306 + 0x00000000)
-#define UMCLOCALCAPS_ADDR13 (0x000d4306 + 0x00000800)
-#define UMCLOCALCAPS_ADDR14 (0x000d4306 + 0x00001000)
-#define UMCLOCALCAPS_ADDR15 (0x000d4306 + 0x00001800)
-
-/* Universal Memory Controller Channel config. */
-/* UMCCH:UMC_CONFIG */
-#define UMCCH_UMC_CONFIG_ADDR0 (0x00014040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR1 (0x00014040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR2 (0x00014040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR3 (0x00014040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR4 (0x00054040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR5 (0x00054040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR6 (0x00054040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR7 (0x00054040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR8 (0x00094040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR9 (0x00094040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR10 (0x00094040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR11 (0x00094040 + 0x00001800)
-#define UMCCH_UMC_CONFIG_ADDR12 (0x000d4040 + 0x00000000)
-#define UMCCH_UMC_CONFIG_ADDR13 (0x000d4040 + 0x00000800)
-#define UMCCH_UMC_CONFIG_ADDR14 (0x000d4040 + 0x00001000)
-#define UMCCH_UMC_CONFIG_ADDR15 (0x000d4040 + 0x00001800)
-
-/* Universal Memory Controller Channel Ecc config. */
-/* UMCCH:EccCtrl */
-#define UMCCH_ECCCTRL_ADDR0 (0x00014053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR1 (0x00014053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR2 (0x00014053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR3 (0x00014053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR4 (0x00054053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR5 (0x00054053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR6 (0x00054053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR7 (0x00054053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR8 (0x00094053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR9 (0x00094053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR10 (0x00094053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR11 (0x00094053 + 0x00001800)
-#define UMCCH_ECCCTRL_ADDR12 (0x000d4053 + 0x00000000)
-#define UMCCH_ECCCTRL_ADDR13 (0x000d4053 + 0x00000800)
-#define UMCCH_ECCCTRL_ADDR14 (0x000d4053 + 0x00001000)
-#define UMCCH_ECCCTRL_ADDR15 (0x000d4053 + 0x00001800)
-
-static const uint32_t ecc_umclocalcap_addrs[] = {
- UMCLOCALCAPS_ADDR0,
- UMCLOCALCAPS_ADDR1,
- UMCLOCALCAPS_ADDR2,
- UMCLOCALCAPS_ADDR3,
- UMCLOCALCAPS_ADDR4,
- UMCLOCALCAPS_ADDR5,
- UMCLOCALCAPS_ADDR6,
- UMCLOCALCAPS_ADDR7,
- UMCLOCALCAPS_ADDR8,
- UMCLOCALCAPS_ADDR9,
- UMCLOCALCAPS_ADDR10,
- UMCLOCALCAPS_ADDR11,
- UMCLOCALCAPS_ADDR12,
- UMCLOCALCAPS_ADDR13,
- UMCLOCALCAPS_ADDR14,
- UMCLOCALCAPS_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_addrs[] = {
+ (0x000143c0 + 0x00000000),
+ (0x000143c0 + 0x00000800),
+ (0x000143c0 + 0x00001000),
+ (0x000143c0 + 0x00001800),
+ (0x000543c0 + 0x00000000),
+ (0x000543c0 + 0x00000800),
+ (0x000543c0 + 0x00001000),
+ (0x000543c0 + 0x00001800),
+ (0x000943c0 + 0x00000000),
+ (0x000943c0 + 0x00000800),
+ (0x000943c0 + 0x00001000),
+ (0x000943c0 + 0x00001800),
+ (0x000d43c0 + 0x00000000),
+ (0x000d43c0 + 0x00000800),
+ (0x000d43c0 + 0x00001000),
+ (0x000d43c0 + 0x00001800),
+ (0x001143c0 + 0x00000000),
+ (0x001143c0 + 0x00000800),
+ (0x001143c0 + 0x00001000),
+ (0x001143c0 + 0x00001800),
+ (0x001543c0 + 0x00000000),
+ (0x001543c0 + 0x00000800),
+ (0x001543c0 + 0x00001000),
+ (0x001543c0 + 0x00001800),
+ (0x001943c0 + 0x00000000),
+ (0x001943c0 + 0x00000800),
+ (0x001943c0 + 0x00001000),
+ (0x001943c0 + 0x00001800),
+ (0x001d43c0 + 0x00000000),
+ (0x001d43c0 + 0x00000800),
+ (0x001d43c0 + 0x00001000),
+ (0x001d43c0 + 0x00001800),
};
-static const uint32_t ecc_umcch_umc_config_addrs[] = {
- UMCCH_UMC_CONFIG_ADDR0,
- UMCCH_UMC_CONFIG_ADDR1,
- UMCCH_UMC_CONFIG_ADDR2,
- UMCCH_UMC_CONFIG_ADDR3,
- UMCCH_UMC_CONFIG_ADDR4,
- UMCCH_UMC_CONFIG_ADDR5,
- UMCCH_UMC_CONFIG_ADDR6,
- UMCCH_UMC_CONFIG_ADDR7,
- UMCCH_UMC_CONFIG_ADDR8,
- UMCCH_UMC_CONFIG_ADDR9,
- UMCCH_UMC_CONFIG_ADDR10,
- UMCCH_UMC_CONFIG_ADDR11,
- UMCCH_UMC_CONFIG_ADDR12,
- UMCCH_UMC_CONFIG_ADDR13,
- UMCCH_UMC_CONFIG_ADDR14,
- UMCCH_UMC_CONFIG_ADDR15,
+static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = {
+ (0x000143e0 + 0x00000000),
+ (0x000143e0 + 0x00000800),
+ (0x000143e0 + 0x00001000),
+ (0x000143e0 + 0x00001800),
+ (0x000543e0 + 0x00000000),
+ (0x000543e0 + 0x00000800),
+ (0x000543e0 + 0x00001000),
+ (0x000543e0 + 0x00001800),
+ (0x000943e0 + 0x00000000),
+ (0x000943e0 + 0x00000800),
+ (0x000943e0 + 0x00001000),
+ (0x000943e0 + 0x00001800),
+ (0x000d43e0 + 0x00000000),
+ (0x000d43e0 + 0x00000800),
+ (0x000d43e0 + 0x00001000),
+ (0x000d43e0 + 0x00001800),
+ (0x001143e0 + 0x00000000),
+ (0x001143e0 + 0x00000800),
+ (0x001143e0 + 0x00001000),
+ (0x001143e0 + 0x00001800),
+ (0x001543e0 + 0x00000000),
+ (0x001543e0 + 0x00000800),
+ (0x001543e0 + 0x00001000),
+ (0x001543e0 + 0x00001800),
+ (0x001943e0 + 0x00000000),
+ (0x001943e0 + 0x00000800),
+ (0x001943e0 + 0x00001000),
+ (0x001943e0 + 0x00001800),
+ (0x001d43e0 + 0x00000000),
+ (0x001d43e0 + 0x00000800),
+ (0x001d43e0 + 0x00001000),
+ (0x001d43e0 + 0x00001800),
};
-static const uint32_t ecc_umcch_eccctrl_addrs[] = {
- UMCCH_ECCCTRL_ADDR0,
- UMCCH_ECCCTRL_ADDR1,
- UMCCH_ECCCTRL_ADDR2,
- UMCCH_ECCCTRL_ADDR3,
- UMCCH_ECCCTRL_ADDR4,
- UMCCH_ECCCTRL_ADDR5,
- UMCCH_ECCCTRL_ADDR6,
- UMCCH_ECCCTRL_ADDR7,
- UMCCH_ECCCTRL_ADDR8,
- UMCCH_ECCCTRL_ADDR9,
- UMCCH_ECCCTRL_ADDR10,
- UMCCH_ECCCTRL_ADDR11,
- UMCCH_ECCCTRL_ADDR12,
- UMCCH_ECCCTRL_ADDR13,
- UMCCH_ECCCTRL_ADDR14,
- UMCCH_ECCCTRL_ADDR15,
+static const uint32_t ecc_umc_mcumc_status_addrs[] = {
+ (0x000143c2 + 0x00000000),
+ (0x000143c2 + 0x00000800),
+ (0x000143c2 + 0x00001000),
+ (0x000143c2 + 0x00001800),
+ (0x000543c2 + 0x00000000),
+ (0x000543c2 + 0x00000800),
+ (0x000543c2 + 0x00001000),
+ (0x000543c2 + 0x00001800),
+ (0x000943c2 + 0x00000000),
+ (0x000943c2 + 0x00000800),
+ (0x000943c2 + 0x00001000),
+ (0x000943c2 + 0x00001800),
+ (0x000d43c2 + 0x00000000),
+ (0x000d43c2 + 0x00000800),
+ (0x000d43c2 + 0x00001000),
+ (0x000d43c2 + 0x00001800),
+ (0x001143c2 + 0x00000000),
+ (0x001143c2 + 0x00000800),
+ (0x001143c2 + 0x00001000),
+ (0x001143c2 + 0x00001800),
+ (0x001543c2 + 0x00000000),
+ (0x001543c2 + 0x00000800),
+ (0x001543c2 + 0x00001000),
+ (0x001543c2 + 0x00001800),
+ (0x001943c2 + 0x00000000),
+ (0x001943c2 + 0x00000800),
+ (0x001943c2 + 0x00001000),
+ (0x001943c2 + 0x00001800),
+ (0x001d43c2 + 0x00000000),
+ (0x001d43c2 + 0x00000800),
+ (0x001d43c2 + 0x00001000),
+ (0x001d43c2 + 0x00001800),
};
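The three tables above enumerate the same per-channel UMC register across all instances; the addresses differ only by a fixed stride of 0x40000 between UMC instances and 0x800 between the four channels inside an instance. A hypothetical helper that computes one entry (illustrative only; the driver keeps the explicit tables):

	static uint32_t umc_reg_addr(uint32_t base, uint32_t umc_inst, uint32_t ch_inst)
	{
		/* e.g. base 0x000143c0 for the ctrl register, 0x000143e0 for the
		 * ctrl mask, 0x000143c2 for the status register */
		return base + umc_inst * 0x40000 + ch_inst * 0x800;
	}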
+static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *src,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 bits, i, tmp, reg;
+
+ bits = 0x7f;
+
+ switch (state) {
+ case AMDGPU_IRQ_STATE_DISABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp &= ~bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ case AMDGPU_IRQ_STATE_ENABLE:
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ for (i = 0; i < ARRAY_SIZE(ecc_umc_mcumc_ctrl_mask_addrs); i++) {
+ reg = ecc_umc_mcumc_ctrl_mask_addrs[i];
+ tmp = RREG32(reg);
+ tmp |= bits;
+ WREG32(reg, tmp);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+ amdgpu_ras_reset_gpu(adev, 0);
+ return AMDGPU_RAS_UE;
+}
+
+static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
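The handler above only packages the IV entry; the actual error handling runs through the RAS dispatcher, which invokes whatever callback was registered for this block at late-init time. A condensed sketch of the assumed flow (both calls appear later in this file):

	/* late init: register the per-block callback */
	struct ras_ih_if ih_info = {
		.head = *adev->gmc.ras_if,
		.cb   = gmc_v9_0_process_ras_data_cb,
	};
	amdgpu_ras_interrupt_add_handler(adev, &ih_info);

	/* interrupt time: dispatch routes the entry to that callback */
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);	/* -> ih_info.cb(adev, entry) */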
static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
struct amdgpu_irq_src *src,
unsigned type,
@@ -244,62 +307,6 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
return 0;
}
-/**
- * vega10_ih_prescreen_iv - prescreen an interrupt vector
- *
- * @adev: amdgpu_device pointer
- *
- * Returns true if the interrupt vector should be further processed.
- */
-static bool gmc_v9_0_prescreen_iv(struct amdgpu_device *adev,
- struct amdgpu_iv_entry *entry,
- uint64_t addr)
-{
- struct amdgpu_vm *vm;
- u64 key;
- int r;
-
- /* No PASID, can't identify faulting process */
- if (!entry->pasid)
- return true;
-
- /* Not a retry fault */
- if (!(entry->src_data[1] & 0x80))
- return true;
-
- /* Track retry faults in per-VM fault FIFO. */
- spin_lock(&adev->vm_manager.pasid_lock);
- vm = idr_find(&adev->vm_manager.pasid_idr, entry->pasid);
- if (!vm) {
- /* VM not found, process it normally */
- spin_unlock(&adev->vm_manager.pasid_lock);
- return true;
- }
-
- key = AMDGPU_VM_FAULT(entry->pasid, addr);
- r = amdgpu_vm_add_fault(vm->fault_hash, key);
-
- /* Hash table is full or the fault is already being processed,
- * ignore further page faults
- */
- if (r != 0) {
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
- /* No locking required with single writer and single reader */
- r = kfifo_put(&vm->faults, key);
- if (!r) {
- /* FIFO is full. Ignore it until there is space */
- amdgpu_vm_clear_fault(vm->fault_hash, key);
- spin_unlock(&adev->vm_manager.pasid_lock);
- return false;
- }
-
- spin_unlock(&adev->vm_manager.pasid_lock);
- /* It's the first fault for this address, process it normally */
- return true;
-}
-
static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -312,9 +319,11 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
addr = (u64)entry->src_data[0] << 12;
addr |= ((u64)entry->src_data[1] & 0xf) << 44;
- if (!gmc_v9_0_prescreen_iv(adev, entry, addr))
+ if (retry_fault && amdgpu_gmc_filter_faults(adev, addr, entry->pasid,
+ entry->timestamp))
return 1; /* This also prevents sending it to KFD */
+ /* If it's the first fault for this address, process it normally */
if (!amdgpu_sriov_vf(adev)) {
status = RREG32(hub->vm_l2_pro_fault_status);
WREG32_P(hub->vm_l2_pro_fault_cntl, 1, ~1);
@@ -350,10 +359,19 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = {
.process = gmc_v9_0_process_interrupt,
};
+
+static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = {
+ .set = gmc_v9_0_ecc_interrupt_state,
+ .process = gmc_v9_0_process_ecc_irq,
+};
+
static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->gmc.vm_fault.num_types = 1;
adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs;
+
+ adev->gmc.ecc_irq.num_types = 1;
+ adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs;
}
static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
@@ -466,64 +484,37 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid,
amdgpu_ring_emit_wreg(ring, reg, pasid);
}
-/**
- * gmc_v9_0_set_pte_pde - update the page tables using MMIO
- *
- * @adev: amdgpu_device pointer
- * @cpu_pt_addr: cpu address of the page table
- * @gpu_page_idx: entry in the page table to update
- * @addr: dst addr to write into pte/pde
- * @flags: access flags
+/*
+ * PTE format on VEGA 10:
+ * 63:59 reserved
+ * 58:57 mtype
+ * 56 F
+ * 55 L
+ * 54 P
+ * 53 SW
+ * 52 T
+ * 50:48 reserved
+ * 47:12 4k physical page base address
+ * 11:7 fragment
+ * 6 write
+ * 5 read
+ * 4 exe
+ * 3 Z
+ * 2 snooped
+ * 1 system
+ * 0 valid
*
- * Update the page tables using the CPU.
+ * PDE format on VEGA 10:
+ * 63:59 block fragment size
+ * 58:55 reserved
+ * 54 P
+ * 53:48 reserved
+ * 47:6 physical base address of PD or PTE
+ * 5:3 reserved
+ * 2 C
+ * 1 system
+ * 0 valid
*/
-static int gmc_v9_0_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
- uint32_t gpu_page_idx, uint64_t addr,
- uint64_t flags)
-{
- void __iomem *ptr = (void *)cpu_pt_addr;
- uint64_t value;
-
- /*
- * PTE format on VEGA 10:
- * 63:59 reserved
- * 58:57 mtype
- * 56 F
- * 55 L
- * 54 P
- * 53 SW
- * 52 T
- * 50:48 reserved
- * 47:12 4k physical page base address
- * 11:7 fragment
- * 6 write
- * 5 read
- * 4 exe
- * 3 Z
- * 2 snooped
- * 1 system
- * 0 valid
- *
- * PDE format on VEGA 10:
- * 63:59 block fragment size
- * 58:55 reserved
- * 54 P
- * 53:48 reserved
- * 47:6 physical base address of PD or PTE
- * 5:3 reserved
- * 2 C
- * 1 system
- * 0 valid
- */
-
- /*
- * The following is for PTE only. GART does not have PDEs.
- */
- value = addr & 0x0000FFFFFFFFF000ULL;
- value |= flags;
- writeq(value, ptr + (gpu_page_idx * 8));
- return 0;
-}
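To make the layout above concrete: a valid, readable, writable, snooped 4 KiB PTE for physical page address 0x100000000 would be assembled roughly as below. Bit positions are taken from the comment, the address mask from the removed MMIO path; this is purely illustrative, not driver code.

	uint64_t pte = 0x100000000ULL & 0x0000FFFFFFFFF000ULL;	/* bits 47:12 */

	pte |= (1ULL << 0)	/* valid */
	     | (1ULL << 2)	/* snooped */
	     | (1ULL << 5)	/* read */
	     | (1ULL << 6);	/* write */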
static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev,
uint32_t flags)
@@ -593,7 +584,6 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = {
.flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb,
.emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb,
.emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping,
- .set_pte_pde = gmc_v9_0_set_pte_pde,
.get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags,
.get_vm_pde = gmc_v9_0_get_vm_pde
};
@@ -620,85 +610,6 @@ static int gmc_v9_0_early_init(void *handle)
return 0;
}
-static int gmc_v9_0_ecc_available(struct amdgpu_device *adev)
-{
- uint32_t reg_val;
- uint32_t reg_addr;
- uint32_t field_val;
- size_t i;
- uint32_t fv2;
- size_t lost_sheep;
-
- DRM_DEBUG("ecc: gmc_v9_0_ecc_available()\n");
-
- lost_sheep = 0;
- for (i = 0; i < ARRAY_SIZE(ecc_umclocalcap_addrs); ++i) {
- reg_addr = ecc_umclocalcap_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_UmcLocalCap[%zu]: reg_addr: 0x%08x\n",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UmcLocalCap,
- EccDis);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "EccDis: 0x%08x, ",
- reg_val, field_val);
- if (field_val) {
- DRM_ERROR("ecc: UmcLocalCap:EccDis is set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_umc_config_addrs); ++i) {
- reg_addr = ecc_umcch_umc_config_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH0_0_UMC_CONFIG[%zu]: reg_addr: 0x%08x",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_UMC_CONFIG,
- DramReady);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "DramReady: 0x%08x\n",
- reg_val, field_val);
-
- if (!field_val) {
- DRM_ERROR("ecc: UMC_CONFIG:DramReady is not set.\n");
- ++lost_sheep;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(ecc_umcch_eccctrl_addrs); ++i) {
- reg_addr = ecc_umcch_eccctrl_addrs[i];
- DRM_DEBUG("ecc: "
- "UMCCH_EccCtrl[%zu]: reg_addr: 0x%08x, ",
- i, reg_addr);
- reg_val = RREG32(reg_addr);
- field_val = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- WrEccEn);
- fv2 = REG_GET_FIELD(reg_val, UMCCH0_0_EccCtrl,
- RdEccEn);
- DRM_DEBUG("ecc: "
- "reg_val: 0x%08x, "
- "WrEccEn: 0x%08x, "
- "RdEccEn: 0x%08x\n",
- reg_val, field_val, fv2);
-
- if (!field_val) {
- DRM_DEBUG("ecc: WrEccEn is not set\n");
- ++lost_sheep;
- }
- if (!fv2) {
- DRM_DEBUG("ecc: RdEccEn is not set\n");
- ++lost_sheep;
- }
- }
-
- DRM_DEBUG("ecc: lost_sheep: %zu\n", lost_sheep);
- return lost_sheep == 0;
-}
-
static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
{
@@ -751,31 +662,119 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
return 0;
}
-static int gmc_v9_0_late_init(void *handle)
+static int gmc_v9_0_ecc_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = gmc_v9_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "umc_err_count",
+ .debugfs_name = "umc_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__UMC,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "umc",
+ };
int r;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0);
+ if (r)
+ goto irq;
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
+static int gmc_v9_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int r;
+
if (!gmc_v9_0_keep_stolen_memory(adev))
amdgpu_bo_late_init(adev);
r = gmc_v9_0_allocate_vm_inv_eng(adev);
if (r)
return r;
+ /* Check if ecc is available */
+ if (!amdgpu_sriov_vf(adev)) {
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ case CHIP_VEGA20:
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df_funcs->enable_ecc_force_par_wr_rmw)
+ adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else {
+ DRM_INFO("ECC is active.\n");
+ }
- if (adev->asic_type == CHIP_VEGA10 && !amdgpu_sriov_vf(adev)) {
- r = gmc_v9_0_ecc_available(adev);
- if (r == 1) {
- DRM_INFO("ECC is active.\n");
- } else if (r == 0) {
- DRM_INFO("ECC is not present.\n");
- adev->df_funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_ERROR("gmc_v9_0_ecc_available() failed. r: %d\n", r);
- return r;
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("SRAM ECC is not present.\n");
+ } else {
+ DRM_INFO("SRAM ECC is active.\n");
+ }
+ break;
+ default:
+ break;
}
}
+ r = gmc_v9_0_ecc_late_init(handle);
+ if (r)
+ return r;
+
return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}
@@ -787,7 +786,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
base = mmhub_v1_0_get_fb_location(adev);
/* add the xgmi offset of the physical node */
base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
- amdgpu_gmc_vram_location(adev, &adev->gmc, base);
+ amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc);
if (!amdgpu_sriov_vf(adev))
amdgpu_gmc_agp_location(adev, mc);
@@ -987,6 +986,12 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
+ /* interrupt sent to DF. */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0,
+ &adev->gmc.ecc_irq);
+ if (r)
+ return r;
+
/* Set the internal MC address mask
* This is the max address of the GPU's
* internal address space.
@@ -1011,7 +1016,7 @@ static int gmc_v9_0_sw_init(void *handle)
pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
}
- adev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ adev->need_swiotlb = drm_need_swiotlb(dma_bits);
if (adev->gmc.xgmi.supported) {
r = gfxhub_v1_1_get_xgmi_info(adev);
@@ -1052,6 +1057,22 @@ static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) &&
+ adev->gmc.ras_if) {
+ struct ras_common_if *ras_if = adev->gmc.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /* remove fs first */
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /* remove the IH */
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
@@ -1198,6 +1219,7 @@ static int gmc_v9_0_hw_fini(void *handle)
return 0;
}
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
gmc_v9_0_gart_disable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 0c9a2c03504e..f2e6b148ccad 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2824,7 +2824,7 @@ static int kv_dpm_init(struct amdgpu_device *adev)
pi->caps_tcp_ramping = true;
}
- if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
+ if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
pi->caps_sclk_ds = true;
else
pi->caps_sclk_ds = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 1696644ec022..41a9a5779623 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -163,7 +163,7 @@ static void mmhub_v1_0_init_cache_regs(struct amdgpu_device *adev)
/* XXX for emulation, Refer to closed source code.*/
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
0);
- tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+ tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
@@ -256,7 +256,7 @@ static void mmhub_v1_0_setup_vmid_config(struct amdgpu_device *adev)
block_size);
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
- RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+ RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 73851ebb3833..8dbad496b29f 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -157,6 +157,82 @@ static void xgpu_ai_mailbox_trans_msg (struct amdgpu_device *adev,
xgpu_ai_mailbox_set_valid(adev, false);
}
+static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
+{
+ int r = 0;
+ u32 req, val, size;
+
+ if (!amdgim_is_hwperf(adev) || buf == NULL)
+ return -EBADRQC;
+
+ switch (type) {
+ case PP_SCLK:
+ req = IDH_IRQ_GET_PP_SCLK;
+ break;
+ case PP_MCLK:
+ req = IDH_IRQ_GET_PP_MCLK;
+ break;
+ default:
+ return -EBADRQC;
+ }
+
+ mutex_lock(&adev->virt.dpm_mutex);
+
+ xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r && adev->fw_vram_usage.va != NULL) {
+ val = RREG32_NO_KIQ(
+ SOC15_REG_OFFSET(NBIO, 0,
+ mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
+ size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
+ val), PAGE_SIZE);
+
+ if (size < PAGE_SIZE)
+ strcpy(buf,((char *)adev->virt.fw_reserve.p_pf2vf + val));
+ else
+ size = 0;
+
+ r = size;
+ goto out;
+ }
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (r)
+ pr_info("%s DPM request failed",
+ (type == PP_SCLK) ? "SCLK" : "MCLK");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
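A hedged sketch of how the new virt op might be consumed on the guest side, for example by a clock-level query path: the buffer handling and surrounding code here are assumptions for illustration, only the .get_pp_clk hook itself is added by this patch.

	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	int size = 0;

	if (buf) {
		size = adev->virt.ops->get_pp_clk(adev, PP_SCLK, buf);
		if (size > 0)
			pr_debug("host reported sclk levels:\n%s", buf);
		kfree(buf);
	}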
+static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
+{
+ int r = 0;
+ u32 req = IDH_IRQ_FORCE_DPM_LEVEL;
+
+ if (!amdgim_is_hwperf(adev))
+ return -EBADRQC;
+
+ mutex_lock(&adev->virt.dpm_mutex);
+ xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);
+
+ r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
+ if (!r)
+ goto out;
+
+ r = xgpu_ai_poll_msg(adev, IDH_FAIL);
+ if (!r)
+ pr_info("DPM request failed");
+ else
+ pr_info("Mailbox is broken");
+
+out:
+ mutex_unlock(&adev->virt.dpm_mutex);
+ return r;
+}
+
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
enum idh_request req)
{
@@ -375,4 +451,6 @@ const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .get_pp_clk = xgpu_ai_get_pp_clk,
+ .force_dpm_level = xgpu_ai_force_dpm_level,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index b4a9ceea334b..39d151b79153 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,10 @@ enum idh_request {
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_IRQ_FORCE_DPM_LEVEL = 10,
+ IDH_IRQ_GET_PP_SCLK,
+ IDH_IRQ_GET_PP_MCLK,
+
IDH_LOG_VF_ERROR = 200,
};
@@ -43,6 +47,8 @@ enum idh_event {
IDH_READY_TO_ACCESS_GPU,
IDH_FLR_NOTIFICATION,
IDH_FLR_NOTIFICATION_CMPL,
+ IDH_SUCCESS,
+ IDH_FAIL,
IDH_EVENT_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
index 6a0fcd67662a..aef9d059ae52 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c
@@ -515,7 +515,7 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
/* wait until RCV_MSG become 3 */
if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
- pr_err("failed to recieve FLR_CMPL\n");
+ pr_err("failed to receive FLR_CMPL\n");
return;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index cc967dbfd631..6590143c3f75 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -118,7 +118,8 @@ static void nbio_v6_1_ih_doorbell_range(struct amdgpu_device *adev,
if (use_doorbell) {
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
- ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
+ ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
+ BIF_IH_DOORBELL_RANGE, SIZE, 6);
} else
ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index f3a7d207af07..2f79765b4bdb 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -43,6 +43,7 @@ enum psp_gfx_crtl_cmd_id
GFX_CTRL_CMD_ID_ENABLE_INT = 0x00050000, /* enable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */
GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */
+ GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */
GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */
GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 860b70d80d3c..b91df7bd1d98 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -33,6 +33,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_7_4_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega20_sos.bin");
MODULE_FIRMWARE("amdgpu/vega20_asd.bin");
MODULE_FIRMWARE("amdgpu/vega20_ta.bin");
@@ -113,6 +116,13 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes);
adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+
+ adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
+
+ adev->psp.ta_ras_ucode_version = le32_to_cpu(ta_hdr->ta_ras_ucode_version);
+ adev->psp.ta_ras_ucode_size = le32_to_cpu(ta_hdr->ta_ras_size_bytes);
+ adev->psp.ta_ras_start_addr = (uint8_t *)adev->psp.ta_xgmi_start_addr +
+ le32_to_cpu(ta_hdr->ta_ras_offset_bytes);
}
return 0;
@@ -217,6 +227,37 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp)
return ret;
}
+static void psp_v11_0_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v11_0_ring_init(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -224,6 +265,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v11_0_reroute_ih(psp);
+
ring = &psp->km_ring;
ring->ring_type = ring_type;
@@ -631,7 +674,7 @@ static int psp_v11_0_xgmi_set_topology_info(struct psp_context *psp,
for (i = 0; i < topology_info_input->num_nodes; i++) {
topology_info_input->nodes[i].node_id = topology->nodes[i].node_id;
topology_info_input->nodes[i].num_hops = topology->nodes[i].num_hops;
- topology_info_input->nodes[i].is_sharing_enabled = topology->nodes[i].is_sharing_enabled;
+ topology_info_input->nodes[i].is_sharing_enabled = 1;
topology_info_input->nodes[i].sdma_engine = topology->nodes[i].sdma_engine;
}
@@ -679,6 +722,54 @@ static int psp_v11_0_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id
return 0;
}
+static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
+ struct ta_ras_trigger_error_input *info)
+{
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__TRIGGER_ERROR;
+ ras_cmd->ras_in_message.trigger_error = *info;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+}
+
+static int psp_v11_0_ras_cure_posion(struct psp_context *psp, uint64_t *mode_ptr)
+{
+#if 0
+ /* not supported yet */
+ struct ta_ras_shared_memory *ras_cmd;
+ int ret;
+
+ if (!psp->ras.ras_initialized)
+ return -EINVAL;
+
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras.ras_shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ ras_cmd->cmd_id = TA_RAS_COMMAND__CURE_POISON;
+ ras_cmd->ras_in_message.cure_poison.mode_ptr = mode_ptr;
+
+ ret = psp_ras_invoke(psp, ras_cmd->cmd_id);
+ if (ret)
+ return -EINVAL;
+
+ return ras_cmd->ras_status;
+#else
+ return -EINVAL;
+#endif
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_sysdrv = psp_v11_0_bootloader_load_sysdrv,
@@ -695,6 +786,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.xgmi_get_hive_id = psp_v11_0_xgmi_get_hive_id,
.xgmi_get_node_id = psp_v11_0_xgmi_get_node_id,
.support_vmr_ring = psp_v11_0_support_vmr_ring,
+ .ras_trigger_error = psp_v11_0_ras_trigger_error,
+ .ras_cure_posion = psp_v11_0_ras_cure_posion,
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 0487e3a4e9e7..143f0fae69d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -37,6 +37,9 @@
#include "sdma0/sdma0_4_0_offset.h"
#include "nbio/nbio_6_1_offset.h"
+#include "oss/osssys_4_0_offset.h"
+#include "oss/osssys_4_0_sh_mask.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
@@ -252,6 +255,37 @@ static int psp_v3_1_ring_init(struct psp_context *psp,
return 0;
}
+static void psp_v3_1_reroute_ih(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t tmp;
+
+ /* Change IH ring for VMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1244b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, CLIENT_TYPE, 1);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 3);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+
+ /* Change IH ring for UMC */
+ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b);
+ tmp = REG_SET_FIELD(tmp, IH_CLIENT_CFG_DATA, RING_ID, 1);
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, 4);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_70, tmp);
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, GFX_CTRL_CMD_ID_GBR_IH_SET);
+
+ mdelay(20);
+ psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
+ 0x80000000, 0x8000FFFF, false);
+}
+
static int psp_v3_1_ring_create(struct psp_context *psp,
enum psp_ring_type ring_type)
{
@@ -260,6 +294,8 @@ static int psp_v3_1_ring_create(struct psp_context *psp,
struct psp_ring *ring = &psp->km_ring;
struct amdgpu_device *adev = psp->adev;
+ psp_v3_1_reroute_ih(psp);
+
/* Write low address of the ring to C2PMSG_69 */
psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index cca3552b36ed..36196372e8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -870,8 +870,8 @@ static int sdma_v2_4_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1006,7 +1006,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1022,7 +1022,7 @@ static int sdma_v2_4_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 0ce8331baeb2..6d39544e7829 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -1154,8 +1154,8 @@ static int sdma_v3_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1340,7 +1340,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
@@ -1356,7 +1356,7 @@ static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index c816e55d43a9..9c88ce513d78 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -41,6 +41,8 @@
#include "ivsrcid/sdma0/irqsrcs_sdma0_4_0.h"
#include "ivsrcid/sdma1/irqsrcs_sdma1_4_0.h"
+#include "amdgpu_ras.h"
+
MODULE_FIRMWARE("amdgpu/vega10_sdma.bin");
MODULE_FIRMWARE("amdgpu/vega10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vega12_sdma.bin");
@@ -154,7 +156,6 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] =
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
@@ -184,7 +185,6 @@ static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = {
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_RPTR_ADDR_LO, 0xfffffffd, 0x00000001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_RLC7_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_PAGE, 0x000003ff, 0x000003c0),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_UTCL1_WATERMK, 0xFE000000, 0x00000000),
};
static const struct soc15_reg_golden golden_settings_sdma_rv1[] =
@@ -849,7 +849,7 @@ static void sdma_v4_0_gfx_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_GFX_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_GFX_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -940,7 +940,7 @@ static void sdma_v4_0_page_resume(struct amdgpu_device *adev, unsigned int i)
wptr_poll_cntl = RREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL);
wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
SDMA0_PAGE_RB_WPTR_POLL_CNTL,
- F32_POLL_ENABLE, amdgpu_sriov_vf(adev));
+ F32_POLL_ENABLE, amdgpu_sriov_vf(adev) ? 1 : 0);
WREG32_SDMA(i, mmSDMA0_PAGE_RB_WPTR_POLL_CNTL, wptr_poll_cntl);
/* enable DMA RB */
@@ -1493,6 +1493,87 @@ static int sdma_v4_0_early_init(void *handle)
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry);
+
+static int sdma_v4_0_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct ras_common_if **ras_if = &adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .cb = sdma_v4_0_process_ras_data_cb,
+ };
+ struct ras_fs_if fs_info = {
+ .sysfs_name = "sdma_err_count",
+ .debugfs_name = "sdma_err_inject",
+ };
+ struct ras_common_if ras_block = {
+ .block = AMDGPU_RAS_BLOCK__SDMA,
+ .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE,
+ .sub_block_index = 0,
+ .name = "sdma",
+ };
+ int r;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0);
+ return 0;
+ }
+
+ /* handle resume path. */
+ if (*ras_if)
+ goto resume;
+
+ *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL);
+ if (!*ras_if)
+ return -ENOMEM;
+
+ **ras_if = ras_block;
+
+ r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1);
+ if (r)
+ goto feature;
+
+ ih_info.head = **ras_if;
+ fs_info.head = **ras_if;
+
+ r = amdgpu_ras_interrupt_add_handler(adev, &ih_info);
+ if (r)
+ goto interrupt;
+
+ r = amdgpu_ras_debugfs_create(adev, &fs_info);
+ if (r)
+ goto debugfs;
+
+ r = amdgpu_ras_sysfs_create(adev, &fs_info);
+ if (r)
+ goto sysfs;
+resume:
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ if (r)
+ goto irq;
+
+ r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+ if (r) {
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ goto irq;
+ }
+
+ return 0;
+irq:
+ amdgpu_ras_sysfs_remove(adev, *ras_if);
+sysfs:
+ amdgpu_ras_debugfs_remove(adev, *ras_if);
+debugfs:
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+interrupt:
+ amdgpu_ras_feature_enable(adev, *ras_if, 0);
+feature:
+ kfree(*ras_if);
+ *ras_if = NULL;
+ return -EINVAL;
+}
+
static int sdma_v4_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
@@ -1511,6 +1592,18 @@ static int sdma_v4_0_sw_init(void *handle)
if (r)
return r;
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA0, SDMA0_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
+ /* SDMA SRAM ECC event */
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_SDMA1, SDMA1_4_0__SRCID__SDMA_SRAM_ECC,
+ &adev->sdma.ecc_irq);
+ if (r)
+ return r;
+
for (i = 0; i < adev->sdma.num_instances; i++) {
ring = &adev->sdma.instance[i].ring;
ring->ring_obj = NULL;
@@ -1526,8 +1619,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
@@ -1546,8 +1639,8 @@ static int sdma_v4_0_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -1561,6 +1654,22 @@ static int sdma_v4_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) &&
+ adev->sdma.ras_if) {
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_ih_if ih_info = {
+ .head = *ras_if,
+ };
+
+ /*remove fs first*/
+ amdgpu_ras_debugfs_remove(adev, ras_if);
+ amdgpu_ras_sysfs_remove(adev, ras_if);
+ /*remove the IH*/
+ amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
+ amdgpu_ras_feature_enable(adev, ras_if, 0);
+ kfree(ras_if);
+ }
+
for (i = 0; i < adev->sdma.num_instances; i++) {
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
if (adev->sdma.has_page_queue)
@@ -1598,6 +1707,9 @@ static int sdma_v4_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev))
return 0;
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE0);
+ amdgpu_irq_put(adev, &adev->sdma.ecc_irq, AMDGPU_SDMA_IRQ_INSTANCE1);
+
sdma_v4_0_ctx_switch_enable(adev, false);
sdma_v4_0_enable(adev, false);
@@ -1666,13 +1778,12 @@ static int sdma_v4_0_set_trap_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- unsigned int instance = (type == AMDGPU_SDMA_IRQ_TRAP0) ? 0 : 1;
u32 sdma_cntl;
- sdma_cntl = RREG32_SDMA(instance, mmSDMA0_CNTL);
+ sdma_cntl = RREG32_SDMA(type, mmSDMA0_CNTL);
sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE,
state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
- WREG32_SDMA(instance, mmSDMA0_CNTL, sdma_cntl);
+ WREG32_SDMA(type, mmSDMA0_CNTL, sdma_cntl);
return 0;
}
@@ -1714,6 +1825,58 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev,
+ struct amdgpu_iv_entry *entry)
+{
+ uint32_t instance, err_source;
+
+ switch (entry->client_id) {
+ case SOC15_IH_CLIENTID_SDMA0:
+ instance = 0;
+ break;
+ case SOC15_IH_CLIENTID_SDMA1:
+ instance = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (entry->src_id) {
+ case SDMA0_4_0__SRCID__SDMA_SRAM_ECC:
+ err_source = 0;
+ break;
+ case SDMA0_4_0__SRCID__SDMA_ECC:
+ err_source = 1;
+ break;
+ default:
+ return 0;
+ }
+
+ kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
+
+ amdgpu_ras_reset_gpu(adev, 0);
+
+ return AMDGPU_RAS_UE;
+}
+
+static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ struct amdgpu_iv_entry *entry)
+{
+ struct ras_common_if *ras_if = adev->sdma.ras_if;
+ struct ras_dispatch_if ih_data = {
+ .entry = entry,
+ };
+
+ if (!ras_if)
+ return 0;
+
+ ih_data.head = *ras_if;
+
+ amdgpu_ras_interrupt_dispatch(adev, &ih_data);
+ return 0;
+}
+
static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
@@ -1741,6 +1904,25 @@ static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev,
return 0;
}
+static int sdma_v4_0_set_ecc_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned type,
+ enum amdgpu_interrupt_state state)
+{
+ u32 sdma_edc_config;
+
+ u32 reg_offset = (type == AMDGPU_SDMA_IRQ_INSTANCE0) ?
+ sdma_v4_0_get_reg_offset(adev, 0, mmSDMA0_EDC_CONFIG) :
+ sdma_v4_0_get_reg_offset(adev, 1, mmSDMA0_EDC_CONFIG);
+
+ sdma_edc_config = RREG32(reg_offset);
+ sdma_edc_config = REG_SET_FIELD(sdma_edc_config, SDMA0_EDC_CONFIG, ECC_INT_ENABLE,
+ state == AMDGPU_IRQ_STATE_ENABLE ? 1 : 0);
+ WREG32(reg_offset, sdma_edc_config);
+
+ return 0;
+}
+
static void sdma_v4_0_update_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
@@ -1906,7 +2088,7 @@ static void sdma_v4_0_get_clockgating_state(void *handle, u32 *flags)
const struct amd_ip_funcs sdma_v4_0_ip_funcs = {
.name = "sdma_v4_0",
.early_init = sdma_v4_0_early_init,
- .late_init = NULL,
+ .late_init = sdma_v4_0_late_init,
.sw_init = sdma_v4_0_sw_init,
.sw_fini = sdma_v4_0_sw_fini,
.hw_init = sdma_v4_0_hw_init,
@@ -2008,11 +2190,20 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = {
.process = sdma_v4_0_process_illegal_inst_irq,
};
+static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = {
+ .set = sdma_v4_0_set_ecc_irq_state,
+ .process = sdma_v4_0_process_ecc_irq,
+};
+
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
adev->sdma.trap_irq.funcs = &sdma_v4_0_trap_irq_funcs;
adev->sdma.illegal_inst_irq.funcs = &sdma_v4_0_illegal_inst_irq_funcs;
+ adev->sdma.ecc_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
+ adev->sdma.ecc_irq.funcs = &sdma_v4_0_ecc_irq_funcs;
}
/**
@@ -2077,8 +2268,8 @@ static const struct amdgpu_buffer_funcs sdma_v4_0_buffer_funcs = {
static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
{
adev->mman.buffer_funcs = &sdma_v4_0_buffer_funcs;
- if (adev->sdma.has_page_queue)
- adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].page;
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1)
+ adev->mman.buffer_funcs_ring = &adev->sdma.instance[1].page;
else
adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}
@@ -2097,15 +2288,22 @@ static void sdma_v4_0_set_vm_pte_funcs(struct amdgpu_device *adev)
unsigned i;
adev->vm_manager.vm_pte_funcs = &sdma_v4_0_vm_pte_funcs;
- for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.has_page_queue)
+ if (adev->sdma.has_page_queue && adev->sdma.num_instances > 1) {
+ for (i = 1; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].page.sched;
- else
+ adev->vm_manager.vm_pte_rqs[i - 1] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances - 1;
+ adev->vm_manager.page_fault = &adev->sdma.instance[0].page;
+ } else {
+ for (i = 0; i < adev->sdma.num_instances; i++) {
sched = &adev->sdma.instance[i].ring.sched;
- adev->vm_manager.vm_pte_rqs[i] =
- &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ adev->vm_manager.vm_pte_rqs[i] =
+ &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL];
+ }
+ adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
- adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances;
}
const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
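
The TRAP0/TRAP1 to INSTANCE0/INSTANCE1 rename above is what lets sdma_v4_0_set_trap_irq_state() drop its local instance variable and index the registers with the irq type directly. A minimal stand-alone sketch of that pattern, assuming the enum values start at 0 as the new code implies (the real definition lives in amdgpu_sdma.h):

    #include <stdio.h>

    /* Assumed to mirror the renamed enum in amdgpu_sdma.h: instance N == value N. */
    enum amdgpu_sdma_irq {
            AMDGPU_SDMA_IRQ_INSTANCE0 = 0,
            AMDGPU_SDMA_IRQ_INSTANCE1,
            AMDGPU_SDMA_IRQ_LAST
    };

    /* Hypothetical per-instance register offsets, only to show the indexing. */
    static const unsigned int sdma_cntl_offset[AMDGPU_SDMA_IRQ_LAST] = { 0x1000, 0x1800 };

    int main(void)
    {
            enum amdgpu_sdma_irq type = AMDGPU_SDMA_IRQ_INSTANCE1;

            /* The irq "type" now doubles as the SDMA instance index. */
            printf("instance %d -> SDMA_CNTL at 0x%x\n", type, sdma_cntl_offset[type]);
            return 0;
    }
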
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dma.c b/drivers/gpu/drm/amd/amdgpu/si_dma.c
index f15f196684ba..3eeefd40dae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dma.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dma.c
@@ -503,8 +503,8 @@ static int si_dma_sw_init(void *handle)
r = amdgpu_ring_init(adev, ring, 1024,
&adev->sdma.trap_irq,
(i == 0) ?
- AMDGPU_SDMA_IRQ_TRAP0 :
- AMDGPU_SDMA_IRQ_TRAP1);
+ AMDGPU_SDMA_IRQ_INSTANCE0 :
+ AMDGPU_SDMA_IRQ_INSTANCE1);
if (r)
return r;
}
@@ -591,7 +591,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
u32 sdma_cntl;
switch (type) {
- case AMDGPU_SDMA_IRQ_TRAP0:
+ case AMDGPU_SDMA_IRQ_INSTANCE0:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET);
@@ -607,7 +607,7 @@ static int si_dma_set_trap_irq_state(struct amdgpu_device *adev,
break;
}
break;
- case AMDGPU_SDMA_IRQ_TRAP1:
+ case AMDGPU_SDMA_IRQ_INSTANCE1:
switch (state) {
case AMDGPU_IRQ_STATE_DISABLE:
sdma_cntl = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 41e01a7f57a4..d57e75e5c71f 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -4098,14 +4098,13 @@ static int si_notify_smc_display_change(struct amdgpu_device *adev,
static void si_program_response_times(struct amdgpu_device *adev)
{
- u32 voltage_response_time, backbias_response_time, acpi_delay_time, vbi_time_out;
+ u32 voltage_response_time, acpi_delay_time, vbi_time_out;
u32 vddc_dly, acpi_dly, vbi_dly;
u32 reference_clock;
si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
- backbias_response_time = (u32)adev->pm.dpm.backbias_response_time;
if (voltage_response_time == 0)
voltage_response_time = 1000;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index ed89a101f73f..4900e4958dec 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -63,6 +63,7 @@
#include "vcn_v1_0.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
+#include "amdgpu_smu.h"
#define mmMP0_MISC_CGTT_CTRL0 0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0
@@ -392,6 +393,7 @@ void soc15_program_register_sequence(struct amdgpu_device *adev,
static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
u32 i;
+ int ret = 0;
amdgpu_atombios_scratch_regs_engine_hung(adev, true);
@@ -402,7 +404,9 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
pci_save_state(adev->pdev);
- psp_gpu_reset(adev);
+ ret = psp_gpu_reset(adev);
+ if (ret)
+ dev_err(adev->dev, "GPU mode1 reset failed\n");
pci_restore_state(adev->pdev);
@@ -417,7 +421,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
amdgpu_atombios_scratch_regs_engine_hung(adev, false);
- return 0;
+ return ret;
}
static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap)
@@ -451,6 +455,8 @@ static int soc15_asic_baco_reset(struct amdgpu_device *adev)
dev_info(adev->dev, "GPU BACO reset\n");
+ adev->in_baco_reset = 1;
+
return 0;
}
@@ -461,8 +467,15 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_VEGA10:
+ case CHIP_VEGA12:
soc15_asic_get_baco_capability(adev, &baco_reset);
break;
+ case CHIP_VEGA20:
+ if (adev->psp.sos_fw_version >= 0x80067)
+ soc15_asic_get_baco_capability(adev, &baco_reset);
+ else
+ baco_reset = false;
+ break;
default:
baco_reset = false;
break;
@@ -602,8 +615,12 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
}
amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ if (!amdgpu_sriov_vf(adev)) {
+ if (is_support_sw_smu(adev))
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ }
if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
@@ -884,7 +901,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
} else if (adev->pdev->device == 0x15d8) {
- adev->cg_flags = AMD_CG_SUPPORT_GFX_MGLS |
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
AMD_CG_SUPPORT_GFX_3D_CGCG |
AMD_CG_SUPPORT_GFX_3D_CGLS |
@@ -927,7 +945,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
- if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
new file mode 100644
index 000000000000..0b4e7b55595a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -0,0 +1,108 @@
+/****************************************************************************\
+*
+* File Name ta_ras_if.h
+* Project AMD PSP SW IP Module
+*
+* Description Interface to the RAS Trusted Application
+*
+* Copyright 2019 Advanced Micro Devices, Inc.
+*
+* Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+* and associated documentation files (the "Software"), to deal in the Software without restriction,
+* including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
+* and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
+* subject to the following conditions:
+*
+* The above copyright notice and this permission notice shall be included in all copies or substantial
+* portions of the Software.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+* OTHER DEALINGS IN THE SOFTWARE.
+*/
+#ifndef _TA_RAS_IF_H
+#define _TA_RAS_IF_H
+
+/* Responses have bit 31 set */
+#define RSP_ID_MASK (1U << 31)
+#define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)
+
+#define TA_NUM_BLOCK_MAX 14
+
+enum ras_command {
+ TA_RAS_COMMAND__ENABLE_FEATURES = 0,
+ TA_RAS_COMMAND__DISABLE_FEATURES,
+ TA_RAS_COMMAND__TRIGGER_ERROR,
+};
+
+enum ta_ras_status {
+ TA_RAS_STATUS__SUCCESS = 0x00,
+ TA_RAS_STATUS__RESET_NEEDED = 0x01,
+ TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0x02,
+ TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0x03,
+ TA_RAS_STATUS__ERROR_RAS_DUPLICATE_CMD = 0x04,
+ TA_RAS_STATUS__ERROR_INJECTION_FAILED = 0x05
+};
+
+enum ta_ras_block {
+ TA_RAS_BLOCK__UMC = 0,
+ TA_RAS_BLOCK__SDMA,
+ TA_RAS_BLOCK__GFX,
+ TA_RAS_BLOCK__MMHUB,
+ TA_RAS_BLOCK__ATHUB,
+ TA_RAS_BLOCK__PCIE_BIF,
+ TA_RAS_BLOCK__HDP,
+ TA_RAS_BLOCK__XGMI_WAFL,
+ TA_RAS_BLOCK__DF,
+ TA_RAS_BLOCK__SMN,
+ TA_RAS_BLOCK__SEM,
+ TA_RAS_BLOCK__MP0,
+ TA_RAS_BLOCK__MP1,
+ TA_RAS_BLOCK__FUSE = (TA_NUM_BLOCK_MAX - 1),
+};
+
+enum ta_ras_error_type {
+ TA_RAS_ERROR__NONE = 0,
+ TA_RAS_ERROR__PARITY = 1,
+ TA_RAS_ERROR__SINGLE_CORRECTABLE = 2,
+ TA_RAS_ERROR__MULTI_UNCORRECTABLE = 4,
+ TA_RAS_ERROR__POISON = 8
+};
+
+struct ta_ras_enable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_disable_features_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type error_type;
+};
+
+struct ta_ras_trigger_error_input {
+ enum ta_ras_block block_id;
+ enum ta_ras_error_type inject_error_type;
+ uint32_t sub_block_index;
+ uint64_t address;
+ uint64_t value;
+};
+
+union ta_ras_cmd_input {
+ struct ta_ras_enable_features_input enable_features;
+ struct ta_ras_disable_features_input disable_features;
+ struct ta_ras_trigger_error_input trigger_error;
+};
+
+struct ta_ras_shared_memory {
+ uint32_t cmd_id;
+ uint32_t resp_id;
+ enum ta_ras_status ras_status;
+ uint32_t reserved;
+ union ta_ras_cmd_input ras_in_message;
+};
+
+#endif // _TA_RAS_IF_H
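
As a quick illustration of the command/response convention in this header: the RAS trusted application echoes a command back with bit 31 set, so RSP_ID(cmd) is simply cmd | 0x80000000. A minimal user-space sketch repeating the two macros above:

    #include <stdio.h>
    #include <stdint.h>

    /* Copied from ta_ras_if.h above. */
    #define RSP_ID_MASK (1U << 31)
    #define RSP_ID(cmdId) (((uint32_t)(cmdId)) | RSP_ID_MASK)

    int main(void)
    {
            uint32_t cmd = 0; /* TA_RAS_COMMAND__ENABLE_FEATURES */

            /* Responses carry the original command id with bit 31 set. */
            printf("cmd 0x%08x -> resp 0x%08x\n", (unsigned)cmd, (unsigned)RSP_ID(cmd));

            /* Telling the direction of a message id apart. */
            printf("is response: %d\n", (RSP_ID(cmd) & RSP_ID_MASK) != 0);
            return 0;
    }
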
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index bed78a778e3f..40363ca6c5f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -283,7 +283,7 @@ static int vce_v2_0_stop(struct amdgpu_device *adev)
}
if (vce_v2_0_wait_for_idle(adev)) {
- DRM_INFO("VCE is busy, Can't set clock gateing");
+ DRM_INFO("VCE is busy, Can't set clock gating");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index aadc3e66ebd7..f3f5938430d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -382,6 +382,7 @@ static int vce_v4_0_start(struct amdgpu_device *adev)
static int vce_v4_0_stop(struct amdgpu_device *adev)
{
+ /* Disable VCPU */
WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_VCPU_CNTL), 0, ~0x200001);
/* hold on ECPU */
@@ -389,8 +390,8 @@ static int vce_v4_0_stop(struct amdgpu_device *adev)
VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
- /* clear BUSY flag */
- WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0, ~VCE_STATUS__JOB_BUSY_MASK);
+ /* clear VCE_STATUS */
+ WREG32(SOC15_REG_OFFSET(VCE, 0, mmVCE_STATUS), 0);
/* Set Clock-Gating off */
/* if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -922,6 +923,7 @@ static int vce_v4_0_set_clockgating_state(void *handle,
return 0;
}
+#endif
static int vce_v4_0_set_powergating_state(void *handle,
enum amd_powergating_state state)
@@ -935,16 +937,11 @@ static int vce_v4_0_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
- return 0;
-
if (state == AMD_PG_STATE_GATE)
- /* XXX do we need a vce_v4_0_stop()? */
- return 0;
+ return vce_v4_0_stop(adev);
else
return vce_v4_0_start(adev);
}
-#endif
static void vce_v4_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct amdgpu_ib *ib, uint32_t flags)
@@ -1059,7 +1056,7 @@ const struct amd_ip_funcs vce_v4_0_ip_funcs = {
.soft_reset = NULL /* vce_v4_0_soft_reset */,
.post_soft_reset = NULL /* vce_v4_0_post_soft_reset */,
.set_clockgating_state = vce_v4_0_set_clockgating_state,
- .set_powergating_state = NULL /* vce_v4_0_set_powergating_state */,
+ .set_powergating_state = vce_v4_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v4_0_ring_vm_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 6d1f804277f8..1b2f69a9a24e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -136,6 +136,25 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
return ih_rb_cntl;
}
+static uint32_t vega10_ih_doorbell_rptr(struct amdgpu_ih_ring *ih)
+{
+ u32 ih_doorbell_rtpr = 0;
+
+ if (ih->use_doorbell) {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR, OFFSET,
+ ih->doorbell_index);
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 1);
+ } else {
+ ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
+ IH_DOORBELL_RPTR,
+ ENABLE, 0);
+ }
+ return ih_doorbell_rtpr;
+}
+
/**
* vega10_ih_irq_init - init and enable the interrupt ring
*
@@ -150,8 +169,8 @@ static uint32_t vega10_ih_rb_cntl(struct amdgpu_ih_ring *ih, uint32_t ih_rb_cntl
static int vega10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih;
+ u32 ih_rb_cntl;
int ret = 0;
- u32 ih_rb_cntl, ih_doorbell_rtpr;
u32 tmp;
/* disable irqs */
@@ -177,23 +196,11 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
upper_32_bits(ih->wptr_addr) & 0xFFFF);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, 0);
- ih_doorbell_rtpr = RREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR);
- if (adev->irq.ih.use_doorbell) {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR, OFFSET,
- adev->irq.ih.doorbell_index);
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 1);
- } else {
- ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr,
- IH_DOORBELL_RPTR,
- ENABLE, 0);
- }
- WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr);
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR,
+ vega10_ih_doorbell_rptr(ih));
ih = &adev->irq.ih1;
if (ih->ring_size) {
@@ -203,11 +210,18 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ WPTR_OVERFLOW_ENABLE, 0);
+ ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL,
+ RB_FULL_DRAIN_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING1, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING1, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING1,
+ vega10_ih_doorbell_rptr(ih));
}
ih = &adev->irq.ih2;
@@ -216,13 +230,16 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev)
WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI_RING2,
(ih->gpu_addr >> 40) & 0xff);
- ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING1);
+ ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2);
ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL_RING2, ih_rb_cntl);
/* set rptr, wptr to 0 */
- WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_RING2, 0);
+ WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR_RING2, 0);
+
+ WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR_RING2,
+ vega10_ih_doorbell_rptr(ih));
}
tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
@@ -449,20 +466,23 @@ static int vega10_ih_sw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_VEGA10) {
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
- if (r)
- return r;
-
- r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
- if (r)
- return r;
- }
-
- /* TODO add doorbell for IH1 & IH2 as well */
adev->irq.ih.use_doorbell = true;
adev->irq.ih.doorbell_index = adev->doorbell_index.ih << 1;
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih1, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih1.use_doorbell = true;
+ adev->irq.ih1.doorbell_index = (adev->doorbell_index.ih + 1) << 1;
+
+ r = amdgpu_ih_ring_init(adev, &adev->irq.ih2, PAGE_SIZE, true);
+ if (r)
+ return r;
+
+ adev->irq.ih2.use_doorbell = true;
+ adev->irq.ih2.doorbell_index = (adev->doorbell_index.ih + 2) << 1;
+
r = amdgpu_irq_init(adev);
return r;
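
A side effect of the vega10_ih_sw_init() change above is that each IH ring now has its own doorbell: ring N uses (doorbell_index.ih + N) << 1, giving consecutive even 32-bit doorbell offsets. A small sketch of that arithmetic (the base value here is made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned int ih_base = 0x178; /* hypothetical adev->doorbell_index.ih */
            unsigned int ring;

            /* ih, ih1 and ih2 take consecutive slots, each shifted left by one
             * as in vega10_ih_sw_init() above. */
            for (ring = 0; ring < 3; ring++)
                    printf("ih%u doorbell_index = 0x%x\n", ring, (ih_base + ring) << 1);
            return 0;
    }
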
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index cf9a49f49d3a..c1e4d44d6137 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -467,6 +467,8 @@ struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
memset(&kfd->doorbell_available_index, 0,
sizeof(kfd->doorbell_available_index));
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return kfd;
}
@@ -492,9 +494,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
{
unsigned int size;
- kfd->mec_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
- kfd->sdma_fw_version = kfd->kfd2kgd->get_fw_version(kfd->kgd,
+ kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_SDMA1);
kfd->shared_resources = *gpu_resources;
@@ -662,6 +664,9 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd)
return ret;
count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count != 0, "KFD reset ref. error");
+
+ atomic_set(&kfd->sram_ecc_flag, 0);
+
return 0;
}
@@ -1025,6 +1030,12 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
return 0;
}
+void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
+{
+ if (kfd)
+ atomic_inc(&kfd->sram_ecc_flag);
+}
+
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index e9f0e0a1b41c..6e1d41c5bf86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -1011,25 +1011,41 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
void kfd_signal_reset_event(struct kfd_dev *dev)
{
struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_hsa_memory_exception_data memory_exception_data;
struct kfd_process *p;
struct kfd_event *ev;
unsigned int temp;
uint32_t id, idx;
+ int reset_cause = atomic_read(&dev->sram_ecc_flag) ?
+ KFD_HW_EXCEPTION_ECC :
+ KFD_HW_EXCEPTION_GPU_HANG;
/* Whole gpu reset caused by GPU hang and memory is lost */
memset(&hw_exception_data, 0, sizeof(hw_exception_data));
hw_exception_data.gpu_id = dev->id;
hw_exception_data.memory_lost = 1;
+ hw_exception_data.reset_cause = reset_cause;
+
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.ErrorType = KFD_MEM_ERR_SRAM_ECC;
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = true;
idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
mutex_lock(&p->event_mutex);
id = KFD_FIRST_NONSIGNAL_EVENT_ID;
- idr_for_each_entry_continue(&p->event_idr, ev, id)
+ idr_for_each_entry_continue(&p->event_idr, ev, id) {
if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
ev->hw_exception_data = hw_exception_data;
set_event(ev);
}
+ if (ev->type == KFD_EVENT_TYPE_MEMORY &&
+ reset_cause == KFD_HW_EXCEPTION_ECC) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+ }
mutex_unlock(&p->event_mutex);
}
srcu_read_unlock(&kfd_processes_srcu, idx);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0eeee3c6d6dc..9e0230965675 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -276,6 +276,9 @@ struct kfd_dev {
uint64_t hive_id;
bool pci_atomic_requested;
+
+ /* SRAM ECC flag */
+ atomic_t sram_ecc_flag;
};
enum kfd_mempool {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 09da91644f9f..2cb09e088dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -37,6 +37,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
+#include "amdgpu_ras.h"
/* topology_device_list - Master list of all topology devices */
static struct list_head topology_device_list;
@@ -1197,6 +1198,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
void *crat_image = NULL;
size_t image_size = 0;
int proximity_domain;
+ struct amdgpu_ras *ctx;
INIT_LIST_HEAD(&temp_topology_device_list);
@@ -1328,6 +1330,20 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
}
+ ctx = amdgpu_ras_get_context((struct amdgpu_device *)(dev->gpu->kgd));
+ if (ctx) {
+ /* kfd is only concerned with sram ecc on GFX/SDMA and HBM ecc on UMC */
+ dev->node_props.capability |=
+ (((ctx->features & BIT(AMDGPU_RAS_BLOCK__SDMA)) != 0) ||
+ ((ctx->features & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0)) ?
+ HSA_CAP_SRAM_EDCSUPPORTED : 0;
+ dev->node_props.capability |= ((ctx->features & BIT(AMDGPU_RAS_BLOCK__UMC)) != 0) ?
+ HSA_CAP_MEM_EDCSUPPORTED : 0;
+
+ dev->node_props.capability |= (ctx->features != 0) ?
+ HSA_CAP_RASEVENTNOTIFY : 0;
+ }
+
kfd_debug_print_topology();
if (!res)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 92a19be07344..84710cfd23c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -48,6 +48,10 @@
#define HSA_CAP_DOORBELL_TYPE_2_0 0x2
#define HSA_CAP_AQL_QUEUE_DOUBLE_MAP 0x00004000
+#define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
+#define HSA_CAP_MEM_EDCSUPPORTED 0x00100000
+#define HSA_CAP_RASEVENTNOTIFY 0x00200000
+
struct kfd_node_properties {
uint64_t hive_id;
uint32_t cpu_cores_count;
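
Tying the new HSA_CAP_* bits back to the kfd_topology.c hunk above: user space can mask a node's capability word against these constants to see which RAS features the node advertises. A stand-alone sketch using a made-up capability value:

    #include <stdio.h>
    #include <stdint.h>

    /* Copied from kfd_topology.h above. */
    #define HSA_CAP_SRAM_EDCSUPPORTED 0x00080000
    #define HSA_CAP_MEM_EDCSUPPORTED  0x00100000
    #define HSA_CAP_RASEVENTNOTIFY    0x00200000

    int main(void)
    {
            /* Hypothetical capability word, e.g. read from the topology sysfs node. */
            uint32_t capability = HSA_CAP_SRAM_EDCSUPPORTED | HSA_CAP_RASEVENTNOTIFY;

            printf("SRAM EDC:         %s\n", (capability & HSA_CAP_SRAM_EDCSUPPORTED) ? "yes" : "no");
            printf("HBM EDC:          %s\n", (capability & HSA_CAP_MEM_EDCSUPPORTED) ? "yes" : "no");
            printf("RAS event notify: %s\n", (capability & HSA_CAP_RASEVENTNOTIFY) ? "yes" : "no");
            return 0;
    }
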
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3082b55b1e77..1854506e3e8f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -111,7 +111,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
- unsigned long possible_crtcs);
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
@@ -137,30 +138,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
-
-
-static const enum drm_plane_type dm_plane_type_default[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
-};
-
-static const enum drm_plane_type dm_plane_type_carizzo[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY,/* YUV Capable Underlay */
-};
-
-static const enum drm_plane_type dm_plane_type_stoney[AMDGPU_MAX_PLANES] = {
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_PRIMARY,
- DRM_PLANE_TYPE_OVERLAY, /* YUV Capable Underlay */
-};
-
/*
* dm_vblank_get_counter
*
@@ -275,12 +252,22 @@ get_crtc_by_otg_inst(struct amdgpu_device *adev,
return NULL;
}
+static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
+{
+ return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
+ dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+}
+
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
+ struct drm_pending_vblank_event *e;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t vpos, hpos, v_blank_start, v_blank_end;
+ bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
@@ -303,26 +290,116 @@ static void dm_pflip_high_irq(void *interrupt_params)
return;
}
- /* Update to correct count(s) if racing with vblank irq */
- amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+ /* page flip completed. */
+ e = amdgpu_crtc->event;
+ amdgpu_crtc->event = NULL;
- /* wake up userspace */
- if (amdgpu_crtc->event) {
- drm_crtc_send_vblank_event(&amdgpu_crtc->base, amdgpu_crtc->event);
+ if (!e)
+ WARN_ON(1);
- /* page flip completed. clean up */
- amdgpu_crtc->event = NULL;
+ acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
+ vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+
+ /* Fixed refresh rate, or VRR scanout position outside front-porch? */
+ if (!vrr_active ||
+ !dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
+ &v_blank_end, &hpos, &vpos) ||
+ (vpos < v_blank_start)) {
+ /* Update to correct count and vblank timestamp if racing with
+ * vblank irq. This also updates to the correct vblank timestamp
+ * even in VRR mode, as scanout is past the front-porch atm.
+ */
+ drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
- } else
- WARN_ON(1);
+ /* Wake up userspace by sending the pageflip event with proper
+ * count and timestamp of vblank of flip completion.
+ */
+ if (e) {
+ drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
+
+ /* Event sent, so done with vblank for this flip */
+ drm_crtc_vblank_put(&amdgpu_crtc->base);
+ }
+ } else if (e) {
+ /* VRR active and inside front-porch: vblank count and
+ * timestamp for pageflip event will only be up to date after
+ * drm_crtc_handle_vblank() has been executed from late vblank
+ * irq handler after start of back-porch (vline 0). We queue the
+ * pageflip event for send-out by drm_crtc_handle_vblank() with
+ * updated timestamp and count, once it runs after us.
+ *
+ * We need to open-code this instead of using the helper
+ * drm_crtc_arm_vblank_event(), as that helper would
+ * call drm_crtc_accurate_vblank_count(), which we must
+ * not call in VRR mode while we are in front-porch!
+ */
+
+ /* sequence will be replaced by real count during send-out. */
+ e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
+ e->pipe = amdgpu_crtc->crtc_id;
+
+ list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
+ e = NULL;
+ }
+
+ /* Keep track of vblank of this flip for flip throttling. We use the
+ * cooked hw counter, as that one incremented at start of this vblank
+ * of pageflip completion, so last_flip_vblank is the forbidden count
+ * for queueing new pageflips if vsync + VRR is enabled.
+ */
+ amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
+ amdgpu_crtc->crtc_id);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
- DRM_DEBUG_DRIVER("%s - crtc :%d[%p], pflip_stat:AMDGPU_FLIP_NONE\n",
- __func__, amdgpu_crtc->crtc_id, amdgpu_crtc);
+ DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
+ amdgpu_crtc->crtc_id, amdgpu_crtc,
+ vrr_active, (int) !e);
+}
+
+static void dm_vupdate_high_irq(void *interrupt_params)
+{
+ struct common_irq_params *irq_params = interrupt_params;
+ struct amdgpu_device *adev = irq_params->adev;
+ struct amdgpu_crtc *acrtc;
+ struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
+
+ acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
+
+ if (acrtc) {
+ acrtc_state = to_dm_crtc_state(acrtc->base.state);
+
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
- drm_crtc_vblank_put(&amdgpu_crtc->base);
+ /* Core vblank handling is done here after end of front-porch in
+ * vrr mode, as vblank timestamping will give valid results
+ * while now done after front-porch. This will also deliver
+ * page-flip completion events that have been queued to us
+ * if a pageflip happened inside front-porch.
+ */
+ if (amdgpu_dm_vrr_active(acrtc_state)) {
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* BTR processing for pre-DCE12 ASICs */
+ if (acrtc_state->stream &&
+ adev->family < AMDGPU_FAMILY_AI) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ mod_freesync_handle_v_update(
+ adev->dm.freesync_module,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params);
+
+ dc_stream_adjust_vmin_vmax(
+ adev->dm.dc,
+ acrtc_state->stream,
+ &acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+ }
+ }
+ }
}
static void dm_crtc_high_irq(void *interrupt_params)
@@ -331,18 +408,33 @@ static void dm_crtc_high_irq(void *interrupt_params)
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
+ unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
- drm_crtc_handle_vblank(&acrtc->base);
- amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
-
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- if (acrtc_state->stream &&
+ DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
+
+ /* Core vblank handling at start of front-porch is only possible
+ * in non-vrr mode, as only there vblank timestamping will give
+ * valid results while done in front-porch. Otherwise defer it
+ * to dm_vupdate_high_irq after end of front-porch.
+ */
+ if (!amdgpu_dm_vrr_active(acrtc_state))
+ drm_crtc_handle_vblank(&acrtc->base);
+
+ /* Following stuff must happen at start of vblank, for crc
+ * computation and below-the-range btr support in vrr mode.
+ */
+ amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
+
+ if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
@@ -352,6 +444,7 @@ static void dm_crtc_high_irq(void *interrupt_params)
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
@@ -462,6 +555,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
+ init_data.flags.power_down_display_on_boot = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
@@ -912,9 +1007,16 @@ static int dm_resume(void *handle)
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
+ struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
int i;
+ /* Recreate dc_state - DC invalidates it when setting power state to S3. */
+ dc_release_state(dm_state->context);
+ dm_state->context = dc_create_state(dm->dc);
+ /* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+ dc_resource_state_construct(dm->dc, dm_state->context);
+
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -1457,6 +1559,27 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE interrupt */
+ for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
+ r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
@@ -1542,6 +1665,34 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
dm_crtc_high_irq, c_irq_params);
}
+ /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
+ * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
+ * to trigger at end of each vblank, regardless of state of the lock,
+ * matching DCE behaviour.
+ */
+ for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
+ i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i++) {
+ r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
+
+ if (r) {
+ DRM_ERROR("Failed to add vupdate irq id!\n");
+ return r;
+ }
+
+ int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
+ int_params.irq_source =
+ dc_interrupt_to_irq_source(dc, i, 0);
+
+ c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
+
+ c_irq_params->adev = adev;
+ c_irq_params->irq_src = int_params.irq_source;
+
+ amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ dm_vupdate_high_irq, c_irq_params);
+ }
+
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
@@ -1593,15 +1744,10 @@ static int dm_atomic_get_state(struct drm_atomic_state *state,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
- int ret;
if (*dm_state)
return 0;
- ret = drm_modeset_lock(&dm->atomic_obj_lock, state->acquire_ctx);
- if (ret)
- return ret;
-
priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
@@ -1658,17 +1804,16 @@ dm_atomic_duplicate_state(struct drm_private_obj *obj)
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
- new_state->context = dc_create_state();
+ old_state = to_dm_atomic_state(obj->state);
+
+ if (old_state && old_state->context)
+ new_state->context = dc_copy_state(old_state->context);
+
if (!new_state->context) {
kfree(new_state);
return NULL;
}
- old_state = to_dm_atomic_state(obj->state);
- if (old_state && old_state->context)
- dc_resource_state_copy_construct(old_state->context,
- new_state->context);
-
return &new_state->base;
}
@@ -1708,13 +1853,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
- drm_modeset_lock_init(&adev->dm.atomic_obj_lock);
-
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
- state->context = dc_create_state();
+ state->context = dc_create_state(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
@@ -1841,39 +1984,42 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
#endif
static int initialize_plane(struct amdgpu_display_manager *dm,
- struct amdgpu_mode_info *mode_info,
- int plane_id)
+ struct amdgpu_mode_info *mode_info, int plane_id,
+ enum drm_plane_type plane_type,
+ const struct dc_plane_cap *plane_cap)
{
struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
- mode_info->planes[plane_id] = plane;
-
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
- plane->type = mode_info->plane_type[plane_id];
+ plane->type = plane_type;
/*
- * HACK: IGT tests expect that each plane can only have
- * one possible CRTC. For now, set one CRTC for each
- * plane that is not an underlay, but still allow multiple
- * CRTCs for underlay planes.
+ * HACK: IGT tests expect that the primary plane for a CRTC
+ * can only have one possible CRTC. Only expose support for
+ * any CRTC if they're not going to be used as a primary plane
+ * for a CRTC - like overlay or underlay planes.
*/
possible_crtcs = 1 << plane_id;
if (plane_id >= dm->dc->caps.max_streams)
possible_crtcs = 0xff;
- ret = amdgpu_dm_plane_init(dm, mode_info->planes[plane_id], possible_crtcs);
+ ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
DRM_ERROR("KMS: Failed to initialize plane\n");
+ kfree(plane);
return ret;
}
+ if (mode_info)
+ mode_info->planes[plane_id] = plane;
+
return ret;
}
@@ -1916,8 +2062,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
- int32_t total_overlay_planes, total_primary_planes;
+ int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
+ const struct dc_plane_cap *plane;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1925,24 +2072,53 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
return -EINVAL;
}
- /* Identify the number of planes to be initialized */
- total_overlay_planes = dm->dc->caps.max_slave_planes;
- total_primary_planes = dm->dc->caps.max_planes - dm->dc->caps.max_slave_planes;
+ /* There is one primary plane per CRTC */
+ primary_planes = dm->dc->caps.max_streams;
+ ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
- /* First initialize overlay planes, index starting after primary planes */
- for (i = (total_overlay_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, (total_primary_planes + i))) {
- DRM_ERROR("KMS: Failed to initialize overlay plane\n");
+ /*
+ * Initialize primary planes, implicit planes for legacy IOCTLS.
+ * Order is reversed to match iteration order in atomic check.
+ */
+ for (i = (primary_planes - 1); i >= 0; i--) {
+ plane = &dm->dc->caps.planes[i];
+
+ if (initialize_plane(dm, mode_info, i,
+ DRM_PLANE_TYPE_PRIMARY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
- /* Initialize primary planes */
- for (i = (total_primary_planes - 1); i >= 0; i--) {
- if (initialize_plane(dm, mode_info, i)) {
- DRM_ERROR("KMS: Failed to initialize primary plane\n");
+ /*
+ * Initialize overlay planes, index starting after primary planes.
+ * These planes have a higher DRM index than the primary planes since
+ * they should be considered as having a higher z-order.
+ * Order is reversed to match iteration order in atomic check.
+ *
+ * Only support DCN for now, and only expose one so we don't encourage
+ * userspace to use up all the pipes.
+ */
+ for (i = 0; i < dm->dc->caps.max_planes; ++i) {
+ struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
+
+ if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
+ continue;
+
+ if (!plane->blends_with_above || !plane->blends_with_below)
+ continue;
+
+ if (!plane->pixel_format_support.argb8888)
+ continue;
+
+ if (initialize_plane(dm, NULL, primary_planes + i,
+ DRM_PLANE_TYPE_OVERLAY, plane)) {
+ DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
+
+ /* Only create one overlay plane. */
+ break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
@@ -2042,8 +2218,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
fail:
kfree(aencoder);
kfree(aconnector);
- for (i = 0; i < dm->dc->caps.max_planes; i++)
- kfree(mode_info->planes[i]);
+
return -EINVAL;
}
@@ -2124,53 +2299,45 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KAVERI:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_FIJI:
case CHIP_TONGA:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_CARRIZO:
adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_carizzo;
break;
case CHIP_STONEY:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
- adev->mode_info.plane_type = dm_plane_type_stoney;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
@@ -2178,14 +2345,12 @@ static int dm_early_init(void *handle)
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
- adev->mode_info.plane_type = dm_plane_type_default;
break;
#endif
default:
@@ -2243,56 +2408,63 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
};
-static bool fill_rects_from_plane_state(const struct drm_plane_state *state,
- struct dc_plane_state *plane_state)
+
+static int fill_dc_scaling_info(const struct drm_plane_state *state,
+ struct dc_scaling_info *scaling_info)
{
- plane_state->src_rect.x = state->src_x >> 16;
- plane_state->src_rect.y = state->src_y >> 16;
- /* we ignore the mantissa for now and do not deal with floating pixels :( */
- plane_state->src_rect.width = state->src_w >> 16;
+ int scale_w, scale_h;
- if (plane_state->src_rect.width == 0)
- return false;
+ memset(scaling_info, 0, sizeof(*scaling_info));
- plane_state->src_rect.height = state->src_h >> 16;
- if (plane_state->src_rect.height == 0)
- return false;
+ /* Source is fixed 16.16 but we ignore mantissa for now... */
+ scaling_info->src_rect.x = state->src_x >> 16;
+ scaling_info->src_rect.y = state->src_y >> 16;
- plane_state->dst_rect.x = state->crtc_x;
- plane_state->dst_rect.y = state->crtc_y;
+ scaling_info->src_rect.width = state->src_w >> 16;
+ if (scaling_info->src_rect.width == 0)
+ return -EINVAL;
+
+ scaling_info->src_rect.height = state->src_h >> 16;
+ if (scaling_info->src_rect.height == 0)
+ return -EINVAL;
+
+ scaling_info->dst_rect.x = state->crtc_x;
+ scaling_info->dst_rect.y = state->crtc_y;
if (state->crtc_w == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.width = state->crtc_w;
+ scaling_info->dst_rect.width = state->crtc_w;
if (state->crtc_h == 0)
- return false;
+ return -EINVAL;
- plane_state->dst_rect.height = state->crtc_h;
+ scaling_info->dst_rect.height = state->crtc_h;
- plane_state->clip_rect = plane_state->dst_rect;
+ /* DRM doesn't specify clipping on destination output. */
+ scaling_info->clip_rect = scaling_info->dst_rect;
- switch (state->rotation & DRM_MODE_ROTATE_MASK) {
- case DRM_MODE_ROTATE_0:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- case DRM_MODE_ROTATE_90:
- plane_state->rotation = ROTATION_ANGLE_90;
- break;
- case DRM_MODE_ROTATE_180:
- plane_state->rotation = ROTATION_ANGLE_180;
- break;
- case DRM_MODE_ROTATE_270:
- plane_state->rotation = ROTATION_ANGLE_270;
- break;
- default:
- plane_state->rotation = ROTATION_ANGLE_0;
- break;
- }
+ /* TODO: Validate scaling per-format with DC plane caps */
+ scale_w = scaling_info->dst_rect.width * 1000 /
+ scaling_info->src_rect.width;
- return true;
+ if (scale_w < 250 || scale_w > 16000)
+ return -EINVAL;
+
+ scale_h = scaling_info->dst_rect.height * 1000 /
+ scaling_info->src_rect.height;
+
+ if (scale_h < 250 || scale_h > 16000)
+ return -EINVAL;
+
+ /*
+ * The "scaling_quality" can be ignored for now, quality = 0 has DC
+ * assume reasonable defaults based on the format.
+ */
+
+ return 0;
}
+
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
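
The 250..16000 bounds introduced in fill_dc_scaling_info() above are a x1000 fixed-point way of limiting per-axis scaling to roughly 0.25x-16x. A tiny sketch of the same check:

    #include <stdio.h>

    /* Mirrors the bounds check in fill_dc_scaling_info(): scale factors are kept
     * as integers scaled by 1000, so 250..16000 means 0.25x..16x. */
    static int scale_ok(int src, int dst)
    {
            int scale = dst * 1000 / src;

            return scale >= 250 && scale <= 16000;
    }

    int main(void)
    {
            printf("1920 -> 3840: %s\n", scale_ok(1920, 3840) ? "ok" : "rejected"); /* 2.0x   */
            printf("1920 ->  240: %s\n", scale_ok(1920, 240) ? "ok" : "rejected");  /* 0.125x */
            return 0;
    }
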
@@ -2321,10 +2493,16 @@ static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
return offset ? (address + offset * 256) : 0;
}
-static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
- const struct amdgpu_framebuffer *afb,
- struct dc_plane_state *plane_state,
- uint64_t info)
+static int
+fill_plane_dcc_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const union plane_size *plane_size,
+ const union dc_tiling_info *tiling_info,
+ const uint64_t info,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
@@ -2337,133 +2515,103 @@ static bool fill_plane_dcc_attributes(struct amdgpu_device *adev,
memset(&output, 0, sizeof(output));
if (!offset)
- return false;
+ return 0;
+
+ if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
if (!dc->cap_funcs.get_dcc_compression_cap)
- return false;
+ return -EINVAL;
- input.format = plane_state->format;
- input.surface_size.width =
- plane_state->plane_size.grph.surface_size.width;
- input.surface_size.height =
- plane_state->plane_size.grph.surface_size.height;
- input.swizzle_mode = plane_state->tiling_info.gfx9.swizzle;
+ input.format = format;
+ input.surface_size.width = plane_size->grph.surface_size.width;
+ input.surface_size.height = plane_size->grph.surface_size.height;
+ input.swizzle_mode = tiling_info->gfx9.swizzle;
- if (plane_state->rotation == ROTATION_ANGLE_0 ||
- plane_state->rotation == ROTATION_ANGLE_180)
+ if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
input.scan = SCAN_DIRECTION_HORIZONTAL;
- else if (plane_state->rotation == ROTATION_ANGLE_90 ||
- plane_state->rotation == ROTATION_ANGLE_270)
+ else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
input.scan = SCAN_DIRECTION_VERTICAL;
if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
- return false;
+ return -EINVAL;
if (!output.capable)
- return false;
+ return -EINVAL;
if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
- return false;
+ return -EINVAL;
- plane_state->dcc.enable = 1;
- plane_state->dcc.grph.meta_pitch =
+ dcc->enable = 1;
+ dcc->grph.meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
- plane_state->dcc.grph.independent_64b_blks = i64b;
+ dcc->grph.independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
+ address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
+ address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
- return true;
+ return 0;
}
-static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
- struct dc_plane_state *plane_state,
- const struct amdgpu_framebuffer *amdgpu_fb)
-{
- uint64_t tiling_flags;
- unsigned int awidth;
- const struct drm_framebuffer *fb = &amdgpu_fb->base;
- int ret = 0;
- struct drm_format_name_buf format_name;
-
- ret = get_fb_info(
- amdgpu_fb,
- &tiling_flags);
-
- if (ret)
- return ret;
-
- switch (fb->format->format) {
- case DRM_FORMAT_C8:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
- break;
- case DRM_FORMAT_RGB565:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
- break;
- case DRM_FORMAT_XRGB8888:
- case DRM_FORMAT_ARGB8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
- break;
- case DRM_FORMAT_XRGB2101010:
- case DRM_FORMAT_ARGB2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
- break;
- case DRM_FORMAT_XBGR2101010:
- case DRM_FORMAT_ABGR2101010:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
- break;
- case DRM_FORMAT_XBGR8888:
- case DRM_FORMAT_ABGR8888:
- plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
- break;
- case DRM_FORMAT_NV21:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
- break;
- case DRM_FORMAT_NV12:
- plane_state->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
- break;
- default:
- DRM_ERROR("Unsupported screen format %s\n",
- drm_get_format_name(fb->format->format, &format_name));
- return -EINVAL;
- }
-
- memset(&plane_state->address, 0, sizeof(plane_state->address));
- memset(&plane_state->tiling_info, 0, sizeof(plane_state->tiling_info));
- memset(&plane_state->dcc, 0, sizeof(plane_state->dcc));
-
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.type = PLN_ADDR_TYPE_GRAPHICS;
- plane_state->plane_size.grph.surface_size.x = 0;
- plane_state->plane_size.grph.surface_size.y = 0;
- plane_state->plane_size.grph.surface_size.width = fb->width;
- plane_state->plane_size.grph.surface_size.height = fb->height;
- plane_state->plane_size.grph.surface_pitch =
- fb->pitches[0] / fb->format->cpp[0];
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_SRGB;
+static int
+fill_plane_buffer_attributes(struct amdgpu_device *adev,
+ const struct amdgpu_framebuffer *afb,
+ const enum surface_pixel_format format,
+ const enum dc_rotation_angle rotation,
+ const uint64_t tiling_flags,
+ union dc_tiling_info *tiling_info,
+ union plane_size *plane_size,
+ struct dc_plane_dcc_param *dcc,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = &afb->base;
+ int ret;
+ memset(tiling_info, 0, sizeof(*tiling_info));
+ memset(plane_size, 0, sizeof(*plane_size));
+ memset(dcc, 0, sizeof(*dcc));
+ memset(address, 0, sizeof(*address));
+
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
+ plane_size->grph.surface_size.x = 0;
+ plane_size->grph.surface_size.y = 0;
+ plane_size->grph.surface_size.width = fb->width;
+ plane_size->grph.surface_size.height = fb->height;
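+ /* DRM pitches are in bytes; DC expects the pitch in pixels, hence the cpp divide. */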
+ plane_size->grph.surface_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ address->type = PLN_ADDR_TYPE_GRAPHICS;
+ address->grph.addr.low_part = lower_32_bits(afb->address);
+ address->grph.addr.high_part = upper_32_bits(afb->address);
} else {
- awidth = ALIGN(fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->plane_size.video.luma_size.x = 0;
- plane_state->plane_size.video.luma_size.y = 0;
- plane_state->plane_size.video.luma_size.width = awidth;
- plane_state->plane_size.video.luma_size.height = fb->height;
- /* TODO: unhardcode */
- plane_state->plane_size.video.luma_pitch = awidth;
-
- plane_state->plane_size.video.chroma_size.x = 0;
- plane_state->plane_size.video.chroma_size.y = 0;
- plane_state->plane_size.video.chroma_size.width = awidth;
- plane_state->plane_size.video.chroma_size.height = fb->height;
- plane_state->plane_size.video.chroma_pitch = awidth / 2;
-
- /* TODO: unhardcode */
- plane_state->color_space = COLOR_SPACE_YCBCR709;
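+ /* For two-plane formats the chroma plane sits in the same BO, at fb->offsets[1]. */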
+ uint64_t chroma_addr = afb->address + fb->offsets[1];
+
+ plane_size->video.luma_size.x = 0;
+ plane_size->video.luma_size.y = 0;
+ plane_size->video.luma_size.width = fb->width;
+ plane_size->video.luma_size.height = fb->height;
+ plane_size->video.luma_pitch =
+ fb->pitches[0] / fb->format->cpp[0];
+
+ plane_size->video.chroma_size.x = 0;
+ plane_size->video.chroma_size.y = 0;
+ /* TODO: set these based on surface format */
+ plane_size->video.chroma_size.width = fb->width / 2;
+ plane_size->video.chroma_size.height = fb->height / 2;
+
+ plane_size->video.chroma_pitch =
+ fb->pitches[1] / fb->format->cpp[1];
+
+ address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
+ address->video_progressive.luma_addr.low_part =
+ lower_32_bits(afb->address);
+ address->video_progressive.luma_addr.high_part =
+ upper_32_bits(afb->address);
+ address->video_progressive.chroma_addr.low_part =
+ lower_32_bits(chroma_addr);
+ address->video_progressive.chroma_addr.high_part =
+ upper_32_bits(chroma_addr);
}
/* Fill GFX8 params */
@@ -2477,21 +2625,21 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
/* XXX fix me for VI */
- plane_state->tiling_info.gfx8.num_banks = num_banks;
- plane_state->tiling_info.gfx8.array_mode =
+ tiling_info->gfx8.num_banks = num_banks;
+ tiling_info->gfx8.array_mode =
DC_ARRAY_2D_TILED_THIN1;
- plane_state->tiling_info.gfx8.tile_split = tile_split;
- plane_state->tiling_info.gfx8.bank_width = bankw;
- plane_state->tiling_info.gfx8.bank_height = bankh;
- plane_state->tiling_info.gfx8.tile_aspect = mtaspect;
- plane_state->tiling_info.gfx8.tile_mode =
+ tiling_info->gfx8.tile_split = tile_split;
+ tiling_info->gfx8.bank_width = bankw;
+ tiling_info->gfx8.bank_height = bankh;
+ tiling_info->gfx8.tile_aspect = mtaspect;
+ tiling_info->gfx8.tile_mode =
DC_ADDR_SURF_MICRO_TILING_DISPLAY;
} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
== DC_ARRAY_1D_TILED_THIN1) {
- plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
+ tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
}
- plane_state->tiling_info.gfx8.pipe_config =
+ tiling_info->gfx8.pipe_config =
AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
if (adev->asic_type == CHIP_VEGA10 ||
@@ -2499,60 +2647,249 @@ static int fill_plane_attributes_from_fb(struct amdgpu_device *adev,
adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_RAVEN) {
/* Fill GFX9 params */
- plane_state->tiling_info.gfx9.num_pipes =
+ tiling_info->gfx9.num_pipes =
adev->gfx.config.gb_addr_config_fields.num_pipes;
- plane_state->tiling_info.gfx9.num_banks =
+ tiling_info->gfx9.num_banks =
adev->gfx.config.gb_addr_config_fields.num_banks;
- plane_state->tiling_info.gfx9.pipe_interleave =
+ tiling_info->gfx9.pipe_interleave =
adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
- plane_state->tiling_info.gfx9.num_shader_engines =
+ tiling_info->gfx9.num_shader_engines =
adev->gfx.config.gb_addr_config_fields.num_se;
- plane_state->tiling_info.gfx9.max_compressed_frags =
+ tiling_info->gfx9.max_compressed_frags =
adev->gfx.config.gb_addr_config_fields.max_compress_frags;
- plane_state->tiling_info.gfx9.num_rb_per_se =
+ tiling_info->gfx9.num_rb_per_se =
adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
- plane_state->tiling_info.gfx9.swizzle =
+ tiling_info->gfx9.swizzle =
AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
- plane_state->tiling_info.gfx9.shaderEnable = 1;
+ tiling_info->gfx9.shaderEnable = 1;
- fill_plane_dcc_attributes(adev, amdgpu_fb, plane_state,
- tiling_flags);
+ ret = fill_plane_dcc_attributes(adev, afb, format, rotation,
+ plane_size, tiling_info,
+ tiling_flags, dcc, address);
+ if (ret)
+ return ret;
}
- plane_state->visible = true;
- plane_state->scaling_quality.h_taps_c = 0;
- plane_state->scaling_quality.v_taps_c = 0;
+ return 0;
+}
- /* is this needed? is plane_state zeroed at allocation? */
- plane_state->scaling_quality.h_taps = 0;
- plane_state->scaling_quality.v_taps = 0;
- plane_state->stereo_format = PLANE_STEREO_FORMAT_NONE;
+static void
+fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
+ bool *per_pixel_alpha, bool *global_alpha,
+ int *global_alpha_value)
+{
+ *per_pixel_alpha = false;
+ *global_alpha = false;
+ *global_alpha_value = 0xff;
- return ret;
+ if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
+ return;
+ if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
+ static const uint32_t alpha_formats[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ABGR8888,
+ };
+ uint32_t format = plane_state->fb->format->format;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
+ if (format == alpha_formats[i]) {
+ *per_pixel_alpha = true;
+ break;
+ }
+ }
+ }
+
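+ /* DRM plane alpha is 16-bit (0xffff is opaque); DC takes an 8-bit global alpha. */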
+ if (plane_state->alpha < 0xffff) {
+ *global_alpha = true;
+ *global_alpha_value = plane_state->alpha >> 8;
+ }
}
-static int fill_plane_attributes(struct amdgpu_device *adev,
- struct dc_plane_state *dc_plane_state,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
+static int
+fill_plane_color_attributes(const struct drm_plane_state *plane_state,
+ const enum surface_pixel_format format,
+ enum dc_color_space *color_space)
{
- const struct amdgpu_framebuffer *amdgpu_fb =
+ bool full_range;
+
+ *color_space = COLOR_SPACE_SRGB;
+
+ /* DRM color properties only affect non-RGB formats. */
+ if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
+ return 0;
+
+ full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
+
+ switch (plane_state->color_encoding) {
+ case DRM_COLOR_YCBCR_BT601:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR601;
+ else
+ *color_space = COLOR_SPACE_YCBCR601_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT709:
+ if (full_range)
+ *color_space = COLOR_SPACE_YCBCR709;
+ else
+ *color_space = COLOR_SPACE_YCBCR709_LIMITED;
+ break;
+
+ case DRM_COLOR_YCBCR_BT2020:
+ if (full_range)
+ *color_space = COLOR_SPACE_2020_YCBCR;
+ else
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
+ const struct drm_plane_state *plane_state,
+ const uint64_t tiling_flags,
+ struct dc_plane_info *plane_info,
+ struct dc_plane_address *address)
+{
+ const struct drm_framebuffer *fb = plane_state->fb;
+ const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
- const struct drm_crtc *crtc = plane_state->crtc;
- int ret = 0;
+ struct drm_format_name_buf format_name;
+ int ret;
- if (!fill_rects_from_plane_state(plane_state, dc_plane_state))
+ memset(plane_info, 0, sizeof(*plane_info));
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_C8:
+ plane_info->format =
+ SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
+ break;
+ case DRM_FORMAT_RGB565:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
+ break;
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_ABGR2101010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
+ break;
+ case DRM_FORMAT_NV21:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
+ break;
+ case DRM_FORMAT_NV12:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
+ break;
+ default:
+ DRM_ERROR(
+ "Unsupported screen format %s\n",
+ drm_get_format_name(fb->format->format, &format_name));
return -EINVAL;
+ }
+
+ switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ case DRM_MODE_ROTATE_90:
+ plane_info->rotation = ROTATION_ANGLE_90;
+ break;
+ case DRM_MODE_ROTATE_180:
+ plane_info->rotation = ROTATION_ANGLE_180;
+ break;
+ case DRM_MODE_ROTATE_270:
+ plane_info->rotation = ROTATION_ANGLE_270;
+ break;
+ default:
+ plane_info->rotation = ROTATION_ANGLE_0;
+ break;
+ }
- ret = fill_plane_attributes_from_fb(
- crtc->dev->dev_private,
- dc_plane_state,
- amdgpu_fb);
+ plane_info->visible = true;
+ plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+ ret = fill_plane_color_attributes(plane_state, plane_info->format,
+ &plane_info->color_space);
if (ret)
return ret;
+ ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
+ plane_info->rotation, tiling_flags,
+ &plane_info->tiling_info,
+ &plane_info->plane_size,
+ &plane_info->dcc, address);
+ if (ret)
+ return ret;
+
+ fill_blending_from_plane_state(
+ plane_state, &plane_info->per_pixel_alpha,
+ &plane_info->global_alpha, &plane_info->global_alpha_value);
+
+ return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+ struct dc_plane_state *dc_plane_state,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ const struct amdgpu_framebuffer *amdgpu_fb =
+ to_amdgpu_framebuffer(plane_state->fb);
+ struct dc_scaling_info scaling_info;
+ struct dc_plane_info plane_info;
+ uint64_t tiling_flags;
+ int ret;
+
+ ret = fill_dc_scaling_info(plane_state, &scaling_info);
+ if (ret)
+ return ret;
+
+ dc_plane_state->src_rect = scaling_info.src_rect;
+ dc_plane_state->dst_rect = scaling_info.dst_rect;
+ dc_plane_state->clip_rect = scaling_info.clip_rect;
+ dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+ ret = get_fb_info(amdgpu_fb, &tiling_flags);
+ if (ret)
+ return ret;
+
+ ret = fill_dc_plane_info_and_addr(adev, plane_state, tiling_flags,
+ &plane_info,
+ &dc_plane_state->address);
+ if (ret)
+ return ret;
+
+ dc_plane_state->format = plane_info.format;
+ dc_plane_state->color_space = plane_info.color_space;
+ dc_plane_state->plane_size = plane_info.plane_size;
+ dc_plane_state->rotation = plane_info.rotation;
+ dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+ dc_plane_state->stereo_format = plane_info.stereo_format;
+ dc_plane_state->tiling_info = plane_info.tiling_info;
+ dc_plane_state->visible = plane_info.visible;
+ dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+ dc_plane_state->global_alpha = plane_info.global_alpha;
+ dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+ dc_plane_state->dcc = plane_info.dcc;
+
/*
* Always set input transfer function, since plane state is refreshed
* every time.
@@ -3124,6 +3461,8 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
dc_stream_retain(state->stream);
}
+ state->active_planes = cur->active_planes;
+ state->interrupts_enabled = cur->interrupts_enabled;
state->vrr_params = cur->vrr_params;
state->vrr_infopacket = cur->vrr_infopacket;
state->abm_level = cur->abm_level;
@@ -3136,12 +3475,41 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
return &state->base;
}
+static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
+{
+ enum dc_irq_source irq_source;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = crtc->dev->dev_private;
+ int rc;
+
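+ /* Each CRTC's OTG instance maps to its own VUPDATE interrupt source. */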
+ irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
+
+ rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+
+ DRM_DEBUG_DRIVER("crtc %d - vupdate irq %sabling: r=%d\n",
+ acrtc->crtc_id, enable ? "en" : "dis", rc);
+ return rc;
+}
static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
{
enum dc_irq_source irq_source;
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ int rc = 0;
+
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_vrr_active(acrtc_state))
+ rc = dm_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = dm_set_vupdate_irq(crtc, false);
+ }
+
+ if (rc)
+ return rc;
irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
return dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
@@ -3519,6 +3887,76 @@ static void dm_crtc_helper_disable(struct drm_crtc *crtc)
{
}
+static bool does_crtc_have_active_cursor(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_device *dev = new_crtc_state->crtc->dev;
+ struct drm_plane *plane;
+
+ drm_for_each_plane_mask(plane, dev, new_crtc_state->plane_mask) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ return true;
+ }
+
+ return false;
+}
+
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_atomic_state *state = new_crtc_state->state;
+ struct drm_plane *plane;
+ int num_active = 0;
+
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+ struct drm_plane_state *new_plane_state;
+
+ /* Cursor planes are "fake". */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state) {
+ /*
+ * The plane is enabled on the CRTC and hasn't changed
+ * state. This means that it previously passed
+ * validation and is therefore enabled.
+ */
+ num_active += 1;
+ continue;
+ }
+
+ /* We need a framebuffer to be considered enabled. */
+ num_active += (new_plane_state->fb != NULL);
+ }
+
+ return num_active;
+}
+
+/*
+ * Sets whether interrupts should be enabled on a specific CRTC.
+ * We require that the stream be enabled and that there exist active
+ * DC planes on the stream.
+ */
+static void
+dm_update_crtc_interrupt_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *new_crtc_state)
+{
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+
+ dm_new_crtc_state->active_planes = 0;
+ dm_new_crtc_state->interrupts_enabled = false;
+
+ if (!dm_new_crtc_state->stream)
+ return;
+
+ dm_new_crtc_state->active_planes =
+ count_crtc_active_planes(new_crtc_state);
+
+ dm_new_crtc_state->interrupts_enabled =
+ dm_new_crtc_state->active_planes > 0;
+}
+
static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -3527,6 +3965,14 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
int ret = -EINVAL;
+ /*
+ * Update interrupt state for the CRTC. This needs to happen whenever
+ * the CRTC has changed or whenever any of its planes have changed.
+ * Atomic check satisfies both of these requirements since the CRTC
+ * is added to the state by DRM during drm_atomic_helper_check_planes.
+ */
+ dm_update_crtc_interrupt_state(crtc, state);
+
if (unlikely(!dm_crtc_state->stream &&
modeset_required(state, NULL, dm_crtc_state->stream))) {
WARN_ON(1);
@@ -3537,6 +3983,15 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
if (!dm_crtc_state->stream)
return 0;
+ /*
+ * A cursor on the stream requires at least one other hardware
+ * plane to be enabled.
+ */
+ if (state->enable && state->active &&
+ does_crtc_have_active_cursor(state) &&
+ dm_crtc_state->active_planes == 0)
+ return -EINVAL;
+
if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
return 0;
@@ -3583,11 +4038,8 @@ static void dm_drm_plane_reset(struct drm_plane *plane)
amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
WARN_ON(amdgpu_state == NULL);
- if (amdgpu_state) {
- plane->state = &amdgpu_state->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
- }
+ if (amdgpu_state)
+ __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
}
static struct drm_plane_state *
@@ -3637,10 +4089,8 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_gem_object *obj;
struct amdgpu_device *adev;
struct amdgpu_bo *rbo;
- uint64_t chroma_addr = 0;
struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
- uint64_t tiling_flags, dcc_address;
- unsigned int awidth;
+ uint64_t tiling_flags;
uint32_t domain;
int r;
@@ -3693,29 +4143,11 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
struct dc_plane_state *plane_state = dm_plane_state_new->dc_state;
- if (plane_state->format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
- plane_state->address.grph.addr.low_part = lower_32_bits(afb->address);
- plane_state->address.grph.addr.high_part = upper_32_bits(afb->address);
-
- dcc_address =
- get_dcc_address(afb->address, tiling_flags);
- plane_state->address.grph.meta_addr.low_part =
- lower_32_bits(dcc_address);
- plane_state->address.grph.meta_addr.high_part =
- upper_32_bits(dcc_address);
- } else {
- awidth = ALIGN(new_state->fb->width, 64);
- plane_state->address.type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
- plane_state->address.video_progressive.luma_addr.low_part
- = lower_32_bits(afb->address);
- plane_state->address.video_progressive.luma_addr.high_part
- = upper_32_bits(afb->address);
- chroma_addr = afb->address + (u64)awidth * new_state->fb->height;
- plane_state->address.video_progressive.chroma_addr.low_part
- = lower_32_bits(chroma_addr);
- plane_state->address.video_progressive.chroma_addr.high_part
- = upper_32_bits(chroma_addr);
- }
+ fill_plane_buffer_attributes(
+ adev, afb, plane_state->format, plane_state->rotation,
+ tiling_flags, &plane_state->tiling_info,
+ &plane_state->plane_size, &plane_state->dcc,
+ &plane_state->address);
}
return 0;
@@ -3747,13 +4179,18 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
{
struct amdgpu_device *adev = plane->dev->dev_private;
struct dc *dc = adev->dm.dc;
- struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
+ struct dm_plane_state *dm_plane_state;
+ struct dc_scaling_info scaling_info;
+ int ret;
+
+ dm_plane_state = to_dm_plane_state(state);
if (!dm_plane_state->dc_state)
return 0;
- if (!fill_rects_from_plane_state(state, dm_plane_state->dc_state))
- return -EINVAL;
+ ret = fill_dc_scaling_info(state, &scaling_info);
+ if (ret)
+ return ret;
if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
return 0;
@@ -3826,64 +4263,115 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_ABGR2101010,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565,
};
-static const uint32_t yuv_formats[] = {
- DRM_FORMAT_NV12,
- DRM_FORMAT_NV21,
+static const uint32_t overlay_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGB565
};
static const u32 cursor_formats[] = {
DRM_FORMAT_ARGB8888
};
-static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
- struct drm_plane *plane,
- unsigned long possible_crtcs)
+static int get_plane_formats(const struct drm_plane *plane,
+ const struct dc_plane_cap *plane_cap,
+ uint32_t *formats, int max_formats)
{
- int res = -EPERM;
+ int i, num_formats = 0;
+
+ /*
+ * TODO: Query support for each group of formats directly from
+ * DC plane caps. This will require adding more formats to the
+ * caps list.
+ */
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- rgb_formats,
- ARRAY_SIZE(rgb_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = rgb_formats[i];
+ }
+
+ if (plane_cap && plane_cap->pixel_format_support.nv12)
+ formats[num_formats++] = DRM_FORMAT_NV12;
break;
+
case DRM_PLANE_TYPE_OVERLAY:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- yuv_formats,
- ARRAY_SIZE(yuv_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = overlay_formats[i];
+ }
break;
+
case DRM_PLANE_TYPE_CURSOR:
- res = drm_universal_plane_init(
- dm->adev->ddev,
- plane,
- possible_crtcs,
- &dm_plane_funcs,
- cursor_formats,
- ARRAY_SIZE(cursor_formats),
- NULL, plane->type, NULL);
+ for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
+ if (num_formats >= max_formats)
+ break;
+
+ formats[num_formats++] = cursor_formats[i];
+ }
break;
}
+ return num_formats;
+}
+
+static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
+ struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct dc_plane_cap *plane_cap)
+{
+ uint32_t formats[32];
+ int num_formats;
+ int res = -EPERM;
+
+ num_formats = get_plane_formats(plane, plane_cap, formats,
+ ARRAY_SIZE(formats));
+
+ res = drm_universal_plane_init(dm->adev->ddev, plane, possible_crtcs,
+ &dm_plane_funcs, formats, num_formats,
+ NULL, plane->type, NULL);
+ if (res)
+ return res;
+
+ if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
+ plane_cap && plane_cap->per_pixel_alpha) {
+ unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI);
+
+ drm_plane_create_alpha_property(plane);
+ drm_plane_create_blend_mode_property(plane, blend_caps);
+ }
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ plane_cap && plane_cap->pixel_format_support.nv12) {
+ /* This only affects YUV formats. */
+ drm_plane_create_color_properties(
+ plane,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
+ BIT(DRM_COLOR_YCBCR_FULL_RANGE),
+ DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
+ }
+
drm_plane_helper_add(plane, &dm_plane_helper_funcs);
/* Create (reset) the plane state */
if (plane->funcs->reset)
plane->funcs->reset(plane);
-
- return res;
+ return 0;
}
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
@@ -3900,7 +4388,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
goto fail;
cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
- res = amdgpu_dm_plane_init(dm, cursor_plane, 0);
+ res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
if (!acrtc)
@@ -4334,6 +4822,8 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
DRM_ERROR("Failed to create debugfs for connector");
goto out_free;
}
+ aconnector->debugfs_dpcd_address = 0;
+ aconnector->debugfs_dpcd_size = 0;
#endif
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
@@ -4473,9 +4963,13 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
x = plane->state->crtc_x;
y = plane->state->crtc_y;
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
+
+ if (crtc->primary->state) {
+ /* avivo cursor are offset into the total surface */
+ x += crtc->primary->state->src_x >> 16;
+ y += crtc->primary->state->src_y >> 16;
+ }
+
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -4582,9 +5076,10 @@ static void update_freesync_state_on_stream(
struct dc_plane_state *surface,
u32 flip_timestamp_in_us)
{
- struct mod_vrr_params vrr_params = new_crtc_state->vrr_params;
+ struct mod_vrr_params vrr_params;
struct dc_info_packet vrr_infopacket = {0};
- struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
if (!new_stream)
return;
@@ -4597,19 +5092,8 @@ static void update_freesync_state_on_stream(
if (!new_stream->timing.h_total || !new_stream->timing.v_total)
return;
- if (new_crtc_state->vrr_supported &&
- config.min_refresh_in_uhz &&
- config.max_refresh_in_uhz) {
- config.state = new_crtc_state->base.vrr_enabled ?
- VRR_STATE_ACTIVE_VARIABLE :
- VRR_STATE_INACTIVE;
- } else {
- config.state = VRR_STATE_UNSUPPORTED;
- }
-
- mod_freesync_build_vrr_params(dm->freesync_module,
- new_stream,
- &config, &vrr_params);
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
if (surface) {
mod_freesync_handle_preflip(
@@ -4618,6 +5102,12 @@ static void update_freesync_state_on_stream(
new_stream,
flip_timestamp_in_us,
&vrr_params);
+
+ if (adev->family < AMDGPU_FAMILY_AI &&
+ amdgpu_dm_vrr_active(new_crtc_state)) {
+ mod_freesync_handle_v_update(dm->freesync_module,
+ new_stream, &vrr_params);
+ }
}
mod_freesync_build_vrr_infopacket(
@@ -4649,6 +5139,100 @@ static void update_freesync_state_on_stream(
new_crtc_state->base.crtc->base.id,
(int)new_crtc_state->base.vrr_enabled,
(int)vrr_params.state);
+
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
+static void pre_update_freesync_state_on_stream(
+ struct amdgpu_display_manager *dm,
+ struct dm_crtc_state *new_crtc_state)
+{
+ struct dc_stream_state *new_stream = new_crtc_state->stream;
+ struct mod_vrr_params vrr_params;
+ struct mod_freesync_config config = new_crtc_state->freesync_config;
+ struct amdgpu_device *adev = dm->adev;
+ unsigned long flags;
+
+ if (!new_stream)
+ return;
+
+ /*
+ * TODO: Determine why min/max totals and vrefresh can be 0 here.
+ * For now it's sufficient to just guard against these conditions.
+ */
+ if (!new_stream->timing.h_total || !new_stream->timing.v_total)
+ return;
+
+ spin_lock_irqsave(&adev->ddev->event_lock, flags);
+ vrr_params = new_crtc_state->vrr_params;
+
+ if (new_crtc_state->vrr_supported &&
+ config.min_refresh_in_uhz &&
+ config.max_refresh_in_uhz) {
+ config.state = new_crtc_state->base.vrr_enabled ?
+ VRR_STATE_ACTIVE_VARIABLE :
+ VRR_STATE_INACTIVE;
+ } else {
+ config.state = VRR_STATE_UNSUPPORTED;
+ }
+
+ mod_freesync_build_vrr_params(dm->freesync_module,
+ new_stream,
+ &config, &vrr_params);
+
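+ /* Note when the timing adjust changes so stream_update.adjust gets programmed on commit. */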
+ new_crtc_state->freesync_timing_changed |=
+ (memcmp(&new_crtc_state->vrr_params.adjust,
+ &vrr_params.adjust,
+ sizeof(vrr_params.adjust)) != 0);
+
+ new_crtc_state->vrr_params = vrr_params;
+ spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
+}
+
+static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
+ struct dm_crtc_state *new_state)
+{
+ bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
+ bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
+
+ if (!old_vrr_active && new_vrr_active) {
+ /* Transition VRR inactive -> active:
+ * While VRR is active, we must not disable vblank irq, as a
+ * reenable after disable would compute bogus vblank/pflip
+ * timestamps if it likely happened inside display front-porch.
+ *
+ * We also need vupdate irq for the actual core vblank handling
+ * at end of vblank.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, true);
+ drm_crtc_vblank_get(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ } else if (old_vrr_active && !new_vrr_active) {
+ /* Transition VRR active -> inactive:
+ * Allow vblank irq disable again for fixed refresh rate.
+ */
+ dm_set_vupdate_irq(new_state->base.crtc, false);
+ drm_crtc_vblank_put(new_state->base.crtc);
+ DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
+ __func__, new_state->base.crtc->base.id);
+ }
+}
+
+static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ int i;
+
+ /*
+ * TODO: Make this per-stream so we don't issue redundant updates for
+ * commits with multiple streams.
+ */
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i)
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ handle_cursor_update(plane, old_plane_state);
}
static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
@@ -4656,7 +5240,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct drm_device *dev,
struct amdgpu_display_manager *dm,
struct drm_crtc *pcrtc,
- bool *wait_for_vblank)
+ bool wait_for_vblank)
{
uint32_t i, r;
uint64_t timestamp_ns;
@@ -4668,42 +5252,42 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
struct dm_crtc_state *dm_old_crtc_state =
to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
- int flip_count = 0, planes_count = 0, vpos, hpos;
+ int planes_count = 0, vpos, hpos;
unsigned long flags;
struct amdgpu_bo *abo;
- uint64_t tiling_flags, dcc_address;
- uint32_t target, target_vblank;
- uint64_t last_flip_vblank;
- bool vrr_active = acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE;
-
- struct {
- struct dc_surface_update surface_updates[MAX_SURFACES];
- struct dc_flip_addrs flip_addrs[MAX_SURFACES];
- struct dc_stream_update stream_update;
- } *flip;
-
+ uint64_t tiling_flags;
+ uint32_t target_vblank, last_flip_vblank;
+ bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
+ bool pflip_present = false;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
struct dc_scaling_info scaling_infos[MAX_SURFACES];
+ struct dc_flip_addrs flip_addrs[MAX_SURFACES];
struct dc_stream_update stream_update;
- } *full;
+ } *bundle;
- flip = kzalloc(sizeof(*flip), GFP_KERNEL);
- full = kzalloc(sizeof(*full), GFP_KERNEL);
+ bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
- if (!flip || !full) {
- dm_error("Failed to allocate update bundles\n");
+ if (!bundle) {
+ dm_error("Failed to allocate update bundle\n");
goto cleanup;
}
+ /*
+ * Disable the cursor first if we're disabling all the planes.
+ * It'll remain on the screen after the planes are re-enabled
+ * if we don't.
+ */
+ if (acrtc_state->active_planes == 0)
+ amdgpu_dm_commit_cursors(state);
+
/* update planes when needed */
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *new_crtc_state;
struct drm_framebuffer *fb = new_plane_state->fb;
- struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
- bool pflip_needed;
+ bool plane_needs_flip;
struct dc_plane_state *dc_plane;
struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
@@ -4718,122 +5302,96 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (!new_crtc_state->active)
continue;
- pflip_needed = old_plane_state->fb &&
- old_plane_state->fb != new_plane_state->fb;
-
dc_plane = dm_new_plane_state->dc_state;
- if (pflip_needed) {
- /*
- * Assume even ONE crtc with immediate flip means
- * entire can't wait for VBLANK
- * TODO Check if it's correct
- */
- if (new_pcrtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
- *wait_for_vblank = false;
+ bundle->surface_updates[planes_count].surface = dc_plane;
+ if (new_pcrtc_state->color_mgmt_changed) {
+ bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
+ bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ }
- /*
- * TODO This might fail and hence better not used, wait
- * explicitly on fences instead
- * and in general should be called for
- * blocking commit to as per framework helpers
- */
- abo = gem_to_amdgpu_bo(fb->obj[0]);
- r = amdgpu_bo_reserve(abo, true);
- if (unlikely(r != 0))
- DRM_ERROR("failed to reserve buffer before flip\n");
+ fill_dc_scaling_info(new_plane_state,
+ &bundle->scaling_infos[planes_count]);
- /*
- * Wait for all fences on this FB. Do limited wait to avoid
- * deadlock during GPU reset when this fence will not signal
- * but we hold reservation lock for the BO.
- */
- r = reservation_object_wait_timeout_rcu(abo->tbo.resv,
- true, false,
- msecs_to_jiffies(5000));
- if (unlikely(r == 0))
- DRM_ERROR("Waiting for fences timed out.");
+ bundle->surface_updates[planes_count].scaling_info =
+ &bundle->scaling_infos[planes_count];
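+ /* Only planes that keep a framebuffer across the commit need a page flip. */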
+ plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
+ pflip_present = pflip_present || plane_needs_flip;
- amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
+ if (!plane_needs_flip) {
+ planes_count += 1;
+ continue;
+ }
- amdgpu_bo_unreserve(abo);
+ abo = gem_to_amdgpu_bo(fb->obj[0]);
- flip->flip_addrs[flip_count].address.grph.addr.low_part = lower_32_bits(afb->address);
- flip->flip_addrs[flip_count].address.grph.addr.high_part = upper_32_bits(afb->address);
+ /*
+ * Wait for all fences on this FB. Do limited wait to avoid
+ * deadlock during GPU reset when this fence will not signal
+ * but we hold reservation lock for the BO.
+ */
+ r = reservation_object_wait_timeout_rcu(abo->tbo.resv, true,
+ false,
+ msecs_to_jiffies(5000));
+ if (unlikely(r <= 0))
+ DRM_ERROR("Waiting for fences timed out or interrupted!");
- dcc_address = get_dcc_address(afb->address, tiling_flags);
- flip->flip_addrs[flip_count].address.grph.meta_addr.low_part = lower_32_bits(dcc_address);
- flip->flip_addrs[flip_count].address.grph.meta_addr.high_part = upper_32_bits(dcc_address);
+ /*
+ * TODO This might fail and hence better not used, wait
+ * explicitly on fences instead
+ * and in general should be called for
+ * blocking commits, as per the framework helpers.
+ */
+ r = amdgpu_bo_reserve(abo, true);
+ if (unlikely(r != 0))
+ DRM_ERROR("failed to reserve buffer before flip\n");
- flip->flip_addrs[flip_count].flip_immediate =
- (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
+ amdgpu_bo_get_tiling_flags(abo, &tiling_flags);
- timestamp_ns = ktime_get_ns();
- flip->flip_addrs[flip_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
- flip->surface_updates[flip_count].flip_addr = &flip->flip_addrs[flip_count];
- flip->surface_updates[flip_count].surface = dc_plane;
+ amdgpu_bo_unreserve(abo);
- if (!flip->surface_updates[flip_count].surface) {
- DRM_ERROR("No surface for CRTC: id=%d\n",
- acrtc_attach->crtc_id);
- continue;
- }
+ fill_dc_plane_info_and_addr(
+ dm->adev, new_plane_state, tiling_flags,
+ &bundle->plane_infos[planes_count],
+ &bundle->flip_addrs[planes_count].address);
- if (plane == pcrtc->primary)
- update_freesync_state_on_stream(
- dm,
- acrtc_state,
- acrtc_state->stream,
- dc_plane,
- flip->flip_addrs[flip_count].flip_timestamp_in_us);
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
- DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
- __func__,
- flip->flip_addrs[flip_count].address.grph.addr.high_part,
- flip->flip_addrs[flip_count].address.grph.addr.low_part);
+ bundle->flip_addrs[planes_count].flip_immediate =
+ (crtc->state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC) != 0;
- flip_count += 1;
- }
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
- full->surface_updates[planes_count].surface = dc_plane;
- if (new_pcrtc_state->color_mgmt_changed) {
- full->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
- full->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
}
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
- full->scaling_infos[planes_count].scaling_quality = dc_plane->scaling_quality;
- full->scaling_infos[planes_count].src_rect = dc_plane->src_rect;
- full->scaling_infos[planes_count].dst_rect = dc_plane->dst_rect;
- full->scaling_infos[planes_count].clip_rect = dc_plane->clip_rect;
- full->surface_updates[planes_count].scaling_info = &full->scaling_infos[planes_count];
-
-
- full->plane_infos[planes_count].color_space = dc_plane->color_space;
- full->plane_infos[planes_count].format = dc_plane->format;
- full->plane_infos[planes_count].plane_size = dc_plane->plane_size;
- full->plane_infos[planes_count].rotation = dc_plane->rotation;
- full->plane_infos[planes_count].horizontal_mirror = dc_plane->horizontal_mirror;
- full->plane_infos[planes_count].stereo_format = dc_plane->stereo_format;
- full->plane_infos[planes_count].tiling_info = dc_plane->tiling_info;
- full->plane_infos[planes_count].visible = dc_plane->visible;
- full->plane_infos[planes_count].per_pixel_alpha = dc_plane->per_pixel_alpha;
- full->plane_infos[planes_count].dcc = dc_plane->dcc;
- full->surface_updates[planes_count].plane_info = &full->plane_infos[planes_count];
+ DRM_DEBUG_DRIVER("%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
planes_count += 1;
}
- /*
- * TODO: For proper atomic behaviour, we should be calling into DC once with
- * all the changes. However, DC refuses to do pageflips and non-pageflip
- * changes in the same call. Change DC to respect atomic behaviour,
- * hopefully eliminating dc_*_update structs in their entirety.
- */
- if (flip_count) {
+ if (pflip_present) {
if (!vrr_active) {
/* Use old throttling in non-vrr fixed refresh rate mode
* to keep flip scheduling based on target vblank counts
@@ -4841,7 +5399,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
}
else {
/* For variable refresh rate mode only:
@@ -4857,11 +5415,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
}
- target = (uint32_t)last_flip_vblank + *wait_for_vblank;
-
- /* Prepare wait for target vblank early - before the fence-waits */
- target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
- amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
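+ /* wait_for_vblank is 0 or 1, so target either the current or the next vblank. */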
+ target_vblank = last_flip_vblank + wait_for_vblank;
/*
* Wait until we're out of the vertical blank period before the one
@@ -4892,54 +5446,102 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
if (acrtc_state->stream) {
if (acrtc_state->freesync_timing_changed)
- flip->stream_update.adjust =
+ bundle->stream_update.adjust =
&acrtc_state->stream->adjust;
if (acrtc_state->freesync_vrr_info_changed)
- flip->stream_update.vrr_infopacket =
+ bundle->stream_update.vrr_infopacket =
&acrtc_state->stream->vrr_infopacket;
}
-
- mutex_lock(&dm->dc_lock);
- dc_commit_updates_for_stream(dm->dc,
- flip->surface_updates,
- flip_count,
- acrtc_state->stream,
- &flip->stream_update,
- dc_state);
- mutex_unlock(&dm->dc_lock);
}
- if (planes_count) {
+ /* Update the planes if changed or disable if we don't have any. */
+ if (planes_count || acrtc_state->active_planes == 0) {
if (new_pcrtc_state->mode_changed) {
- full->stream_update.src = acrtc_state->stream->src;
- full->stream_update.dst = acrtc_state->stream->dst;
+ bundle->stream_update.src = acrtc_state->stream->src;
+ bundle->stream_update.dst = acrtc_state->stream->dst;
}
if (new_pcrtc_state->color_mgmt_changed)
- full->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
+ bundle->stream_update.out_transfer_func = acrtc_state->stream->out_transfer_func;
acrtc_state->stream->abm_level = acrtc_state->abm_level;
if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
- full->stream_update.abm_level = &acrtc_state->abm_level;
+ bundle->stream_update.abm_level = &acrtc_state->abm_level;
mutex_lock(&dm->dc_lock);
dc_commit_updates_for_stream(dm->dc,
- full->surface_updates,
+ bundle->surface_updates,
planes_count,
acrtc_state->stream,
- &full->stream_update,
+ &bundle->stream_update,
dc_state);
mutex_unlock(&dm->dc_lock);
}
- for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
- handle_cursor_update(plane, old_plane_state);
+ /*
+ * Update cursor state *after* programming all the planes.
+ * This avoids redundant programming when a single plane is being
+ * disabled, since the pipes for that plane are torn down anyway.
+ */
+ if (acrtc_state->active_planes)
+ amdgpu_dm_commit_cursors(state);
cleanup:
- kfree(flip);
- kfree(full);
+ kfree(bundle);
+}
+
+/*
+ * Enable interrupts on CRTCs that are newly active, undergone
+ * a modeset, or have active planes again.
+ *
+ * Done in two passes, based on the for_modeset flag:
+ * Pass 1: For CRTCs going through modeset
+ * Pass 2: For CRTCs going from 0 to n active planes
+ *
+ * Interrupts can only be enabled after the planes are programmed,
+ * so this requires a two-pass approach since we don't want to
+ * just defer the interrupt enables until after the planes are committed every time.
+ */
+static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
+ struct drm_atomic_state *state,
+ bool for_modeset)
+{
+ struct amdgpu_device *adev = dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
+ int i;
+
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct dm_crtc_state *dm_new_crtc_state =
+ to_dm_crtc_state(new_crtc_state);
+ struct dm_crtc_state *dm_old_crtc_state =
+ to_dm_crtc_state(old_crtc_state);
+ bool modeset = drm_atomic_crtc_needs_modeset(new_crtc_state);
+ bool run_pass;
+
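+ /* Pass 1 handles CRTCs undergoing a modeset; pass 2 handles CRTCs whose interrupts were off. */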
+ run_pass = (for_modeset && modeset) ||
+ (!for_modeset && !modeset &&
+ !dm_old_crtc_state->interrupts_enabled);
+
+ if (!run_pass)
+ continue;
+
+ if (!dm_new_crtc_state->interrupts_enabled)
+ continue;
+
+ manage_dm_interrupts(adev, acrtc, true);
+
+#ifdef CONFIG_DEBUG_FS
+ /* The stream has changed so CRC capture needs to be re-enabled. */
+ if (dm_new_crtc_state->crc_enabled) {
+ dm_new_crtc_state->crc_enabled = false;
+ amdgpu_dm_crtc_set_crc_source(crtc, "auto");
+ }
+#endif
+ }
}
/*
@@ -4953,8 +5555,7 @@ cleanup:
static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
struct dc_stream_state *stream_state)
{
- stream_state->mode_changed =
- crtc_state->mode_changed || crtc_state->active_changed;
+ stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
@@ -4967,30 +5568,41 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
int i;
/*
- * We evade vblanks and pflips on crtc that
- * should be changed. We do it here to flush & disable
- * interrupts before drm_swap_state is called in drm_atomic_helper_commit
- * it will update crtc->dm_crtc_state->stream pointer which is used in
- * the ISRs.
+ * We evade vblank and pflip interrupts on CRTCs that are undergoing
+ * a modeset, being disabled, or have no active planes.
+ *
+ * It's done in atomic commit rather than commit tail for now since
+ * some of these interrupt handlers access the current CRTC state and
+ * potentially the stream pointer itself.
+ *
+ * Since the atomic state is swapped within atomic commit and not within
+ * commit tail, this would lead to the new state (that hasn't been
+ * committed yet) being accessed from within the handlers.
+ *
+ * TODO: Fix this so we can do this in commit tail and not have to block
+ * in atomic check.
*/
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- if (drm_atomic_crtc_needs_modeset(new_crtc_state)
- && dm_old_crtc_state->stream) {
+ if (dm_old_crtc_state->interrupts_enabled &&
+ (!dm_new_crtc_state->interrupts_enabled ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))) {
/*
- * If the stream is removed and CRC capture was
- * enabled on the CRTC the extra vblank reference
- * needs to be dropped since CRC capture will be
- * disabled.
+ * Drop the extra vblank reference added by CRC
+ * capture if applicable.
*/
- if (!dm_new_crtc_state->stream
- && dm_new_crtc_state->crc_enabled) {
+ if (dm_new_crtc_state->crc_enabled)
drm_crtc_vblank_put(crtc);
+
+ /*
+ * Only keep CRC capture enabled if there's
+ * still a stream for the CRTC.
+ */
+ if (!dm_new_crtc_state->stream)
dm_new_crtc_state->crc_enabled = false;
- }
manage_dm_interrupts(adev, acrtc, false);
}
@@ -5037,7 +5649,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dc_state = dm_state->context;
} else {
/* No state changes, retain current state. */
- dc_state_temp = dc_create_state();
+ dc_state_temp = dc_create_state(dm->dc);
ASSERT(dc_state_temp);
dc_state = dc_state_temp;
dc_resource_state_copy_construct_current(dm->dc, dc_state);
@@ -5206,45 +5818,41 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_unlock(&dm->dc_lock);
}
+ /* Count number of newly disabled CRTCs for dropping PM refs later. */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- /*
- * loop to enable interrupts on newly arrived crtc
- */
- struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
- bool modeset_needed;
-
+ new_crtc_state, i) {
if (old_crtc_state->active && !new_crtc_state->active)
crtc_disable_count++;
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
- modeset_needed = modeset_required(
- new_crtc_state,
- dm_new_crtc_state->stream,
- dm_old_crtc_state->stream);
- if (dm_new_crtc_state->stream == NULL || !modeset_needed)
- continue;
-
- manage_dm_interrupts(adev, acrtc, true);
+ /* Update freesync active state. */
+ pre_update_freesync_state_on_stream(dm, dm_new_crtc_state);
-#ifdef CONFIG_DEBUG_FS
- /* The stream has changed so CRC capture needs to re-enabled. */
- if (dm_new_crtc_state->crc_enabled)
- amdgpu_dm_crtc_set_crc_source(crtc, "auto");
-#endif
+ /* Handle vrr on->off / off->on transitions */
+ amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
+ dm_new_crtc_state);
}
+ /* Enable interrupts for CRTCs going through a modeset. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, true);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
+ if (new_crtc_state->pageflip_flags & DRM_MODE_PAGE_FLIP_ASYNC)
+ wait_for_vblank = false;
+
/* update planes when needed per crtc*/
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream)
amdgpu_dm_commit_planes(state, dc_state, dev,
- dm, crtc, &wait_for_vblank);
+ dm, crtc, wait_for_vblank);
}
+ /* Enable interrupts for CRTCs going from 0 to n active planes. */
+ amdgpu_dm_enable_crtc_interrupts(dev, state, false);
/*
* send vblank event on all events not handled in flip and
@@ -5484,21 +6092,12 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector = NULL;
struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
- struct drm_plane_state *new_plane_state = NULL;
new_stream = NULL;
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
acrtc = to_amdgpu_crtc(crtc);
-
- new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
-
- if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
- ret = -EINVAL;
- goto fail;
- }
-
aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
/* TODO This hack should go away */
@@ -5661,6 +6260,9 @@ skip_modeset:
update_stream_scaling_settings(
&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
/*
* Color management settings. We also update color properties
* when a modeset is needed, to ensure it gets reprogrammed.
@@ -5685,6 +6287,69 @@ fail:
return ret;
}
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ int i;
+
+ /*
+ * TODO: Remove this hack once the checks below are sufficient
+ * to determine when we need to reset all the planes on
+ * the stream.
+ */
+ if (state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context. */
+ if (!new_plane_state->crtc)
+ return false;
+
+ new_crtc_state =
+ drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
+
+ if (!new_crtc_state)
+ return true;
+
+ if (drm_atomic_crtc_needs_modeset(new_crtc_state))
+ return true;
+
+ /*
+ * If there are any new primary or overlay planes being added or
+ * removed then the z-order can potentially change. To ensure
+ * correct z-order and pipe acquisition the current DC architecture
+ * requires us to remove and recreate all existing planes.
+ *
+ * TODO: Come up with a more elegant solution for this.
+ */
+ for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
+ if (other->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ if (old_other_state->crtc != new_plane_state->crtc &&
+ new_other_state->crtc != new_plane_state->crtc)
+ continue;
+
+ if (old_other_state->crtc != new_other_state->crtc)
+ return true;
+
+ /* TODO: Remove this once we can handle fast format changes. */
+ if (old_other_state->fb && new_other_state->fb &&
+ old_other_state->fb->format != new_other_state->fb->format)
+ return true;
+ }
+
+ return false;
+}
+
static int dm_update_plane_state(struct dc *dc,
struct drm_atomic_state *state,
struct drm_plane *plane,
@@ -5699,8 +6364,7 @@ static int dm_update_plane_state(struct dc *dc,
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
- /* TODO return page_flip_needed() function */
- bool pflip_needed = !state->allow_modeset;
+ bool needs_reset;
int ret = 0;
@@ -5713,10 +6377,12 @@ static int dm_update_plane_state(struct dc *dc,
if (plane->type == DRM_PLANE_TYPE_CURSOR)
return 0;
+ needs_reset = should_reset_plane(state, plane, old_plane_state,
+ new_plane_state);
+
/* Remove any changed/removed planes */
if (!enable) {
- if (pflip_needed &&
- plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
if (!old_plane_crtc)
@@ -5767,7 +6433,7 @@ static int dm_update_plane_state(struct dc *dc,
if (!dm_new_crtc_state->stream)
return 0;
- if (pflip_needed && plane->type != DRM_PLANE_TYPE_OVERLAY)
+ if (!needs_reset)
return 0;
WARN_ON(dm_new_plane_state->dc_state);
@@ -5779,7 +6445,7 @@ static int dm_update_plane_state(struct dc *dc,
DRM_DEBUG_DRIVER("Enabling DRM plane: %d on DRM crtc %d\n",
plane->base.id, new_plane_crtc->base.id);
- ret = fill_plane_attributes(
+ ret = fill_dc_plane_attributes(
new_plane_crtc->dev->dev_private,
dc_new_plane_state,
new_plane_state,
@@ -5827,10 +6493,11 @@ static int dm_update_plane_state(struct dc *dc,
}
static int
-dm_determine_update_type_for_commit(struct dc *dc,
+dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm,
struct drm_atomic_state *state,
enum surface_update_type *out_type)
{
+ struct dc *dc = dm->dc;
struct dm_atomic_state *dm_state = NULL, *old_dm_state = NULL;
int i, j, num_plane, ret = 0;
struct drm_plane_state *old_plane_state, *new_plane_state;
@@ -5844,21 +6511,22 @@ dm_determine_update_type_for_commit(struct dc *dc,
struct dc_stream_status *status = NULL;
struct dc_surface_update *updates;
- struct dc_plane_state *surface;
enum surface_update_type update_type = UPDATE_TYPE_FAST;
updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL);
- surface = kcalloc(MAX_SURFACES, sizeof(*surface), GFP_KERNEL);
- if (!updates || !surface) {
- DRM_ERROR("Plane or surface update failed to allocate");
+ if (!updates) {
+ DRM_ERROR("Failed to allocate plane updates\n");
/* Set type to FULL to avoid crashing in DC*/
update_type = UPDATE_TYPE_FULL;
goto cleanup;
}
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- struct dc_stream_update stream_update = { 0 };
+ struct dc_scaling_info scaling_info;
+ struct dc_stream_update stream_update;
+
+ memset(&stream_update, 0, sizeof(stream_update));
new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
old_dm_crtc_state = to_dm_crtc_state(old_crtc_state);
@@ -5886,23 +6554,12 @@ dm_determine_update_type_for_commit(struct dc *dc,
goto cleanup;
}
- if (!state->allow_modeset)
- continue;
-
if (crtc != new_plane_crtc)
continue;
- updates[num_plane].surface = &surface[num_plane];
+ updates[num_plane].surface = new_dm_plane_state->dc_state;
if (new_crtc_state->mode_changed) {
- updates[num_plane].surface->src_rect =
- new_dm_plane_state->dc_state->src_rect;
- updates[num_plane].surface->dst_rect =
- new_dm_plane_state->dc_state->dst_rect;
- updates[num_plane].surface->rotation =
- new_dm_plane_state->dc_state->rotation;
- updates[num_plane].surface->in_transfer_func =
- new_dm_plane_state->dc_state->in_transfer_func;
stream_update.dst = new_dm_crtc_state->stream->dst;
stream_update.src = new_dm_crtc_state->stream->src;
}
@@ -5918,6 +6575,13 @@ dm_determine_update_type_for_commit(struct dc *dc,
new_dm_crtc_state->stream->out_transfer_func;
}
+ ret = fill_dc_scaling_info(new_plane_state,
+ &scaling_info);
+ if (ret)
+ goto cleanup;
+
+ updates[num_plane].scaling_info = &scaling_info;
+
num_plane++;
}
@@ -5937,8 +6601,14 @@ dm_determine_update_type_for_commit(struct dc *dc,
status = dc_stream_get_status_from_state(old_dm_state->context,
new_dm_crtc_state->stream);
+ /*
+ * TODO: DC modifies the surface during this call so we need
+ * to lock here - find a way to do this without locking.
+ */
+ mutex_lock(&dm->dc_lock);
update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane,
&stream_update, status);
+ mutex_unlock(&dm->dc_lock);
if (update_type > UPDATE_TYPE_MED) {
update_type = UPDATE_TYPE_FULL;
@@ -5948,7 +6618,6 @@ dm_determine_update_type_for_commit(struct dc *dc,
cleanup:
kfree(updates);
- kfree(surface);
*out_type = update_type;
return ret;
@@ -6132,7 +6801,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
- ret = dm_determine_update_type_for_commit(dc, state, &update_type);
+ ret = dm_determine_update_type_for_commit(&adev->dm, state, &update_type);
if (ret)
goto fail;
@@ -6147,9 +6816,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
if (lock_and_validation_needed && overall_update_type <= UPDATE_TYPE_FAST)
WARN(1, "Global lock should be Set, overall_update_type should be UPDATE_TYPE_MED or UPDATE_TYPE_FULL");
- else if (!lock_and_validation_needed && overall_update_type > UPDATE_TYPE_FAST)
- WARN(1, "Global lock should NOT be set, overall_update_type should be UPDATE_TYPE_FAST");
-
if (overall_update_type > UPDATE_TYPE_FAST) {
ret = dm_atomic_get_state(state, &dm_state);
@@ -6160,7 +6826,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- if (dc_validate_global_state(dc, dm_state->context) != DC_OK) {
+ if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) {
ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index fbd161ddc3f4..978ff14a7d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -132,8 +132,6 @@ struct amdgpu_display_manager {
*/
struct drm_private_obj atomic_obj;
- struct drm_modeset_lock atomic_obj_lock;
-
/**
* @dc_lock:
*
@@ -184,6 +182,15 @@ struct amdgpu_display_manager {
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
+ /**
+ * @vupdate_params:
+ *
+ * Vertical update IRQ parameters, passed to registered handlers when
+ * triggered.
+ */
+ struct common_irq_params
+ vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1];
+
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
@@ -240,6 +247,10 @@ struct amdgpu_dm_connector {
struct mutex hpd_lock;
bool fake_enable;
+#ifdef CONFIG_DEBUG_FS
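+	/* DPCD address and transfer size used by the aux_dpcd_* debugfs entries */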
+ uint32_t debugfs_dpcd_address;
+ uint32_t debugfs_dpcd_size;
+#endif
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
@@ -260,6 +271,9 @@ struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
+ int active_planes;
+ bool interrupts_enabled;
+
int crc_skip_count;
bool crc_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index 216e48cec716..7258c992a2bf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -126,46 +126,51 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc)
crtc->base.state->dev->dev_private;
struct drm_color_lut *lut;
uint32_t lut_size;
- struct dc_gamma *gamma;
+ struct dc_gamma *gamma = NULL;
enum dc_transfer_func_type old_type = stream->out_transfer_func->type;
bool ret;
- if (!blob) {
+ if (!blob && adev->asic_type <= CHIP_RAVEN) {
/* By default, use the SRGB predefined curve.*/
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
return 0;
}
- lut = (struct drm_color_lut *)blob->data;
- lut_size = blob->length / sizeof(struct drm_color_lut);
-
- gamma = dc_create_gamma();
- if (!gamma)
- return -ENOMEM;
+ if (blob) {
+ lut = (struct drm_color_lut *)blob->data;
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
+ gamma->type = GAMMA_RGB_256;
+ else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CS_TFM_1D;
+ else {
+ /* Invalid lut size */
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
- gamma->num_entries = lut_size;
- if (gamma->num_entries == MAX_COLOR_LEGACY_LUT_ENTRIES)
- gamma->type = GAMMA_RGB_256;
- else if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
- gamma->type = GAMMA_CS_TFM_1D;
- else {
- /* Invalid lut size */
- dc_gamma_release(&gamma);
- return -EINVAL;
+ /* Convert drm_lut into dc_gamma */
+ __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
}
- /* Convert drm_lut into dc_gamma */
- __drm_lut_to_dc_gamma(lut, gamma, gamma->type == GAMMA_RGB_256);
-
- /* Call color module to translate into something DC understands. Namely
- * a transfer function.
+	/* A predefined gamma ROM only exists for RAVEN and pre-RAVEN ASICs;
+	 * set canRomBeUsed accordingly.
*/
stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
ret = mod_color_calculate_regamma_params(stream->out_transfer_func,
- gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
- dc_gamma_release(&gamma);
+ gamma, true, adev->asic_type <= CHIP_RAVEN, NULL);
+
+ if (gamma)
+ dc_gamma_release(&gamma);
+
if (!ret) {
stream->out_transfer_func->type = old_type;
DRM_ERROR("Out of memory when calculating regamma params\n");
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 4a55cde027cf..1d5fc5ad3bee 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -29,6 +29,7 @@
#include "amdgpu.h"
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
+#include "dm_helpers.h"
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -688,8 +689,131 @@ static int vrr_range_show(struct seq_file *m, void *data)
return 0;
}
+
+/* function description
+ *
+ * generic SDP message access for testing
+ *
+ * debugfs sdp_message is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * SDP header
+ * Hb0 : Secondary-Data Packet ID
+ * Hb1 : Secondary-Data Packet type
+ * Hb2 : Secondary-Data-packet-specific header, Byte 0
+ * Hb3 : Secondary-Data-packet-specific header, Byte 1
+ *
+ * To send a custom SDP message, write a 4-byte SDP header followed by up to 32 bytes of raw data.
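+ *
+ * e.g. (the DP-x directory name depends on the connector):
+ *   printf '<4 header bytes><payload bytes>' > /sys/kernel/debug/dri/0/DP-x/sdp_message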
+ */
+static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ uint8_t data[36];
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dm_crtc_state *acrtc_state;
+ uint32_t write_size = 36;
+
+ if (connector->base.status != connector_status_connected)
+ return -ENODEV;
+
+ if (size == 0)
+ return 0;
+
+ acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state);
+
+ r = copy_from_user(data, buf, write_size);
+
+ write_size -= r;
+
+ dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size);
+
+ return write_size;
+}
+
DEFINE_SHOW_ATTRIBUTE(vrr_range);
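+
+/*
+ * Usage sketch for the aux_dpcd_* entries below (DP-x is the connector
+ * directory; the address and size are written as raw host-endian u32 values):
+ *   printf '\x00\x01\x00\x00' > /sys/kernel/debug/dri/0/DP-x/aux_dpcd_address
+ *   printf '\x10\x00\x00\x00' > /sys/kernel/debug/dri/0/DP-x/aux_dpcd_size
+ *   dd if=/sys/kernel/debug/dri/0/DP-x/aux_dpcd_data bs=256 count=1 | xxd
+ */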
+static ssize_t dp_dpcd_address_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_address))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_address,
+ buf, sizeof(connector->debugfs_dpcd_address));
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_size_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+
+ if (size < sizeof(connector->debugfs_dpcd_size))
+ return 0;
+
+ r = copy_from_user(&connector->debugfs_dpcd_size,
+ buf, sizeof(connector->debugfs_dpcd_size));
+
+ if (connector->debugfs_dpcd_size > 256)
+ connector->debugfs_dpcd_size = 0;
+
+ return size - r;
+}
+
+static ssize_t dp_dpcd_data_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t write_size = connector->debugfs_dpcd_size;
+
+ if (size < write_size)
+ return 0;
+
+ data = kzalloc(write_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ r = copy_from_user(data, buf, write_size);
+
+ dm_helpers_dp_write_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, write_size - r);
+ kfree(data);
+ return write_size - r;
+}
+
+static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+{
+ int r;
+ char *data;
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ uint32_t read_size = connector->debugfs_dpcd_size;
+
+ if (size < read_size)
+ return 0;
+
+ data = kzalloc(read_size, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ dm_helpers_dp_read_dpcd(link->ctx, link,
+ connector->debugfs_dpcd_address, data, read_size);
+
+ r = copy_to_user(buf, data, read_size);
+
+ kfree(data);
+ return read_size - r;
+}
+
static const struct file_operations dp_link_settings_debugfs_fops = {
.owner = THIS_MODULE,
.read = dp_link_settings_read,
@@ -710,6 +834,31 @@ static const struct file_operations dp_phy_test_pattern_fops = {
.llseek = default_llseek
};
+static const struct file_operations sdp_message_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_sdp_message_debugfs_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_address_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_address_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_size_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .write = dp_dpcd_size_write,
+ .llseek = default_llseek
+};
+
+static const struct file_operations dp_dpcd_data_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .read = dp_dpcd_data_read,
+ .write = dp_dpcd_data_write,
+ .llseek = default_llseek
+};
+
static const struct {
char *name;
const struct file_operations *fops;
@@ -717,7 +866,11 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"vrr_range", &vrr_range_fops}
+ {"vrr_range", &vrr_range_fops},
+ {"sdp_message", &sdp_message_fops},
+ {"aux_dpcd_address", &dp_dpcd_address_debugfs_fops},
+ {"aux_dpcd_size", &dp_dpcd_size_debugfs_fops},
+ {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops}
};
int connector_debugfs_init(struct amdgpu_dm_connector *connector)
@@ -842,6 +995,35 @@ static const struct drm_info_list amdgpu_dm_debugfs_list[] = {
{"amdgpu_target_backlight_pwm", &target_backlight_read},
};
+/*
+ * Sets the DC visual confirm debug option from the given string.
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_set(void *data, u64 val)
+{
+ struct amdgpu_device *adev = data;
+
+ adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val;
+
+ return 0;
+}
+
+/*
+ * Reads back the DC visual confirm debug option value.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm
+ */
+static int visual_confirm_get(void *data, u64 *val)
+{
+ struct amdgpu_device *adev = data;
+
+ *val = adev->dm.dc->debug.visual_confirm;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get,
+ visual_confirm_set, "%llu\n");
+
int dtn_debugfs_init(struct amdgpu_device *adev)
{
static const struct file_operations dtn_log_fops = {
@@ -867,5 +1049,13 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
adev,
&dtn_log_fops);
- return PTR_ERR_OR_ZERO(ent);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ ent = debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root,
+ adev, &visual_confirm_fops);
+ if (IS_ERR(ent))
+ return PTR_ERR(ent);
+
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index b39766bd2840..e6cd67342df8 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -264,7 +264,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index cd10f77cdeb0..fd22b4474dbf 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -674,11 +674,30 @@ static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev,
__func__);
}
+static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev,
+ struct amdgpu_irq_src *source,
+ unsigned int crtc_id,
+ enum amdgpu_interrupt_state state)
+{
+ return dm_irq_state(
+ adev,
+ source,
+ crtc_id,
+ state,
+ IRQ_TYPE_VUPDATE,
+ __func__);
+}
+
static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = {
.set = amdgpu_dm_set_crtc_irq_state,
.process = amdgpu_dm_irq_handler,
};
+static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = {
+ .set = amdgpu_dm_set_vupdate_irq_state,
+ .process = amdgpu_dm_irq_handler,
+};
+
static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = {
.set = amdgpu_dm_set_pflip_irq_state,
.process = amdgpu_dm_irq_handler,
@@ -695,6 +714,9 @@ void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev)
adev->crtc_irq.num_types = adev->mode_info.num_crtc;
adev->crtc_irq.funcs = &dm_crtc_irq_funcs;
+ adev->vupdate_irq.num_types = adev->mode_info.num_crtc;
+ adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs;
+
adev->pageflip_irq.num_types = adev->mode_info.num_crtc;
adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index c4ea3a91f17a..6e205ee36ac3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -84,6 +84,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
{
ssize_t result = 0;
struct aux_payload payload;
+ enum aux_channel_operation_result operation_result;
if (WARN_ON(msg->size > 16))
return -E2BIG;
@@ -97,13 +98,27 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
payload.defer_delay = 0;
- result = dc_link_aux_transfer(TO_DM_AUX(aux)->ddc_service, &payload);
+ result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+ &operation_result);
if (payload.write)
result = msg->size;
- if (result < 0) /* DC doesn't know about kernel error codes */
- result = -EIO;
+ if (result < 0)
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ result = -EIO;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ result = -EBUSY;
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ result = -ETIMEDOUT;
+ break;
+ }
return result;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index a114954d6a5b..350e7a620d45 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -33,6 +33,7 @@
#include "amdgpu_dm_irq.h"
#include "amdgpu_pm.h"
#include "dm_pp_smu.h"
+#include "amdgpu_smu.h"
bool dm_pp_apply_display_requirements(
@@ -40,6 +41,7 @@ bool dm_pp_apply_display_requirements(
const struct dm_pp_display_configuration *pp_display_cfg)
{
struct amdgpu_device *adev = ctx->driver_context;
+ struct smu_context *smu = &adev->smu;
int i;
if (adev->pm.dpm_enabled) {
@@ -105,6 +107,9 @@ bool dm_pp_apply_display_requirements(
adev->powerplay.pp_funcs->display_configuration_change(
adev->powerplay.pp_handle,
&adev->pm.pm_display_cfg);
+ else
+ smu_display_configuration_change(smu,
+ &adev->pm.pm_display_cfg);
amdgpu_pm_compute_clocks(adev);
}
@@ -308,6 +313,12 @@ bool dm_pp_get_clock_levels_by_type(
if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle,
dc_to_pp_clock_type(clk_type), &pp_clks)) {
/* Error in pplib. Provide default values. */
+ return true;
+ }
+ } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) {
+ if (smu_get_clock_by_type(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks)) {
get_default_clock_levels(clk_type, dc_clks);
return true;
}
@@ -324,6 +335,13 @@ bool dm_pp_get_clock_levels_by_type(
validation_clks.memory_max_clock = 80000;
validation_clks.level = 0;
}
+ } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) {
+ if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) {
+ DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n");
+ validation_clks.engine_max_clock = 72000;
+ validation_clks.memory_max_clock = 80000;
+ validation_clks.level = 0;
+ }
}
DRM_INFO("DM_PPLIB: Validation clocks:\n");
@@ -374,14 +392,21 @@ bool dm_pp_get_clock_levels_by_type_with_latency(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_latency pp_clks = { 0 };
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) {
+ ret = pp_funcs->get_clock_by_type_with_latency(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_latency) {
+ if (smu_get_clock_by_type_with_latency(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clks))
+ return false;
+ }
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_latency)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_latency(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clks))
- return false;
pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type);
@@ -397,14 +422,20 @@ bool dm_pp_get_clock_levels_by_type_with_voltage(
void *pp_handle = adev->powerplay.pp_handle;
struct pp_clock_levels_with_voltage pp_clk_info = {0};
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
-
- if (!pp_funcs || !pp_funcs->get_clock_by_type_with_voltage)
- return false;
-
- if (pp_funcs->get_clock_by_type_with_voltage(pp_handle,
- dc_to_pp_clock_type(clk_type),
- &pp_clk_info))
- return false;
+ int ret;
+
+ if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) {
+ ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info);
+ if (ret)
+ return false;
+ } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type_with_voltage) {
+ if (smu_get_clock_by_type_with_voltage(&adev->smu,
+ dc_to_pp_clock_type(clk_type),
+ &pp_clk_info))
+ return false;
+ }
pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type);
@@ -445,6 +476,10 @@ bool dm_pp_apply_clock_for_voltage_request(
ret = adev->powerplay.pp_funcs->display_clock_voltage_request(
adev->powerplay.pp_handle,
&pp_clock_request);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->display_clock_voltage_request)
+ ret = smu_display_clock_voltage_request(&adev->smu,
+ &pp_clock_request);
if (ret)
return false;
return true;
@@ -462,6 +497,8 @@ bool dm_pp_get_static_clocks(
ret = adev->powerplay.pp_funcs->get_current_clocks(
adev->powerplay.pp_handle,
&pp_clk_info);
+ else if (adev->smu.funcs)
+ ret = smu_get_current_clocks(&adev->smu, &pp_clk_info);
if (ret)
return false;
@@ -472,27 +509,6 @@ bool dm_pp_get_static_clocks(
return true;
}
-void pp_rv_set_display_requirement(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req)
-{
- const struct dc_context *ctx = pp->dm;
- struct amdgpu_device *adev = ctx->driver_context;
- void *pp_handle = adev->powerplay.pp_handle;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- struct pp_display_clock_request clock = {0};
-
- if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
- return;
-
- clock.clock_type = amd_pp_dcf_clock;
- clock.clock_freq_in_khz = req->hard_min_dcefclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-
- clock.clock_type = amd_pp_f_clock;
- clock.clock_freq_in_khz = req->hard_min_fclk_mhz * 1000;
- pp_funcs->display_clock_voltage_request(pp_handle, &clock);
-}
-
void pp_rv_set_wm_ranges(struct pp_smu *pp,
struct pp_smu_wm_range_sets *ranges)
{
@@ -508,9 +524,6 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets;
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets;
- if (!pp_funcs || !pp_funcs->set_watermarks_for_clocks_ranges)
- return;
-
for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) {
if (ranges->reader_wm_sets[i].wm_inst > 3)
wm_dce_clocks[i].wm_set_id = WM_SET_A;
@@ -543,7 +556,13 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp,
ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000;
}
- pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges);
+ if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges)
+ pp_funcs->set_watermarks_for_clocks_ranges(pp_handle,
+ &wm_with_clock_ranges);
+ else if (adev->smu.funcs &&
+ adev->smu.funcs->set_watermarks_for_clock_ranges)
+ smu_set_watermarks_for_clock_ranges(&adev->smu,
+ &wm_with_clock_ranges);
}
void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
@@ -553,10 +572,10 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp)
void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- if (!pp_funcs || !pp_funcs->notify_smu_enable_pwe)
- return;
-
- pp_funcs->notify_smu_enable_pwe(pp_handle);
+ if (pp_funcs && pp_funcs->notify_smu_enable_pwe)
+ pp_funcs->notify_smu_enable_pwe(pp_handle);
+ else if (adev->smu.funcs)
+ smu_notify_smu_enable_pwe(&adev->smu);
}
void pp_rv_set_active_display_count(struct pp_smu *pp, int count)
@@ -611,17 +630,16 @@ void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz)
pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz);
}
-void dm_pp_get_funcs_rv(
+void dm_pp_get_funcs(
struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs)
+ struct pp_smu_funcs *funcs)
{
- funcs->pp_smu.dm = ctx;
- funcs->set_display_requirement = pp_rv_set_display_requirement;
- funcs->set_wm_ranges = pp_rv_set_wm_ranges;
- funcs->set_pme_wa_enable = pp_rv_set_pme_wa_enable;
- funcs->set_display_count = pp_rv_set_active_display_count;
- funcs->set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
- funcs->set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
- funcs->set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
+ funcs->rv_funcs.pp_smu.dm = ctx;
+ funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges;
+ funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable;
+ funcs->rv_funcs.set_display_count = pp_rv_set_active_display_count;
+ funcs->rv_funcs.set_min_deep_sleep_dcfclk = pp_rv_set_min_deep_sleep_dcfclk;
+ funcs->rv_funcs.set_hard_min_dcfclk_by_freq = pp_rv_set_hard_min_dcefclk_by_freq;
+ funcs->rv_funcs.set_hard_min_fclk_by_freq = pp_rv_set_hard_min_fclk_by_freq;
}
diff --git a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
index f28989860fd8..1e9a2d352068 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/fixpt31_32.c
@@ -449,6 +449,11 @@ static inline unsigned int clamp_ux_dy(
return min_clamp;
}
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg)
{
return ux_dy(arg.value, 3, 19);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index eb62d10bb65c..1b4b51657f5e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -247,6 +247,53 @@ static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format for
}
}
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode)
+{
+ switch (sw_mode) {
+ /* for 4/8/16 high tiles */
+ case DC_SW_LINEAR:
+ return dm_4k_tile;
+ case DC_SW_4KB_S:
+ case DC_SW_4KB_S_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_S:
+ case DC_SW_64KB_S_X:
+ case DC_SW_64KB_S_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_S:
+ case DC_SW_VAR_S_X:
+ return dm_256k_tile;
+
+ /* For 64bpp 2 high tiles */
+ case DC_SW_4KB_D:
+ case DC_SW_4KB_D_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_D:
+ case DC_SW_64KB_D_X:
+ case DC_SW_64KB_D_T:
+ return dm_64k_tile;
+ case DC_SW_VAR_D:
+ case DC_SW_VAR_D_X:
+ return dm_256k_tile;
+
+ case DC_SW_4KB_R:
+ case DC_SW_4KB_R_X:
+ return dm_4k_tile;
+ case DC_SW_64KB_R:
+ case DC_SW_64KB_R_X:
+ return dm_64k_tile;
+ case DC_SW_VAR_R:
+ case DC_SW_VAR_R_X:
+ return dm_256k_tile;
+
+ /* Unsupported swizzle modes for dcn */
+ case DC_SW_256B_S:
+ default:
+ ASSERT(0); /* Not supported */
+ return 0;
+ }
+}
+
static void pipe_ctx_to_e2e_pipe_params (
const struct pipe_ctx *pipe,
struct _vcs_dpi_display_pipe_params_st *input)
@@ -287,46 +334,7 @@ static void pipe_ctx_to_e2e_pipe_params (
input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */
input->src.cur0_bpp = 32;
- switch (pipe->plane_state->tiling_info.gfx9.swizzle) {
- /* for 4/8/16 high tiles */
- case DC_SW_LINEAR:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_4KB_S:
- case DC_SW_4KB_S_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_S:
- case DC_SW_64KB_S_X:
- case DC_SW_64KB_S_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_S:
- case DC_SW_VAR_S_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* For 64bpp 2 high tiles */
- case DC_SW_4KB_D:
- case DC_SW_4KB_D_X:
- input->src.macro_tile_size = dm_4k_tile;
- break;
- case DC_SW_64KB_D:
- case DC_SW_64KB_D_X:
- case DC_SW_64KB_D_T:
- input->src.macro_tile_size = dm_64k_tile;
- break;
- case DC_SW_VAR_D:
- case DC_SW_VAR_D_X:
- input->src.macro_tile_size = dm_256k_tile;
- break;
-
- /* Unsupported swizzle modes for dcn */
- case DC_SW_256B_S:
- default:
- ASSERT(0); /* Not supported */
- break;
- }
+ input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle);
switch (pipe->plane_state->rotation) {
case ROTATION_ANGLE_0:
@@ -466,7 +474,7 @@ static void dcn_bw_calc_rq_dlg_ttu(
input.clks_cfg.dcfclk_mhz = v->dcfclk;
input.clks_cfg.dispclk_mhz = v->dispclk;
input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clock_inKhz / 1000.0;
+ input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
input.clks_cfg.socclk_mhz = v->socclk;
input.clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
@@ -536,28 +544,28 @@ static void calc_wm_sets_and_perf_params(
v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000;
v->dcfclk_per_state[1] = v->dcfclkv_nom0p8;
v->dcfclk_per_state[0] = v->dcfclkv_nom0p8;
v->dcfclk = v->dcfclkv_nom0p8;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000;
}
if (v->voltage_level < 3) {
@@ -571,14 +579,14 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclkv_max0p9;
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000;
}
v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8;
@@ -591,20 +599,20 @@ static void calc_wm_sets_and_perf_params(
v->dcfclk = v->dcfclk_per_state[v->voltage_level];
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
if (v->voltage_level >= 2) {
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
}
if (v->voltage_level >= 3)
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
}
#endif
@@ -693,8 +701,15 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
+ /*
+ * we want a breakdown of the various stages of validation, which the
+ * perf_trace macro doesn't support
+ */
+ BW_VAL_TRACE_SETUP();
+
const struct resource_pool *pool = dc->res_pool;
struct dcn_bw_internal_vars *v = &context->dcn_bw_vars;
int i, input_idx;
@@ -703,6 +718,9 @@ bool dcn_validate_bandwidth(
float bw_limit;
PERFORMANCE_TRACE_START();
+
+ BW_VAL_TRACE_COUNT();
+
if (dcn_bw_apply_registry_override(dc))
dcn_bw_sync_calcs_and_dml(dc);
@@ -1000,13 +1018,16 @@ bool dcn_validate_bandwidth(
dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f;
if (dc->debug.sr_exit_time_dpm0_ns)
v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f;
- dc->dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = v->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time;
mode_support_and_system_configuration(v);
}
- if (v->voltage_level != 5) {
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (v->voltage_level != number_of_states_plus_one && !fast_validate) {
float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second;
+
if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65)
bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65;
else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72)
@@ -1027,58 +1048,60 @@ bool dcn_validate_bandwidth(
*/
dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v);
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns =
v->stutter_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns =
v->stutter_enter_plus_exit_watermark * 1000;
- context->bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
+ context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns =
v->dram_clock_change_watermark * 1000;
- context->bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
- context->bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
- context->bw.dcn.watermarks.b = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.c = context->bw.dcn.watermarks.a;
- context->bw.dcn.watermarks.d = context->bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000;
+ context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a;
+ context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a;
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 /
(ddr4_dram_factor_single_Channel * v->number_of_channels));
if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) {
- context->bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
+ context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32);
}
- context->bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
- context->bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000);
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000);
- context->bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000);
if (dc->debug.max_disp_clk == true)
- context->bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000);
- if (context->bw.dcn.clk.dispclk_khz <
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz <
dc->debug.min_disp_clk_khz) {
- context->bw.dcn.clk.dispclk_khz =
+ context->bw_ctx.bw.dcn.clk.dispclk_khz =
dc->debug.min_disp_clk_khz;
}
- context->bw.dcn.clk.dppclk_khz = context->bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
- context->bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
+ context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / v->dispclk_dppclk_ratio;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level];
switch (v->voltage_level) {
case 0:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000);
break;
case 1:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000);
break;
case 2:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000);
break;
default:
- context->bw.dcn.clk.max_supported_dppclk_khz =
+ context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz =
(int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000);
break;
}
+ BW_VAL_TRACE_END_WATERMARKS();
+
for (i = 0, input_idx = 0; i < pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1141,7 +1164,7 @@ bool dcn_validate_bandwidth(
hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end;
} else {
/* pipe not split previously needs split */
- hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool);
+ hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe);
ASSERT(hsplit_pipe);
split_stream_across_pipes(
&context->res_ctx, pool,
@@ -1169,13 +1192,17 @@ bool dcn_validate_bandwidth(
input_idx++;
}
+ } else if (v->voltage_level == number_of_states_plus_one) {
+ BW_VAL_TRACE_SKIP(fail);
+ } else if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
}
if (v->voltage_level == 0) {
- dc->dml.soc.sr_enter_plus_exit_time_us =
+ context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us =
dc->dcn_soc->sr_enter_plus_exit_time;
- dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
+ context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time;
}
/*
@@ -1188,6 +1215,7 @@ bool dcn_validate_bandwidth(
kernel_fpu_end();
PERFORMANCE_TRACE_END();
+ BW_VAL_TRACE_FINISH();
if (bw_limit_pass && v->voltage_level != 5)
return true;
@@ -1395,12 +1423,14 @@ void dcn_bw_update_from_pplib(struct dc *dc)
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc)
{
- struct pp_smu_funcs_rv *pp = dc->res_pool->pp_smu;
+ struct pp_smu_funcs_rv *pp = NULL;
struct pp_smu_wm_range_sets ranges = {0};
int min_fclk_khz, min_dcfclk_khz, socclk_khz;
const int overdrive = 5000000; /* 5 GHz to cover Overdrive */
- if (!pp->set_wm_ranges)
+ if (dc->res_pool->pp_smu)
+ pp = &dc->res_pool->pp_smu->rv_funcs;
+ if (!pp || !pp->set_wm_ranges)
return;
kernel_fpu_begin();
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index a6cda201c964..18c775a950cc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -524,6 +524,14 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_stream_state *link_stream;
struct dc_link_settings store_settings = *link_setting;
+ link->preferred_link_setting = store_settings;
+
+ /* Retrain with preferred link settings only relevant for
+ * DP signal type
+ */
+ if (!dc_is_dp_signal(link->connector_signal))
+ return;
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream && pipe->stream->link) {
@@ -538,7 +546,10 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
- link->preferred_link_setting = store_settings;
+ /* Cannot retrain link if backend is off */
+ if (link_stream->dpms_off)
+ return;
+
if (link_stream)
decide_link_settings(link_stream, &store_settings);
@@ -573,6 +584,28 @@ void dc_link_set_test_pattern(struct dc_link *link,
cust_pattern_size);
}
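+/*
+ * link_rate is a multiple of the 27 MHz reference (LINK_RATE_REF_FREQ_IN_KHZ);
+ * that product is the per-lane symbol rate and each 8b/10b symbol carries one
+ * data byte, so e.g. HBR2 (link_rate 0x14 = 20) over 4 lanes works out to
+ * 20 * 27000 * 8 * 4 = 17,280,000 kbps.
+ */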
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting)
+{
+	uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* kbytes/sec per lane */
+
+ link_bw_kbps *= 8; /* 8 bits per byte*/
+ link_bw_kbps *= link_setting->lane_count;
+
+ return link_bw_kbps;
+
+}
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link)
+{
+ if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+ return &link->preferred_link_setting;
+ return &link->verified_link_cap;
+}
+
static void destruct(struct dc *dc)
{
dc_release_state(dc->current_state);
@@ -621,6 +654,10 @@ static bool construct(struct dc *dc,
#endif
enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+ dc->config = init_params->flags;
+
+ memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
+
dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
if (!dc_dceip) {
dm_error("%s: failed to create dceip\n", __func__);
@@ -668,13 +705,6 @@ static bool construct(struct dc *dc,
dc_ctx->dc_stream_id_count = 0;
dc->ctx = dc_ctx;
- dc->current_state = dc_create_state();
-
- if (!dc->current_state) {
- dm_error("%s: failed to create validate ctx\n", __func__);
- goto fail;
- }
-
/* Create logger */
dc_ctx->dce_environment = init_params->dce_environment;
@@ -722,14 +752,22 @@ static bool construct(struct dc *dc,
goto fail;
}
- dc->res_pool = dc_create_resource_pool(
- dc,
- init_params->num_virtual_links,
- dc_version,
- init_params->asic_id);
+ dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
if (!dc->res_pool)
goto fail;
+ /* Creation of current_state must occur after dc->dml
+ * is initialized in dc_create_resource_pool because
+ * on creation it copies the contents of dc->dml
+ */
+
+ dc->current_state = dc_create_state(dc);
+
+ if (!dc->current_state) {
+ dm_error("%s: failed to create validate ctx\n", __func__);
+ goto fail;
+ }
+
dc_resource_state_construct(dc, dc->current_state);
if (!create_links(dc, init_params->num_virtual_links))
@@ -746,7 +784,7 @@ fail:
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state();
+ struct dc_state *dangling_context = dc_create_state(dc);
struct dc_state *current_ctx;
if (dangling_context == NULL)
@@ -811,8 +849,6 @@ struct dc *dc_create(const struct dc_init_data *init_params)
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
- dc->config = init_params->flags;
-
dc->build_id = DC_BUILD_ID;
DC_LOG_DC("Display Core initialized\n");
@@ -969,7 +1005,7 @@ static bool context_changed(
return false;
}
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing)
{
@@ -1060,7 +1096,13 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
if (!dcb->funcs->is_accelerated_mode(dcb))
dc->hwss.enable_accelerated_mode(dc, context);
- dc->hwss.prepare_bandwidth(dc, context);
+ for (i = 0; i < context->stream_count; i++) {
+ if (context->streams[i]->apply_seamless_boot_optimization)
+ dc->optimize_seamless_boot = true;
+ }
+
+ if (!dc->optimize_seamless_boot)
+ dc->hwss.prepare_bandwidth(dc, context);
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
@@ -1135,12 +1177,15 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- /* pplib is notified if disp_num changed */
- dc->hwss.optimize_bandwidth(dc, context);
+ if (!dc->optimize_seamless_boot)
+ /* pplib is notified if disp_num changed */
+ dc->hwss.optimize_bandwidth(dc, context);
for (i = 0; i < context->stream_count; i++)
context->streams[i]->mode_changed = false;
+ memset(&context->commit_hints, 0, sizeof(context->commit_hints));
+
dc_release_state(dc->current_state);
dc->current_state = context;
@@ -1177,7 +1222,7 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
int i;
struct dc_state *context = dc->current_state;
- if (dc->optimized_required == false)
+ if (!dc->optimized_required || dc->optimize_seamless_boot)
return true;
post_surface_trace(dc);
@@ -1195,18 +1240,60 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
return true;
}
-struct dc_state *dc_create_state(void)
+struct dc_state *dc_create_state(struct dc *dc)
{
struct dc_state *context = kzalloc(sizeof(struct dc_state),
GFP_KERNEL);
if (!context)
return NULL;
+	/* Each context must have its own instance of VBA; in order to
+	 * initialize it and obtain the IP and SOC parameters, the base DML
+	 * instance from DC is initially copied into every context.
+ */
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
+ memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+#endif
kref_init(&context->refcount);
+
return context;
}
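+
+/*
+ * Copies a dc_state and fixes it up: the per-pipe top_pipe/bottom_pipe pointers
+ * are re-targeted at the new context's pipe_ctx array, and the stream/plane
+ * references are retained since both states now refer to them.
+ */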
+struct dc_state *dc_copy_state(struct dc_state *src_ctx)
+{
+ int i, j;
+ struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!new_ctx)
+ return NULL;
+
+ memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ }
+
+ for (i = 0; i < new_ctx->stream_count; i++) {
+ dc_stream_retain(new_ctx->streams[i]);
+ for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ new_ctx->stream_status[i].plane_states[j]);
+ }
+
+ kref_init(&new_ctx->refcount);
+
+ return new_ctx;
+}
+
void dc_retain_state(struct dc_state *context)
{
kref_get(&context->refcount);
@@ -1666,6 +1753,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
continue;
if (stream_update->dpms_off) {
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
dc->hwss.optimize_bandwidth(dc, dc->current_state);
@@ -1673,6 +1761,7 @@ static void commit_planes_do_stream_update(struct dc *dc,
dc->hwss.prepare_bandwidth(dc, dc->current_state);
core_link_enable_stream(dc->current_state, pipe_ctx);
}
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -1700,7 +1789,16 @@ static void commit_planes_for_stream(struct dc *dc,
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
- if (update_type == UPDATE_TYPE_FULL) {
+ if (dc->optimize_seamless_boot && surface_count > 0) {
+ /* Optimize seamless boot flag keeps clocks and watermarks high until
+ * first flip. After first flip, optimization is required to lower
+ * bandwidth.
+ */
+ dc->optimize_seamless_boot = false;
+ dc->optimized_required = true;
+ }
+
+ if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
dc->hwss.prepare_bandwidth(dc, context);
context_clock_trace(dc, context);
}
@@ -1800,7 +1898,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state();
+ context = dc_create_state(dc);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
@@ -2099,13 +2197,13 @@ void dc_link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink)
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
{
- info->displayClock = (unsigned int)state->bw.dcn.clk.dispclk_khz;
- info->engineClock = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
- info->memoryClock = (unsigned int)state->bw.dcn.clk.dramclk_khz;
- info->maxSupportedDppClock = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
- info->dppClock = (unsigned int)state->bw.dcn.clk.dppclk_khz;
- info->socClock = (unsigned int)state->bw.dcn.clk.socclk_khz;
- info->dcfClockDeepSleep = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
- info->fClock = (unsigned int)state->bw.dcn.clk.fclk_khz;
- info->phyClock = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+ info->displayClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+ info->engineClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+ info->memoryClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+ info->maxSupportedDppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+ info->dppClock = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+ info->socClock = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+ info->dcfClockDeepSleep = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+ info->fClock = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+ info->phyClock = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index 73d049506618..5903e7822f98 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -351,19 +351,19 @@ void context_clock_trace(
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
CLOCK_TRACE("Calculated: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
- context->bw.dcn.clk.dispclk_khz,
- context->bw.dcn.clk.dppclk_khz,
- context->bw.dcn.clk.dcfclk_khz,
- context->bw.dcn.clk.dcfclk_deep_sleep_khz,
- context->bw.dcn.clk.fclk_khz,
- context->bw.dcn.clk.socclk_khz);
+ context->bw_ctx.bw.dcn.clk.dispclk_khz,
+ context->bw_ctx.bw.dcn.clk.dppclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ context->bw_ctx.bw.dcn.clk.fclk_khz,
+ context->bw_ctx.bw.dcn.clk.socclk_khz);
#endif
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index ea18e9c2d8ce..b37ecc3ede61 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -58,7 +58,6 @@
******************************************************************************/
enum {
- LINK_RATE_REF_FREQ_IN_MHZ = 27,
PEAK_FACTOR_X1000 = 1006,
/*
* Some receivers fail to train on first try and are good
@@ -515,6 +514,40 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin
}
+static void read_edp_current_link_settings_on_detect(struct dc_link *link)
+{
+ union lane_count_set lane_count_set = { {0} };
+ uint8_t link_bw_set;
+ uint8_t link_rate_set;
+
+ // Read DPCD 00101h to find out the number of lanes currently set
+ core_link_read_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, sizeof(lane_count_set));
+ link->cur_link_settings.lane_count = lane_count_set.bits.LANE_COUNT_SET;
+
+ // Read DPCD 00100h to find if standard link rates are set
+ core_link_read_dpcd(link, DP_LINK_BW_SET,
+ &link_bw_set, sizeof(link_bw_set));
+
+ if (link_bw_set == 0) {
+ /* If standard link rates are not being used,
+ * Read DPCD 00115h to find the link rate set used
+ */
+ core_link_read_dpcd(link, DP_LINK_RATE_SET,
+ &link_rate_set, sizeof(link_rate_set));
+
+ if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ link->cur_link_settings.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[link_rate_set];
+ link->cur_link_settings.link_rate_set = link_rate_set;
+ link->cur_link_settings.use_link_rate_set = true;
+ }
+ } else {
+ link->cur_link_settings.link_rate = link_bw_set;
+ link->cur_link_settings.use_link_rate_set = false;
+ }
+}
+
static bool detect_dp(
struct dc_link *link,
struct display_sink_capability *sink_caps,
@@ -640,7 +673,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool same_dpcd = true;
enum dc_connection_type new_connection_type = dc_connection_none;
DC_LOGGER_INIT(link->ctx->logger);
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
+
+ if (dc_is_virtual_signal(link->connector_signal))
return false;
if (false == dc_link_detect_sink(link, &new_connection_type)) {
@@ -648,9 +682,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
return false;
}
- if (link->connector_signal == SIGNAL_TYPE_EDP &&
- link->local_sink)
- return true;
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ /* On detect, we want to make sure current link settings are
+ * up to date, especially if link was powered on by GOP.
+ */
+ read_edp_current_link_settings_on_detect(link);
+ if (link->local_sink)
+ return true;
+ }
if (link->connector_signal == SIGNAL_TYPE_LVDS &&
link->local_sink)
@@ -720,9 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
same_dpcd = false;
}
/* Active dongle plug in without display or downstream unplug*/
- if (link->type == dc_connection_active_dongle
- && link->dpcd_caps.sink_count.
- bits.SINK_COUNT == 0) {
+ if (link->type == dc_connection_active_dongle &&
+ link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) {
if (prev_sink != NULL) {
/* Downstream unplug */
dc_sink_release(prev_sink);
@@ -1172,8 +1210,6 @@ static bool construct(
goto create_fail;
}
-
-
/* TODO: #DAL3 Implement id to str function.*/
LINK_INFO("Connector[%d] description:"
"signal %d\n",
@@ -1207,7 +1243,7 @@ static bool construct(
link->link_enc = link->dc->res_pool->funcs->link_enc_create(
&enc_init_data);
- if( link->link_enc == NULL) {
+ if (link->link_enc == NULL) {
DC_ERROR("Failed to create link encoder!\n");
goto link_enc_create_fail;
}
@@ -1399,9 +1435,24 @@ static enum dc_status enable_link_dp(
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
+ /* If link settings are different than current and link already enabled
+ * then need to disable before programming to new rate.
+ */
+ if (link->link_status.link_active &&
+ (link->cur_link_settings.lane_count != link_settings.lane_count ||
+ link->cur_link_settings.link_rate != link_settings.link_rate)) {
+ dp_disable_link_phy(link, pipe_ctx->stream->signal);
+ }
+
+ /*in case it is not on*/
+ link->dc->hwss.edp_power_control(link, true);
+ link->dc->hwss.edp_wait_for_hpd_ready(link, true);
+ }
+
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
- state->dccg->funcs->update_clocks(state->dccg, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
dp_enable_link_phy(
link,
@@ -1442,15 +1493,9 @@ static enum dc_status enable_link_edp(
struct pipe_ctx *pipe_ctx)
{
enum dc_status status;
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- /*in case it is not on*/
- link->dc->hwss.edp_power_control(link, true);
- link->dc->hwss.edp_wait_for_hpd_ready(link, true);
status = enable_link_dp(state, pipe_ctx);
-
return status;
}
@@ -1466,14 +1511,14 @@ static enum dc_status enable_link_dp_mst(
if (link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN)
return DC_OK;
+ /* clear payload table */
+ dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
+
/* to make sure the pending down rep can be processed
- * before clear payload table
+ * before enabling the link
*/
dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link);
- /* clear payload table */
- dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link);
-
/* set the sink to MST mode before enabling the link */
dp_enable_mst_on_sink(link, true);
@@ -1982,7 +2027,7 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream->signal,
stream->phy_pix_clk);
- if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
dal_ddc_service_read_scdc_data(link->ddc);
}
@@ -2074,11 +2119,28 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
}
}
+static uint32_t get_timing_pixel_clock_100hz(const struct dc_crtc_timing *timing)
+{
+
+ uint32_t pxl_clk = timing->pix_clk_100hz;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ pxl_clk /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ pxl_clk = pxl_clk * 2 / 3;
+
+ if (timing->display_color_depth == COLOR_DEPTH_101010)
+ pxl_clk = pxl_clk * 10 / 8;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212)
+ pxl_clk = pxl_clk * 12 / 8;
+
+ return pxl_clk;
+}
+
static bool dp_active_dongle_validate_timing(
const struct dc_crtc_timing *timing,
const struct dpcd_caps *dpcd_caps)
{
- unsigned int required_pix_clk_100hz = timing->pix_clk_100hz;
const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps;
switch (dpcd_caps->dongle_type) {
@@ -2115,13 +2177,6 @@ static bool dp_active_dongle_validate_timing(
return false;
}
-
- /* Check Color Depth and Pixel Clock */
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- required_pix_clk_100hz /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- required_pix_clk_100hz = required_pix_clk_100hz * 2 / 3;
-
switch (timing->display_color_depth) {
case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
@@ -2130,14 +2185,11 @@ static bool dp_active_dongle_validate_timing(
case COLOR_DEPTH_101010:
if (dongle_caps->dp_hdmi_max_bpc < 10)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 10 / 8;
break;
case COLOR_DEPTH_121212:
if (dongle_caps->dp_hdmi_max_bpc < 12)
return false;
- required_pix_clk_100hz = required_pix_clk_100hz * 12 / 8;
break;
-
case COLOR_DEPTH_141414:
case COLOR_DEPTH_161616:
default:
@@ -2145,7 +2197,7 @@ static bool dp_active_dongle_validate_timing(
return false;
}
- if (required_pix_clk_100hz > (dongle_caps->dp_hdmi_max_pixel_clk * 10))
+ if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
return true;
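The new get_timing_pixel_clock_100hz() helper folds chroma subsampling and color depth into one effective pixel clock before it is compared against the dongle limit. A minimal standalone sketch of the same arithmetic, using an assumed 4K60 RGB 10 bpc timing and an assumed 600 MHz dongle limit (neither value comes from this patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t effective_pix_clk_100hz(uint32_t pix_clk_100hz,
                                        int ycbcr420, int ycbcr422, int bpc)
{
        uint32_t pxl_clk = pix_clk_100hz;

        if (ycbcr420)
                pxl_clk /= 2;                 /* 4:2:0 halves the character rate */
        else if (ycbcr422)
                pxl_clk = pxl_clk * 2 / 3;

        if (bpc == 10)
                pxl_clk = pxl_clk * 10 / 8;   /* deep color scales the clock up */
        else if (bpc == 12)
                pxl_clk = pxl_clk * 12 / 8;

        return pxl_clk;
}

int main(void)
{
        /* 4K60 RGB at 10 bpc: 5940000 * 10 / 8 = 7425000 (742.5 MHz) */
        uint32_t clk = effective_pix_clk_100hz(5940000, 0, 0, 10);

        /* A dongle reporting dp_hdmi_max_pixel_clk_in_khz = 600000 (600 MHz)
         * would reject this mode, since 7425000 > 600000 * 10.
         */
        printf("effective pixel clock: %u x 100Hz\n", clk);
        return 0;
}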
@@ -2166,7 +2218,7 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
/* Passive Dongle */
- if (0 != max_pix_clk && timing->pix_clk_100hz > max_pix_clk)
+ if (max_pix_clk != 0 && get_timing_pixel_clock_100hz(timing) > max_pix_clk)
return DC_EXCEED_DONGLE_CAP;
/* Active Dongle*/
@@ -2284,14 +2336,13 @@ void core_link_resume(struct dc_link *link)
static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream)
{
- struct dc_link_settings *link_settings =
- &stream->link->cur_link_settings;
- uint32_t link_rate_in_mbps =
- link_settings->link_rate * LINK_RATE_REF_FREQ_IN_MHZ;
- struct fixed31_32 mbps = dc_fixpt_from_int(
- link_rate_in_mbps * link_settings->lane_count);
-
- return dc_fixpt_div_int(mbps, 54);
+ struct fixed31_32 mbytes_per_sec;
+ uint32_t link_rate_in_mbytes_per_sec = dc_link_bandwidth_kbps(stream->link, &stream->link->cur_link_settings);
+ link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */
+
+ mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec);
+
+ return dc_fixpt_div_int(mbytes_per_sec, 54);
}
static int get_color_depth(enum dc_color_depth color_depth)
@@ -2316,7 +2367,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
uint32_t denominator;
bpc = get_color_depth(pipe_ctx->stream_res.pix_clk_params.color_depth);
- kbps = pipe_ctx->stream_res.pix_clk_params.requested_pix_clk_100hz / 10 * bpc * 3;
+ kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing);
/*
* margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
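get_pbn_per_slot() now derives the per-timeslot PBN from dc_link_bandwidth_kbps(), and get_pbn_from_timing() keeps the 1.006 margin on the stream bandwidth. Rough standalone numbers for an assumed 4-lane HBR2 link and a 1080p60 stream; the 17,280,000 kbps link figure and the 64/54 PBN granularity are assumptions, not values taken from this hunk:

#include <stdint.h>
#include <stdio.h>
#include <math.h>

int main(void)
{
        /* Assumed link bandwidth for a 4-lane HBR2 link after 8b/10b:
         * 5.4 Gbps * 4 lanes * 0.8 = 17,280,000 kbps (hypothetical stand-in
         * for dc_link_bandwidth_kbps()).
         */
        uint32_t link_bw_kbps = 17280000;
        uint32_t mbytes_per_sec = link_bw_kbps / 8000;   /* kbits -> MBytes/s */
        double pbn_per_slot = mbytes_per_sec / 54.0;     /* 2160 / 54 = 40 */

        /* Stream side: 1080p60 RGB 8 bpc needs ~445.5 MB/s; with 64/54 PBN
         * granularity and the 1.006 margin that is ~531 PBN, i.e. 14 of the
         * 64 MTP slots on this link.
         */
        double stream_pbn = 445.5 * 64.0 / 54.0 * 1.006;

        printf("pbn/slot = %.0f, stream pbn = %.1f, slots = %.0f\n",
               pbn_per_slot, stream_pbn, ceil(stream_pbn / pbn_per_slot));
        return 0;
}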
@@ -2551,12 +2602,12 @@ void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL) {
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
stream->link->link_enc->funcs->setup(
stream->link->link_enc,
pipe_ctx->stream->signal);
@@ -2570,9 +2621,10 @@ void core_link_enable_stream(
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
- stream->output_color_space);
+ stream->output_color_space,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
@@ -2736,3 +2788,49 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable)
}
}
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t bits_per_channel = 0;
+ uint32_t kbps;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ bits_per_channel = 6;
+ break;
+ case COLOR_DEPTH_888:
+ bits_per_channel = 8;
+ break;
+ case COLOR_DEPTH_101010:
+ bits_per_channel = 10;
+ break;
+ case COLOR_DEPTH_121212:
+ bits_per_channel = 12;
+ break;
+ case COLOR_DEPTH_141414:
+ bits_per_channel = 14;
+ break;
+ case COLOR_DEPTH_161616:
+ bits_per_channel = 16;
+ break;
+ default:
+ break;
+ }
+
+ ASSERT(bits_per_channel != 0);
+
+ kbps = timing->pix_clk_100hz / 10;
+ kbps *= bits_per_channel;
+
+ if (timing->flags.Y_ONLY != 1) {
+ /* Only Y-only reduces bandwidth to 1/3 of RGB; otherwise count all three components */
+ kbps *= 3;
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ kbps /= 2;
+ else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ kbps = kbps * 2 / 3;
+ }
+
+ return kbps;
+
+}
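dc_bandwidth_in_kbps_from_timing() is promoted to a public helper (declared in dc_link.h later in this patch) and reused by the validation and link-decision paths. A quick standalone check of its arithmetic for an assumed 1080p60 RGB 8 bpc timing:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
        /* Assumed example timing: 1080p60, pixel clock 148.5 MHz,
         * RGB (not Y-only), 8 bits per channel.
         */
        uint32_t pix_clk_100hz = 1485000;   /* 148.5 MHz in 100 Hz units */
        uint32_t bits_per_channel = 8;

        uint32_t kbps = pix_clk_100hz / 10; /* -> kilo-pixels per second */
        kbps *= bits_per_channel;
        kbps *= 3;                          /* three components for RGB */

        /* 148,500 * 8 * 3 = 3,564,000 kbps (~3.56 Gbps), which a
         * 2-lane HBR2 link comfortably exceeds.
         */
        assert(kbps == 3564000);
        printf("required bandwidth: %u kbps\n", kbps);
        return 0;
}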
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index b7ee63cd8dc7..f02092a0dc76 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -573,12 +573,28 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+/* dc_link_aux_transfer_raw() - Attempt to transfer
+ * the given aux payload. This function does not perform
+ * retries or handle error states. The reply is returned
+ * in the payload->reply and the result through
+ * *operation_result. Returns the number of bytes transferred,
+ * or -1 on a failure.
+ */
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
- return dce_aux_transfer(ddc, payload);
+ return dce_aux_transfer_raw(ddc, payload, operation_result);
}
+/* dc_link_aux_transfer_with_retries() - Attempt to submit an
+ * aux payload, retrying on timeouts, defers, and busy states
+ * as outlined in the DP spec. Returns true if the request
+ * was successful.
+ *
+ * Unless you want to implement your own retry semantics, this
+ * is probably the one you want.
+ */
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
{
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 09d301216076..1ee544a32ebb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -93,12 +93,10 @@ static void dpcd_set_link_settings(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
- uint8_t rate = (uint8_t)
- (lt_settings->link_settings.link_rate);
+ uint8_t rate;
union down_spread_ctrl downspread = { {0} };
union lane_count_set lane_count_set = { {0} };
- uint8_t link_set_buffer[2];
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -111,29 +109,42 @@ static void dpcd_set_link_settings(
lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED =
link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED;
- link_set_buffer[0] = rate;
- link_set_buffer[1] = lane_count_set.raw;
-
- core_link_write_dpcd(link, DP_LINK_BW_SET,
- link_set_buffer, 2);
core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL,
&downspread.raw, sizeof(downspread));
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET,
+ &lane_count_set.raw, 1);
+
if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
- (link->dpcd_caps.link_rate_set >= 1 &&
- link->dpcd_caps.link_rate_set <= 8)) {
+ lt_settings->link_settings.use_link_rate_set == true) {
+ rate = 0;
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
core_link_write_dpcd(link, DP_LINK_RATE_SET,
- &link->dpcd_caps.link_rate_set, 1);
+ &lt_settings->link_settings.link_rate_set, 1);
+ } else {
+ rate = (uint8_t) (lt_settings->link_settings.link_rate);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
- DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
- __func__,
- DP_LINK_BW_SET,
- lt_settings->link_settings.link_rate,
- DP_LANE_COUNT_SET,
- lt_settings->link_settings.lane_count,
- DP_DOWNSPREAD_CTRL,
- lt_settings->link_settings.link_spread);
+ if (rate) {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_BW_SET,
+ lt_settings->link_settings.link_rate,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x\n %x spread = %x\n",
+ __func__,
+ DP_LINK_RATE_SET,
+ lt_settings->link_settings.link_rate_set,
+ DP_LANE_COUNT_SET,
+ lt_settings->link_settings.lane_count,
+ DP_DOWNSPREAD_CTRL,
+ lt_settings->link_settings.link_spread);
+ }
}
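With the eDP 1.4 path above, the sink is told which rate to use by index into its SUPPORTED_LINK_RATES table instead of by a BW code. A hedged sketch of the resulting DPCD writes; the panel, its table index and the dpcd_write() helper are hypothetical, while the register offsets follow the DP/eDP DPCD layout:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define DP_LINK_BW_SET    0x100
#define DP_LINK_RATE_SET  0x115

/* Toy stand-in for core_link_write_dpcd(): just prints the write. */
static void dpcd_write(unsigned int addr, uint8_t val)
{
        printf("DPCD[0x%03x] <- 0x%02x\n", addr, val);
}

static void program_link_rate(bool use_link_rate_set, uint8_t link_rate_set,
                              uint8_t link_rate_bw_code)
{
        if (use_link_rate_set) {
                /* eDP 1.4+: clear the BW code, select by table index */
                dpcd_write(DP_LINK_BW_SET, 0);
                dpcd_write(DP_LINK_RATE_SET, link_rate_set);
        } else {
                /* legacy: program the BW code directly */
                dpcd_write(DP_LINK_BW_SET, link_rate_bw_code);
        }
}

int main(void)
{
        program_link_rate(true, 2, 0);      /* hypothetical eDP panel, table entry 2 */
        program_link_rate(false, 0, 0x14);  /* external DP sink at HBR2 */
        return 0;
}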
@@ -952,6 +963,8 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.link_settings.link_rate = link_setting->link_rate;
lt_settings.link_settings.lane_count = link_setting->lane_count;
+ lt_settings.link_settings.use_link_rate_set = link_setting->use_link_rate_set;
+ lt_settings.link_settings.link_rate_set = link_setting->link_rate_set;
/*@todo[vdevulap] move SS to LS, should not be handled by displaypath*/
@@ -1075,7 +1088,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
/* Set Default link settings */
struct dc_link_settings max_link_cap = {LANE_COUNT_FOUR, LINK_RATE_HIGH,
- LINK_SPREAD_05_DOWNSPREAD_30KHZ};
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ, false, 0};
/* Higher link settings based on feature supported */
if (link->link_enc->features.flags.bits.IS_HBR2_CAPABLE)
@@ -1520,69 +1533,6 @@ static bool decide_fallback_link_setting(
return true;
}
-static uint32_t bandwidth_in_kbps_from_timing(
- const struct dc_crtc_timing *timing)
-{
- uint32_t bits_per_channel = 0;
- uint32_t kbps;
-
- switch (timing->display_color_depth) {
- case COLOR_DEPTH_666:
- bits_per_channel = 6;
- break;
- case COLOR_DEPTH_888:
- bits_per_channel = 8;
- break;
- case COLOR_DEPTH_101010:
- bits_per_channel = 10;
- break;
- case COLOR_DEPTH_121212:
- bits_per_channel = 12;
- break;
- case COLOR_DEPTH_141414:
- bits_per_channel = 14;
- break;
- case COLOR_DEPTH_161616:
- bits_per_channel = 16;
- break;
- default:
- break;
- }
-
- ASSERT(bits_per_channel != 0);
-
- kbps = timing->pix_clk_100hz / 10;
- kbps *= bits_per_channel;
-
- if (timing->flags.Y_ONLY != 1) {
- /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/
- kbps *= 3;
- if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
- kbps /= 2;
- else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
- kbps = kbps * 2 / 3;
- }
-
- return kbps;
-
-}
-
-static uint32_t bandwidth_in_kbps_from_link_settings(
- const struct dc_link_settings *link_setting)
-{
- uint32_t link_rate_in_kbps = link_setting->link_rate *
- LINK_RATE_REF_FREQ_IN_KHZ;
-
- uint32_t lane_count = link_setting->lane_count;
- uint32_t kbps = link_rate_in_kbps;
-
- kbps *= lane_count;
- kbps *= 8; /* 8 bits per byte*/
-
- return kbps;
-
-}
-
bool dp_validate_mode_timing(
struct dc_link *link,
const struct dc_crtc_timing *timing)
@@ -1598,8 +1548,7 @@ bool dp_validate_mode_timing(
timing->v_addressable == (uint32_t) 480)
return true;
- /* We always use verified link settings */
- link_setting = &link->verified_link_cap;
+ link_setting = dc_link_get_link_cap(link);
/* TODO: DYNAMIC_VALIDATION needs to be implemented */
/*if (flags.DYNAMIC_VALIDATION == 1 &&
@@ -1607,8 +1556,8 @@ bool dp_validate_mode_timing(
link_setting = &link->verified_link_cap;
*/
- req_bw = bandwidth_in_kbps_from_timing(timing);
- max_bw = bandwidth_in_kbps_from_link_settings(link_setting);
+ req_bw = dc_bandwidth_in_kbps_from_timing(timing);
+ max_bw = dc_link_bandwidth_kbps(link, link_setting);
if (req_bw <= max_bw) {
/* remember the biggest mode here, during
@@ -1629,58 +1578,78 @@ bool dp_validate_mode_timing(
return false;
}
-void decide_link_settings(struct dc_stream_state *stream,
- struct dc_link_settings *link_setting)
+static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
{
-
struct dc_link_settings initial_link_setting = {
- LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED};
+ LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0};
struct dc_link_settings current_link_setting =
initial_link_setting;
- struct dc_link *link;
- uint32_t req_bw;
uint32_t link_bw;
- req_bw = bandwidth_in_kbps_from_timing(&stream->timing);
-
- link = stream->link;
-
- /* if preferred is specified through AMDDP, use it, if it's enough
- * to drive the mode
+ /* search for the minimum link setting that:
+ * 1. is supported according to the link training result
+ * 2. could support the b/w requested by the timing
*/
- if (link->preferred_link_setting.lane_count !=
- LANE_COUNT_UNKNOWN &&
- link->preferred_link_setting.link_rate !=
- LINK_RATE_UNKNOWN) {
- *link_setting = link->preferred_link_setting;
- return;
- }
+ while (current_link_setting.link_rate <=
+ link->verified_link_cap.link_rate) {
+ link_bw = dc_link_bandwidth_kbps(
+ link,
+ &current_link_setting);
+ if (req_bw <= link_bw) {
+ *link_setting = current_link_setting;
+ return true;
+ }
- /* MST doesn't perform link training for now
- * TODO: add MST specific link training routine
- */
- if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
- *link_setting = link->verified_link_cap;
- return;
+ if (current_link_setting.lane_count <
+ link->verified_link_cap.lane_count) {
+ current_link_setting.lane_count =
+ increase_lane_count(
+ current_link_setting.lane_count);
+ } else {
+ current_link_setting.link_rate =
+ increase_link_rate(
+ current_link_setting.link_rate);
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ }
}
- /* EDP use the link cap setting */
- if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ return false;
+}
+
+static bool decide_edp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw)
+{
+ struct dc_link_settings initial_link_setting;
+ struct dc_link_settings current_link_setting;
+ uint32_t link_bw;
+
+ if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14 ||
+ link->dpcd_caps.edp_supported_link_rates_count == 0 ||
+ link->dc->config.optimize_edp_link_rate == false) {
*link_setting = link->verified_link_cap;
- return;
+ return true;
}
+ memset(&initial_link_setting, 0, sizeof(initial_link_setting));
+ initial_link_setting.lane_count = LANE_COUNT_ONE;
+ initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0];
+ initial_link_setting.link_spread = LINK_SPREAD_DISABLED;
+ initial_link_setting.use_link_rate_set = true;
+ initial_link_setting.link_rate_set = 0;
+ current_link_setting = initial_link_setting;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
*/
while (current_link_setting.link_rate <=
link->verified_link_cap.link_rate) {
- link_bw = bandwidth_in_kbps_from_link_settings(
+ link_bw = dc_link_bandwidth_kbps(
+ link,
&current_link_setting);
if (req_bw <= link_bw) {
*link_setting = current_link_setting;
- return;
+ return true;
}
if (current_link_setting.lane_count <
@@ -1689,13 +1658,53 @@ void decide_link_settings(struct dc_stream_state *stream,
increase_lane_count(
current_link_setting.lane_count);
} else {
- current_link_setting.link_rate =
- increase_link_rate(
- current_link_setting.link_rate);
- current_link_setting.lane_count =
- initial_link_setting.lane_count;
+ if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) {
+ current_link_setting.link_rate_set++;
+ current_link_setting.link_rate =
+ link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set];
+ current_link_setting.lane_count =
+ initial_link_setting.lane_count;
+ } else
+ break;
}
}
+ return false;
+}
+
+void decide_link_settings(struct dc_stream_state *stream,
+ struct dc_link_settings *link_setting)
+{
+ struct dc_link *link;
+ uint32_t req_bw;
+
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+
+ link = stream->link;
+
+ /* if a preferred setting is specified through AMDDP and it is enough
+ * to drive the mode, use it
+ */
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ *link_setting = link->preferred_link_setting;
+ return;
+ }
+
+ /* MST doesn't perform link training for now
+ * TODO: add MST specific link training routine
+ */
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ *link_setting = link->verified_link_cap;
+ return;
+ }
+
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ if (decide_edp_link_settings(link, link_setting, req_bw))
+ return;
+ } else if (decide_dp_link_settings(link, link_setting, req_bw))
+ return;
BREAK_TO_DEBUGGER();
ASSERT(link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN);
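decide_dp_link_settings() and decide_edp_link_settings() both walk lane count first and only then raise the link rate, so the lowest setting that satisfies the requested bandwidth wins. A standalone sketch of that search order; link_bw_kbps() is a hypothetical stand-in for dc_link_bandwidth_kbps():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-link bandwidth: raw line rate scaled by 8/10 for 8b/10b. */
static uint32_t link_bw_kbps(uint32_t lanes, uint32_t rate_x100_gbps)
{
        return lanes * rate_x100_gbps * 8000;
}

int main(void)
{
        const uint32_t rates[] = { 162, 270, 540 };  /* RBR, HBR, HBR2 (Gbps x100) */
        const uint32_t max_lanes = 4;
        uint32_t req_bw = 3564000;                   /* e.g. 1080p60 RGB 8 bpc */
        uint32_t lanes, r;

        /* Same escalation order as the driver: grow lanes, then rate. */
        for (r = 0; r < 3; r++) {
                for (lanes = 1; lanes <= max_lanes; lanes *= 2) {
                        uint32_t bw = link_bw_kbps(lanes, rates[r]);

                        printf("try %u lane(s) @ %u.%02u Gbps -> %u kbps\n",
                               lanes, rates[r] / 100, rates[r] % 100, bw);
                        if (bw >= req_bw) {
                                printf("selected\n");
                                return 0;
                        }
                }
        }
        return 1;
}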
@@ -2155,11 +2164,7 @@ bool is_mst_supported(struct dc_link *link)
bool is_dp_active_dongle(const struct dc_link *link)
{
- enum display_dongle_type dongle_type = link->dpcd_caps.dongle_type;
-
- return (dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_DVI_CONVERTER) ||
- (dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
+ return link->dpcd_caps.is_branch_dev;
}
static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
@@ -2180,6 +2185,30 @@ static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
return -1;
}
+static void read_dp_device_vendor_id(struct dc_link *link)
+{
+ struct dp_device_vendor_id dp_id;
+
+ /* read IEEE branch device id */
+ core_link_read_dpcd(
+ link,
+ DP_BRANCH_OUI,
+ (uint8_t *)&dp_id,
+ sizeof(dp_id));
+
+ link->dpcd_caps.branch_dev_id =
+ (dp_id.ieee_oui[0] << 16) +
+ (dp_id.ieee_oui[1] << 8) +
+ dp_id.ieee_oui[2];
+
+ memmove(
+ link->dpcd_caps.branch_dev_name,
+ dp_id.ieee_device_id,
+ sizeof(dp_id.ieee_device_id));
+}
+
+
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
@@ -2193,6 +2222,9 @@ static void get_active_converter_info(
return;
}
+ /* DPCD 0x5 bit 0 = 1 indicates the sink is a branch device */
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
+
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER;
@@ -2234,8 +2266,8 @@ static void get_active_converter_info(
hdmi_caps = {.raw = det_caps[3] };
union dwnstream_port_caps_byte2
hdmi_color_caps = {.raw = det_caps[2] };
- link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk =
- det_caps[1] * 25000;
+ link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz =
+ det_caps[1] * 2500;
link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter =
hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK;
@@ -2252,7 +2284,7 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0)
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
@@ -2263,27 +2295,6 @@ static void get_active_converter_info(
ddc_service_set_dongle_type(link->ddc, link->dpcd_caps.dongle_type);
{
- struct dp_device_vendor_id dp_id;
-
- /* read IEEE branch device id */
- core_link_read_dpcd(
- link,
- DP_BRANCH_OUI,
- (uint8_t *)&dp_id,
- sizeof(dp_id));
-
- link->dpcd_caps.branch_dev_id =
- (dp_id.ieee_oui[0] << 16) +
- (dp_id.ieee_oui[1] << 8) +
- dp_id.ieee_oui[2];
-
- memmove(
- link->dpcd_caps.branch_dev_name,
- dp_id.ieee_device_id,
- sizeof(dp_id.ieee_device_id));
- }
-
- {
struct dp_sink_hw_fw_revision dp_hw_fw_revision;
core_link_read_dpcd(
@@ -2347,6 +2358,10 @@ static bool retrieve_link_cap(struct dc_link *link)
{
uint8_t dpcd_data[DP_ADAPTER_CAP - DP_DPCD_REV + 1];
+ /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST.
+ */
+ uint8_t dpcd_dprx_data = '\0';
+
struct dp_device_vendor_id sink_id;
union down_stream_port_count down_strm_port_count;
union edp_configuration_cap edp_config_cap;
@@ -2383,7 +2398,10 @@ static bool retrieve_link_cap(struct dc_link *link)
aux_rd_interval.raw =
dpcd_data[DP_TRAINING_AUX_RD_INTERVAL];
- if (aux_rd_interval.bits.EXT_RECIEVER_CAP_FIELD_PRESENT == 1) {
+ link->dpcd_caps.ext_receiver_cap_field_present =
+ aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1;
+
+ if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) {
uint8_t ext_cap_data[16];
memset(ext_cap_data, '\0', sizeof(ext_cap_data));
@@ -2404,11 +2422,44 @@ static bool retrieve_link_cap(struct dc_link *link)
}
link->dpcd_caps.dpcd_rev.raw =
- dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+ dpcd_data[DP_DPCD_REV - DP_DPCD_REV];
+
+ if (link->dpcd_caps.dpcd_rev.raw >= 0x14) {
+ for (i = 0; i < read_dpcd_retry_cnt; i++) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DPRX_FEATURE_ENUMERATION_LIST,
+ &dpcd_dprx_data,
+ sizeof(dpcd_dprx_data));
+ if (status == DC_OK)
+ break;
+ }
+
+ link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data;
+
+ if (status != DC_OK)
+ dm_error("%s: Read DPRX caps data failed.\n", __func__);
+ } else {
+ link->dpcd_caps.dprx_feature.raw = 0;
+ }
+
+
+ /* Error condition checking...
+ * It is impossible for Sink to report Max Lane Count = 0.
+ * It is possible for Sink to report Max Link Rate = 0, if it is
+ * an eDP device that is reporting specialized link rates in the
+ * SUPPORTED_LINK_RATE table.
+ */
+ if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0)
+ return false;
ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT -
DP_DPCD_REV];
+ read_dp_device_vendor_id(link);
+
get_active_converter_info(ds_port.byte, link);
dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data));
@@ -2536,31 +2587,31 @@ enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz)
void detect_edp_sink_caps(struct dc_link *link)
{
- uint8_t supported_link_rates[16] = {0};
+ uint8_t supported_link_rates[16];
uint32_t entry;
uint32_t link_rate_in_khz;
enum dc_link_rate link_rate = LINK_RATE_UNKNOWN;
retrieve_link_cap(link);
+ link->dpcd_caps.edp_supported_link_rates_count = 0;
+ memset(supported_link_rates, 0, sizeof(supported_link_rates));
- if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) {
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dc->config.optimize_edp_link_rate) {
// Read DPCD 00010h - 0001Fh 16 bytes at one shot
core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES,
supported_link_rates, sizeof(supported_link_rates));
- link->dpcd_caps.link_rate_set = 0;
for (entry = 0; entry < 16; entry += 2) {
// DPCD register reports per-lane link rate = 16-bit link rate capability
- // value X 200 kHz. Need multipler to find link rate in kHz.
+ // value X 200 kHz. Need multiplier to find link rate in kHz.
link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 +
supported_link_rates[entry]) * 200;
if (link_rate_in_khz != 0) {
link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz);
- if (link->reported_link_cap.link_rate < link_rate) {
- link->reported_link_cap.link_rate = link_rate;
- link->dpcd_caps.link_rate_set = entry;
- }
+ link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate;
+ link->dpcd_caps.edp_supported_link_rates_count++;
}
}
}
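Each SUPPORTED_LINK_RATES entry is a little-endian 16-bit value in 200 kHz units, exactly as the loop above decodes it. A standalone check with a hypothetical two-entry table:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical DPCD 0x010-0x01F contents: 2.16 GHz and 2.43 GHz
         * per-lane rates, little-endian, remaining entries zero.
         */
        uint8_t supported_link_rates[16] = {
                0x30, 0x2A,   /* 0x2A30 = 10800 -> 10800 * 200 kHz = 2,160,000 kHz */
                0x76, 0x2F,   /* 0x2F76 = 12150 -> 2,430,000 kHz */
        };
        unsigned int entry;

        for (entry = 0; entry < 16; entry += 2) {
                uint32_t khz = (supported_link_rates[entry + 1] * 0x100 +
                                supported_link_rates[entry]) * 200;

                if (khz)
                        printf("entry %u: %u kHz per lane\n", entry / 2, khz);
        }
        return 0;
}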
@@ -2601,6 +2652,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
enum dc_color_depth color_depth = pipe_ctx->
stream->timing.display_color_depth;
struct bit_depth_reduction_params params;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
memset(&params, 0, sizeof(params));
@@ -2640,8 +2692,7 @@ static void set_crtc_test_pattern(struct dc_link *link,
{
/* disable bit depth reduction */
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
controller_test_pattern, color_depth);
@@ -2650,11 +2701,9 @@ static void set_crtc_test_pattern(struct dc_link *link,
case DP_TEST_PATTERN_VIDEO_MODE:
{
/* restore bitdepth reduction */
- resource_build_bit_depth_reduction_params(pipe_ctx->stream,
- &params);
+ resource_build_bit_depth_reduction_params(pipe_ctx->stream, &params);
pipe_ctx->stream->bit_depth_params = params;
- pipe_ctx->stream_res.opp->funcs->
- opp_program_bit_depth_reduction(pipe_ctx->stream_res.opp, &params);
+ opp->funcs->opp_program_bit_depth_reduction(opp, &params);
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index f7f7515f65f4..b0dea759cd86 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -58,6 +58,8 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc = link->link_enc;
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
struct pipe_ctx *pipes =
link->dc->current_state->res_ctx.pipe_ctx;
@@ -84,6 +86,9 @@ void dp_enable_link_phy(
}
}
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -95,6 +100,10 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
+
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+
link->cur_link_settings = *link_settings;
dp_receiver_power_ctrl(link, true);
@@ -150,15 +159,25 @@ bool edp_receiver_ready_T7(struct dc_link *link)
void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
+ struct dc *core_dc = link->ctx->dc;
+ struct dmcu *dmcu = core_dc->res_pool->dmcu;
+
if (!link->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(link, false);
if (signal == SIGNAL_TYPE_EDP) {
link->link_enc->funcs->disable_output(link->link_enc, signal);
link->dc->hwss.edp_power_control(link, false);
- } else
+ } else {
+ if (dmcu != NULL && dmcu->funcs->lock_phy)
+ dmcu->funcs->lock_phy(dmcu);
+
link->link_enc->funcs->disable_output(link->link_enc, signal);
+ if (dmcu != NULL && dmcu->funcs->unlock_phy)
+ dmcu->funcs->unlock_phy(dmcu);
+ }
+
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 349ab8017776..eac7186e4f08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -31,6 +31,8 @@
#include "opp.h"
#include "timing_generator.h"
#include "transform.h"
+#include "dccg.h"
+#include "dchubbub.h"
#include "dpp.h"
#include "core_types.h"
#include "set_mode_types.h"
@@ -104,44 +106,43 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
return dc_version;
}
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id)
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version)
{
struct resource_pool *res_pool = NULL;
switch (dc_version) {
case DCE_VERSION_8_0:
res_pool = dce80_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_1:
res_pool = dce81_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_8_3:
res_pool = dce83_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_10_0:
res_pool = dce100_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_11_0:
res_pool = dce110_create_resource_pool(
- num_virtual_links, dc, asic_id);
+ init_data->num_virtual_links, dc,
+ init_data->asic_id);
break;
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
res_pool = dce112_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
case DCE_VERSION_12_0:
case DCE_VERSION_12_1:
res_pool = dce120_create_resource_pool(
- num_virtual_links, dc);
+ init_data->num_virtual_links, dc);
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
@@ -149,8 +150,7 @@ struct resource_pool *dc_create_resource_pool(
#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
case DCN_VERSION_1_01:
#endif
- res_pool = dcn10_create_resource_pool(
- num_virtual_links, dc);
+ res_pool = dcn10_create_resource_pool(init_data, dc);
break;
#endif
@@ -163,7 +163,28 @@ struct resource_pool *dc_create_resource_pool(
if (dc->ctx->dc_bios->funcs->get_firmware_info(
dc->ctx->dc_bios, &fw_info) == BP_RESULT_OK) {
- res_pool->ref_clock_inKhz = fw_info.pll_info.crystal_frequency;
+ res_pool->ref_clocks.xtalin_clock_inKhz = fw_info.pll_info.crystal_frequency;
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ // On FPGA these dividers are currently not configured by GDB
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ } else if (res_pool->dccg && res_pool->hubbub) {
+ // If DCCG reference frequency cannot be determined (usually means not set to xtalin) then this is a critical error
+ // as this value must be known for DCHUB programming
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ // Similarly, if DCHUB reference frequency cannot be determined, then it is also a critical error
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ // Not all ASICs have DCCG sw component
+ res_pool->ref_clocks.dccg_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz = res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
} else
ASSERT_CRITICAL(false);
}
@@ -260,6 +281,7 @@ bool resource_construct(
pool->stream_enc_count++;
}
}
+
dc->caps.dynamic_audio = false;
if (pool->audio_count < pool->stream_enc_count) {
dc->caps.dynamic_audio = true;
@@ -1014,24 +1036,60 @@ enum dc_status resource_build_scaling_params_for_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe)
{
int i;
struct pipe_ctx *secondary_pipe = NULL;
/*
- * search backwards for the second pipe to keep pipe
- * assignment more consistent
+ * We add a preferred pipe mapping to avoid the chance that
+ * MPCCs already in use will need to be reassigned to other trees.
+ * For example, if we went with the strict, assign backwards logic:
+ *
+ * (State 1)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, no surface, top pipe = 1
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 5
+ *
+ * (State 3)
+ * Display A on, surface enable, top pipe = 0, bottom pipe = 5
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * The state 2->3 transition requires remapping MPCC 5 from display B
+ * to display A.
+ *
+ * However, with the preferred pipe logic, state 2 would look like:
+ *
+ * (State 2)
+ * Display A on, no surface, top pipe = 0
+ * Display B on, surface enable, top pipe = 1, bottom pipe = 4
+ *
+ * This would then cause 2->3 to not require remapping any MPCCs.
*/
-
- for (i = pool->pipe_count - 1; i >= 0; i--) {
- if (res_ctx->pipe_ctx[i].stream == NULL) {
- secondary_pipe = &res_ctx->pipe_ctx[i];
- secondary_pipe->pipe_idx = i;
- break;
+ if (primary_pipe) {
+ int preferred_pipe_idx = (pool->pipe_count - 1) - primary_pipe->pipe_idx;
+ if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx];
+ secondary_pipe->pipe_idx = preferred_pipe_idx;
}
}
+ /*
+ * search backwards for the second pipe to keep pipe
+ * assignment more consistent
+ */
+ if (!secondary_pipe)
+ for (i = pool->pipe_count - 1; i >= 0; i--) {
+ if (res_ctx->pipe_ctx[i].stream == NULL) {
+ secondary_pipe = &res_ctx->pipe_ctx[i];
+ secondary_pipe->pipe_idx = i;
+ break;
+ }
+ }
return secondary_pipe;
}
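The preferred mapping above pairs primary pipe i with secondary pipe (pipe_count - 1) - i so that MPCC assignments stay put as more planes light up. The index math for an assumed six-pipe pool:

#include <stdio.h>

int main(void)
{
        int pipe_count = 6;                     /* assumed pool size */
        int primary;

        for (primary = 0; primary < pipe_count / 2; primary++) {
                int preferred = (pipe_count - 1) - primary;

                /* primary 0 -> secondary 5, primary 1 -> secondary 4, ... */
                printf("primary pipe %d prefers secondary pipe %d\n",
                       primary, preferred);
        }
        return 0;
}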
@@ -1214,6 +1272,9 @@ bool dc_add_plane_to_context(
free_pipe->clock_source = tail_pipe->clock_source;
free_pipe->top_pipe = tail_pipe;
tail_pipe->bottom_pipe = free_pipe;
+ } else if (free_pipe->bottom_pipe && free_pipe->bottom_pipe->plane_state == NULL) {
+ ASSERT(free_pipe->bottom_pipe->stream_res.opp != free_pipe->stream_res.opp);
+ free_pipe->bottom_pipe->plane_state = plane_state;
}
/* assign new surfaces*/
@@ -1224,6 +1285,35 @@ bool dc_add_plane_to_context(
return true;
}
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *bottom_pipe = pipe_ctx->bottom_pipe;
+
+ /* ODM should only be updated once per otg */
+ if (pipe_ctx->top_pipe)
+ return NULL;
+
+ while (bottom_pipe) {
+ if (bottom_pipe->stream_res.opp != pipe_ctx->stream_res.opp)
+ break;
+ bottom_pipe = bottom_pipe->bottom_pipe;
+ }
+
+ return bottom_pipe;
+}
+
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx)
+{
+ struct pipe_ctx *top_pipe = pipe_ctx->top_pipe;
+
+ if (!top_pipe)
+ return false;
+ if (top_pipe && top_pipe->stream_res.opp == pipe_ctx->stream_res.opp)
+ return false;
+
+ return true;
+}
+
bool dc_remove_plane_from_context(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -1247,10 +1337,14 @@ bool dc_remove_plane_from_context(
/* release pipe for plane*/
for (i = pool->pipe_count - 1; i >= 0; i--) {
- struct pipe_ctx *pipe_ctx;
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (context->res_ctx.pipe_ctx[i].plane_state == plane_state) {
- pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ if (pipe_ctx->plane_state == plane_state) {
+ if (dc_res_is_odm_head_pipe(pipe_ctx)) {
+ pipe_ctx->plane_state = NULL;
+ pipe_ctx->bottom_pipe = NULL;
+ continue;
+ }
if (pipe_ctx->top_pipe)
pipe_ctx->top_pipe->bottom_pipe = pipe_ctx->bottom_pipe;
@@ -1268,8 +1362,9 @@ bool dc_remove_plane_from_context(
*/
if (!pipe_ctx->top_pipe) {
pipe_ctx->plane_state = NULL;
- pipe_ctx->bottom_pipe = NULL;
- } else {
+ if (!dc_res_get_odm_bottom_pipe(pipe_ctx))
+ pipe_ctx->bottom_pipe = NULL;
+ } else {
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
}
}
@@ -1674,6 +1769,9 @@ enum dc_status dc_remove_stream_from_ctx(
for (i = 0; i < MAX_PIPES; i++) {
if (new_ctx->res_ctx.pipe_ctx[i].stream == stream &&
!new_ctx->res_ctx.pipe_ctx[i].top_pipe) {
+ struct pipe_ctx *odm_pipe =
+ dc_res_get_odm_bottom_pipe(&new_ctx->res_ctx.pipe_ctx[i]);
+
del_pipe = &new_ctx->res_ctx.pipe_ctx[i];
ASSERT(del_pipe->stream_res.stream_enc);
@@ -1698,6 +1796,8 @@ enum dc_status dc_remove_stream_from_ctx(
dc->res_pool->funcs->remove_stream_from_ctx(dc, new_ctx, stream);
memset(del_pipe, 0, sizeof(*del_pipe));
+ if (odm_pipe)
+ memset(odm_pipe, 0, sizeof(*odm_pipe));
break;
}
@@ -1855,6 +1955,7 @@ enum dc_status resource_map_pool_resources(
struct dc_context *dc_ctx = dc->ctx;
struct pipe_ctx *pipe_ctx = NULL;
int pipe_idx = -1;
+ struct dc_bios *dcb = dc->ctx->dc_bios;
/* TODO Check if this is needed */
/*if (!resource_is_stream_unchanged(old_context, stream)) {
@@ -1869,6 +1970,13 @@ enum dc_status resource_map_pool_resources(
calculate_phy_pix_clks(stream);
+ /* TODO: Check Linux */
+ if (dc->config.allow_seamless_boot_optimization &&
+ !dcb->funcs->is_accelerated_mode(dcb)) {
+ if (dc_validate_seamless_boot_timing(dc, stream->sink, &stream->timing))
+ stream->apply_seamless_boot_optimization = true;
+ }
+
if (stream->apply_seamless_boot_optimization)
pipe_idx = acquire_resource_from_hw_enabled_state(
&context->res_ctx,
@@ -1951,7 +2059,7 @@ void dc_resource_state_construct(
const struct dc *dc,
struct dc_state *dst_ctx)
{
- dst_ctx->dccg = dc->res_pool->clk_mgr;
+ dst_ctx->clk_mgr = dc->res_pool->clk_mgr;
}
/**
@@ -1959,12 +2067,14 @@ void dc_resource_state_construct(
* Checks HW resource availability and bandwidth requirement.
* @dc: dc struct for this driver
* @new_ctx: state to be validated
+ * @fast_validate: set to true if only yes/no to support matters
*
* Return: DC_OK if the result can be programmed. Otherwise, an error code.
*/
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx)
+ struct dc_state *new_ctx,
+ bool fast_validate)
{
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
@@ -2019,7 +2129,7 @@ enum dc_status dc_validate_global_state(
result = resource_build_scaling_params_for_context(dc, new_ctx);
if (result == DC_OK)
- if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx))
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
return result;
@@ -2315,6 +2425,21 @@ static void set_spd_info_packet(
*info_packet = stream->vrr_infopacket;
}
+static void set_dp_sdp_info_packet(
+ struct dc_info_packet *info_packet,
+ struct dc_stream_state *stream)
+{
+ /* DP SDP info packet carrying a custom sdp message */
+
+ /* Return early if the custom SDP info packet is not valid;
+ * otherwise copy it into the encoder info packet.
+ */
+ if (!stream->dpsdp_infopacket.valid)
+ return;
+
+ *info_packet = stream->dpsdp_infopacket;
+}
+
static void set_hdr_static_info_packet(
struct dc_info_packet *info_packet,
struct dc_stream_state *stream)
@@ -2411,6 +2536,7 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
info->spd.valid = false;
info->hdrsmd.valid = false;
info->vsc.valid = false;
+ info->dpsdp.valid = false;
signal = pipe_ctx->stream->signal;
@@ -2430,6 +2556,8 @@ void resource_build_info_frame(struct pipe_ctx *pipe_ctx)
set_spd_info_packet(&info->spd, pipe_ctx->stream);
set_hdr_static_info_packet(&info->hdrsmd, pipe_ctx->stream);
+
+ set_dp_sdp_info_packet(&info->dpsdp, pipe_ctx->stream);
}
patch_gamut_packet_checksum(&info->gamut);
@@ -2657,10 +2785,11 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
if (!tg->funcs->validate_timing(tg, &stream->timing))
res = DC_FAIL_CONTROLLER_VALIDATE;
- if (res == DC_OK)
+ if (res == DC_OK) {
if (!link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
+ }
/* TODO: validate audio ASIC caps, encoder */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 996298c35f42..96e97d25d639 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -29,6 +29,9 @@
#include "resource.h"
#include "ipp.h"
#include "timing_generator.h"
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+#include "dcn10/dcn10_hw_sequencer.h"
+#endif
#define DC_LOGGER dc->ctx->logger
@@ -160,6 +163,27 @@ struct dc_stream_state *dc_create_stream_for_sink(
return stream;
}
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
+{
+ struct dc_stream_state *new_stream;
+
+ new_stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
+ if (!new_stream)
+ return NULL;
+
+ memcpy(new_stream, stream, sizeof(struct dc_stream_state));
+
+ if (new_stream->sink)
+ dc_sink_retain(new_stream->sink);
+
+ if (new_stream->out_transfer_func)
+ dc_transfer_func_retain(new_stream->out_transfer_func);
+
+ kref_init(&new_stream->refcount);
+
+ return new_stream;
+}
+
/**
* dc_stream_get_status_from_state - Get stream status from given dc state
* @state: DC state to find the stream status in
@@ -196,6 +220,35 @@ struct dc_stream_status *dc_stream_get_status(
return dc_stream_get_status_from_state(dc->current_state, stream);
}
+static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
+{
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ unsigned int vupdate_line;
+ unsigned int lines_to_vupdate, us_to_vupdate, vpos, nvpos;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int us_per_line;
+
+ if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
+ ASIC_REV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
+
+ vupdate_line = get_vupdate_offset_from_vsync(pipe_ctx);
+ if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
+ return;
+
+ if (vpos >= vupdate_line)
+ return;
+
+ us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
+ lines_to_vupdate = vupdate_line - vpos;
+ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+ /* 70 us is a conservative estimate of cursor update time; only wait when VUPDATE is closer than that */
+ if (us_to_vupdate < 70)
+ udelay(us_to_vupdate);
+ }
+#endif
+}
+
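delay_cursor_until_vupdate() turns the distance to the VUPDATE line into microseconds via the line time derived from h_total and the pixel clock. Worked numbers for an assumed 1080p60 timing; the VUPDATE offset and scanline position are made-up examples:

#include <stdio.h>

int main(void)
{
        /* Assumed example timing, not taken from the patch. */
        unsigned int h_total = 2200;
        unsigned int pix_clk_100hz = 1485000;      /* 148.5 MHz */
        unsigned int vupdate_line = 1090;          /* hypothetical offset */
        unsigned int vpos = 1086;                  /* hypothetical current scanline */

        /* us per line = h_total * 10000 / pix_clk_100hz  (~14 us here) */
        unsigned int us_per_line = h_total * 10000 / pix_clk_100hz;
        unsigned int us_to_vupdate = (vupdate_line - vpos) * us_per_line;

        /* The driver only busy-waits when the gap is under ~70 us. */
        printf("us_per_line=%u us_to_vupdate=%u\n", us_per_line, us_to_vupdate);
        return 0;
}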
/**
* dc_stream_set_cursor_attributes() - Update cursor attributes and set cursor surface address
*/
@@ -234,6 +287,8 @@ bool dc_stream_set_cursor_attributes(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -278,11 +333,13 @@ bool dc_stream_set_cursor_position(
(!pipe_ctx->plane_res.mi && !pipe_ctx->plane_res.hubp) ||
!pipe_ctx->plane_state ||
(!pipe_ctx->plane_res.xfm && !pipe_ctx->plane_res.dpp) ||
- !pipe_ctx->plane_res.ipp)
+ (!pipe_ctx->plane_res.ipp && !pipe_ctx->plane_res.dpp))
continue;
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
+
+ delay_cursor_until_vupdate(pipe_ctx, core_dc);
core_dc->hwss.pipe_control_lock(core_dc, pipe_to_program, true);
}
@@ -314,6 +371,68 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
return 0;
}
+static void build_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ uint8_t i;
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* set valid info */
+ info->dpsdp.valid = true;
+
+ /* set sdp message header */
+ info->dpsdp.hb0 = custom_sdp_message[0]; /* packet id */
+ info->dpsdp.hb1 = custom_sdp_message[1]; /* packet type */
+ info->dpsdp.hb2 = custom_sdp_message[2]; /* packet specific byte 0, any data */
+ info->dpsdp.hb3 = custom_sdp_message[3]; /* packet specific byte 1, any data */
+
+ /* set sdp message data */
+ for (i = 0; i < 32; i++)
+ info->dpsdp.sb[i] = (custom_sdp_message[i+4]);
+
+}
+
+static void invalid_dp_sdp_info_frame(struct pipe_ctx *pipe_ctx)
+{
+ struct encoder_info_frame *info = &pipe_ctx->stream_res.encoder_info_frame;
+
+ /* mark the info packet invalid */
+ info->dpsdp.valid = false;
+}
+
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size)
+{
+ int i;
+ struct dc *core_dc;
+ struct resource_context *res_ctx;
+
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ core_dc = stream->ctx->dc;
+ res_ctx = &core_dc->current_state->res_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
+
+ if (pipe_ctx->stream != stream)
+ continue;
+
+ build_dp_sdp_info_frame(pipe_ctx, custom_sdp_message, sdp_message_size);
+
+ core_dc->hwss.update_info_frame(pipe_ctx);
+
+ invalid_dp_sdp_info_frame(pipe_ctx);
+ }
+
+ return true;
+}
+
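dc_stream_send_dp_sdp() takes the raw SDP bytes: the first four become HB0-HB3 and the next 32 fill the payload, as build_dp_sdp_info_frame() shows. A standalone sketch of assembling such a buffer; the header values are illustrative only:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical custom SDP: 4 header bytes + 32 payload bytes,
         * matching the layout consumed by build_dp_sdp_info_frame().
         */
        uint8_t sdp[36];
        uint8_t hb[4], sb[32];

        memset(sdp, 0, sizeof(sdp));
        sdp[0] = 0x00;   /* HB0: example packet id */
        sdp[1] = 0x07;   /* HB1: example packet type */
        sdp[2] = 0x01;   /* HB2: packet specific byte 0 (example) */
        sdp[3] = 0x1d;   /* HB3: packet specific byte 1 (example) */

        memcpy(hb, sdp, 4);        /* -> info->dpsdp.hb0..hb3 */
        memcpy(sb, sdp + 4, 32);   /* -> info->dpsdp.sb[0..31] */

        printf("HB: %02x %02x %02x %02x, %zu payload bytes\n",
               hb[0], hb[1], hb[2], hb[3], sizeof(sb));
        return 0;
}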
bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *v_blank_start,
uint32_t *v_blank_end,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index ee6bd50f60b8..a5e86f9b148f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -119,6 +119,19 @@ const struct dc_plane_status *dc_plane_get_status(
if (core_dc->current_state == NULL)
return NULL;
+ /* Find the current plane state and set its pending bit to false */
+ for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx =
+ &core_dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->plane_state != plane_state)
+ continue;
+
+ pipe_ctx->plane_state->status.is_flip_pending = false;
+
+ break;
+ }
+
for (i = 0; i < core_dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&core_dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 0515095574e7..44e4b0465587 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,9 +39,10 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.17"
+#define DC_VER "3.2.27"
#define MAX_SURFACES 3
+#define MAX_PLANES 6
#define MAX_STREAMS 6
#define MAX_SINKS_PER_LINK 4
@@ -53,6 +54,41 @@ struct dc_versions {
struct dmcu_version dmcu_version;
};
+enum dc_plane_type {
+ DC_PLANE_TYPE_INVALID,
+ DC_PLANE_TYPE_DCE_RGB,
+ DC_PLANE_TYPE_DCE_UNDERLAY,
+ DC_PLANE_TYPE_DCN_UNIVERSAL,
+};
+
+struct dc_plane_cap {
+ enum dc_plane_type type;
+ uint32_t blends_with_above : 1;
+ uint32_t blends_with_below : 1;
+ uint32_t per_pixel_alpha : 1;
+ struct {
+ uint32_t argb8888 : 1;
+ uint32_t nv12 : 1;
+ uint32_t fp16 : 1;
+ } pixel_format_support;
+ // max upscaling factor x1000
+ // upscaling factors are always >= 1
+ // for example, 1080p -> 8K is 4.0, or 4000 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_upscale_factor;
+ // max downscale factor x1000
+ // downscale factors are always <= 1
+ // for example, 8K -> 1080p is 0.25, or 250 raw value
+ struct {
+ uint32_t argb8888;
+ uint32_t nv12;
+ uint32_t fp16;
+ } max_downscale_factor;
+};
+
struct dc_caps {
uint32_t max_streams;
uint32_t max_links;
@@ -73,6 +109,7 @@ struct dc_caps {
bool force_dp_tps4_for_cp2520;
bool disable_dp_clk_share;
bool psp_setup_panel_mode;
+ struct dc_plane_cap planes[MAX_PLANES];
};
struct dc_dcc_surface_param {
@@ -164,6 +201,10 @@ struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
bool fbc_support;
+ bool optimize_edp_link_rate;
+ bool disable_fractional_pwm;
+ bool allow_seamless_boot_optimization;
+ bool power_down_display_on_boot;
};
enum visual_confirm {
@@ -203,7 +244,59 @@ struct dc_clocks {
int fclk_khz;
int phyclk_khz;
int dramclk_khz;
-};
+ bool p_state_change_support;
+};
+
+struct dc_bw_validation_profile {
+ bool enable;
+
+ unsigned long long total_ticks;
+ unsigned long long voltage_level_ticks;
+ unsigned long long watermark_ticks;
+ unsigned long long rq_dlg_ticks;
+
+ unsigned long long total_count;
+ unsigned long long skip_fast_count;
+ unsigned long long skip_pass_count;
+ unsigned long long skip_fail_count;
+};
+
+#define BW_VAL_TRACE_SETUP() \
+ unsigned long long end_tick = 0; \
+ unsigned long long voltage_level_tick = 0; \
+ unsigned long long watermark_tick = 0; \
+ unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
+ dm_get_timestamp(dc->ctx) : 0
+
+#define BW_VAL_TRACE_COUNT() \
+ if (dc->debug.bw_val_profile.enable) \
+ dc->debug.bw_val_profile.total_count++
+
+#define BW_VAL_TRACE_SKIP(status) \
+ if (dc->debug.bw_val_profile.enable) { \
+ if (!voltage_level_tick) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.skip_ ## status ## _count++; \
+ }
+
+#define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
+ if (dc->debug.bw_val_profile.enable) \
+ voltage_level_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_END_WATERMARKS() \
+ if (dc->debug.bw_val_profile.enable) \
+ watermark_tick = dm_get_timestamp(dc->ctx)
+
+#define BW_VAL_TRACE_FINISH() \
+ if (dc->debug.bw_val_profile.enable) { \
+ end_tick = dm_get_timestamp(dc->ctx); \
+ dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
+ dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
+ if (watermark_tick) { \
+ dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
+ dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
+ } \
+ }
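The BW_VAL_TRACE_* macros bracket a validate_bandwidth() implementation so dc->debug.bw_val_profile can split time between the voltage-level search and the watermark/rq-dlg phase, and the fast_validate path stops after the first part. A toy standalone model of that accounting; the tick values and the validate_bandwidth() body below are invented, not the DCN implementation:

#include <stdio.h>

struct bw_profile {
        unsigned long long total_ticks, voltage_level_ticks, watermark_ticks;
        unsigned long long total_count;
};

/* fake monotonic counter standing in for dm_get_timestamp() */
static unsigned long long now;

static int validate_bandwidth(struct bw_profile *p, int fast_validate)
{
        unsigned long long start = now, voltage_tick, end;

        p->total_count++;
        now += 30;                       /* pretend the voltage search took 30 ticks */
        voltage_tick = now;

        if (!fast_validate)
                now += 70;               /* pretend watermarks/rq-dlg took 70 ticks */

        end = now;
        p->total_ticks += end - start;
        p->voltage_level_ticks += voltage_tick - start;
        p->watermark_ticks += end - voltage_tick;
        return 1;
}

int main(void)
{
        struct bw_profile prof = { 0 };

        validate_bandwidth(&prof, 1);    /* fast_validate: stop once support is known */
        validate_bandwidth(&prof, 0);    /* full validation */
        printf("runs=%llu total=%llu voltage=%llu watermark=%llu\n",
               prof.total_count, prof.total_ticks,
               prof.voltage_level_ticks, prof.watermark_ticks);
        return 0;
}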
struct dc_debug_options {
enum visual_confirm visual_confirm;
@@ -257,6 +350,8 @@ struct dc_debug_options {
bool skip_detection_link_training;
unsigned int force_odm_combine; //bit vector based on otg inst
unsigned int force_fclk_khz;
+ bool disable_tri_buf;
+ struct dc_bw_validation_profile bw_val_profile;
};
struct dc_debug_data {
@@ -265,6 +360,14 @@ struct dc_debug_data {
uint32_t auxErrorCount;
};
+struct dc_bounding_box_overrides {
+ int sr_exit_time_ns;
+ int sr_enter_plus_exit_time_ns;
+ int urgent_latency_ns;
+ int percent_of_ideal_drambw;
+ int dram_clock_change_latency_ns;
+};
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -274,6 +377,7 @@ struct dc {
struct dc_cap_funcs cap_funcs;
struct dc_config config;
struct dc_debug_options debug;
+ struct dc_bounding_box_overrides bb_overrides;
struct dc_context *ctx;
uint8_t link_count;
@@ -298,8 +402,12 @@ struct dc {
struct hw_sequencer_funcs hwss;
struct dce_hwseq *hwseq;
+ /* Required to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ /* Required to maintain clocks and bandwidth for UEFI enabled HW */
+ bool optimize_seamless_boot;
+
/* FBC compressor */
struct compressor *fbc_compressor;
@@ -327,6 +435,7 @@ struct dc_init_data {
struct hw_asic_id asic_id;
void *driver; /* ctx */
struct cgs_device *cgs_device;
+ struct dc_bounding_box_overrides bb_overrides;
int num_virtual_links;
/*
@@ -597,7 +706,7 @@ struct dc_validation_set {
uint8_t plane_count;
};
-bool dc_validate_seamless_boot_timing(struct dc *dc,
+bool dc_validate_seamless_boot_timing(const struct dc *dc,
const struct dc_sink *sink,
struct dc_crtc_timing *crtc_timing);
@@ -605,9 +714,14 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
+/*
+ * fast_validate: we return after determining if we can support the new state,
+ * but before we populate the programming info
+ */
enum dc_status dc_validate_global_state(
struct dc *dc,
- struct dc_state *new_ctx);
+ struct dc_state *new_ctx,
+ bool fast_validate);
void dc_resource_state_construct(
@@ -636,7 +750,8 @@ void dc_resource_state_destruct(struct dc_state *context);
bool dc_commit_state(struct dc *dc, struct dc_state *context);
-struct dc_state *dc_create_state(void);
+struct dc_state *dc_create_state(struct dc *dc);
+struct dc_state *dc_copy_state(struct dc_state *src_ctx);
void dc_retain_state(struct dc_state *context);
void dc_release_state(struct dc_state *context);
@@ -648,9 +763,16 @@ struct dpcd_caps {
union dpcd_rev dpcd_rev;
union max_lane_count max_ln_count;
union max_down_spread max_down_spread;
+ union dprx_feature dprx_feature;
+
+ /* valid only for eDP v1.4 or higher*/
+ uint8_t edp_supported_link_rates_count;
+ enum dc_link_rate edp_supported_link_rates[8];
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
+ /* branch device or sink device */
+ bool is_branch_dev;
/* Dongle's downstream count. */
union sink_count sink_count;
/* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
@@ -666,11 +788,11 @@ struct dpcd_caps {
int8_t branch_dev_name[6];
int8_t branch_hw_revision;
int8_t branch_fw_revision[2];
- uint8_t link_rate_set;
bool allow_invalid_MSA_timing_param;
bool panel_mode_edp;
bool dpcd_display_control_capable;
+ bool ext_receiver_cap_field_present;
};
#include "dc_link.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
index 05c8c31d8b31..4ef97f65e55d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h
@@ -68,6 +68,8 @@ enum aux_transaction_reply {
AUX_TRANSACTION_REPLY_AUX_ACK = 0x00,
AUX_TRANSACTION_REPLY_AUX_NACK = 0x01,
AUX_TRANSACTION_REPLY_AUX_DEFER = 0x02,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK = 0x04,
+ AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER = 0x08,
AUX_TRANSACTION_REPLY_I2C_ACK = 0x00,
AUX_TRANSACTION_REPLY_I2C_NACK = 0x10,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index d4eab33c453b..11c68a399267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -94,6 +94,8 @@ struct dc_link_settings {
enum dc_lane_count lane_count;
enum dc_link_rate link_rate;
enum dc_link_spread link_spread;
+ bool use_link_rate_set;
+ uint8_t link_rate_set;
};
struct dc_lane_settings {
@@ -420,10 +422,24 @@ union edp_configuration_cap {
uint8_t raw;
};
+union dprx_feature {
+ struct {
+ uint8_t GTC_CAP:1; // bit 0: DP 1.3+
+ uint8_t SST_SPLIT_SDP_CAP:1; // bit 1: DP 1.4
+ uint8_t AV_SYNC_CAP:1; // bit 2: DP 1.3+
+ uint8_t VSC_SDP_COLORIMETRY_SUPPORTED:1; // bit 3: DP 1.3+
+ uint8_t VSC_EXT_VESA_SDP_SUPPORTED:1; // bit 4: DP 1.4
+ uint8_t VSC_EXT_VESA_SDP_CHAINING_SUPPORTED:1; // bit 5: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_SUPPORTED:1; // bit 6: DP 1.4
+ uint8_t VSC_EXT_CEA_SDP_CHAINING_SUPPORTED:1; // bit 7: DP 1.4
+ } bits;
+ uint8_t raw;
+};
+
union training_aux_rd_interval {
struct {
uint8_t TRAINIG_AUX_RD_INTERVAL:7;
- uint8_t EXT_RECIEVER_CAP_FIELD_PRESENT:1;
+ uint8_t EXT_RECEIVER_CAP_FIELD_PRESENT:1;
} bits;
uint8_t raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c
index 597d38393379..5e6c5eff49cf 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c
@@ -51,20 +51,16 @@ static inline void set_reg_field_value_masks(
field_value_mask->mask = field_value_mask->mask | mask;
}
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
- uint32_t addr, uint32_t reg_val, int n,
+static void set_reg_field_values(struct dc_reg_value_masks *field_value_mask,
+ uint32_t addr, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1,
- ...)
+ va_list ap)
{
- struct dc_reg_value_masks field_value_mask = {0};
uint32_t shift, mask, field_value;
int i = 1;
- va_list ap;
- va_start(ap, field_value1);
-
/* gather all bits value/mask getting updated in this register */
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value1, mask1, shift1);
while (i < n) {
@@ -72,10 +68,48 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
mask = va_arg(ap, uint32_t);
field_value = va_arg(ap, uint32_t);
- set_reg_field_value_masks(&field_value_mask,
+ set_reg_field_value_masks(field_value_mask,
field_value, mask, shift);
i++;
}
+}
+
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ uint32_t reg_val;
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
+ va_end(ap);
+
+ /* mmio write directly */
+ reg_val = dm_read_reg(ctx, addr);
+ reg_val = (reg_val & ~field_value_mask.mask) | field_value_mask.value;
+ dm_write_reg(ctx, addr, reg_val);
+ return reg_val;
+}
+
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
+ uint32_t addr, uint32_t reg_val, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1,
+ ...)
+{
+ struct dc_reg_value_masks field_value_mask = {0};
+ va_list ap;
+
+ va_start(ap, field_value1);
+
+ set_reg_field_values(&field_value_mask, addr, n, shift1, mask1,
+ field_value1, ap);
+
va_end(ap);
@@ -85,6 +119,24 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
return reg_val;
}
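generic_reg_update_ex() and the new generic_reg_set_ex() both collapse their (shift, mask, value) varargs into a single combined mask before one read-modify-write. A standalone sketch of that masking step; the register layout is made up:

#include <stdint.h>
#include <stdio.h>

struct field_value_mask {
        uint32_t value;
        uint32_t mask;
};

static void set_field(struct field_value_mask *fvm,
                      uint32_t value, uint32_t mask, uint8_t shift)
{
        fvm->value |= (value << shift) & mask;
        fvm->mask |= mask;
}

int main(void)
{
        /* Hypothetical register: bits [3:0] = DIV, bits [10:8] = SEL */
        uint32_t reg_val = 0xdeadbeef;
        struct field_value_mask fvm = { 0, 0 };

        set_field(&fvm, 0x5, 0x0000000f, 0);   /* DIV = 5 */
        set_field(&fvm, 0x2, 0x00000700, 8);   /* SEL = 2 */

        /* same combine step the helpers use before dm_write_reg() */
        reg_val = (reg_val & ~fvm.mask) | fvm.value;

        printf("new register value: 0x%08x\n", reg_val);
        return 0;
}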
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name)
+{
+ uint32_t value;
+#ifdef DM_CHECK_ADDR_0
+ if (address == 0) {
+ DC_ERR("invalid register read; address = 0\n");
+ return 0;
+ }
+#endif
+ value = cgs_read_register(ctx->cgs_device, address);
+ trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
+
+ return value;
+}
+
uint32_t generic_reg_get(const struct dc_context *ctx, uint32_t addr,
uint8_t shift, uint32_t mask, uint32_t *field_value)
{
@@ -235,7 +287,7 @@ uint32_t generic_reg_get(const struct dc_context *ctx,
}
*/
-uint32_t generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t shift, uint32_t mask, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line)
@@ -265,7 +317,7 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
DC_LOG_DC("REG_WAIT taking a while: %dms in %s line:%d\n",
delay_between_poll_us * i / 1000,
func_name, line);
- return reg_val;
+ return;
}
}
@@ -275,8 +327,6 @@ uint32_t generic_reg_wait(const struct dc_context *ctx,
if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
BREAK_TO_DEBUGGER();
-
- return reg_val;
}
void generic_write_indirect_reg(const struct dc_context *ctx,
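The dc_helper.c change factors the varargs-walking loop out of generic_reg_update_ex() into set_reg_field_values(), which takes a va_list, so that generic_reg_update_ex() (read-modify-write) and the new generic_reg_set_ex() (caller-supplied base value, no read) can share it. A minimal standalone sketch of the same forward-a-va_list pattern, under hypothetical names and simplified mask handling:

#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: consume (shift, mask, value) triples from a va_list. */
static void gather_fields(uint32_t *mask_out, uint32_t *value_out,
			  int n, uint8_t shift1, uint32_t mask1,
			  uint32_t value1, va_list ap)
{
	int i = 1;

	*mask_out |= mask1;
	*value_out |= (value1 << shift1) & mask1;

	while (i < n) {
		uint8_t shift = (uint8_t)va_arg(ap, int); /* promoted to int */
		uint32_t mask = va_arg(ap, uint32_t);
		uint32_t value = va_arg(ap, uint32_t);

		*mask_out |= mask;
		*value_out |= (value << shift) & mask;
		i++;
	}
}

/* Read-modify-write flavour: merges the gathered fields into an old value. */
static uint32_t update_ex(uint32_t old, int n, uint8_t shift1,
			  uint32_t mask1, uint32_t value1, ...)
{
	uint32_t mask = 0, value = 0;
	va_list ap;

	va_start(ap, value1);
	gather_fields(&mask, &value, n, shift1, mask1, value1, ap);
	va_end(ap);

	return (old & ~mask) | value;
}

int main(void)
{
	/* Set bits [7:4] of 0xA50F to 0x3: expect 0xA53F. */
	printf("0x%04X\n", (unsigned)update_ex(0xA50F, 1, 4, 0x00F0, 0x3));
	return 0;
}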
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 8fc223defed4..7b9429e30d82 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -120,6 +120,7 @@ struct dc_link {
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
+ bool dp_skip_DID2;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -246,10 +247,18 @@ void dc_link_set_test_pattern(struct dc_link *link,
const struct link_training_settings *p_link_settings,
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
+uint32_t dc_link_bandwidth_kbps(
+ const struct dc_link *link,
+ const struct dc_link_settings *link_setting);
+
+const struct dc_link_settings *dc_link_get_link_cap(
+ const struct dc_link *link);
bool dc_submit_i2c(
struct dc *dc,
uint32_t link_index,
struct i2c_command *cmd);
+uint32_t dc_bandwidth_in_kbps_from_timing(
+ const struct dc_crtc_timing *timing);
#endif /* DC_LINK_H_ */
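dc_link_bandwidth_kbps() and dc_bandwidth_in_kbps_from_timing() give mode validation a common currency (kbps) for "what the link can carry" versus "what the timing needs". A rough standalone illustration of the comparison these helpers enable; the 80% factor models 8b/10b channel coding and the numbers are approximate, not the driver's exact math (no FEC, DSC, or overhead handling):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 4-lane HBR2: 5.4 Gbps per lane raw, ~80% usable after 8b/10b. */
	uint64_t link_kbps = 4ULL * 5400000 * 8 / 10;

	/* 3840x2160@60 (CTA-861): 594 MHz pixel clock at 24 bpp. */
	uint64_t stream_kbps = 594000ULL * 24;

	printf("link %llu kbps, stream %llu kbps -> %s\n",
	       (unsigned long long)link_kbps,
	       (unsigned long long)stream_kbps,
	       stream_kbps <= link_kbps ? "fits" : "does not fit");
	return 0;
}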
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 5657cb3a2ad3..189bdab929a5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -80,6 +80,7 @@ struct dc_stream_state {
struct dc_info_packet vrr_infopacket;
struct dc_info_packet vsc_infopacket;
struct dc_info_packet vsp_infopacket;
+ struct dc_info_packet dpsdp_infopacket;
struct rect src; /* composition area */
struct rect dst; /* stream addressable area */
@@ -221,6 +222,13 @@ struct dc_stream_state *dc_get_stream_at_index(struct dc *dc, uint8_t i);
*/
uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream);
+/*
+ * Send dp sdp message.
+ */
+bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
+ const uint8_t *custom_sdp_message,
+ unsigned int sdp_message_size);
+
/* TODO: Return parsed values rather than direct register read
* This has a dependency on the caller (amdgpu_display_get_crtc_scanoutpos)
* being refactored properly to be dce-specific
@@ -299,6 +307,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
*/
struct dc_stream_state *dc_create_stream_for_sink(struct dc_sink *dc_sink);
+struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream);
+
void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index da2009a108cf..6c2a3d9a4c2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -103,7 +103,7 @@ struct dc_context {
};
-#define DC_MAX_EDID_BUFFER_SIZE 512
+#define DC_MAX_EDID_BUFFER_SIZE 1024
#define EDID_BLOCK_SIZE 128
#define MAX_SURFACE_NUM 4
#define NUM_PIXEL_FORMATS 10
@@ -395,7 +395,7 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr422_converter;
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
- uint32_t dp_hdmi_max_pixel_clk;
+ uint32_t dp_hdmi_max_pixel_clk_in_khz;
};
/* Scaling format */
enum scaling_transformation {
@@ -550,9 +550,9 @@ struct psr_config {
unsigned char psr_version;
unsigned int psr_rfb_setup_time;
bool psr_exit_link_training_required;
-
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ bool allow_smu_optimizations;
};
union dmcu_psr_level {
@@ -654,6 +654,7 @@ struct psr_context {
 * continuing power down
*/
unsigned int frame_delay;
+ bool allow_smu_optimizations;
};
struct colorspace_transform {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 4fe3664fb495..bd33c47183fc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -171,24 +171,24 @@ static void submit_channel_request(
(request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
if (REG(AUXN_IMPCAL)) {
/* clear_aux_error */
- REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
+ AUXN_CALOUT_ERROR_AK, 1,
+ AUXN_CALOUT_ERROR_AK, 0);
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_CALOUT_ERROR_AK, 1,
+ AUXP_CALOUT_ERROR_AK, 0);
/* force_default_calibrate */
- REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ REG_UPDATE_SEQ_2(AUXN_IMPCAL,
AUXN_IMPCAL_ENABLE, 1,
AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
/* bug? why AUXN update EN and OVERRIDE_EN 1 by 1 while AUX P toggles OVERRIDE? */
- REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
- 1,
- 0);
+ REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 1,
+ AUXP_IMPCAL_OVERRIDE_ENABLE, 0);
}
REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
@@ -270,7 +270,7 @@ static int read_channel_reply(struct dce_aux *engine, uint32_t size,
if (!bytes_replied)
return -1;
- REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ REG_UPDATE_SEQ_3(AUX_SW_DATA,
AUX_SW_INDEX, 0,
AUX_SW_AUTOINCREMENT_DISABLE, 1,
AUX_SW_DATA_RW, 1);
@@ -320,9 +320,10 @@ static enum aux_channel_operation_result get_channel_status(
*returned_bytes = 0;
/* poll to make sure that SW_DONE is asserted */
- value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
10, aux110->timeout_period/10);
+ value = REG_READ(AUX_SW_STATUS);
/* in case HPD is LOW, exit AUX transaction */
if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
@@ -377,7 +378,6 @@ static bool acquire(
struct dce_aux *engine,
struct ddc *ddc)
{
-
enum gpio_result result;
if (!is_engine_available(engine))
@@ -442,12 +442,12 @@ static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payl
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload)
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result)
{
struct ddc *ddc_pin = ddc->ddc_pin;
struct dce_aux *aux_engine;
- enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
uint8_t returned_bytes = 0;
@@ -458,7 +458,8 @@ int dce_aux_transfer(struct ddc_service *ddc,
memset(&aux_rep, 0, sizeof(aux_rep));
aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- acquire(aux_engine, ddc_pin);
+ if (!acquire(aux_engine, ddc_pin))
+ return -1;
if (payload->i2c_over_aux)
aux_req.type = AUX_TRANSACTION_TYPE_I2C;
@@ -473,28 +474,26 @@ int dce_aux_transfer(struct ddc_service *ddc,
aux_req.data = payload->data;
submit_channel_request(aux_engine, &aux_req);
- operation_result = get_channel_status(aux_engine, &returned_bytes);
-
- switch (operation_result) {
- case AUX_CHANNEL_OPERATION_SUCCEEDED:
- res = read_channel_reply(aux_engine, payload->length,
- payload->data, payload->reply,
- &status);
- break;
- case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
- res = 0;
- break;
- case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
- case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
- case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ *operation_result = get_channel_status(aux_engine, &returned_bytes);
+
+ if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
+ read_channel_reply(aux_engine, payload->length,
+ payload->data, payload->reply,
+ &status);
+ res = returned_bytes;
+ } else {
res = -1;
- break;
}
+
release_engine(aux_engine);
return res;
}
-#define AUX_RETRY_MAX 7
+#define AUX_MAX_RETRIES 7
+#define AUX_MAX_DEFER_RETRIES 7
+#define AUX_MAX_I2C_DEFER_RETRIES 7
+#define AUX_MAX_INVALID_REPLY_RETRIES 2
+#define AUX_MAX_TIMEOUT_RETRIES 3
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload)
@@ -502,24 +501,85 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
int i, ret = 0;
uint8_t reply;
bool payload_reply = true;
+ enum aux_channel_operation_result operation_result;
+ int aux_ack_retries = 0,
+ aux_defer_retries = 0,
+ aux_i2c_defer_retries = 0,
+ aux_timeout_retries = 0,
+ aux_invalid_reply_retries = 0;
if (!payload->reply) {
payload_reply = false;
payload->reply = &reply;
}
- for (i = 0; i < AUX_RETRY_MAX; i++) {
- ret = dce_aux_transfer(ddc, payload);
-
- if (ret >= 0) {
- if (*payload->reply == 0) {
- if (!payload_reply)
- payload->reply = NULL;
- return true;
+ for (i = 0; i < AUX_MAX_RETRIES; i++) {
+ ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ aux_timeout_retries = 0;
+ aux_invalid_reply_retries = 0;
+
+ switch (*payload->reply) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ if (!payload->write && payload->length != ret) {
+ if (++aux_ack_retries >= AUX_MAX_RETRIES)
+ goto fail;
+ else
+ udelay(300);
+ } else
+ return true;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+ if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ aux_defer_retries = 0;
+ if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+ goto fail;
+ break;
+
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ default:
+ goto fail;
}
- }
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+ goto fail;
+ else
+ udelay(400);
+ break;
+
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+ goto fail;
+ else {
+ /*
+ * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+ * According to the DP spec there should be 3 retries total
+			 * with a 400us wait in between each. Hardware already waits
+			 * for 550us, so no wait is required here.
+ */
+ }
+ break;
- udelay(1000);
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+ default:
+ goto fail;
+ }
}
+
+fail:
+ if (!payload_reply)
+ payload->reply = NULL;
return false;
}
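The reworked dce_aux_transfer_with_retries() keeps a separate counter per failure class (short read on ACK, defer, I2C defer, invalid reply, timeout), resets the timeout and invalid-reply counters whenever the channel transaction itself succeeds, and gives up as soon as any one class exceeds its cap. A compact standalone sketch of that per-class counting (the reset-on-success detail is omitted and the outcome sequence is made up for illustration):

#include <stdio.h>

enum outcome { OK, DEFER, INVALID_REPLY, TIMEOUT };

#define MAX_TRIES          7
#define MAX_DEFERS         7
#define MAX_INVALID_REPLY  2
#define MAX_TIMEOUTS       3

int main(void)
{
	enum outcome seq[] = { TIMEOUT, DEFER, INVALID_REPLY, DEFER, OK };
	int defers = 0, invalid = 0, timeouts = 0;
	int i;

	for (i = 0; i < MAX_TRIES && i < (int)(sizeof(seq) / sizeof(seq[0])); i++) {
		switch (seq[i]) {
		case OK:
			printf("succeeded on attempt %d\n", i + 1);
			return 0;
		case DEFER:
			if (++defers >= MAX_DEFERS)
				goto fail;
			break;
		case INVALID_REPLY:
			if (++invalid >= MAX_INVALID_REPLY)
				goto fail;
			break;
		case TIMEOUT:
			if (++timeouts >= MAX_TIMEOUTS)
				goto fail;
			break;
		}
	}
fail:
	printf("gave up: defers=%d invalid=%d timeouts=%d\n",
	       defers, invalid, timeouts);
	return 1;
}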
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
index e28ed6a00ff4..ce6a26d189b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -123,8 +123,9 @@ bool dce110_aux_engine_acquire(
struct dce_aux *aux_engine,
struct ddc *ddc);
-int dce_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *cmd);
+int dce_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *cmd,
+ enum aux_channel_operation_result *operation_result);
bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *cmd);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
index 6e142c2db986..963686380738 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c
@@ -222,7 +222,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
* all required clocks
*/
for (i = clk_mgr_dce->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
- if (context->bw.dce.dispclk_khz >
+ if (context->bw_ctx.bw.dce.dispclk_khz >
clk_mgr_dce->max_clks_by_state[i].display_clk_khz
|| max_pix_clk >
clk_mgr_dce->max_clks_by_state[i].pixel_clk_khz)
@@ -232,7 +232,7 @@ static enum dm_pp_clocks_state dce_get_required_clocks_state(
if (low_req_clk > clk_mgr_dce->max_clks_state) {
/* set max clock state for high phyclock, invalid on exceeding display clock */
if (clk_mgr_dce->max_clks_by_state[clk_mgr_dce->max_clks_state].display_clk_khz
- < context->bw.dce.dispclk_khz)
+ < context->bw_ctx.bw.dce.dispclk_khz)
low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
else
low_req_clk = clk_mgr_dce->max_clks_state;
@@ -610,22 +610,22 @@ static void dce11_pplib_apply_display_requirements(
struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
pp_display_cfg->all_displays_in_sync =
- context->bw.dce.all_displays_in_sync;
+ context->bw_ctx.bw.dce.all_displays_in_sync;
pp_display_cfg->nb_pstate_switch_disable =
- context->bw.dce.nbp_state_change_enable == false;
+ context->bw_ctx.bw.dce.nbp_state_change_enable == false;
pp_display_cfg->cpu_cc6_disable =
- context->bw.dce.cpuc_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpuc_state_change_enable == false;
pp_display_cfg->cpu_pstate_disable =
- context->bw.dce.cpup_state_change_enable == false;
+ context->bw_ctx.bw.dce.cpup_state_change_enable == false;
pp_display_cfg->cpu_pstate_separation_time =
- context->bw.dce.blackout_recovery_time_us;
+ context->bw_ctx.bw.dce.blackout_recovery_time_us;
- pp_display_cfg->min_memory_clock_khz = context->bw.dce.yclk_khz
+ pp_display_cfg->min_memory_clock_khz = context->bw_ctx.bw.dce.yclk_khz
/ MEMORY_TYPE_MULTIPLIER_CZ;
pp_display_cfg->min_engine_clock_khz = determine_sclk_from_bounding_box(
dc,
- context->bw.dce.sclk_khz);
+ context->bw_ctx.bw.dce.sclk_khz);
/*
* As workaround for >4x4K lightup set dcfclock to min_engine_clock value.
@@ -638,7 +638,7 @@ static void dce11_pplib_apply_display_requirements(
pp_display_cfg->min_engine_clock_khz : 0;
pp_display_cfg->min_engine_clock_deep_sleep_khz
- = context->bw.dce.sclk_deep_sleep_khz;
+ = context->bw_ctx.bw.dce.sclk_deep_sleep_khz;
pp_display_cfg->avail_mclk_switch_time_us =
dce110_get_min_vblank_time_us(context);
@@ -669,7 +669,7 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -696,7 +696,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -711,7 +711,7 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr,
}
if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) {
- context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
+ context->bw_ctx.bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk);
clk_mgr->clks.dispclk_khz = patched_disp_clk;
}
dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context);
@@ -723,7 +723,7 @@ static void dce112_update_clocks(struct clk_mgr *clk_mgr,
{
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_power_level_change_request level_change_req;
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
@@ -751,7 +751,7 @@ static void dce12_update_clocks(struct clk_mgr *clk_mgr,
struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr);
struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
int max_pix_clk = get_max_pixel_clock_for_all_paths(context);
- int patched_disp_clk = context->bw.dce.dispclk_khz;
+ int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz;
/*TODO: W/A for dal3 linux, investigate why this works */
if (!clk_mgr_dce->dfs_bypass_active)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 71d5777de961..f70437aae8e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -978,7 +978,7 @@ static bool dce110_clock_source_power_down(
}
static bool get_pixel_clk_frequency_100hz(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index c2926cf19dee..818536eea00a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -51,6 +51,9 @@
#define PSR_SET_WAITLOOP 0x31
#define MCP_INIT_DMCU 0x88
#define MCP_INIT_IRAM 0x89
+#define MCP_SYNC_PHY_LOCK 0x90
+#define MCP_SYNC_PHY_UNLOCK 0x91
+#define MCP_BL_SET_PWM_FRAC 0x6A /* Enable or disable Fractional PWM */
#define MASTER_COMM_CNTL_REG__MASTER_COMM_INTERRUPT_MASK 0x00000001L
static bool dce_dmcu_init(struct dmcu *dmcu)
@@ -213,9 +216,6 @@ static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
- REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
-
/* waitDMCUReadyForCmd */
REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0,
dmcu_wait_reg_ready_interval,
@@ -342,9 +342,32 @@ static void dcn10_get_dmcu_version(struct dmcu *dmcu)
IRAM_RD_ADDR_AUTO_INC, 0);
}
+static void dcn10_dmcu_enable_fractional_pwm(struct dmcu *dmcu,
+ uint32_t fractional_pwm)
+{
+ struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+
+ /* Wait until microcontroller is ready to process interrupt */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+
+ /* Set PWM fractional enable/disable */
+ REG_WRITE(MASTER_COMM_DATA_REG1, fractional_pwm);
+
+	/* Set command telling the microcontroller to enable or disable fractional PWM */
+ REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
+ MCP_BL_SET_PWM_FRAC);
+
+ /* Notify microcontroller of new command */
+ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* Ensure command has been executed before continuing */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 100, 800);
+}
+
static bool dcn10_dmcu_init(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
+ const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
/* Definition of DC_DMCU_SCRATCH
@@ -382,9 +405,14 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
if (dmcu->dmcu_state == DMCU_RUNNING) {
/* Retrieve and cache the DMCU firmware version. */
dcn10_get_dmcu_version(dmcu);
+
+ /* Initialize DMCU to use fractional PWM or not */
+ dcn10_dmcu_enable_fractional_pwm(dmcu,
+ (config->disable_fractional_pwm == false) ? 1 : 0);
status = true;
- } else
+ } else {
status = false;
+ }
break;
case DMCU_RUNNING:
@@ -594,7 +622,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
link->link_enc->funcs->psr_program_secondary_packet(link->link_enc,
psr_context->sdpTransmitLineNumDeadline);
- if (psr_context->psr_level.bits.SKIP_SMU_NOTIFICATION)
+ if (psr_context->allow_smu_optimizations)
REG_UPDATE(SMU_INTERRUPT_CONTROL, DC_SMU_INT_ENABLE, 1);
/* waitDMCUReadyForCmd */
@@ -615,6 +643,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
psr_context->psrFrameCaptureIndicationReq;
masterCmdData1.bits.aux_chan = psr_context->channel;
masterCmdData1.bits.aux_repeat = psr_context->aux_repeats;
+ masterCmdData1.bits.allow_smu_optimizations = psr_context->allow_smu_optimizations;
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG1),
masterCmdData1.u32All);
@@ -635,6 +664,7 @@ static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
dm_write_reg(dmcu->ctx, REG(MASTER_COMM_DATA_REG3),
masterCmdData3.u32All);
+
/* setDMCUParam_Cmd */
REG_UPDATE(MASTER_COMM_CMD_REG,
MASTER_COMM_CMD_REG_BYTE0, PSR_SET);
@@ -691,7 +721,7 @@ static bool dcn10_is_dmcu_initialized(struct dmcu *dmcu)
return true;
}
-#endif
+#endif //(CONFIG_DRM_AMD_DC_DCN1_0)
static const struct dmcu_funcs dce_funcs = {
.dmcu_init = dce_dmcu_init,
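The new dcn10_dmcu_enable_fractional_pwm() follows the standard DMCU mailbox handshake: wait for MASTER_COMM_INTERRUPT to clear, load the data register, write the command byte, raise the interrupt, then wait for it to clear again before continuing. A standalone sketch of that handshake with stubbed register accessors; the names and the polling loop are illustrative, not the driver's REG_* macros:

#include <stdint.h>
#include <stdio.h>

/* Stubbed "hardware": the fake controller clears BUSY as soon as we poll it. */
static uint32_t data_reg, cmd_reg, busy;

static void reg_write_data(uint32_t v)  { data_reg = v; }
static void reg_write_cmd(uint32_t v)   { cmd_reg = v; busy = 1; }
static void wait_not_busy(void)
{
	int tries;

	for (tries = 0; busy && tries < 800; tries++)
		busy = 0;	/* real code would udelay() and re-read */
}

/* Mailbox handshake: ready check, data, command, notify, completion check. */
static void send_mailbox_cmd(uint32_t cmd, uint32_t payload)
{
	wait_not_busy();	 /* microcontroller ready for a command */
	reg_write_data(payload); /* MASTER_COMM_DATA_REG1 equivalent */
	reg_write_cmd(cmd);	 /* command byte, then interrupt to the DMCU */
	wait_not_busy();	 /* ensure it was consumed before moving on */
	printf("sent cmd 0x%02X with payload %u\n", cmd, payload);
}

int main(void)
{
	send_mailbox_cmd(0x6A /* MCP_BL_SET_PWM_FRAC */, 1);
	return 0;
}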
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
index c24c0e5ea44e..60ce56f60ae3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.h
@@ -199,16 +199,16 @@ struct dce_dmcu {
******************************************************************/
union dce_dmcu_psr_config_data_reg1 {
struct {
- unsigned int timehyst_frames:8; /*[7:0]*/
- unsigned int hyst_lines:7; /*[14:8]*/
- unsigned int rfb_update_auto_en:1; /*[15:15]*/
- unsigned int dp_port_num:3; /*[18:16]*/
- unsigned int dcp_sel:3; /*[21:19]*/
- unsigned int phy_type:1; /*[22:22]*/
- unsigned int frame_cap_ind:1; /*[23:23]*/
- unsigned int aux_chan:3; /*[26:24]*/
- unsigned int aux_repeat:4; /*[30:27]*/
- unsigned int reserved:1; /*[31:31]*/
+ unsigned int timehyst_frames:8; /*[7:0]*/
+ unsigned int hyst_lines:7; /*[14:8]*/
+ unsigned int rfb_update_auto_en:1; /*[15:15]*/
+ unsigned int dp_port_num:3; /*[18:16]*/
+ unsigned int dcp_sel:3; /*[21:19]*/
+ unsigned int phy_type:1; /*[22:22]*/
+ unsigned int frame_cap_ind:1; /*[23:23]*/
+ unsigned int aux_chan:3; /*[26:24]*/
+ unsigned int aux_repeat:4; /*[30:27]*/
+ unsigned int allow_smu_optimizations:1; /*[31:31]*/
} bits;
unsigned int u32All;
};
@@ -236,7 +236,7 @@ union dce_dmcu_psr_config_data_reg3 {
struct {
unsigned int psr_level:16; /*[15:0]*/
unsigned int link_rate:4; /*[19:16]*/
- unsigned int reserved:12; /*[31:20]*/
+ unsigned int reserved:12; /*[31:20]*/
} bits;
unsigned int u32All;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 40f2d6e0b122..cd26161bcc4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -346,6 +346,16 @@ static void release_engine(
}
+static bool is_engine_available(struct dce_i2c_hw *dce_i2c_hw)
+{
+ unsigned int arbitrate;
+
+ REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate);
+ if (arbitrate == DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY)
+ return false;
+ return true;
+}
+
struct dce_i2c_hw *acquire_i2c_hw_engine(
struct resource_pool *pool,
struct ddc *ddc)
@@ -368,7 +378,7 @@ struct dce_i2c_hw *acquire_i2c_hw_engine(
if (!dce_i2c_hw)
return NULL;
- if (pool->i2c_hw_buffer_in_use)
+ if (pool->i2c_hw_buffer_in_use || !is_engine_available(dce_i2c_hw))
return NULL;
do {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
index 7f19bb439665..575500755b2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
@@ -29,7 +29,8 @@
enum dc_i2c_status {
DC_I2C_STATUS__DC_I2C_STATUS_IDLE,
DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW,
- DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW
+ DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW,
+ DC_I2C_REG_RW_CNTL_STATUS_DMCU_ONLY = 2,
};
enum dc_i2c_arbitration {
@@ -129,7 +130,8 @@ enum {
I2C_SF(DC_I2C_DATA, DC_I2C_DATA, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX, mask_sh),\
I2C_SF(DC_I2C_DATA, DC_I2C_INDEX_WRITE, mask_sh),\
- I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh)
+ I2C_SF(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, mask_sh),\
+ I2C_SF(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, mask_sh)
#define I2C_COMMON_MASK_SH_LIST_DCE110(mask_sh)\
I2C_COMMON_MASK_SH_LIST_DCE_COMMON_BASE(mask_sh),\
@@ -170,6 +172,7 @@ struct dce_i2c_shift {
uint8_t DC_I2C_INDEX;
uint8_t DC_I2C_INDEX_WRITE;
uint8_t XTAL_REF_DIV;
+ uint8_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_mask {
@@ -207,6 +210,7 @@ struct dce_i2c_mask {
uint32_t DC_I2C_INDEX;
uint32_t DC_I2C_INDEX_WRITE;
uint32_t XTAL_REF_DIV;
+ uint32_t DC_I2C_REG_RW_CNTL_STATUS;
};
struct dce_i2c_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 1fa2d4fd7a35..14309fe6f2e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -272,7 +272,8 @@ static void dce110_update_hdmi_info_packet(
static void dce110_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
uint32_t h_active_start;
@@ -977,7 +978,7 @@ static void dce110_stream_encoder_dp_unblank(
uint64_t m_vid_l = n_vid;
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
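dce110_stream_encoder_dp_unblank() now derives the stream clock from param->timing.pix_clk_100hz instead of a separately passed pixel_clk_khz; the M/N programming itself is unchanged and follows the DisplayPort relation Mvid/Nvid = stream clock / link symbol clock, with the link symbol clock expressed as link_rate times a 27 MHz reference (LINK_RATE_REF_FREQ_IN_KHZ). A small arithmetic sketch with illustrative values (the fixed Nvid of 0x8000 is typical for this encoder but assumed here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: 148.5 MHz pixel clock on HBR (2.7 Gbps/lane). */
	uint64_t n_vid = 0x8000;		/* fixed Nvid, assumed value */
	uint32_t pix_clk_100hz = 1485000;	/* 148.5 MHz in 100 Hz units */
	uint32_t link_rate = 0x0A;		/* HBR, in 0.27 Gbps units */
	uint32_t link_ref_khz = 27000;		/* LINK_RATE_REF_FREQ_IN_KHZ */

	/* Mvid/Nvid = stream clock / link symbol clock */
	uint64_t m_vid = n_vid * (pix_clk_100hz / 10);
	m_vid /= (uint64_t)link_rate * link_ref_khz;

	printf("Mvid = %llu (Nvid = %llu)\n",
	       (unsigned long long)m_vid, (unsigned long long)n_vid);
	return 0;
}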
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 23044e6723e8..e938bf9986d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -378,6 +378,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -756,7 +778,8 @@ static enum dc_status build_mapped_resource(
bool dce100_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -768,11 +791,11 @@ bool dce100_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1023,6 +1046,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
goto res_create_fail;
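The per-ASIC plane_cap tables advertise scaling limits as fixed-point factors. Reading them as thousandths of the scaling ratio appears consistent with the numbers above (16000 for up to 16x ARGB upscale, 250 for scaling down to a quarter of the source size), but the unit is an assumption here and should be checked against the dc_plane_cap definition. A trivial sketch of that conversion under the assumed convention:

#include <stdio.h>

int main(void)
{
	/* Assuming the factors are in units of 1/1000 of the scaling ratio. */
	int max_upscale_factor = 16000;
	int max_downscale_factor = 250;

	printf("upscale up to %.1fx, downscale down to %.2fx of source size\n",
	       max_upscale_factor / 1000.0, max_downscale_factor / 1000.0);
	return 0;
}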
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5e4db3712eef..7ac50ab1b762 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -616,7 +616,7 @@ dce110_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
{
- bool is_hdmi;
+ bool is_hdmi_tmds;
bool is_dp;
ASSERT(pipe_ctx->stream);
@@ -624,13 +624,13 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.stream_enc == NULL)
return; /* this is not root pipe */
- is_hdmi = dc_is_hdmi_signal(pipe_ctx->stream->signal);
+ is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi && !is_dp)
+ if (!is_hdmi_tmds && !is_dp)
return;
- if (is_hdmi)
+ if (is_hdmi_tmds)
pipe_ctx->stream_res.stream_enc->funcs->update_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream_res.encoder_info_frame);
@@ -935,13 +935,31 @@ void hwss_edp_backlight_control(
edp_receiver_ready_T9(link);
}
+// Static helper function which calls the correct function
+// based on pp_smu version
+static void set_pme_wa_enable_by_version(struct dc *dc)
+{
+ struct pp_smu_funcs *pp_smu = NULL;
+
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
+
+ if (pp_smu) {
+ if (pp_smu->ctx.ver == PP_SMU_VER_RV && pp_smu->rv_funcs.set_pme_wa_enable)
+ pp_smu->rv_funcs.set_pme_wa_enable(&(pp_smu->ctx));
+ }
+}
+
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
{
- struct dc *core_dc = pipe_ctx->stream->ctx->dc;
/* notify audio driver for audio modes of monitor */
- struct pp_smu_funcs_rv *pp_smu = core_dc->res_pool->pp_smu;
+ struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
unsigned int i, num_audio = 1;
+ if (core_dc->res_pool->pp_smu)
+ pp_smu = core_dc->res_pool->pp_smu;
+
if (pipe_ctx->stream_res.audio) {
for (i = 0; i < MAX_PIPES; i++) {
/*current_state not updated yet*/
@@ -951,30 +969,31 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(core_dc);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, false);
+ pipe_ctx->stream_res.stream_enc, false);
}
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pp_smu_funcs *pp_smu = NULL;
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ if (dc->res_pool->pp_smu)
+ pp_smu = dc->res_pool->pp_smu;
if (option != KEEP_ACQUIRED_RESOURCE ||
- !dc->debug.az_endpoint_mute_only) {
+ !dc->debug.az_endpoint_mute_only)
			/*only disable az_endpoint if power down or free*/
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
- }
if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
@@ -989,9 +1008,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
- if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (pp_smu != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
- pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
+ set_pme_wa_enable_by_version(dc);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
@@ -1007,7 +1026,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx, int option)
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
pipe_ctx->stream_res.stream_enc);
@@ -1032,7 +1051,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link *link = stream->link;
/* only 3 items below are used by unblank */
- params.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ params.timing = pipe_ctx->stream->timing;
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
@@ -1147,8 +1166,8 @@ static void build_audio_output(
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
audio_output->pll_info.dp_dto_source_clock_in_khz =
- state->dccg->funcs->get_dp_ref_clk_frequency(
- state->dccg);
+ state->clk_mgr->funcs->get_dp_ref_clk_frequency(
+ state->clk_mgr);
}
audio_output->pll_info.feed_back_divider =
@@ -1349,7 +1368,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers);
- if (pipe_ctx->stream->signal != SIGNAL_TYPE_VIRTUAL)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1358,7 +1377,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
stream->timing.display_color_depth,
- pipe_ctx->stream->signal);
+ stream->signal);
pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
pipe_ctx->stream_res.opp,
@@ -1532,6 +1551,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
}
}
+ if (dc->hwss.init_pipes)
+ dc->hwss.init_pipes(dc, context);
+
if (edp_link) {
/* this seems to cause blank screens on DCE8 */
if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
@@ -1608,18 +1630,18 @@ static void dce110_set_displaymarks(
dc->bw_vbios->blackout_duration, pipe_ctx->stream);
pipe_ctx->plane_res.mi->funcs->mem_input_program_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.stutter_entry_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_entry_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
if (i == underlay_idx) {
num_pipes++;
pipe_ctx->plane_res.mi->funcs->mem_input_program_chroma_display_marks(
pipe_ctx->plane_res.mi,
- context->bw.dce.nbp_state_change_wm_ns[num_pipes],
- context->bw.dce.stutter_exit_wm_ns[num_pipes],
- context->bw.dce.urgent_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[num_pipes],
+ context->bw_ctx.bw.dce.urgent_wm_ns[num_pipes],
total_dest_line_time_ns);
}
num_pipes++;
@@ -2612,7 +2634,7 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.xtalin_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 7549adaa1542..dcd04e9ea76b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -392,6 +392,55 @@ static const struct resource_caps stoney_resource_cap = {
.num_ddc = 3,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+ .blends_with_below = true,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
+static const struct dc_plane_cap underlay_plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_UNDERLAY,
+ .blends_with_above = true,
+ .per_pixel_alpha = 1,
+
+ .pixel_format_support = {
+ .argb8888 = false,
+ .nv12 = true,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 1,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -854,7 +903,8 @@ static enum dc_status build_mapped_resource(
static bool dce110_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -868,7 +918,7 @@ static bool dce110_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -878,8 +928,8 @@ static bool dce110_validate_bandwidth(
context->streams[0]->timing.v_addressable,
context->streams[0]->timing.pix_clk_100hz / 10);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -893,34 +943,34 @@ static bool dce110_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -1371,6 +1421,11 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < pool->base.underlay_pipe_index; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->caps.planes[pool->base.underlay_pipe_index] = underlay_plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index ea3065d63372..a480b15f6885 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -397,6 +397,28 @@ static const struct resource_caps polaris_11_resource_cap = {
.num_ddc = 5,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
#define CTX ctx
#define REG(reg) mm ## reg
@@ -804,7 +826,8 @@ static enum dc_status build_mapped_resource(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
bool result = false;
@@ -818,7 +841,7 @@ bool dce112_validate_bandwidth(
dc->bw_vbios,
context->res_ctx.pipe_ctx,
dc->res_pool->pipe_count,
- &context->bw.dce))
+ &context->bw_ctx.bw.dce))
result = true;
if (!result)
@@ -826,8 +849,8 @@ bool dce112_validate_bandwidth(
"%s: Bandwidth validation failed!",
__func__);
- if (memcmp(&dc->current_state->bw.dce,
- &context->bw.dce, sizeof(context->bw.dce))) {
+ if (memcmp(&dc->current_state->bw_ctx.bw.dce,
+ &context->bw_ctx.bw.dce, sizeof(context->bw_ctx.bw.dce))) {
DC_LOG_BANDWIDTH_CALCS(
"%s: finish,\n"
@@ -841,34 +864,34 @@ bool dce112_validate_bandwidth(
"sclk: %d sclk_sleep: %d yclk: %d blackout_recovery_time_us: %d\n"
,
__func__,
- context->bw.dce.nbp_state_change_wm_ns[0].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[0].a_mark,
- context->bw.dce.urgent_wm_ns[0].b_mark,
- context->bw.dce.urgent_wm_ns[0].a_mark,
- context->bw.dce.stutter_exit_wm_ns[0].b_mark,
- context->bw.dce.stutter_exit_wm_ns[0].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[1].a_mark,
- context->bw.dce.urgent_wm_ns[1].b_mark,
- context->bw.dce.urgent_wm_ns[1].a_mark,
- context->bw.dce.stutter_exit_wm_ns[1].b_mark,
- context->bw.dce.stutter_exit_wm_ns[1].a_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].b_mark,
- context->bw.dce.nbp_state_change_wm_ns[2].a_mark,
- context->bw.dce.urgent_wm_ns[2].b_mark,
- context->bw.dce.urgent_wm_ns[2].a_mark,
- context->bw.dce.stutter_exit_wm_ns[2].b_mark,
- context->bw.dce.stutter_exit_wm_ns[2].a_mark,
- context->bw.dce.stutter_mode_enable,
- context->bw.dce.cpuc_state_change_enable,
- context->bw.dce.cpup_state_change_enable,
- context->bw.dce.nbp_state_change_enable,
- context->bw.dce.all_displays_in_sync,
- context->bw.dce.dispclk_khz,
- context->bw.dce.sclk_khz,
- context->bw.dce.sclk_deep_sleep_khz,
- context->bw.dce.yclk_khz,
- context->bw.dce.blackout_recovery_time_us);
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[0].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[1].a_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.nbp_state_change_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.urgent_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].b_mark,
+ context->bw_ctx.bw.dce.stutter_exit_wm_ns[2].a_mark,
+ context->bw_ctx.bw.dce.stutter_mode_enable,
+ context->bw_ctx.bw.dce.cpuc_state_change_enable,
+ context->bw_ctx.bw.dce.cpup_state_change_enable,
+ context->bw_ctx.bw.dce.nbp_state_change_enable,
+ context->bw_ctx.bw.dce.all_displays_in_sync,
+ context->bw_ctx.bw.dce.dispclk_khz,
+ context->bw_ctx.bw.dce.sclk_khz,
+ context->bw_ctx.bw.dce.sclk_deep_sleep_khz,
+ context->bw_ctx.bw.dce.yclk_khz,
+ context->bw_ctx.bw.dce.blackout_recovery_time_us);
}
return result;
}
@@ -887,7 +910,7 @@ enum dc_status resource_map_phy_clock_resources(
return DC_ERROR_UNEXPECTED;
if (dc_is_dp_signal(pipe_ctx->stream->signal)
- || pipe_ctx->stream->signal == SIGNAL_TYPE_VIRTUAL)
+ || dc_is_virtual_signal(pipe_ctx->stream->signal))
pipe_ctx->clock_source =
dc->res_pool->dp_clock_source;
else
@@ -1310,6 +1333,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
/* Create hardware sequencer */
dce112_hw_sequencer_construct(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
index 95a403396219..1f57ebc6f9b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
@@ -44,7 +44,8 @@ enum dc_status dce112_validate_with_context(
bool dce112_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status dce112_add_stream_to_ctx(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 312a0aebf91f..6d49c7143c67 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -454,6 +454,28 @@ static const struct resource_caps res_cap = {
.num_ddc = 6,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -1171,6 +1193,9 @@ static bool construct(
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
bw_calcs_init(dc->bw_dceip, dc->bw_vbios, dc->ctx->asic_id);
bw_calcs_data_update_from_pplib(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index c109ace96be9..27d0cc394963 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -387,6 +387,28 @@ static const struct resource_caps res_cap_83 = {
.num_ddc = 2,
};
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCE_RGB,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = false
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 1,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 1,
+ .fp16 = 1
+ }
+};
+
static const struct dce_dmcu_registers dmcu_regs = {
DMCU_DCE80_REG_LIST()
};
@@ -790,7 +812,8 @@ static void destruct(struct dce110_resource_pool *pool)
bool dce80_validate_bandwidth(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ bool fast_validate)
{
int i;
bool at_least_one_pipe = false;
@@ -802,11 +825,11 @@ bool dce80_validate_bandwidth(
if (at_least_one_pipe) {
/* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
+ context->bw_ctx.bw.dce.dispclk_khz = 681000;
+ context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ;
} else {
- context->bw.dce.dispclk_khz = 0;
- context->bw.dce.yclk_khz = 0;
+ context->bw_ctx.bw.dce.dispclk_khz = 0;
+ context->bw_ctx.bw.dce.yclk_khz = 0;
}
return true;
@@ -1032,6 +1055,10 @@ static bool dce80_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1237,6 +1264,10 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
@@ -1438,6 +1469,10 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
index afe8c42211cd..2b2de1d913c9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.c
@@ -43,23 +43,6 @@
#define DC_LOGGER \
clk_mgr->ctx->logger
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context)
-{
- struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg;
-
- pp_display_cfg->min_engine_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->min_memory_clock_khz = dc->res_pool->clk_mgr->clks.fclk_khz;
- pp_display_cfg->min_engine_clock_deep_sleep_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfc_deep_sleep_clock_khz = dc->res_pool->clk_mgr->clks.dcfclk_deep_sleep_khz;
- pp_display_cfg->min_dcfclock_khz = dc->res_pool->clk_mgr->clks.dcfclk_khz;
- pp_display_cfg->disp_clk_khz = dc->res_pool->clk_mgr->clks.dispclk_khz;
- dce110_fill_display_configs(context, pp_display_cfg);
-
- dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg);
-}
-
static int dcn1_determine_dppclk_threshold(struct clk_mgr *clk_mgr, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -167,11 +150,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
{
struct dc *dc = clk_mgr->ctx->dc;
struct dc_debug_options *debug = &dc->debug;
- struct dc_clocks *new_clocks = &context->bw.dcn.clk;
- struct pp_smu_display_requirement_rv *smu_req_cur =
- &dc->res_pool->pp_smu_req;
- struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
- struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct pp_smu_funcs_rv *pp_smu = NULL;
bool send_request_to_increase = false;
bool send_request_to_lower = false;
int display_count;
@@ -179,7 +159,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
bool enter_display_off = false;
display_count = get_active_display_cnt(dc, context);
-
+ if (dc->res_pool->pp_smu)
+ pp_smu = &dc->res_pool->pp_smu->rv_funcs;
if (display_count == 0)
enter_display_off = true;
@@ -189,10 +170,8 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
* if function pointer not set up, this message is
* sent as part of pplib_apply_display_requirements.
*/
- if (pp_smu->set_display_count)
+ if (pp_smu && pp_smu->set_display_count)
pp_smu->set_display_count(&pp_smu->pp_smu, display_count);
-
- smu_req.display_count = display_count;
}
if (new_clocks->dispclk_khz > clk_mgr->clks.dispclk_khz
@@ -203,7 +182,6 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr->clks.phyclk_khz)) {
clk_mgr->clks.phyclk_khz = new_clocks->phyclk_khz;
-
send_request_to_lower = true;
}
@@ -213,24 +191,18 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, clk_mgr->clks.fclk_khz)) {
clk_mgr->clks.fclk_khz = new_clocks->fclk_khz;
- smu_req.hard_min_fclk_mhz = new_clocks->fclk_khz / 1000;
-
send_request_to_lower = true;
}
//DCF Clock
if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr->clks.dcfclk_khz)) {
clk_mgr->clks.dcfclk_khz = new_clocks->dcfclk_khz;
- smu_req.hard_min_dcefclk_mhz = new_clocks->dcfclk_khz / 1000;
-
send_request_to_lower = true;
}
if (should_set_clock(safe_to_lower,
new_clocks->dcfclk_deep_sleep_khz, clk_mgr->clks.dcfclk_deep_sleep_khz)) {
clk_mgr->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
- smu_req.min_deep_sleep_dcefclk_mhz = (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000;
-
send_request_to_lower = true;
}
@@ -239,17 +211,13 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
*/
if (send_request_to_increase) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
@@ -259,27 +227,20 @@ static void dcn1_update_clocks(struct clk_mgr *clk_mgr,
|| new_clocks->dispclk_khz == clk_mgr->clks.dispclk_khz) {
dcn1_ramp_up_dispclk_with_dpp(clk_mgr, new_clocks);
clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
-
send_request_to_lower = true;
}
if (!send_request_to_increase && send_request_to_lower) {
/*use dcfclk to request voltage*/
- if (pp_smu->set_hard_min_fclk_by_freq &&
+ if (pp_smu && pp_smu->set_hard_min_fclk_by_freq &&
pp_smu->set_hard_min_dcfclk_by_freq &&
pp_smu->set_min_deep_sleep_dcfclk) {
- pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_fclk_mhz);
- pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, smu_req.hard_min_dcefclk_mhz);
- pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, smu_req.min_deep_sleep_dcefclk_mhz);
- } else {
- if (pp_smu->set_display_requirement)
- pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
- dcn1_pplib_apply_display_requirements(dc, context);
+ pp_smu->set_hard_min_fclk_by_freq(&pp_smu->pp_smu, new_clocks->fclk_khz / 1000);
+ pp_smu->set_hard_min_dcfclk_by_freq(&pp_smu->pp_smu, new_clocks->dcfclk_khz / 1000);
+ pp_smu->set_min_deep_sleep_dcfclk(&pp_smu->pp_smu, (new_clocks->dcfclk_deep_sleep_khz + 999) / 1000);
}
}
-
- *smu_req_cur = smu_req;
}
static const struct clk_mgr_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
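With the pp_smu_display_requirement_rv staging struct gone, dcn1_update_clocks() converts kHz to MHz inline when calling the SMU: the deep-sleep DCF clock uses (khz + 999) / 1000, a ceiling division, so rounding can never request less than the mode needs, while the hard minimums keep plain truncating division as before. A one-line illustration of the difference:

#include <stdio.h>

int main(void)
{
	int khz = 300500;

	printf("floor: %d MHz, ceil: %d MHz\n", khz / 1000, (khz + 999) / 1000);
	return 0;
}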
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
index a995eda443a3..97007cf33665 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_clk_mgr.h
@@ -34,10 +34,6 @@ struct clk_bypass {
uint32_t dprefclk_bypass;
};
-void dcn1_pplib_apply_display_requirements(
- struct dc *dc,
- struct dc_state *context);
-
struct clk_mgr *dcn1_clk_mgr_create(struct dc_context *ctx);
#endif //__DCN10_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
index cd1ebe57ed59..6f4b24756323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
@@ -91,13 +91,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
void dpp_read_state(struct dpp *dpp_base,
struct dcn_dpp_state *s)
{
@@ -392,6 +385,10 @@ void dpp1_cnv_setup (
default:
break;
}
+
+ /* Set default color space based on format if none is given. */
+ color_space = input_color_space ? input_color_space : color_space;
+
REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
@@ -403,7 +400,7 @@ void dpp1_cnv_setup (
for (i = 0; i < 12; i++)
tbl_entry.regval[i] = input_csc_color_matrix.matrix[i];
- tbl_entry.color_space = input_color_space;
+ tbl_entry.color_space = color_space;
if (color_space >= COLOR_SPACE_YCBCR601)
select = INPUT_CSC_SELECT_ICSC;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
index 41f0f4c912e7..882bcc5a40f6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_cm.c
@@ -88,13 +88,6 @@ enum dscl_mode_sel {
DSCL_MODE_DSCL_BYPASS = 6
};
-enum gamut_remap_select {
- GAMUT_REMAP_BYPASS = 0,
- GAMUT_REMAP_COEFF,
- GAMUT_REMAP_COMA_COEFF,
- GAMUT_REMAP_COMB_COEFF
-};
-
static const struct dpp_input_csc_matrix dpp_input_csc_matrix[] = {
{COLOR_SPACE_SRGB,
{0x2000, 0, 0, 0, 0, 0x2000, 0, 0, 0, 0, 0x2000, 0} },
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index c7642e748297..ce21a290bf3e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
int *num_part_y,
int *num_part_c)
{
+ int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
+ lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
+
int line_size = scl_data->viewport.width < scl_data->recout.width ?
scl_data->viewport.width : scl_data->recout.width;
int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
scl_data->viewport_c.width : scl_data->recout.width;
- int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
- int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
- int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
- int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
+
+ if (line_size == 0)
+ line_size = 1;
+
+ if (line_size_c == 0)
+ line_size_c = 1;
+
+
+ lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
+ memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
+ memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
if (lb_config == LB_MEMORY_CONFIG_1) {
lb_memory_size = 816;
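
The rewritten prologue above guards against a zero viewport/recout width before computing per-line buffer sizes, since a zero line size would propagate into zero memory-line sizes and bogus partition counts. The "+71" and "+5" additions are plain integer ceiling divisions; a standalone sketch of both patterns, assuming non-negative inputs:

	static inline int ceil_div(int num, int den)
	{
		return (num + den - 1) / den;	/* e.g. (line_size * lb_bpc + 71) / 72 */
	}

	static inline int at_least_one(int v)
	{
		return v ? v : 1;		/* mirrors the line_size == 0 guard */
	}
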
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index e161ad836812..0db2a6e96fc0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -258,8 +258,9 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
- REG_UPDATE_SEQ(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
- DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0, 1);
+ REG_UPDATE_SEQ_2(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 0,
+ DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
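
The workaround pulses DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST low and then high in one sequence so the arbiter latches the freshly programmed watermark set; REG_UPDATE_SEQ_2 simply spells out both field writes explicitly. A rough standalone illustration of the pulse, not the DC register macros:

	#include <stdint.h>

	static void wm_change_request_pulse(volatile uint32_t *reg, uint32_t req_mask)
	{
		uint32_t v = *reg;

		*reg = v & ~req_mask;	/* drive the request field to 0 */
		*reg = v | req_mask;	/* raise it to 1 so the new watermarks take effect */
	}
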
void hubbub1_program_watermarks(
@@ -282,7 +283,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
@@ -309,7 +311,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -322,7 +325,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -336,7 +340,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -347,7 +352,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
@@ -374,7 +380,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -387,7 +394,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -401,7 +409,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -412,7 +421,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
@@ -439,7 +449,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -452,7 +463,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -466,7 +478,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
@@ -477,7 +490,8 @@ void hubbub1_program_watermarks(
hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
+ DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
@@ -504,7 +518,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
@@ -517,7 +532,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
@@ -531,7 +547,8 @@ void hubbub1_program_watermarks(
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
refclk_mhz, 0x1fffff);
- REG_WRITE(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
+ REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
+ DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
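
Each watermark above is specified in nanoseconds, converted to DCHUB reference-clock cycles and clamped to the register width before the REG_SET. A sketch of the conversion implied by the call sites (the real convert_and_clamp() in this file may round differently):

	#include <stdint.h>

	static uint32_t wm_ns_to_refclk_cycles(uint32_t wm_ns, uint32_t refclk_mhz,
					       uint32_t clamp_value)
	{
		/* 1 MHz is 1 cycle per microsecond, so cycles = ns * MHz / 1000 */
		uint64_t cycles = ((uint64_t)wm_ns * refclk_mhz) / 1000;

		return cycles > clamp_value ? clamp_value : (uint32_t)cycles;
	}
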
@@ -866,6 +883,7 @@ static const struct hubbub_funcs hubbub1_funcs = {
.dcc_support_pixel_format = hubbub1_dcc_support_pixel_format,
.get_dcc_compression_cap = hubbub1_get_dcc_compression_cap,
.wm_read_state = hubbub1_wm_read_state,
+ .program_watermarks = hubbub1_program_watermarks,
};
void hubbub1_construct(struct hubbub *hubbub,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index 9cd4a5194154..85811b24a497 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -32,18 +32,14 @@
#define TO_DCN10_HUBBUB(hubbub)\
container_of(hubbub, struct dcn10_hubbub, base)
-#define HUBHUB_REG_LIST_DCN()\
+#define HUBBUB_REG_LIST_DCN_COMMON()\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C),\
SR(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D),\
- SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D),\
SR(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D),\
SR(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL),\
SR(DCHUBBUB_ARB_DRAM_STATE_CNTL),\
@@ -54,6 +50,12 @@
SR(DCHUBBUB_TEST_DEBUG_DATA),\
SR(DCHUBBUB_SOFT_RESET)
+#define HUBBUB_VM_REG_LIST() \
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C),\
+ SR(DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D)
+
#define HUBBUB_SR_WATERMARK_REG_LIST()\
SR(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A),\
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A),\
@@ -65,7 +67,8 @@
SR(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D)
#define HUBBUB_REG_LIST_DCN10(id)\
- HUBHUB_REG_LIST_DCN(), \
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_VM_REG_LIST(), \
HUBBUB_SR_WATERMARK_REG_LIST(), \
SR(DCHUBBUB_SDPIF_FB_TOP),\
SR(DCHUBBUB_SDPIF_FB_BASE),\
@@ -122,8 +125,7 @@ struct dcn_hubbub_registers {
#define HUBBUB_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
-
-#define HUBBUB_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SOFT_RESET, DCHUBBUB_GLOBAL_SOFT_RESET, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL, DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, mask_sh), \
@@ -133,10 +135,29 @@ struct dcn_hubbub_registers {
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_DRAM_STATE_CNTL, DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE, mask_sh), \
HUBBUB_SF(DCHUBBUB_ARB_SAT_LEVEL, DCHUBBUB_ARB_SAT_LEVEL, mask_sh), \
- HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh)
+ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, mask_sh)
+
+#define HUBBUB_MASK_SH_LIST_STUTTER(mask_sh) \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, mask_sh), \
+ HUBBUB_SF(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, mask_sh)
#define HUBBUB_MASK_SH_LIST_DCN10(mask_sh)\
- HUBBUB_MASK_SH_LIST_DCN(mask_sh), \
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_TOP, SDPIF_FB_TOP, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, mask_sh), \
HUBBUB_SF(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, mask_sh), \
@@ -167,15 +188,35 @@ struct dcn_hubbub_registers {
type FB_OFFSET;\
type AGP_BOT;\
type AGP_TOP;\
- type AGP_BASE
+ type AGP_BASE;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C;\
+ type DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+
+#define HUBBUB_STUTTER_REG_FIELD_LIST(type) \
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C;\
+ type DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D
struct dcn_hubbub_shift {
DCN_HUBBUB_REG_FIELD_LIST(uint8_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint8_t);
};
struct dcn_hubbub_mask {
DCN_HUBBUB_REG_FIELD_LIST(uint32_t);
+ HUBBUB_STUTTER_REG_FIELD_LIST(uint32_t);
};
struct dc;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 0ba68d41b9c3..54b219a710d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1178,6 +1178,10 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
+void hubp1_init(struct hubp *hubp)
+{
+	/* nothing to program for DCN1.0; stub keeps hubp_funcs fully populated */
+}
static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_program_surface_flip_and_addr =
hubp1_program_surface_flip_and_addr,
@@ -1201,7 +1205,7 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.hubp_clear_underflow = hubp1_clear_underflow,
.hubp_disable_control = hubp1_disable_control,
.hubp_get_underflow_status = hubp1_get_underflow_status,
-
+ .hubp_init = hubp1_init,
};
/*****************************************/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index a6d6dfe00617..99d2b7e2a578 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -34,6 +34,7 @@
#define HUBP_REG_LIST_DCN(id)\
SRI(DCHUBP_CNTL, HUBP, id),\
SRI(HUBPREQ_DEBUG_DB, HUBP, id),\
+ SRI(HUBPREQ_DEBUG, HUBP, id),\
SRI(DCSURF_ADDR_CONFIG, HUBP, id),\
SRI(DCSURF_TILING_CONFIG, HUBP, id),\
SRI(DCSURF_SURFACE_PITCH, HUBPREQ, id),\
@@ -138,6 +139,7 @@
#define HUBP_COMMON_REG_VARIABLE_LIST \
uint32_t DCHUBP_CNTL; \
uint32_t HUBPREQ_DEBUG_DB; \
+ uint32_t HUBPREQ_DEBUG; \
uint32_t DCSURF_ADDR_CONFIG; \
uint32_t DCSURF_TILING_CONFIG; \
uint32_t DCSURF_SURFACE_PITCH; \
@@ -247,7 +249,7 @@
.field_name = reg_name ## __ ## field_name ## post_fix
/* Mask/shift struct generation macro for all ASICs (including those with reduced functionality) */
-#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+#define HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh)\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_BLANK_EN, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_TTU_DISABLE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_UNDERFLOW_STATUS, mask_sh),\
@@ -331,7 +333,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MIN_META_CHUNK_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, DPTE_GROUP_SIZE, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, SWATH_HEIGHT, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, PTE_ROW_HEIGHT_LINEAR, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, CHUNK_SIZE_C, mask_sh),\
@@ -339,7 +340,6 @@
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MIN_META_CHUNK_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, DPTE_GROUP_SIZE_C, mask_sh),\
- HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, SWATH_HEIGHT_C, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, PTE_ROW_HEIGHT_LINEAR_C, mask_sh),\
HUBP_SF(HUBPREQ0_BLANK_OFFSET_0, REFCYC_H_BLANK_END, mask_sh),\
@@ -373,6 +373,11 @@
HUBP_SF(HUBPREQ0_DCN_SURF0_TTU_CNTL1, REFCYC_PER_REQ_DELIVERY_PRE, mask_sh),\
HUBP_SF(HUBP0_HUBP_CLK_CNTL, HUBP_CLOCK_ENABLE, mask_sh)
+#define HUBP_MASK_SH_LIST_DCN(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN_COMMON(mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG, MPTE_GROUP_SIZE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_REQ_SIZE_CONFIG_C, MPTE_GROUP_SIZE_C, mask_sh)
+
/* Mask/shift struct generation macro for ASICs with VM */
#define HUBP_MASK_SH_LIST_DCN_VM(mask_sh)\
HUBP_SF(HUBPREQ0_NOM_PARAMETERS_0, DST_Y_PER_PTE_ROW_NOM_L, mask_sh),\
@@ -595,6 +600,9 @@
type AGP_BASE;\
type AGP_BOT;\
type AGP_TOP;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_SYSTEM;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB;\
+ type DCN_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB;\
/* todo: get these from GVM instead of reading registers ourselves */\
type PAGE_DIRECTORY_ENTRY_HI32;\
type PAGE_DIRECTORY_ENTRY_LO32;\
@@ -743,4 +751,6 @@ enum cursor_pitch hubp1_get_cursor_pitch(unsigned int pitch);
void hubp1_vready_workaround(struct hubp *hubp,
struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest);
+void hubp1_init(struct hubp *hubp);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index d1a8f1c302a9..33d311cea28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -65,7 +65,7 @@ void print_microsec(struct dc_context *dc_ctx,
struct dc_log_buffer_ctx *log_ctx,
uint32_t ref_cycle)
{
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
@@ -345,13 +345,13 @@ void dcn10_log_hw_state(struct dc *dc,
DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d dcfclk_deep_sleep_khz:%d dispclk_khz:%d\n"
"dppclk_khz:%d max_supported_dppclk_khz:%d fclk_khz:%d socclk_khz:%d\n\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.max_supported_dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
log_mpc_crc(dc, log_ctx);
@@ -714,7 +714,7 @@ static enum dc_status dcn10_enable_stream_timing(
return DC_OK;
}
-static void reset_back_end_for_pipe(
+static void dcn10_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -889,22 +889,23 @@ void hwss1_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
dcn10_verify_allow_pstate_change_high(dc);
}
-static void plane_atomic_power_down(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void plane_atomic_power_down(struct dc *dc,
+ struct dpp *dpp,
+ struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- struct dpp *dpp = pipe_ctx->plane_res.dpp;
DC_LOGGER_INIT(dc->ctx->logger);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 1);
dpp_pg_control(hws, dpp->inst, false);
- hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, false);
+ hubp_pg_control(hws, hubp->inst, false);
dpp->funcs->dpp_reset(dpp);
REG_SET(DC_IP_REQUEST_CNTL, 0,
IP_REQUEST_EN, 0);
DC_LOG_DEBUG(
- "Power gated front end %d\n", pipe_ctx->pipe_idx);
+ "Power gated front end %d\n", hubp->inst);
}
}
@@ -931,7 +932,9 @@ static void plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
hubp->power_gated = true;
dc->optimized_required = false; /* We're powering off, no need to optimize */
- plane_atomic_power_down(dc, pipe_ctx);
+ plane_atomic_power_down(dc,
+ pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_res.hubp);
pipe_ctx->stream = NULL;
memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
@@ -976,16 +979,14 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
continue;
- if (tg->funcs->is_tg_enabled(tg))
- tg->funcs->lock(tg);
-
/* Blank controller using driver code instead of
* command table.
*/
if (tg->funcs->is_tg_enabled(tg)) {
+ tg->funcs->lock(tg);
tg->funcs->set_blank(tg, true);
hwss_wait_for_blank_complete(tg);
}
@@ -1001,16 +1002,19 @@ static void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
struct dpp *dpp = dc->res_pool->dpps[i];
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- // W/A for issue with dc_post_update_surfaces_to_stream
- hubp->power_gated = true;
-
/* There is an assumption that pipe_ctx is not mapped irregularly
* to non-preferred front end. If pipe_ctx->stream is not NULL,
* we will use the pipe, so don't disable
*/
- if (pipe_ctx->stream != NULL)
+ if (can_apply_seamless_boot &&
+ pipe_ctx->stream != NULL &&
+ pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
+ pipe_ctx->stream_res.tg))
continue;
+ /* Disable on the current state so the new one isn't cleared. */
+ pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
dpp->funcs->dpp_reset(dpp);
pipe_ctx->stream_res.tg = tg;
@@ -1108,6 +1112,25 @@ static void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
+ /* If taking control over from VBIOS, we may want to optimize our first
+ * mode set, so we need to skip powering down pipes until we know which
+ * pipes we want to use.
+ * Otherwise, if taking control is not possible, we need to power
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct hubp *hubp = dc->res_pool->hubps[i];
+ struct dpp *dpp = dc->res_pool->dpps[i];
+
+ hubp->funcs->hubp_init(hubp);
+ dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+ plane_atomic_power_down(dc, dpp, hubp);
+ }
+
+ apply_DEGVIDCN10_253_wa(dc);
+ }
+
for (i = 0; i < dc->res_pool->audio_count; i++) {
struct audio *audio = dc->res_pool->audios[i];
@@ -1137,12 +1160,9 @@ static void dcn10_init_hw(struct dc *dc)
enable_power_gating_plane(dc->hwseq, true);
memset(&dc->res_pool->clk_mgr->clks, 0, sizeof(dc->res_pool->clk_mgr->clks));
-
- if (dc->hwss.init_pipes)
- dc->hwss.init_pipes(dc, dc->current_state);
}
-static void reset_hw_ctx_wrap(
+static void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context)
{
@@ -1164,10 +1184,9 @@ static void reset_hw_ctx_wrap(
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
- if (dc->hwss.enable_stream_gating) {
+ dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (dc->hwss.enable_stream_gating)
dc->hwss.enable_stream_gating(dc, pipe_ctx);
- }
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}
@@ -1836,7 +1855,7 @@ void dcn10_get_hdr_visual_confirm_color(
switch (top_pipe_ctx->plane_res.scl_data.format) {
case PIXEL_FORMAT_ARGB2101010:
- if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_UNITY) {
+ if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
}
@@ -1931,7 +1950,7 @@ static void update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
plane_state->format,
EXPANSION_MODE_ZERO,
plane_state->input_csc_color_matrix,
- COLOR_SPACE_YCBCR601_LIMITED);
+ plane_state->color_space);
//set scale and bias registers
build_prescale_params(&bns_params, plane_state);
@@ -2051,7 +2070,7 @@ void update_dchubp_dpp(
* divided by 2
*/
if (plane_state->update_flags.bits.full_update) {
- bool should_divided_by_2 = context->bw.dcn.clk.dppclk_khz <=
+ bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
dc->res_pool->clk_mgr->clks.dispclk_khz / 2;
dpp->funcs->dpp_dppclk_control(
@@ -2120,6 +2139,9 @@ void update_dchubp_dpp(
if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
dc->hwss.set_cursor_position(pipe_ctx);
dc->hwss.set_cursor_attribute(pipe_ctx);
+
+ if (dc->hwss.set_cursor_sdr_white_level)
+ dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
}
if (plane_state->update_flags.bits.full_update) {
@@ -2310,6 +2332,7 @@ static void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
bool removed_pipe[4] = { false };
+ bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
@@ -2319,7 +2342,13 @@ static void dcn10_apply_ctx_for_surface(
tg = top_pipe_to_program->stream_res.tg;
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
+ interdependent_update = top_pipe_to_program->plane_state &&
+ top_pipe_to_program->plane_state->update_flags.bits.full_update;
+
+ if (interdependent_update)
+ lock_all_pipes(dc, context, true);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
if (num_planes == 0) {
/* OTG blank before remove all front end */
@@ -2339,15 +2368,9 @@ static void dcn10_apply_ctx_for_surface(
*/
if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != 0xf) {
+ old_pipe_ctx->plane_res.hubp &&
+ old_pipe_ctx->plane_res.hubp->opp_id != 0xf)
dcn10_disable_plane(dc, old_pipe_ctx);
- /*
- * power down fe will unlock when calling reset, need
- * to lock it back here. Messy, need rework.
- */
- pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
- }
}
if ((!pipe_ctx->plane_state ||
@@ -2366,29 +2389,25 @@ static void dcn10_apply_ctx_for_surface(
if (num_planes > 0)
program_all_pipe_in_tree(dc, top_pipe_to_program, context);
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
-
- if (top_pipe_to_program->plane_state &&
- top_pipe_to_program->plane_state->update_flags.bits.full_update)
+ if (interdependent_update)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- tg = pipe_ctx->stream_res.tg;
/* Skip inactive pipes and ones already updated */
- if (!pipe_ctx->stream || pipe_ctx->stream == stream
- || !pipe_ctx->plane_state
- || !tg->funcs->is_tg_enabled(tg))
+ if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
+ !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
continue;
- tg->funcs->lock(tg);
-
pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
pipe_ctx->plane_res.hubp,
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
-
- tg->funcs->unlock(tg);
}
+ if (interdependent_update)
+ lock_all_pipes(dc, context, false);
+ else
+ dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+
if (num_planes == 0)
false_optc_underflow_wa(dc, stream, tg);
@@ -2420,12 +2439,14 @@ static void dcn10_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2433,9 +2454,9 @@ static void dcn10_prepare_bandwidth(
false);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2450,12 +2471,14 @@ static void dcn10_optimize_bandwidth(
struct dc *dc,
struct dc_state *context)
{
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
if (dc->debug.sanity_checks)
dcn10_verify_allow_pstate_change_high(dc);
if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
if (context->stream_count == 0)
- context->bw.dcn.clk.phyclk_khz = 0;
+ context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
dc->res_pool->clk_mgr->funcs->update_clocks(
dc->res_pool->clk_mgr,
@@ -2463,9 +2486,9 @@ static void dcn10_optimize_bandwidth(
true);
}
- hubbub1_program_watermarks(dc->res_pool->hubbub,
- &context->bw.dcn.watermarks,
- dc->res_pool->ref_clock_inKhz / 1000,
+ hubbub->funcs->program_watermarks(hubbub,
+ &context->bw_ctx.bw.dcn.watermarks,
+ dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
dcn10_stereo_hw_frame_pack_wa(dc, context);
@@ -2654,7 +2677,7 @@ static void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
pipe_ctx->plane_res.hubp);
- plane_state->status.is_flip_pending = flip_pending;
+ plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
if (!flip_pending)
plane_state->status.current_address = plane_state->status.requested_address;
@@ -2685,16 +2708,22 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_mi_param param = {
.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
- .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clock_inKhz,
+ .ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
.viewport = pipe_ctx->plane_res.scl_data.viewport,
.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
.rotation = pipe_ctx->plane_state->rotation,
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ uint32_t x_plane = pipe_ctx->plane_state->dst_rect.x;
+ uint32_t y_plane = pipe_ctx->plane_state->dst_rect.y;
+ uint32_t x_offset = min(x_plane, pos_cpy.x);
+ uint32_t y_offset = min(y_plane, pos_cpy.y);
- pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x;
- pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y;
+ pos_cpy.x -= x_offset;
+ pos_cpy.y -= y_offset;
+ pos_cpy.x_hotspot += (x_plane - x_offset);
+ pos_cpy.y_hotspot += (y_plane - y_offset);
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
@@ -2789,6 +2818,33 @@ int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
return vertical_line_start;
}
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock)
+{
+ struct pipe_ctx *pipe_ctx;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ tg = pipe_ctx->stream_res.tg;
+ /*
+ * Only lock the top pipe's tg to prevent redundant
+ * (un)locking. Also skip if pipe is disabled.
+ */
+ if (pipe_ctx->top_pipe ||
+ !pipe_ctx->stream || !pipe_ctx->plane_state ||
+ !tg->funcs->is_tg_enabled(tg))
+ continue;
+
+ if (lock)
+ tg->funcs->lock(tg);
+ else
+ tg->funcs->unlock(tg);
+ }
+}
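
lock_all_pipes() is used by the apply_ctx_for_surface path above: for an interdependent (full) update every enabled top-pipe timing generator is locked before programming and released afterwards. A usage sketch, assuming the DC types already in scope here:

	static void program_with_global_lock(struct dc *dc, struct dc_state *context)
	{
		lock_all_pipes(dc, context, true);	/* lock enabled top-pipe TGs */

		/* ... program pipes / interdependent DLG and TTU registers ... */

		lock_all_pipes(dc, context, false);	/* unlock in the same pipe order */
	}
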
+
static void calc_vupdate_position(
struct pipe_ctx *pipe_ctx,
uint32_t *start_line,
@@ -2882,6 +2938,29 @@ static void dcn10_setup_vupdate_interrupt(struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
+static void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+
+ /* only 3 items below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ params.timing.pix_clk_100hz /= 2;
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ link->dc->hwss.edp_backlight_control(link, true);
+ }
+}
+
static const struct hw_sequencer_funcs dcn10_funcs = {
.program_gamut_remap = program_gamut_remap,
.init_hw = dcn10_init_hw,
@@ -2903,7 +2982,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.update_info_frame = dce110_update_info_frame,
.enable_stream = dce110_enable_stream,
.disable_stream = dce110_disable_stream,
- .unblank_stream = dce110_unblank_stream,
+ .unblank_stream = dcn10_unblank_stream,
.blank_stream = dce110_blank_stream,
.enable_audio_stream = dce110_enable_audio_stream,
.disable_audio_stream = dce110_disable_audio_stream,
@@ -2913,7 +2992,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.pipe_control_lock = dcn10_pipe_control_lock,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
- .reset_hw_ctx_wrap = reset_hw_ctx_wrap,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.set_drr = set_drr,
.get_position = get_position,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 6d66084df55f..4b3b27a5d23b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -83,4 +83,8 @@ struct pipe_ctx *find_top_pipe_for_stream(
int get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx);
+void lock_all_pipes(struct dc *dc,
+ struct dc_state *context,
+ bool lock);
+
#endif /* __DC_HWSS_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 98f41d250978..991622da9ed5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -77,7 +77,7 @@ static unsigned int dcn10_get_hubbub_state(struct dc *dc, char *pBuf, unsigned i
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
@@ -115,7 +115,7 @@ static unsigned int dcn10_get_hubp_states(struct dc *dc, char *pBuf, unsigned in
unsigned int chars_printed = 0;
unsigned int remaining_buffer = bufSize;
- const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clock_inKhz / 1000;
+ const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
static const unsigned int frac = 1000;
if (invarOnly)
@@ -472,12 +472,12 @@ static unsigned int dcn10_get_clock_states(struct dc *dc, char *pBuf, unsigned i
chars_printed = snprintf_count(pBuf, bufSize, "dcfclk,dcfclk_deep_sleep,dispclk,"
"dppclk,fclk,socclk\n"
"%d,%d,%d,%d,%d,%d\n",
- dc->current_state->bw.dcn.clk.dcfclk_khz,
- dc->current_state->bw.dcn.clk.dcfclk_deep_sleep_khz,
- dc->current_state->bw.dcn.clk.dispclk_khz,
- dc->current_state->bw.dcn.clk.dppclk_khz,
- dc->current_state->bw.dcn.clk.fclk_khz,
- dc->current_state->bw.dcn.clk.socclk_khz);
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
+ dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
remaining_buffer -= chars_printed;
pBuf += chars_printed;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index a9db372688ff..0126a44ba012 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1304,7 +1304,6 @@ void dcn10_link_encoder_connect_dig_be_to_fe(
#define HPD_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
HPD_REG(reg_name), \
- HPD_REG_READ(reg_name), \
n, __VA_ARGS__)
#define HPD_REG_UPDATE(reg_name, field, val) \
@@ -1337,7 +1336,6 @@ void dcn10_link_encoder_disable_hpd(struct link_encoder *enc)
#define AUX_REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
AUX_REG(reg_name), \
- AUX_REG_READ(reg_name), \
n, __VA_ARGS__)
#define AUX_REG_UPDATE(reg_name, field, val) \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 09d74070a49b..7eccb54c421d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -516,6 +516,31 @@ static const struct resource_caps rv2_res_cap = {
};
#endif
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = true,
+ .fp16 = true
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 1
+ }
+};
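
The scale factors in plane_cap are, by the usual DC convention (stated here as an assumption), ratios in units of 1/1000: 16000 allows up to 16x upscaling, 250 allows shrinking to one quarter size, and 1 effectively forbids scaling for FP16 surfaces. A sketch of how such a capability check could be expressed:

	static bool scale_allowed(int src, int dst, int max_upscale, int max_downscale)
	{
		if (dst > src)					/* upscaling */
			return dst * 1000 <= src * max_upscale;
		return dst * 1000 >= src * max_downscale;	/* downscaling */
	}
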
+
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
@@ -848,14 +873,14 @@ void dcn10_clock_source_destroy(struct clock_source **clk_src)
*clk_src = NULL;
}
-static struct pp_smu_funcs_rv *dcn10_pp_smu_create(struct dc_context *ctx)
+static struct pp_smu_funcs *dcn10_pp_smu_create(struct dc_context *ctx)
{
- struct pp_smu_funcs_rv *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
+ struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL);
if (!pp_smu)
return pp_smu;
- dm_pp_get_funcs_rv(ctx, pp_smu);
+ dm_pp_get_funcs(ctx, pp_smu);
return pp_smu;
}
@@ -865,10 +890,7 @@ static void destruct(struct dcn10_resource_pool *pool)
for (i = 0; i < pool->base.stream_enc_count; i++) {
if (pool->base.stream_enc[i] != NULL) {
- /* TODO: free dcn version of stream encoder once implemented
- * rather than using virtual stream encoder
- */
- kfree(pool->base.stream_enc[i]);
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
pool->base.stream_enc[i] = NULL;
}
}
@@ -921,9 +943,6 @@ static void destruct(struct dcn10_resource_pool *pool)
}
}
- for (i = 0; i < pool->base.stream_enc_count; i++)
- kfree(pool->base.stream_enc[i]);
-
for (i = 0; i < pool->base.audio_count; i++) {
if (pool->base.audios[i])
dce_aud_destroy(&pool->base.audios[i]);
@@ -1078,7 +1097,7 @@ static struct pipe_ctx *dcn10_acquire_idle_pipe_for_layer(
{
struct resource_context *res_ctx = &context->res_ctx;
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
if (!head_pipe) {
ASSERT(0);
@@ -1143,7 +1162,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
continue;
if (context->stream_status[i].plane_count > 2)
- return false;
+ return DC_FAIL_UNSUPPORTED_1;
for (j = 0; j < context->stream_status[i].plane_count; j++) {
struct dc_plane_state *plane =
@@ -1351,7 +1370,7 @@ static bool construct(
goto fail;
}
- dml_init_instance(&dc->dml, DML_PROJECT_RAVEN1);
+ dml_init_instance(&dc->dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
@@ -1510,6 +1529,9 @@ static bool construct(
dcn10_hw_sequencer_construct(dc);
dc->caps.max_planes = pool->base.pipe_count;
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
dc->cap_funcs = cap_funcs;
return true;
@@ -1522,7 +1544,7 @@ fail:
}
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc)
{
struct dcn10_resource_pool *pool =
@@ -1531,7 +1553,7 @@ struct resource_pool *dcn10_create_resource_pool(
if (!pool)
return NULL;
- if (construct(num_virtual_links, dc, pool))
+ if (construct(init_data->num_virtual_links, dc, pool))
return &pool->base;
BREAK_TO_DEBUGGER();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
index 8f71225bc61b..999c684a0b36 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
@@ -39,7 +39,7 @@ struct dcn10_resource_pool {
struct resource_pool base;
};
struct resource_pool *dcn10_create_resource_pool(
- uint8_t num_virtual_links,
+ const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index b08254121251..8ee9f6dc1d62 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -245,7 +245,8 @@ static void enc1_update_hdmi_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space)
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting)
{
uint32_t h_active_start;
uint32_t v_active_start;
@@ -298,7 +299,6 @@ void enc1_stream_encoder_dp_set_stream_attribute(
break;
case PIXEL_ENCODING_YCBCR420:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_YCBCR420;
- REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
break;
default:
dp_pixel_encoding = DP_PIXEL_ENCODING_TYPE_RGB444;
@@ -726,13 +726,19 @@ void enc1_stream_encoder_update_dp_info_packets(
3, /* packetIndex */
&info_frame->hdrsmd);
+ if (info_frame->dpsdp.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 4,/* packetIndex */
+ &info_frame->dpsdp);
+
/* enable/disable transmission of packet(s).
* If enabled, packet transmission begins on the next frame
*/
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP0_ENABLE, info_frame->vsc.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP2_ENABLE, info_frame->spd.valid);
REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP3_ENABLE, info_frame->hdrsmd.valid);
-
+ REG_UPDATE(DP_SEC_CNTL, DP_SEC_GSP4_ENABLE, info_frame->dpsdp.valid);
/* This bit is the master enable bit.
* When enabling secondary stream engine,
@@ -797,10 +803,10 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_DIS_DEFER, 2);
/* Larger delay to wait until VBLANK - use max retry of
- * 10us*3000=30ms. This covers 16.6ms of typical 60 Hz mode +
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
* a little more because we may not trust delay accuracy.
*/
- max_retries = DP_BLANK_MAX_RETRY * 150;
+ max_retries = DP_BLANK_MAX_RETRY * 250;
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
@@ -833,14 +839,19 @@ void enc1_stream_encoder_dp_unblank(
if (param->link_settings.link_rate != LINK_RATE_UNKNOWN) {
uint32_t n_vid = 0x8000;
uint32_t m_vid;
+ uint32_t n_multiply = 0;
+ uint64_t m_vid_l = n_vid;
+ /* YCbCr 4:2:0 : Computed VID_M will be 2X the input rate */
+ if (param->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ /*this param->pixel_clk_khz is half of 444 rate for 420 already*/
+ n_multiply = 1;
+ }
/* M / N = Fstream / Flink
* m_vid / n_vid = pixel rate / link rate
*/
- uint64_t m_vid_l = n_vid;
-
- m_vid_l *= param->pixel_clk_khz;
+ m_vid_l *= param->timing.pix_clk_100hz / 10;
m_vid_l = div_u64(m_vid_l,
param->link_settings.link_rate
* LINK_RATE_REF_FREQ_IN_KHZ);
@@ -859,7 +870,9 @@ void enc1_stream_encoder_dp_unblank(
REG_UPDATE(DP_VID_M, DP_VID_M, m_vid);
- REG_UPDATE(DP_VID_TIMING, DP_VID_M_N_GEN_EN, 1);
+ REG_UPDATE_2(DP_VID_TIMING,
+ DP_VID_M_N_GEN_EN, 1,
+ DP_VID_N_MUL, n_multiply);
}
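
The unblank change programs the DP M/N pair so that M/N equals pixel rate over link symbol rate, with N fixed at 0x8000; for YCbCr 4:2:0 the stored pixel clock is already halved, so DP_VID_N_MUL compensates in hardware. A standalone numeric sketch of the M computation (illustrative values only; link_rate is in 0.27 GHz units and LINK_RATE_REF_FREQ_IN_KHZ is 27000):

	#include <stdint.h>

	static uint32_t dp_vid_m(uint32_t pix_clk_khz, uint32_t link_rate)
	{
		const uint32_t n_vid = 0x8000;
		uint64_t m = (uint64_t)n_vid * pix_clk_khz;

		return (uint32_t)(m / (link_rate * 27000ull));
	}

	/* e.g. 594000 kHz (4K60) on HBR2 (link_rate = 0x14): m = 0x8000 * 594000 / 540000, about 36044 */
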
/* set DIG_START to 0x1 to resync FIFO */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index b7c800e10a32..e654c2f55971 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -462,7 +462,8 @@ void enc1_update_generic_info_packet(
void enc1_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void enc1_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index e81b24374bcb..ccbfe9680d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -58,7 +58,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
bool enable);
/*
- * poll pending down reply before clear payload allocation table
+ * poll pending down reply
*/
void dm_helpers_dp_mst_poll_pending_down_reply(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 14bed5b1fa97..4fc4208d1472 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -30,6 +30,8 @@
* interface to PPLIB/SMU to setup clocks and pstate requirements on SoC
*/
+typedef bool BOOLEAN;
+
enum pp_smu_ver {
/*
* PP_SMU_INTERFACE_X should be interpreted as the interface defined
@@ -72,29 +74,6 @@ struct pp_smu_wm_range_sets {
struct pp_smu_wm_set_range writer_wm_sets[MAX_WATERMARK_SETS];
};
-struct pp_smu_display_requirement_rv {
- /* PPSMC_MSG_SetDisplayCount: count
- * 0 triggers S0i2 optimization
- */
- unsigned int display_count;
-
- /* PPSMC_MSG_SetHardMinFclkByFreq: mhz
- * FCLK will vary with DPM, but never below requested hard min
- */
- unsigned int hard_min_fclk_mhz;
-
- /* PPSMC_MSG_SetHardMinDcefclkByFreq: mhz
- * fixed clock at requested freq, either from FCH bypass or DFS
- */
- unsigned int hard_min_dcefclk_mhz;
-
- /* PPSMC_MSG_SetMinDeepSleepDcefclk: mhz
- * when DF is in cstate, dcf clock is further divided down
- * to just above given frequency
- */
- unsigned int min_deep_sleep_dcefclk_mhz;
-};
-
struct pp_smu_funcs_rv {
struct pp_smu pp_smu;
@@ -137,12 +116,6 @@ struct pp_smu_funcs_rv {
/* PME w/a */
void (*set_pme_wa_enable)(struct pp_smu *pp);
- /*
- * Legacy functions. Used for backwards comp. with existing
- * PPlib code.
- */
- void (*set_display_requirement)(struct pp_smu *pp,
- struct pp_smu_display_requirement_rv *req);
};
struct pp_smu_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services.h b/drivers/gpu/drm/amd/display/dc/dm_services.h
index 1961cc6d9143..b426ba02b793 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services.h
@@ -52,30 +52,17 @@ irq_handler_idx dm_register_interrupt(
* GPU registers access
*
*/
-
+uint32_t dm_read_reg_func(
+ const struct dc_context *ctx,
+ uint32_t address,
+ const char *func_name);
/* enable for debugging new code, this adds 50k to the driver size. */
/* #define DM_CHECK_ADDR_0 */
#define dm_read_reg(ctx, address) \
dm_read_reg_func(ctx, address, __func__)
-static inline uint32_t dm_read_reg_func(
- const struct dc_context *ctx,
- uint32_t address,
- const char *func_name)
-{
- uint32_t value;
-#ifdef DM_CHECK_ADDR_0
- if (address == 0) {
- DC_ERR("invalid register read; address = 0\n");
- return 0;
- }
-#endif
- value = cgs_read_register(ctx->cgs_device, address);
- trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
- return value;
-}
#define dm_write_reg(ctx, address, value) \
dm_write_reg_func(ctx, address, value, __func__)
@@ -144,10 +131,14 @@ static inline uint32_t set_reg_field_value_ex(
reg_name ## __ ## reg_field ## _MASK,\
reg_name ## __ ## reg_field ## __SHIFT)
-uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+uint32_t generic_reg_set_ex(const struct dc_context *ctx,
uint32_t addr, uint32_t reg_val, int n,
uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+uint32_t generic_reg_update_ex(const struct dc_context *ctx,
+ uint32_t addr, int n,
+ uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...);
+
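
The split above makes the intent explicit: generic_reg_update_ex() now performs the register read itself (read-modify-write), while generic_reg_set_ex() starts from a caller-supplied initial value, so call sites no longer pass a pre-read register value. Conceptually (the real helpers take varargs of shift/mask/value triples):

	#include <stdint.h>

	static uint32_t field_set(uint32_t init, uint32_t mask, uint32_t shift, uint32_t val)
	{
		return (init & ~mask) | ((val << shift) & mask);	/* overwrite one field */
	}

	static uint32_t field_update(const volatile uint32_t *reg, uint32_t mask,
				     uint32_t shift, uint32_t val)
	{
		return field_set(*reg, mask, shift, val);	/* read the current value first */
	}
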
#define FD(reg_field) reg_field ## __SHIFT, \
reg_field ## _MASK
@@ -155,7 +146,7 @@ uint32_t generic_reg_update_ex(const struct dc_context *ctx,
* return number of poll before condition is met
* return 0 if condition is not meet after specified time out tries
*/
-unsigned int generic_reg_wait(const struct dc_context *ctx,
+void generic_reg_wait(const struct dc_context *ctx,
uint32_t addr, uint32_t mask, uint32_t shift, uint32_t condition_value,
unsigned int delay_between_poll_us, unsigned int time_out_num_tries,
const char *func_name, int line);
@@ -172,11 +163,10 @@ unsigned int generic_reg_wait(const struct dc_context *ctx,
#define generic_reg_update_soc15(ctx, inst_offset, reg_name, n, ...)\
generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, \
- dm_read_reg_func(ctx, mm##reg_name + DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + inst_offset, __func__), \
n, __VA_ARGS__)
#define generic_reg_set_soc15(ctx, inst_offset, reg_name, n, ...)\
- generic_reg_update_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
+ generic_reg_set_ex(ctx, DCE_BASE.instance[0].segment[mm##reg_name##_BASE_IDX] + mm##reg_name + inst_offset, 0, \
n, __VA_ARGS__)
#define get_reg_field_value_soc15(reg_value, block, reg_num, reg_name, reg_field)\
@@ -223,8 +213,8 @@ bool dm_pp_notify_wm_clock_changes(
const struct dc_context *ctx,
struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges);
-void dm_pp_get_funcs_rv(struct dc_context *ctx,
- struct pp_smu_funcs_rv *funcs);
+void dm_pp_get_funcs(struct dc_context *ctx,
+ struct pp_smu_funcs *funcs);
/* DAL calls this function to notify PP about completion of Mode Set.
* For PP it means that current DCE clocks are those which were returned
diff --git a/drivers/gpu/drm/amd/display/dc/dm_services_types.h b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
index 77200711abbe..a3d1be20dd9d 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_services_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_services_types.h
@@ -29,7 +29,7 @@
#include "os_types.h"
#include "dc_types.h"
-struct pp_smu_funcs_rv;
+struct pp_smu_funcs;
struct dm_pp_clock_range {
int min_khz;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index d303b789adfe..80ffd7d958b2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -26,40 +26,14 @@
#include "display_mode_lib.h"
#include "dc_features.h"
-extern const struct _vcs_dpi_ip_params_st dcn1_0_ip;
-extern const struct _vcs_dpi_soc_bounding_box_st dcn1_0_soc;
-
-static void set_soc_bounding_box(struct _vcs_dpi_soc_bounding_box_st *soc, enum dml_project project)
-{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *soc = dcn1_0_soc;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-static void set_ip_params(struct _vcs_dpi_ip_params_st *ip, enum dml_project project)
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project)
{
- switch (project) {
- case DML_PROJECT_RAVEN1:
- *ip = dcn1_0_ip;
- break;
- default:
- ASSERT(0);
- break;
- }
-}
-
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project)
-{
- if (lib->project != project) {
- set_soc_bounding_box(&lib->soc, project);
- set_ip_params(&lib->ip, project);
- lib->project = project;
- }
+ lib->soc = *soc_bb;
+ lib->ip = *ip_params;
+ lib->project = project;
}
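
With the refactor above, dml_init_instance() no longer selects per-project tables internally; each resource file hands in its own SoC bounding box and IP parameters, as the dcn10_resource.c hunk earlier in this patch shows. A usage sketch, assuming the DML headers are in scope:

	void example_init_dml(struct display_mode_lib *dml)
	{
		/* dcn1_0_soc / dcn1_0_ip are the per-ASIC tables exported by DML */
		dml_init_instance(dml, &dcn1_0_soc, &dcn1_0_ip, DML_PROJECT_RAVEN1);
	}
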
const char *dml_get_status_message(enum dm_validation_status status)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index a730e0209c05..1b546dba34bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -41,7 +41,10 @@ struct display_mode_lib {
struct dal_logger *logger;
};
-void dml_init_instance(struct display_mode_lib *lib, enum dml_project project);
+void dml_init_instance(struct display_mode_lib *lib,
+ const struct _vcs_dpi_soc_bounding_box_st *soc_bb,
+ const struct _vcs_dpi_ip_params_st *ip_params,
+ enum dml_project project);
const char *dml_get_status_message(enum dm_validation_status status);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 391183e3428f..c5b791d158a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,6 +25,8 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
+#define MAX_CLOCK_LIMIT_STATES 8
+
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -103,7 +105,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[8];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
};
struct _vcs_dpi_ip_params_st {
@@ -416,6 +418,7 @@ struct _vcs_dpi_display_dlg_regs_st {
unsigned int refcyc_per_vm_group_flip;
unsigned int refcyc_per_vm_req_vblank;
unsigned int refcyc_per_vm_req_flip;
+ unsigned int refcyc_per_vm_dmdata;
};
struct _vcs_dpi_display_ttu_regs_st {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 48400d642610..e2d82aacd3bc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -321,6 +321,9 @@ void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st d
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dml_print(
+ "DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
+ dlg_regs.refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
diff --git a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
index fe6301cb8681..1b01a9a58d14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/clock_source.h
@@ -167,7 +167,7 @@ struct clock_source_funcs {
struct pixel_clk_params *,
struct pll_settings *);
bool (*get_pixel_clk_frequency_100hz)(
- struct clock_source *clock_source,
+ const struct clock_source *clock_source,
unsigned int inst,
unsigned int *pixel_clk_khz);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
index 2e61a22ef4b2..8dca3b7700e5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h
@@ -43,7 +43,7 @@ enum dc_status {
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
DC_FAIL_DP_LINK_TRAINING = 15,
-
+ DC_FAIL_UNSUPPORTED_1 = 18,
DC_ERROR_UNEXPECTED = -1
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 986ed1728644..6f5ab05d6467 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -95,10 +95,10 @@ struct resource_funcs {
void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
-
bool (*validate_bandwidth)(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
enum dc_status (*validate_global)(
struct dc *dc,
@@ -144,8 +144,7 @@ struct resource_pool {
struct stream_encoder *stream_enc[MAX_PIPES * 2];
struct hubbub *hubbub;
struct mpc *mpc;
- struct pp_smu_funcs_rv *pp_smu;
- struct pp_smu_display_requirement_rv pp_smu_req;
+ struct pp_smu_funcs *pp_smu;
struct dce_aux *engines[MAX_PIPES];
struct dce_i2c_hw *hw_i2cs[MAX_PIPES];
struct dce_i2c_sw *sw_i2cs[MAX_PIPES];
@@ -154,7 +153,12 @@ struct resource_pool {
unsigned int pipe_count;
unsigned int underlay_pipe_index;
unsigned int stream_enc_count;
- unsigned int ref_clock_inKhz;
+
+ struct {
+ unsigned int xtalin_clock_inKhz;
+ unsigned int dccg_ref_clock_inKhz;
+ unsigned int dchub_ref_clock_inKhz;
+ } ref_clocks;
unsigned int timing_generator_count;
/*
@@ -262,18 +266,22 @@ struct dcn_bw_output {
struct dcn_watermark_set watermarks;
};
-union bw_context {
+union bw_output {
struct dcn_bw_output dcn;
struct dce_bw_output dce;
};
+struct bw_context {
+ union bw_output bw;
+ struct display_mode_lib dml;
+};
/**
* struct dc_state - The full description of a state requested by a user
*
* @streams: Stream properties
* @stream_status: The planes on a given stream
* @res_ctx: Persistent state of resources
- * @bw: The output from bandwidth and watermark calculations
+ * @bw_ctx: The output from bandwidth and watermark calculations and the DML
* @pp_display_cfg: PowerPlay clocks and settings
* @dcn_bw_vars: non-stack memory to support bandwidth calculations
*
@@ -285,7 +293,7 @@ struct dc_state {
struct resource_context res_ctx;
- union bw_context bw;
+ struct bw_context bw_ctx;
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
@@ -293,7 +301,11 @@ struct dc_state {
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
- struct clk_mgr *dccg;
+ struct clk_mgr *clk_mgr;
+
+ struct {
+ bool full_update_needed : 1;
+ } commit_hints;
struct kref refcount;
};
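Two related interface changes meet here: bandwidth state is now carried in struct bw_context together with a per-state DML instance (bw_ctx.dml), and validate_bandwidth() gains a fast_validate flag so callers can request a cheaper feasibility pass. A hedged sketch of using the flag (the res_pool->funcs dereference is assumed from the existing resource_pool layout and is not shown in this hunk):

	/* sketch: quick check up front, full validation only when committing */
	if (!dc->res_pool->funcs->validate_bandwidth(dc, context, true))
		return DC_FAIL_BANDWIDTH_VALIDATE;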
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 16fd4dc6c4dd..b1fab251c09b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -95,8 +95,9 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-int dc_link_aux_transfer(struct ddc_service *ddc,
- struct aux_payload *payload);
+int dc_link_aux_transfer_raw(struct ddc_service *ddc,
+ struct aux_payload *payload,
+ enum aux_channel_operation_result *operation_result);
bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc,
struct aux_payload *payload);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
index ece954a40a8e..263c09630c06 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h
@@ -621,7 +621,8 @@ extern const struct dcn_ip_params dcn10_ip_defaults;
bool dcn_validate_bandwidth(
struct dc *dc,
- struct dc_state *context);
+ struct dc_state *context,
+ bool fast_validate);
unsigned int dcn_find_dcfclk_suits_all(
const struct dc *dc,
@@ -631,5 +632,7 @@ void dcn_bw_update_from_pplib(struct dc *dc);
void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc);
void dcn_bw_sync_calcs_and_dml(struct dc *dc);
+enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode);
+
#endif /* __DCN_CALCS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 23a4b18e5fee..31bd6d5183ab 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -42,6 +42,8 @@ struct clk_mgr_funcs {
bool safe_to_lower);
int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr);
+
+ void (*init_clocks)(struct clk_mgr *clk_mgr);
};
#endif /* __DAL_CLK_MGR_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 95a56d012626..05ee5295d2c1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -39,6 +39,10 @@ struct dccg_funcs {
void (*update_dpp_dto)(struct dccg *dccg,
int dpp_inst,
int req_dppclk);
+ void (*get_dccg_ref_freq)(struct dccg *dccg,
+ unsigned int xtalin_freq_inKhz,
+ unsigned int *dccg_ref_freq_inKhz);
+ void (*dccg_init)(struct dccg *dccg);
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index 9d2d8e51306c..93667e8b23b3 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -73,6 +73,16 @@ struct hubbub_funcs {
void (*wm_read_state)(struct hubbub *hubbub,
struct dcn_hubbub_wm *wm);
+
+ void (*get_dchub_ref_freq)(struct hubbub *hubbub,
+ unsigned int dccg_ref_freq_inKhz,
+ unsigned int *dchub_ref_freq_inKhz);
+
+ void (*program_watermarks)(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower);
};
struct hubbub {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index cbaa43853611..c68f0ce346c7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -70,6 +70,8 @@ struct dmcu_funcs {
void (*get_psr_wait_loop)(struct dmcu *dmcu,
unsigned int *psr_wait_loop_number);
bool (*is_dmcu_initialized)(struct dmcu *dmcu);
+ bool (*lock_phy)(struct dmcu *dmcu);
+ bool (*unlock_phy)(struct dmcu *dmcu);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 1cd07e94ee63..455df4999797 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -130,6 +130,7 @@ struct hubp_funcs {
void (*hubp_clear_underflow)(struct hubp *hubp);
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
+ void (*hubp_init)(struct hubp *hubp);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index da85537a4488..4c8e2c6fb6db 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -146,6 +146,12 @@ struct out_csc_color_matrix {
uint16_t regval[12];
};
+enum gamut_remap_select {
+ GAMUT_REMAP_BYPASS = 0,
+ GAMUT_REMAP_COEFF,
+ GAMUT_REMAP_COMA_COEFF,
+ GAMUT_REMAP_COMB_COEFF
+};
enum opp_regamma {
OPP_REGAMMA_BYPASS = 0,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 4051493557bc..49854eb73d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -63,11 +63,13 @@ struct encoder_info_frame {
struct dc_info_packet vsc;
/* HDR Static MetaData */
struct dc_info_packet hdrsmd;
+ /* custom sdp message */
+ struct dc_info_packet dpsdp;
};
struct encoder_unblank_param {
struct dc_link_settings link_settings;
- unsigned int pixel_clk_khz;
+ struct dc_crtc_timing timing;
};
struct encoder_set_dp_phy_pattern_param {
@@ -88,7 +90,8 @@ struct stream_encoder_funcs {
void (*dp_set_stream_attribute)(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space);
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting);
void (*hdmi_set_stream_attribute)(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index c25f7df7b5e3..067d53caf28a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -187,8 +187,10 @@ struct timing_generator_funcs {
bool (*did_triggered_reset_occur)(struct timing_generator *tg);
void (*setup_global_swap_lock)(struct timing_generator *tg,
const struct dcp_gsl_params *gsl_params);
+ void (*setup_global_lock)(struct timing_generator *tg);
void (*unlock)(struct timing_generator *tg);
void (*lock)(struct timing_generator *tg);
+ void (*lock_global)(struct timing_generator *tg);
void (*enable_reset_trigger)(struct timing_generator *tg,
int source_tg_inst);
void (*enable_crtc_reset)(struct timing_generator *tg,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 7676f25216b1..33905468e2b9 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -176,6 +176,10 @@ struct hw_sequencer_funcs {
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
+ void (*pipe_control_lock_global)(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
void (*blank_pixel_data)(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
index cf5a84b9e27c..8503d9cc4763 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h
@@ -52,7 +52,7 @@
/* macro to set register fields. */
#define REG_SET_N(reg_name, n, initial_val, ...) \
- generic_reg_update_ex(CTX, \
+ generic_reg_set_ex(CTX, \
REG(reg_name), \
initial_val, \
n, __VA_ARGS__)
@@ -225,7 +225,6 @@
#define REG_UPDATE_N(reg_name, n, ...) \
generic_reg_update_ex(CTX, \
REG(reg_name), \
- REG_READ(reg_name), \
n, __VA_ARGS__)
#define REG_UPDATE(reg_name, field, val) \
@@ -380,16 +379,11 @@
/* macro to update a register field to specified values in given sequences.
* useful when toggling bits
*/
-#define REG_UPDATE_SEQ(reg, field, value1, value2) \
-{ uint32_t val = REG_UPDATE(reg, field, value1); \
- REG_SET(reg, val, field, value2); }
-
-/* macro to update fields in register 1 field at a time in given order */
-#define REG_UPDATE_1BY1_2(reg, f1, v1, f2, v2) \
+#define REG_UPDATE_SEQ_2(reg, f1, v1, f2, v2) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
REG_SET(reg, val, f2, v2); }
-#define REG_UPDATE_1BY1_3(reg, f1, v1, f2, v2, f3, v3) \
+#define REG_UPDATE_SEQ_3(reg, f1, v1, f2, v2, f3, v3) \
{ uint32_t val = REG_UPDATE(reg, f1, v1); \
val = REG_SET(reg, val, f2, v2); \
REG_SET(reg, val, f3, v3); }
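REG_SET_N now routes through generic_reg_set_ex() and REG_UPDATE_N drops the explicit REG_READ, since the read is folded into generic_reg_update_ex() itself; the single-field REG_UPDATE_SEQ is dropped in favour of the renamed REG_UPDATE_SEQ_2/_3, which write the listed fields one register access at a time. A small sketch of toggling a bit with the renamed macro (the register and field names are illustrative only, not taken from this patch):

	/* sketch: assert then deassert one field, one write per step */
	REG_UPDATE_SEQ_2(DPP_CONTROL,
			DPP_RESET, 1,
			DPP_RESET, 0);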
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 0086a2f1d21a..3ce0a4fc5822 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -70,11 +70,9 @@ bool resource_construct(
struct resource_pool *pool,
const struct resource_create_funcs *create_funcs);
-struct resource_pool *dc_create_resource_pool(
- struct dc *dc,
- int num_virtual_links,
- enum dce_version dc_version,
- struct hw_asic_id asic_id);
+struct resource_pool *dc_create_resource_pool(struct dc *dc,
+ const struct dc_init_data *init_data,
+ enum dce_version dc_version);
void dc_destroy_resource_pool(struct dc *dc);
@@ -131,7 +129,8 @@ bool resource_attach_surfaces_to_context(
struct pipe_ctx *find_idle_secondary_pipe(
struct resource_context *res_ctx,
- const struct resource_pool *pool);
+ const struct resource_pool *pool,
+ const struct pipe_ctx *primary_pipe);
bool resource_is_stream_unchanged(
struct dc_state *old_context, struct dc_stream_state *stream);
@@ -172,4 +171,7 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
+struct pipe_ctx *dc_res_get_odm_bottom_pipe(struct pipe_ctx *pipe_ctx);
+bool dc_res_is_odm_head_pipe(struct pipe_ctx *pipe_ctx);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
index afe0876fe6f8..86987f5e8bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce110/irq_service_dce110.c
@@ -81,6 +81,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_HPD1 + reg_num] = {\
.enable_reg = mmHPD ## reg_num ## _DC_HPD_INT_CONTROL,\
@@ -137,7 +142,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
index 1ea7256ec89b..750ba0ab4106 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce120/irq_service_dce120.c
@@ -84,6 +84,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -140,7 +145,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
IRQ_REG_ENTRY(CRTC, reg_num,\
CRTC_INTERRUPT_CONTROL, CRTC_V_UPDATE_INT_MSK,\
CRTC_V_UPDATE_INT_STATUS, CRTC_V_UPDATE_INT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
index 8a2066c313fe..de218fe84a43 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dce80/irq_service_dce80.c
@@ -84,6 +84,10 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
#define hpd_int_entry(reg_num)\
[DC_IRQ_SOURCE_INVALID + reg_num] = {\
@@ -142,7 +146,7 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
.ack_value =\
CRTC_V_UPDATE_INT_STATUS__CRTC_V_UPDATE_INT_CLEAR_MASK,\
- .funcs = &vblank_irq_info_funcs\
+ .funcs = &vupdate_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
index e04ae49243f6..10ac6deff5ff 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn10/irq_service_dcn10.c
@@ -56,6 +56,18 @@ enum dc_irq_source to_dal_irq_source_dcn10(
return DC_IRQ_SOURCE_VBLANK5;
case DCN_1_0__SRCID__DC_D6_OTG_VSTARTUP:
return DC_IRQ_SOURCE_VBLANK6;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__OTG2_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE3;
+ case DCN_1_0__SRCID__OTG3_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE4;
+ case DCN_1_0__SRCID__OTG4_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE5;
+ case DCN_1_0__SRCID__OTG5_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE6;
case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
return DC_IRQ_SOURCE_PFLIP1;
case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
@@ -153,6 +165,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.ack = NULL
};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
#define BASE_INNER(seg) \
DCE_BASE__INST0_SEG ## seg
@@ -203,12 +220,15 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
.funcs = &pflip_irq_info_funcs\
}
-#define vupdate_int_entry(reg_num)\
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
+ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
[DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
IRQ_REG_ENTRY(OTG, reg_num,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
- OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
- .funcs = &vblank_irq_info_funcs\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
}
#define vblank_int_entry(reg_num)\
@@ -315,12 +335,12 @@ irq_source_info_dcn10[DAL_IRQ_SOURCES_NUMBER] = {
dc_underflow_int_entry(6),
[DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
[DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
- vupdate_int_entry(0),
- vupdate_int_entry(1),
- vupdate_int_entry(2),
- vupdate_int_entry(3),
- vupdate_int_entry(4),
- vupdate_int_entry(5),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ vupdate_no_lock_int_entry(2),
+ vupdate_no_lock_int_entry(3),
+ vupdate_no_lock_int_entry(4),
+ vupdate_no_lock_int_entry(5),
vblank_int_entry(0),
vblank_int_entry(1),
vblank_int_entry(2),
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 3dc1733eea20..fdcf9e66d852 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -29,7 +29,8 @@
static void virtual_stream_encoder_dp_set_stream_attribute(
struct stream_encoder *enc,
struct dc_crtc_timing *crtc_timing,
- enum dc_color_space output_color_space) {}
+ enum dc_color_space output_color_space,
+ uint32_t enable_sdp_splitting) {}
static void virtual_stream_encoder_hdmi_set_stream_attribute(
struct stream_encoder *enc,
diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
index 52a73332befb..89ef9f6860e5 100644
--- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
+++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
@@ -503,6 +503,8 @@ static inline int dc_fixpt_ceil(struct fixed31_32 arg)
* fractional
*/
+unsigned int dc_fixpt_u4d19(struct fixed31_32 arg);
+
unsigned int dc_fixpt_u3d19(struct fixed31_32 arg);
unsigned int dc_fixpt_u2d19(struct fixed31_32 arg);
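Following the existing uXdY helpers above, dc_fixpt_u4d19() presumably converts a fixed31_32 value to an unsigned fixed-point number with 4 integer and 19 fractional bits; a quick worked example under that naming assumption:

	/* sketch: 1.5 in u4d19 is 1.5 * 2^19 = 0x000C0000 */
	unsigned int v = dc_fixpt_u4d19(dc_fixpt_from_fraction(3, 2));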
diff --git a/drivers/gpu/drm/amd/display/include/signal_types.h b/drivers/gpu/drm/amd/display/include/signal_types.h
index f56d2891475f..beed70179bb5 100644
--- a/drivers/gpu/drm/amd/display/include/signal_types.h
+++ b/drivers/gpu/drm/amd/display/include/signal_types.h
@@ -45,6 +45,11 @@ enum signal_type {
};
/* help functions for signal types manipulation */
+static inline bool dc_is_hdmi_tmds_signal(enum signal_type signal)
+{
+ return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
+}
+
static inline bool dc_is_hdmi_signal(enum signal_type signal)
{
return (signal == SIGNAL_TYPE_HDMI_TYPE_A);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 0fbc8fbc3541..a1055413bade 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -1854,6 +1854,8 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
coordinates_x, axis_x, curve,
MAX_HW_POINTS, tf_pts,
mapUserRamp && ramp && ramp->type == GAMMA_RGB_256);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index bfd27f10879e..19b1eaebe484 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -37,6 +37,8 @@
#define RENDER_TIMES_MAX_COUNT 10
/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */
#define BTR_EXIT_MARGIN 2000
+/* Threshold to change BTR multiplier (to avoid frequent changes) */
+#define BTR_DRIFT_MARGIN 2000
/*Threshold to exit fixed refresh rate*/
#define FIXED_REFRESH_EXIT_MARGIN_IN_HZ 4
/* Number of consecutive frames to check before entering/exiting fixed refresh*/
@@ -48,6 +50,93 @@ struct core_freesync {
struct dc *dc;
};
+void setFieldWithMask(unsigned char *dest, unsigned int mask, unsigned int value)
+{
+ unsigned int shift = 0;
+
+ if (!mask || !dest)
+ return;
+
+ while (!((mask >> shift) & 1))
+ shift++;
+
+ //reset
+ *dest = *dest & ~mask;
+ //set
+ //don't let value span past mask
+ value = value & (mask >> shift);
+ //insert value
+ *dest = *dest | (value << shift);
+}
+
+// VTEM Byte Offset
+#define VRR_VTEM_PB0 0
+#define VRR_VTEM_PB1 1
+#define VRR_VTEM_PB2 2
+#define VRR_VTEM_PB3 3
+#define VRR_VTEM_PB4 4
+#define VRR_VTEM_PB5 5
+#define VRR_VTEM_PB6 6
+
+#define VRR_VTEM_MD0 7
+#define VRR_VTEM_MD1 8
+#define VRR_VTEM_MD2 9
+#define VRR_VTEM_MD3 10
+
+
+// VTEM Byte Masks
+//PB0
+#define MASK__VRR_VTEM_PB0__RESERVED0 0x01
+#define MASK__VRR_VTEM_PB0__SYNC 0x02
+#define MASK__VRR_VTEM_PB0__VFR 0x04
+#define MASK__VRR_VTEM_PB0__AFR 0x08
+#define MASK__VRR_VTEM_PB0__DS_TYPE 0x30
+ //0: Periodic pseudo-static EM Data Set
+ //1: Periodic dynamic EM Data Set
+ //2: Unique EM Data Set
+ //3: Reserved
+#define MASK__VRR_VTEM_PB0__END 0x40
+#define MASK__VRR_VTEM_PB0__NEW 0x80
+
+//PB1
+#define MASK__VRR_VTEM_PB1__RESERVED1 0xFF
+
+//PB2
+#define MASK__VRR_VTEM_PB2__ORGANIZATION_ID 0xFF
+ //0: This is a Vendor Specific EM Data Set
+ //1: This EM Data Set is defined by This Specification (HDMI 2.1 r102.clean)
+ //2: This EM Data Set is defined by CTA-861-G
+ //3: This EM Data Set is defined by VESA
+//PB3
+#define MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB 0xFF
+//PB4
+#define MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB 0xFF
+//PB5
+#define MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB 0xFF
+//PB6
+#define MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB 0xFF
+
+
+
+//PB7-27 (20 bytes):
+//PB7 = MD0
+#define MASK__VRR_VTEM_MD0__VRR_EN 0x01
+#define MASK__VRR_VTEM_MD0__M_CONST 0x02
+#define MASK__VRR_VTEM_MD0__RESERVED2 0x0C
+#define MASK__VRR_VTEM_MD0__FVA_FACTOR_M1 0xF0
+
+//MD1
+#define MASK__VRR_VTEM_MD1__BASE_VFRONT 0xFF
+
+//MD2
+#define MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98 0x03
+#define MASK__VRR_VTEM_MD2__RB 0x04
+#define MASK__VRR_VTEM_MD2__RESERVED3 0xF8
+
+//MD3
+#define MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07 0xFF
+
+
#define MOD_FREESYNC_TO_CORE(mod_freesync)\
container_of(mod_freesync, struct core_freesync, public)
@@ -248,6 +337,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
unsigned int frames_to_insert = 0;
unsigned int min_frame_duration_in_ns = 0;
unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us;
+ unsigned int delta_from_mid_point_delta_in_us;
min_frame_duration_in_ns = ((unsigned int) (div64_u64(
(1000000000ULL * 1000000),
@@ -318,10 +408,27 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
/* Choose number of frames to insert based on how close it
* can get to the mid point of the variable range.
*/
- if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2)
+ if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) {
frames_to_insert = mid_point_frames_ceil;
- else
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 -
+ delta_from_mid_point_in_us_1;
+ } else {
frames_to_insert = mid_point_frames_floor;
+ delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_1 -
+ delta_from_mid_point_in_us_2;
+ }
+
+ /* Prefer current frame multiplier when BTR is enabled unless it drifts
+ * too far from the midpoint
+ */
+ if (in_out_vrr->btr.frames_to_insert != 0 &&
+ delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) {
+ if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) <
+ in_out_vrr->max_duration_in_us) &&
+ ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) >
+ in_out_vrr->min_duration_in_us))
+ frames_to_insert = in_out_vrr->btr.frames_to_insert;
+ }
/* Either we've calculated the number of frames to insert,
* or we need to insert min duration frames
@@ -330,10 +437,8 @@ static void apply_below_the_range(struct core_freesync *core_freesync,
inserted_frame_duration_in_us = last_render_time_in_us /
frames_to_insert;
- if (inserted_frame_duration_in_us <
- (1000000 / in_out_vrr->max_refresh_in_uhz))
- inserted_frame_duration_in_us =
- (1000000 / in_out_vrr->max_refresh_in_uhz);
+ if (inserted_frame_duration_in_us < in_out_vrr->min_duration_in_us)
+ inserted_frame_duration_in_us = in_out_vrr->min_duration_in_us;
/* Cache the calculated variables */
in_out_vrr->btr.inserted_duration_in_us =
@@ -469,16 +574,14 @@ static void build_vrr_infopacket_header_vtem(enum signal_type signal,
// HB0, HB1, HB2 indicates PacketType VTEMPacket
infopacket->hb0 = 0x7F;
infopacket->hb1 = 0xC0;
- infopacket->hb2 = 0x00;
- /* HB3 Bit Fields
- * Reserved :1 = 0
- * Sync :1 = 0
- * VFR :1 = 1
- * Ds_Type :2 = 0
- * End :1 = 0
- * New :1 = 0
- */
- infopacket->hb3 = 0x20;
+ infopacket->hb2 = 0x00; //sequence_index
+
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB0], MASK__VRR_VTEM_PB0__VFR, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB2], MASK__VRR_VTEM_PB2__ORGANIZATION_ID, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB3], MASK__VRR_VTEM_PB3__DATA_SET_TAG_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB4], MASK__VRR_VTEM_PB4__DATA_SET_TAG_LSB, 1);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB5], MASK__VRR_VTEM_PB5__DATA_SET_LENGTH_MSB, 0);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_PB6], MASK__VRR_VTEM_PB6__DATA_SET_LENGTH_LSB, 4);
}
static void build_vrr_infopacket_header_v1(enum signal_type signal,
@@ -583,45 +686,36 @@ static void build_vrr_vtem_infopacket_data(const struct dc_stream_state *stream,
const struct mod_vrr_params *vrr,
struct dc_info_packet *infopacket)
{
- /* dc_info_packet to VtemPacket Translation of Bit-fields,
- * SB[6]
- * unsigned char VRR_EN :1
- * unsigned char M_CONST :1
- * unsigned char Reserved2 :2
- * unsigned char FVA_Factor_M1 :4
- * SB[7]
- * unsigned char Base_Vfront :8
- * SB[8]
- * unsigned char Base_Refresh_Rate_98 :2
- * unsigned char RB :1
- * unsigned char Reserved3 :5
- * SB[9]
- * unsigned char Base_RefreshRate_07 :8
- */
unsigned int fieldRateInHz;
if (vrr->state == VRR_STATE_ACTIVE_VARIABLE ||
- vrr->state == VRR_STATE_ACTIVE_FIXED){
- infopacket->sb[6] |= 0x80; //VRR_EN Bit = 1
+ vrr->state == VRR_STATE_ACTIVE_FIXED) {
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 1);
} else {
- infopacket->sb[6] &= 0x7F; //VRR_EN Bit = 0
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD0], MASK__VRR_VTEM_MD0__VRR_EN, 0);
}
if (!stream->timing.vic) {
- infopacket->sb[7] = stream->timing.v_front_porch;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD1], MASK__VRR_VTEM_MD1__BASE_VFRONT,
+ stream->timing.v_front_porch);
+
/* TODO: In dal2, we check mode flags for a reduced blanking timing.
* Need a way to relay that information to this function.
* if("ReducedBlanking")
* {
- * infopacket->sb[8] |= 0x20; //Set 3rd bit to 1
+ * setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__RB, 1);
* }
*/
+
+ //TODO: DAL2 does FixPoint and rounding. Here we might need to account for that
fieldRateInHz = (stream->timing.pix_clk_100hz * 100)/
- (stream->timing.h_total * stream->timing.v_total);
+ (stream->timing.h_total * stream->timing.v_total);
- infopacket->sb[8] |= ((fieldRateInHz & 0x300) >> 2);
- infopacket->sb[9] |= fieldRateInHz & 0xFF;
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD2], MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98,
+ fieldRateInHz >> 8);
+ setFieldWithMask(&infopacket->sb[VRR_VTEM_MD3], MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07,
+ fieldRateInHz);
}
infopacket->valid = true;
@@ -745,6 +839,8 @@ static void build_vrr_infopacket_vtem(const struct dc_stream_state *stream,
{
//VTEM info packet for HdmiVrr
+ memset(infopacket, 0, sizeof(struct dc_info_packet));
+
//VTEM Packet is structured differently
build_vrr_infopacket_header_vtem(stream->signal, infopacket);
build_vrr_vtem_infopacket_data(stream, vrr, infopacket);
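The open-coded sb[] bit manipulation is replaced by setFieldWithMask(), which shifts the value up to the mask's lowest set bit and clips it to the mask width before OR-ing it in. A worked sketch of the base refresh rate packing, using an illustrative 1080p timing (148.5 MHz pixel clock, 2200 x 1125 totals, i.e. 60 Hz; the timing values are not from this patch):

	unsigned char sb[32] = {0};
	unsigned int field_rate_hz = (1485000 * 100) / (2200 * 1125);	/* 60 */

	/* bits 9:8 land in MD2 (mask 0x03), bits 7:0 in MD3 */
	setFieldWithMask(&sb[VRR_VTEM_MD2],
			MASK__VRR_VTEM_MD2__BASE_REFRESH_RATE_98, field_rate_hz >> 8);	/* 0 */
	setFieldWithMask(&sb[VRR_VTEM_MD3],
			MASK__VRR_VTEM_MD3__BASE_REFRESH_RATE_07, field_rate_hz);	/* 0x3C */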
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index 038b88221c5f..b3810b864676 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -41,9 +41,12 @@ static const unsigned char min_reduction_table[13] = {
static const unsigned char max_reduction_table[13] = {
0xf5, 0xe5, 0xd9, 0xcd, 0xb1, 0xa5, 0xa5, 0x80, 0x65, 0x4d, 0x4d, 0x4d, 0x32};
-/* ABM 2.2 Min Reduction effectively disabled (100% for all configs)*/
+/* Possible ABM 2.2 Min Reduction configs from least aggressive to most aggressive
+ * 0 1 2 3 4 5 6 7 8 9 10 11 12
+ * 100 100 100 100 100 100 100 100 100 92.2 83.1 75.3 75.3 %
+ */
static const unsigned char min_reduction_table_v_2_2[13] = {
-0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xeb, 0xd4, 0xc0, 0xc0};
/* Possible ABM 2.2 Max Reduction configs from least aggressive to most aggressive
* 0 1 2 3 4 5 6 7 8 9 10 11 12
@@ -408,9 +411,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->flags = 0x0;
ram_table->deviation_gain[0] = 0xb3;
- ram_table->deviation_gain[1] = 0xb3;
- ram_table->deviation_gain[2] = 0xb3;
- ram_table->deviation_gain[3] = 0xb3;
+ ram_table->deviation_gain[1] = 0xa8;
+ ram_table->deviation_gain[2] = 0x98;
+ ram_table->deviation_gain[3] = 0x68;
ram_table->min_reduction[0][0] = min_reduction_table_v_2_2[abm_config[set][0]];
ram_table->min_reduction[1][0] = min_reduction_table_v_2_2[abm_config[set][0]];
@@ -505,7 +508,7 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame
ram_table->contrastFactor[0] = 0x99;
ram_table->contrastFactor[1] = 0x99;
- ram_table->contrastFactor[2] = 0x99;
+ ram_table->contrastFactor[2] = 0x90;
ram_table->contrastFactor[3] = 0x80;
ram_table->iir_curve[0] = 0x65;
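As a sanity check on the new min_reduction_table_v_2_2 comment: each entry is a fraction of 0xFF, so 0xEB = 235 gives 235 / 255 ≈ 92.2 %, 0xD4 = 212 gives ≈ 83.1 %, and 0xC0 = 192 gives ≈ 75.3 %, matching the percentages listed above the array.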
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 470d7b89071a..574bf6e70763 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -137,6 +137,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = 0x1,
};
+enum amd_dpm_forced_level;
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
@@ -186,6 +187,8 @@ struct amd_ip_funcs {
enum amd_powergating_state state);
/** @get_clockgating_state: get current clockgating status */
void (*get_clockgating_state)(void *handle, u32 *flags);
+ /** @enable_umd_pstate: enable UMD powerstate */
+ int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level);
};
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
index 721c61171045..5a44e614ab7e 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_1_0_offset.h
@@ -2347,6 +2347,8 @@
#define mmHUBP0_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP0_HUBPREQ_DEBUG_DB 0x0569
#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x056a
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x056e
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x056f
@@ -2631,6 +2633,8 @@
#define mmHUBP1_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP1_HUBPREQ_DEBUG_DB 0x062d
#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x062e
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x0632
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0633
@@ -2915,6 +2919,8 @@
#define mmHUBP2_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP2_HUBPREQ_DEBUG_DB 0x06f1
#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x06f2
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06f6
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06f7
@@ -3199,6 +3205,8 @@
#define mmHUBP3_DCHUBP_VMPG_CONFIG_BASE_IDX 2
#define mmHUBP3_HUBPREQ_DEBUG_DB 0x07b5
#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x07b6
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07ba
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07bb
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
index 442ca7c471a5..6109f5ad25ad 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_offset.h
@@ -141,6 +141,8 @@
#define mmUVD_GPCOM_VCPU_DATA0_BASE_IDX 1
#define mmUVD_GPCOM_VCPU_DATA1 0x03c5
#define mmUVD_GPCOM_VCPU_DATA1_BASE_IDX 1
+#define mmUVD_ENGINE_CNTL 0x03c6
+#define mmUVD_ENGINE_CNTL_BASE_IDX 1
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG 0x03d2
#define mmUVD_UDEC_DBW_UV_ADDR_CONFIG_BASE_IDX 1
#define mmUVD_UDEC_ADDR_CONFIG 0x03d3
diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
index 63457f9df4c5..f84bed6eecb9 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_1_0_sh_mask.h
@@ -312,6 +312,11 @@
//UVD_GPCOM_VCPU_DATA1
#define UVD_GPCOM_VCPU_DATA1__DATA1__SHIFT 0x0
#define UVD_GPCOM_VCPU_DATA1__DATA1_MASK 0xFFFFFFFFL
+//UVD_ENGINE_CNTL
+#define UVD_ENGINE_CNTL__ENGINE_START_MASK 0x1
+#define UVD_ENGINE_CNTL__ENGINE_START__SHIFT 0x0
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE_MASK 0x2
+#define UVD_ENGINE_CNTL__ENGINE_START_MODE__SHIFT 0x1
//UVD_UDEC_DBW_UV_ADDR_CONFIG
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
#define UVD_UDEC_DBW_UV_ADDR_CONFIG__PIPE_INTERLEAVE_SIZE__SHIFT 0x3
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 8eb0bb241210..d3075adb3297 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -494,6 +494,9 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001,
ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002,
ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040,
+ ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080,
+ ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
+ ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
};
enum atom_cooling_solution_id{
@@ -528,6 +531,35 @@ struct atom_firmware_info_v3_2 {
uint32_t reserved2[3];
};
+struct atom_firmware_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t firmware_revision;
+ uint32_t bootup_sclk_in10khz;
+ uint32_t bootup_mclk_in10khz;
+ uint32_t firmware_capability; // enum atombios_firmware_capability
+ uint32_t main_call_parser_entry; /* direct address of main parser call in VBIOS binary. */
+ uint32_t bios_scratch_reg_startaddr; // 1st bios scratch register dword address
+ uint16_t bootup_vddc_mv;
+ uint16_t bootup_vddci_mv;
+ uint16_t bootup_mvddc_mv;
+ uint16_t bootup_vddgfx_mv;
+ uint8_t mem_module_id;
+ uint8_t coolingsolution_id; /*0: Air cooling; 1: Liquid cooling ... */
+ uint8_t reserved1[2];
+ uint32_t mc_baseaddr_high;
+ uint32_t mc_baseaddr_low;
+ uint8_t board_i2c_feature_id; // enum of atom_board_i2c_feature_id_def
+ uint8_t board_i2c_feature_gpio_id; // i2c id find in gpio_lut data table gpio_id
+ uint8_t board_i2c_feature_slave_addr;
+ uint8_t reserved3;
+ uint16_t bootup_mvddq_mv;
+ uint16_t bootup_mvpp_mv;
+ uint32_t zfbstartaddrin16mb;
+ uint32_t pplib_pptable_id; // if pplib_pptable_id != 0, pplib gets the powerplay table from the driver instead of from VBIOS
+ uint32_t reserved2[2];
+};
+
/*
***************************************************************************
Data Table lcd_info structure
@@ -686,6 +718,7 @@ enum atom_encoder_caps_def
ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not.
ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board.
+ ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type.
};
struct atom_encoder_caps_record
@@ -1226,16 +1259,17 @@ struct atom_gfx_info_v2_3 {
uint32_t rm21_sram_vmin_value;
};
-struct atom_gfx_info_v2_4 {
+struct atom_gfx_info_v2_4
+{
struct atom_common_table_header table_header;
uint8_t gfxip_min_ver;
uint8_t gfxip_max_ver;
- uint8_t gc_num_se;
- uint8_t max_tile_pipes;
- uint8_t gc_num_cu_per_sh;
- uint8_t gc_num_sh_per_se;
- uint8_t gc_num_rb_per_se;
- uint8_t gc_num_tccs;
+ uint8_t max_shader_engines;
+ uint8_t reserved;
+ uint8_t max_cu_per_sh;
+ uint8_t max_sh_per_se;
+ uint8_t max_backends_per_se;
+ uint8_t max_texture_channel_caches;
uint32_t regaddr_cp_dma_src_addr;
uint32_t regaddr_cp_dma_src_addr_hi;
uint32_t regaddr_cp_dma_dst_addr;
@@ -1780,6 +1814,56 @@ struct atom_umc_info_v3_1
uint32_t mem_refclk_10khz;
};
+// umc_info.umc_config
+enum atom_umc_config_def {
+ UMC_CONFIG__ENABLE_1KB_INTERLEAVE_MODE = 0x00000001,
+ UMC_CONFIG__DEFAULT_MEM_ECC_ENABLE = 0x00000002,
+ UMC_CONFIG__ENABLE_HBM_LANE_REPAIR = 0x00000004,
+ UMC_CONFIG__ENABLE_BANK_HARVESTING = 0x00000008,
+ UMC_CONFIG__ENABLE_PHY_REINIT = 0x00000010,
+ UMC_CONFIG__DISABLE_UCODE_CHKSTATUS = 0x00000020,
+};
+
+struct atom_umc_info_v3_2
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_version;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+};
+
+struct atom_umc_info_v3_3
+{
+ struct atom_common_table_header table_header;
+ uint32_t ucode_reserved;
+ uint32_t ucode_rom_startaddr;
+ uint32_t ucode_length;
+ uint16_t umc_reg_init_offset;
+ uint16_t customer_ucode_name_offset;
+ uint16_t mclk_ss_percentage;
+ uint16_t mclk_ss_rate_10hz;
+ uint8_t umcip_min_ver;
+ uint8_t umcip_max_ver;
+ uint8_t vram_type; //enum of atom_dgpu_vram_type
+ uint8_t umc_config;
+ uint32_t mem_refclk_10khz;
+ uint32_t pstate_uclk_10khz[4];
+ uint16_t umcgoldenoffset;
+ uint16_t densitygoldenoffset;
+ uint32_t reserved[4];
+};
/*
***************************************************************************
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5f3c10ebff08..b897aca9b4c9 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -85,18 +85,6 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
-enum kgd_engine_type {
- KGD_ENGINE_PFP = 1,
- KGD_ENGINE_ME,
- KGD_ENGINE_CE,
- KGD_ENGINE_MEC1,
- KGD_ENGINE_MEC2,
- KGD_ENGINE_RLC,
- KGD_ENGINE_SDMA1,
- KGD_ENGINE_SDMA2,
- KGD_ENGINE_MAX
-};
-
/**
* enum kfd_sched_policy
*
@@ -230,8 +218,6 @@ struct tile_config {
* @hqd_sdma_destroy: Destructs and preempts the SDMA queue assigned to that
* SDMA hqd slot.
*
- * @get_fw_version: Returns FW versions from the header
- *
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
@@ -311,8 +297,6 @@ struct kfd2kgd_calls {
struct kgd_dev *kgd,
uint8_t vmid);
- uint16_t (*get_fw_version)(struct kgd_dev *kgd,
- enum kgd_engine_type type);
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/include/linux/chash.h b/drivers/gpu/drm/amd/include/linux/chash.h
deleted file mode 100644
index 6dc159924ed1..000000000000
--- a/drivers/gpu/drm/amd/include/linux/chash.h
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef _LINUX_CHASH_H
-#define _LINUX_CHASH_H
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <asm/bitsperlong.h>
-
-#if BITS_PER_LONG == 32
-# define _CHASH_LONG_SHIFT 5
-#elif BITS_PER_LONG == 64
-# define _CHASH_LONG_SHIFT 6
-#else
-# error "Unexpected BITS_PER_LONG"
-#endif
-
-struct __chash_table {
- u8 bits;
- u8 key_size;
- unsigned int value_size;
- u32 size_mask;
- unsigned long *occup_bitmap, *valid_bitmap;
- union {
- u32 *keys32;
- u64 *keys64;
- };
- u8 *values;
-
-#ifdef CONFIG_CHASH_STATS
- u64 hits, hits_steps, hits_time_ns;
- u64 miss, miss_steps, miss_time_ns;
- u64 relocs, reloc_dist;
-#endif
-};
-
-#define __CHASH_BITMAP_SIZE(bits) \
- (((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
-#define __CHASH_ARRAY_SIZE(bits, size) \
- ((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
-
-#define __CHASH_DATA_SIZE(bits, key_size, value_size) \
- (__CHASH_BITMAP_SIZE(bits) * 2 + \
- __CHASH_ARRAY_SIZE(bits, key_size) + \
- __CHASH_ARRAY_SIZE(bits, value_size))
-
-#define STRUCT_CHASH_TABLE(bits, key_size, value_size) \
- struct { \
- struct __chash_table table; \
- unsigned long data \
- [__CHASH_DATA_SIZE(bits, key_size, value_size)];\
- }
-
-/**
- * struct chash_table - Dynamically allocated closed hash table
- *
- * Use this struct for dynamically allocated hash tables (using
- * chash_table_alloc and chash_table_free), where the size is
- * determined at runtime.
- */
-struct chash_table {
- struct __chash_table table;
- unsigned long *data;
-};
-
-/**
- * DECLARE_CHASH_TABLE - macro to declare a closed hash table
- * @table: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * This declares the hash table variable with a static size.
- *
- * The closed hash table stores key-value pairs with low memory and
- * lookup overhead. In operation it performs no dynamic memory
- * management. The data being stored does not require any
- * list_heads. The hash table performs best with small @val_sz and as
- * long as some space (about 50%) is left free in the table. But the
- * table can still work reasonably efficiently even when filled up to
- * about 90%. If bigger data items need to be stored and looked up,
- * store the pointer to it as value in the hash table.
- *
- * @val_sz may be 0. This can be useful when all the stored
- * information is contained in the key itself and the fact that it is
- * in the hash table (or not).
- */
-#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz) \
- STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
-
-#ifdef CONFIG_CHASH_STATS
-#define __CHASH_STATS_INIT(prefix), \
- prefix.hits = 0, \
- prefix.hits_steps = 0, \
- prefix.hits_time_ns = 0, \
- prefix.miss = 0, \
- prefix.miss_steps = 0, \
- prefix.miss_time_ns = 0, \
- prefix.relocs = 0, \
- prefix.reloc_dist = 0
-#else
-#define __CHASH_STATS_INIT(prefix)
-#endif
-
-#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz) \
- prefix.bits = (bts), \
- prefix.key_size = (key_sz), \
- prefix.value_size = (val_sz), \
- prefix.size_mask = ((1 << bts) - 1), \
- prefix.occup_bitmap = &data[0], \
- prefix.valid_bitmap = &data \
- [__CHASH_BITMAP_SIZE(bts)], \
- prefix.keys64 = (u64 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2], \
- prefix.values = (u8 *)&data \
- [__CHASH_BITMAP_SIZE(bts) * 2 + \
- __CHASH_ARRAY_SIZE(bts, key_sz)] \
- __CHASH_STATS_INIT(prefix)
-
-/**
- * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- *
- * Note: the macro can be used for global and local hash table variables.
- */
-#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = { \
- .table = { \
- __CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
- }, \
- .data = {0} \
- }
-
-/**
- * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
- * @tbl: name of the declared hash table
- * @bts: Table size will be 2^bits entries
- * @key_sz: Size of hash keys in bytes, 4 or 8
- * @val_sz: Size of data values in bytes, can be 0
- */
-#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz) \
- __CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
-
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask);
-void chash_table_free(struct chash_table *table);
-
-/**
- * chash_table_dump_stats - Dump statistics of a closed hash table
- * @tbl: Pointer to the table structure
- *
- * Dumps some performance statistics of the table gathered in operation
- * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
- * user must turn on messages for chash.c (file chash.c +p).
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
-
-void __chash_table_dump_stats(struct __chash_table *table);
-#else
-#define chash_table_dump_stats(tbl)
-#endif
-
-/**
- * chash_table_reset_stats - Reset statistics of a closed hash table
- * @tbl: Pointer to the table structure
- */
-#ifdef CONFIG_CHASH_STATS
-#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
-
-static inline void __chash_table_reset_stats(struct __chash_table *table)
-{
- (void)table __CHASH_STATS_INIT((*table));
-}
-#else
-#define chash_table_reset_stats(tbl)
-#endif
-
-/**
- * chash_table_copy_in - Copy a new value into the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to add or update
- * @value: Pointer to value to copy, may be NULL
- *
- * If @key already has an entry, its value is replaced. Otherwise a
- * new entry is added. If @value is NULL, the value is left unchanged
- * or uninitialized. Returns 1 if an entry already existed, 0 if a new
- * entry was added or %-ENOMEM if there was no free space in the
- * table.
- */
-#define chash_table_copy_in(tbl, key, value) \
- __chash_table_copy_in(&(*tbl).table, key, value)
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value);
-
-/**
- * chash_table_copy_out - Copy a value out of the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. Returns the slot index of the
- * entry or %-EINVAL if @key was not found.
- */
-#define chash_table_copy_out(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, false)
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove);
-
-/**
- * chash_table_remove - Remove an entry from the hash table
- * @tbl: Pointer to the table structure
- * @key: Key of the entry to find
- * @value: Pointer to value to copy, may be NULL
- *
- * If @value is not NULL and the table has a non-0 value_size, the
- * value at @key is copied to @value. The entry is removed from the
- * table. Returns the slot index of the removed entry or %-EINVAL if
- * @key was not found.
- */
-#define chash_table_remove(tbl, key, value) \
- __chash_table_copy_out(&(*tbl).table, key, value, true)
-
-/*
- * Low level iterator API used internally by the above functions.
- */
-struct chash_iter {
- struct __chash_table *table;
- unsigned long mask;
- int slot;
-};
-
-/**
- * CHASH_ITER_INIT - Initialize a hash table iterator
- * @tbl: Pointer to hash table to iterate over
- * @s: Initial slot number
- */
-#define CHASH_ITER_INIT(table, s) { \
- table, \
- 1UL << ((s) & (BITS_PER_LONG - 1)), \
- s \
- }
-/**
- * CHASH_ITER_SET - Set hash table iterator to new slot
- * @iter: Iterator
- * @s: Slot number
- */
-#define CHASH_ITER_SET(iter, s) \
- (iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
- (iter).slot = (s)
-/**
- * CHASH_ITER_INC - Increment hash table iterator
- * @table: Hash table to iterate over
- *
- * Wraps around at the end.
- */
-#define CHASH_ITER_INC(iter) do { \
- (iter).mask = (iter).mask << 1 | \
- (iter).mask >> (BITS_PER_LONG - 1); \
- (iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
- } while (0)
-
-static inline bool chash_iter_is_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-static inline bool chash_iter_is_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
- iter.mask);
-}
-
-static inline void chash_iter_set_valid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
-}
-static inline void chash_iter_set_invalid(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-static inline void chash_iter_set_empty(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
-}
-
-static inline u32 chash_iter_key32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys32[iter.slot];
-}
-static inline u64 chash_iter_key64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->keys64[iter.slot];
-}
-static inline u64 chash_iter_key(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return (iter.table->key_size == 4) ?
- iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
-}
-
-static inline u32 chash_iter_hash32(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 4);
- return hash_32(chash_iter_key32(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash64(const struct chash_iter iter)
-{
- BUG_ON(iter.table->key_size != 8);
- return hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline u32 chash_iter_hash(const struct chash_iter iter)
-{
- return (iter.table->key_size == 4) ?
- hash_32(chash_iter_key32(iter), iter.table->bits) :
- hash_64(chash_iter_key64(iter), iter.table->bits);
-}
-
-static inline void *chash_iter_value(const struct chash_iter iter)
-{
- BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
- return iter.table->values +
- ((unsigned long)iter.slot * iter.table->value_size);
-}
-
-#endif /* _LINUX_CHASH_H */
diff --git a/drivers/gpu/drm/amd/lib/Kconfig b/drivers/gpu/drm/amd/lib/Kconfig
deleted file mode 100644
index 776ef3434c10..000000000000
--- a/drivers/gpu/drm/amd/lib/Kconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-menu "AMD Library routines"
-
-#
-# Closed hash table
-#
-config CHASH
- tristate
- default DRM_AMDGPU
- help
- Statically sized closed hash table implementation with low
- memory and CPU overhead.
-
-config CHASH_STATS
- bool "Closed hash table performance statistics"
- depends on CHASH
- default n
- help
- Enable collection of performance statistics for closed hash tables.
-
-config CHASH_SELFTEST
- bool "Closed hash table self test"
- depends on CHASH
- default n
- help
- Runs a selftest during module load. Several module parameters
- are available to modify the behaviour of the test.
-
-endmenu
diff --git a/drivers/gpu/drm/amd/lib/Makefile b/drivers/gpu/drm/amd/lib/Makefile
deleted file mode 100644
index 690243001e1a..000000000000
--- a/drivers/gpu/drm/amd/lib/Makefile
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for AMD library routines, which are used by AMD driver
-# components.
-#
-# This is for common library routines that can be shared between AMD
-# driver components or later moved to kernel/lib for sharing with
-# other drivers.
-
-ccflags-y := -I$(src)/../include
-
-obj-$(CONFIG_CHASH) += chash.o
diff --git a/drivers/gpu/drm/amd/lib/chash.c b/drivers/gpu/drm/amd/lib/chash.c
deleted file mode 100644
index b8e45f356a1c..000000000000
--- a/drivers/gpu/drm/amd/lib/chash.c
+++ /dev/null
@@ -1,638 +0,0 @@
-/*
- * Copyright 2017 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <linux/types.h>
-#include <linux/hash.h>
-#include <linux/bug.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/sched/clock.h>
-#include <asm/div64.h>
-#include <linux/chash.h>
-
-/**
- * chash_table_alloc - Allocate closed hash table
- * @table: Pointer to the table structure
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @value_size: Size of data values in bytes, can be 0
- */
-int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
- unsigned int value_size, gfp_t gfp_mask)
-{
- if (bits > 31)
- return -EINVAL;
-
- if (key_size != 4 && key_size != 8)
- return -EINVAL;
-
- table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
- sizeof(long), gfp_mask);
- if (!table->data)
- return -ENOMEM;
-
- __CHASH_TABLE_INIT(table->table, table->data,
- bits, key_size, value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(chash_table_alloc);
-
-/**
- * chash_table_free - Free closed hash table
- * @table: Pointer to the table structure
- */
-void chash_table_free(struct chash_table *table)
-{
- kfree(table->data);
-}
-EXPORT_SYMBOL(chash_table_free);
-
-#ifdef CONFIG_CHASH_STATS
-
-#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do { \
- u64 __nom = (nom); \
- u64 __denom = (denom); \
- u64 __quot, __frac; \
- u32 __rem; \
- \
- while (__denom >> 32) { \
- __nom >>= 1; \
- __denom >>= 1; \
- } \
- __quot = __nom; \
- __rem = do_div(__quot, __denom); \
- __frac = __rem * (frac_digits) + (__denom >> 1); \
- do_div(__frac, __denom); \
- (quot) = __quot; \
- (frac) = __frac; \
- } while (0)
-
-void __chash_table_dump_stats(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- u32 filled = 0, empty = 0, tombstones = 0;
- u64 quot1, quot2;
- u32 frac1, frac2;
-
- do {
- if (chash_iter_is_valid(iter))
- filled++;
- else if (chash_iter_is_empty(iter))
- empty++;
- else
- tombstones++;
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- pr_debug("chash: key size %u, value size %u\n",
- table->key_size, table->value_size);
- pr_debug(" Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
- 1 << table->bits, filled, empty, tombstones);
- if (table->hits > 0) {
- DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
- DIV_FRAC(table->hits * 1000, table->hits_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Hits (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits, quot1, frac1, quot2, frac2);
- if (table->miss > 0) {
- DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
- DIV_FRAC(table->miss * 1000, table->miss_time_ns,
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->miss, quot1, frac1, quot2, frac2);
- if (table->hits + table->miss > 0) {
- DIV_FRAC(table->hits_steps + table->miss_steps,
- table->hits + table->miss, quot1, frac1, 1000);
- DIV_FRAC((table->hits + table->miss) * 1000,
- (table->hits_time_ns + table->miss_time_ns),
- quot2, frac2, 1000);
- } else {
- quot1 = quot2 = 0;
- frac1 = frac2 = 0;
- }
- pr_debug(" Total (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
- table->hits + table->miss, quot1, frac1, quot2, frac2);
- if (table->relocs > 0) {
- DIV_FRAC(table->hits + table->miss, table->relocs,
- quot1, frac1, 1000);
- DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
- pr_debug(" Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
- table->relocs, quot1, frac1, quot2, frac2);
- } else {
- pr_debug(" No relocations\n");
- }
-}
-EXPORT_SYMBOL(__chash_table_dump_stats);
-
-#undef DIV_FRAC
-#endif
-
-#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
-#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
-#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
-#define CHASH_IN_RANGE(table, slot, first, last) \
- (CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))
-
-/*#define CHASH_DEBUG Uncomment this to enable verbose debug output*/
-#ifdef CHASH_DEBUG
-static void chash_table_dump(struct __chash_table *table)
-{
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
-
- do {
- if ((iter.slot & 3) == 0)
- pr_debug("%04x: ", iter.slot);
-
- if (chash_iter_is_valid(iter))
- pr_debug("[%016llx] ", chash_iter_key(iter));
- else if (chash_iter_is_empty(iter))
- pr_debug("[ <empty> ] ");
- else
- pr_debug("[ <tombstone> ] ");
-
- if ((iter.slot & 3) == 3)
- pr_debug("\n");
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- if ((iter.slot & 3) != 0)
- pr_debug("\n");
-}
-
-static int chash_table_check(struct __chash_table *table)
-{
- u32 hash;
- struct chash_iter iter = CHASH_ITER_INIT(table, 0);
- struct chash_iter cur = CHASH_ITER_INIT(table, 0);
-
- do {
- if (!chash_iter_is_valid(iter)) {
- CHASH_ITER_INC(iter);
- continue;
- }
-
- hash = chash_iter_hash(iter);
- CHASH_ITER_SET(cur, hash);
- while (cur.slot != iter.slot) {
- if (chash_iter_is_empty(cur)) {
- pr_err("Path to element at %x with hash %x broken at slot %x\n",
- iter.slot, hash, cur.slot);
- chash_table_dump(table);
- return -EINVAL;
- }
- CHASH_ITER_INC(cur);
- }
-
- CHASH_ITER_INC(iter);
- } while (iter.slot);
-
- return 0;
-}
-#endif
-
-static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
-{
- BUG_ON(src.table == dst.table && src.slot == dst.slot);
- BUG_ON(src.table->key_size != dst.table->key_size);
- BUG_ON(src.table->value_size != dst.table->value_size);
-
- if (dst.table->key_size == 4)
- dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
- else
- dst.table->keys64[dst.slot] = src.table->keys64[src.slot];
-
- if (dst.table->value_size)
- memcpy(chash_iter_value(dst), chash_iter_value(src),
- dst.table->value_size);
-
- chash_iter_set_valid(dst);
- chash_iter_set_invalid(src);
-
-#ifdef CONFIG_CHASH_STATS
- if (src.table == dst.table) {
- dst.table->relocs++;
- dst.table->reloc_dist +=
- CHASH_SUB(dst.table, src.slot, dst.slot);
- }
-#endif
-}
-
-/**
- * __chash_table_find - Helper for looking up a hash table entry
- * @iter: Pointer to hash table iterator
- * @key: Key of the entry to find
- * @for_removal: set to true if the element will be removed soon
- *
- * Searches for an entry in the hash table with a given key. iter must
- * be initialized by the caller to point to the home position of the
- * hypothetical entry, i.e. it must be initialized with the hash table
- * and the key's hash as the initial slot for the search.
- *
- * This function also does some local clean-up to speed up future
- * look-ups by relocating entries to better slots and removing
- * tombstones that are no longer needed.
- *
- * If @for_removal is true, the function avoids relocating the entry
- * that is being returned.
- *
- * Returns 0 if the search is successful. In this case iter is updated
- * to point to the found entry. Otherwise %-EINVAL is returned and the
- * iter is updated to point to the first available slot for the given
- * key. If the table is full, the slot is set to -1.
- */
-static int chash_table_find(struct chash_iter *iter, u64 key,
- bool for_removal)
-{
-#ifdef CONFIG_CHASH_STATS
- u64 ts1 = local_clock();
-#endif
- u32 hash = iter->slot;
- struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
- int first_avail = (for_removal ? -2 : -1);
-
- while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
- if (chash_iter_is_empty(*iter)) {
- /* Found an empty slot, which ends the
- * search. Clean up any preceding tombstones
- * that are no longer needed because they lead
- * to no-where
- */
- if ((int)first_redundant.slot < 0)
- goto not_found;
- while (first_redundant.slot != iter->slot) {
- if (!chash_iter_is_valid(first_redundant))
- chash_iter_set_empty(first_redundant);
- CHASH_ITER_INC(first_redundant);
- }
-#ifdef CHASH_DEBUG
- chash_table_check(iter->table);
-#endif
- goto not_found;
- } else if (!chash_iter_is_valid(*iter)) {
- /* Found a tombstone. Remember it as candidate
- * for relocating the entry we're looking for
- * or for adding a new entry with the given key
- */
- if (first_avail == -1)
- first_avail = iter->slot;
- /* Or mark it as the start of a series of
- * potentially redundant tombstones
- */
- else if (first_redundant.slot == -1)
- CHASH_ITER_SET(first_redundant, iter->slot);
- } else if (first_redundant.slot >= 0) {
- /* Found a valid, occupied slot with a
- * preceding series of tombstones. Relocate it
- * to a better position that no longer depends
- * on those tombstones
- */
- u32 cur_hash = chash_iter_hash(*iter);
-
- if (!CHASH_IN_RANGE(iter->table, cur_hash,
- first_redundant.slot + 1,
- iter->slot)) {
- /* This entry has a hash at or before
- * the first tombstone we found. We
- * can relocate it to that tombstone
- * and advance to the next tombstone
- */
- chash_iter_relocate(first_redundant, *iter);
- do {
- CHASH_ITER_INC(first_redundant);
- } while (chash_iter_is_valid(first_redundant));
- } else if (cur_hash != iter->slot) {
- /* Relocate entry to its home position
- * or as close as possible so it no
- * longer depends on any preceding
- * tombstones
- */
- struct chash_iter new_iter =
- CHASH_ITER_INIT(iter->table, cur_hash);
-
- while (new_iter.slot != iter->slot &&
- chash_iter_is_valid(new_iter))
- CHASH_ITER_INC(new_iter);
-
- if (new_iter.slot != iter->slot)
- chash_iter_relocate(new_iter, *iter);
- }
- }
-
- CHASH_ITER_INC(*iter);
- if (iter->slot == hash) {
- iter->slot = -1;
- goto not_found;
- }
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits++;
- iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0) {
- CHASH_ITER_SET(first_redundant, first_avail);
- chash_iter_relocate(first_redundant, *iter);
- iter->slot = first_redundant.slot;
- iter->mask = first_redundant.mask;
- }
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->hits_time_ns += local_clock() - ts1;
-#endif
-
- return 0;
-
-not_found:
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss++;
- iter->table->miss_steps += (iter->slot < 0) ?
- (1 << iter->table->bits) :
- CHASH_SUB(iter->table, iter->slot, hash) + 1;
-#endif
-
- if (first_avail >= 0)
- CHASH_ITER_SET(*iter, first_avail);
-
-#ifdef CONFIG_CHASH_STATS
- iter->table->miss_time_ns += local_clock() - ts1;
-#endif
-
- return -EINVAL;
-}
-
-int __chash_table_copy_in(struct __chash_table *table, u64 key,
- const void *value)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, false);
-
- /* Found an existing entry */
- if (!r) {
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value,
- table->value_size);
- return 1;
- }
-
- /* Is there a place to add a new entry? */
- if (iter.slot < 0) {
- pr_err("Hash table overflow\n");
- return -ENOMEM;
- }
-
- chash_iter_set_valid(iter);
-
- if (table->key_size == 4)
- table->keys32[iter.slot] = key;
- else
- table->keys64[iter.slot] = key;
- if (value && table->value_size)
- memcpy(chash_iter_value(iter), value, table->value_size);
-
- return 0;
-}
-EXPORT_SYMBOL(__chash_table_copy_in);
-
-int __chash_table_copy_out(struct __chash_table *table, u64 key,
- void *value, bool remove)
-{
- u32 hash = (table->key_size == 4) ?
- hash_32(key, table->bits) : hash_64(key, table->bits);
- struct chash_iter iter = CHASH_ITER_INIT(table, hash);
- int r = chash_table_find(&iter, key, remove);
-
- if (r < 0)
- return r;
-
- if (value && table->value_size)
- memcpy(value, chash_iter_value(iter), table->value_size);
-
- if (remove)
- chash_iter_set_invalid(iter);
-
- return iter.slot;
-}
-EXPORT_SYMBOL(__chash_table_copy_out);
-
-#ifdef CONFIG_CHASH_SELFTEST
-/**
- * chash_self_test - Run a self-test of the hash table implementation
- * @bits: Table size will be 2^bits entries
- * @key_size: Size of hash keys in bytes, 4 or 8
- * @min_fill: Minimum fill level during the test
- * @max_fill: Maximum fill level during the test
- * @iterations: Number of test iterations
- *
- * The test adds and removes entries from a hash table, cycling the
- * fill level between min_fill and max_fill entries. Also tests lookup
- * and value retrieval.
- */
-static int __init chash_self_test(u8 bits, u8 key_size,
- int min_fill, int max_fill,
- u64 iterations)
-{
- struct chash_table table;
- int ret;
- u64 add_count, rmv_count;
- u64 value;
-
- if (key_size == 4 && iterations > 0xffffffff)
- return -EINVAL;
- if (min_fill >= max_fill)
- return -EINVAL;
-
- ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
- GFP_KERNEL);
- if (ret) {
- pr_err("chash_table_alloc failed: %d\n", ret);
- return ret;
- }
-
- for (add_count = 0, rmv_count = 0; add_count < iterations;
- add_count++) {
- /* When we hit the max_fill level, remove entries down
- * to min_fill
- */
- if (add_count - rmv_count == max_fill) {
- u64 find_count = rmv_count;
-
- /* First try to find all entries that we're
- * about to remove, confirm their value, test
- * writing them back a second time.
- */
- for (; add_count - find_count > min_fill;
- find_count++) {
- ret = chash_table_copy_out(&table, find_count,
- &value);
- if (ret < 0) {
- pr_err("chash_table_copy_out failed: %d\n",
- ret);
- goto out;
- }
- if (value != ~find_count) {
- pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
- find_count, ~find_count, value);
-#ifdef CHASH_DEBUG
- chash_table_dump(&table.table);
-#endif
- ret = -EFAULT;
- goto out;
- }
- ret = chash_table_copy_in(&table, find_count,
- &value);
- if (ret != 1) {
- pr_err("copy_in second time returned %d, expected 1\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
- /* Remove them until we hit min_fill level */
- for (; add_count - rmv_count > min_fill; rmv_count++) {
- ret = chash_table_remove(&table, rmv_count,
- NULL);
- if (ret < 0) {
- pr_err("chash_table_remove failed: %d\n",
- ret);
- goto out;
- }
- }
- }
-
- /* Add a new value */
- value = ~add_count;
- ret = chash_table_copy_in(&table, add_count, &value);
- if (ret != 0) {
- pr_err("copy_in first time returned %d, expected 0\n",
- ret);
- ret = -EFAULT;
- goto out;
- }
- }
-
- chash_table_dump_stats(&table);
- chash_table_reset_stats(&table);
-
-out:
- chash_table_free(&table);
- return ret;
-}
-
-static unsigned int chash_test_bits = 10;
-MODULE_PARM_DESC(test_bits,
- "Selftest number of hash bits ([4..20], default=10)");
-module_param_named(test_bits, chash_test_bits, uint, 0444);
-
-static unsigned int chash_test_keysize = 8;
-MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
-module_param_named(test_keysize, chash_test_keysize, uint, 0444);
-
-static unsigned int chash_test_minfill;
-MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
-module_param_named(test_minfill, chash_test_minfill, uint, 0444);
-
-static unsigned int chash_test_maxfill;
-MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
-module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);
-
-static unsigned long chash_test_iters;
-MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
-module_param_named(test_iters, chash_test_iters, ulong, 0444);
-
-static int __init chash_init(void)
-{
- int ret;
- u64 ts1_ns;
-
- /* Skip self test on user errors */
- if (chash_test_bits < 4 || chash_test_bits > 20) {
- pr_err("chash: test_bits out of range [4..20].\n");
- return 0;
- }
- if (chash_test_keysize != 4 && chash_test_keysize != 8) {
- pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
- return 0;
- }
-
- if (!chash_test_minfill)
- chash_test_minfill = (1 << chash_test_bits) / 2;
- if (!chash_test_maxfill)
- chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
- if (!chash_test_iters)
- chash_test_iters = (1 << chash_test_bits) * 1000;
-
- if (chash_test_minfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_minfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_maxfill >= (1 << chash_test_bits)) {
- pr_err("chash: test_maxfill too big. Must be < table size.\n");
- return 0;
- }
- if (chash_test_minfill >= chash_test_maxfill) {
- pr_err("chash: test_minfill must be < test_maxfill.\n");
- return 0;
- }
- if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
- pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
- return 0;
- }
-
- ts1_ns = local_clock();
- ret = chash_self_test(chash_test_bits, chash_test_keysize,
- chash_test_minfill, chash_test_maxfill,
- chash_test_iters);
- if (!ret) {
- u64 ts_delta_us = local_clock() - ts1_ns;
- u64 iters_per_second = (u64)chash_test_iters * 1000000;
-
- do_div(ts_delta_us, 1000);
- do_div(iters_per_second, ts_delta_us);
- pr_info("chash: self test took %llu us, %llu iterations/s\n",
- ts_delta_us, iters_per_second);
- } else {
- pr_err("chash: self test failed: %d\n", ret);
- }
-
- return ret;
-}
-
-module_init(chash_init);
-
-#endif /* CONFIG_CHASH_SELFTEST */
-
-MODULE_DESCRIPTION("Closed hash table");
-MODULE_LICENSE("GPL and additional rights");
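
The chash code removed above implements a statically sized closed hash table: deleted entries become tombstones (occupied but invalid slots) so that probe chains stay intact, and chash_table_find() opportunistically relocates entries and drops tombstones that no longer lead anywhere. For readers unfamiliar with the scheme, a minimal userspace sketch of probing with tombstones follows; it is illustrative only and does not use the chash API (the names, hash constant and table size are made up for the example).

#include <stdint.h>
#include <stdio.h>

#define DEMO_BITS 4
#define DEMO_SIZE (1u << DEMO_BITS)
#define DEMO_MASK (DEMO_SIZE - 1)

enum slot_state { EMPTY, VALID, TOMBSTONE };

struct demo_table {
	enum slot_state state[DEMO_SIZE];
	uint32_t key[DEMO_SIZE];
};

/* Multiplicative hash, similar in spirit to hash_32(). */
static uint32_t demo_hash(uint32_t key)
{
	return (key * 2654435769u) >> (32 - DEMO_BITS);
}

static int demo_find(const struct demo_table *t, uint32_t key)
{
	uint32_t slot = demo_hash(key), start = slot;

	do {
		if (t->state[slot] == EMPTY)
			return -1;			/* empty slot ends the probe */
		if (t->state[slot] == VALID && t->key[slot] == key)
			return (int)slot;		/* tombstones are skipped */
		slot = (slot + 1) & DEMO_MASK;
	} while (slot != start);

	return -1;
}

/* Assumes the key is not already present. */
static int demo_insert(struct demo_table *t, uint32_t key)
{
	uint32_t slot = demo_hash(key), start = slot;

	do {
		if (t->state[slot] != VALID) {		/* empty or tombstone is reusable */
			t->state[slot] = VALID;
			t->key[slot] = key;
			return (int)slot;
		}
		slot = (slot + 1) & DEMO_MASK;
	} while (slot != start);

	return -1;					/* table full */
}

static void demo_remove(struct demo_table *t, uint32_t key)
{
	int slot = demo_find(t, key);

	if (slot >= 0)
		t->state[slot] = TOMBSTONE;		/* keeps probe chains intact */
}

int main(void)
{
	struct demo_table t = { { EMPTY } };

	demo_insert(&t, 17);
	demo_insert(&t, 33);
	demo_remove(&t, 17);				/* leaves a tombstone behind */
	printf("33 found at slot %d\n", demo_find(&t, 33));
	return 0;
}
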
diff --git a/drivers/gpu/drm/amd/powerplay/Makefile b/drivers/gpu/drm/amd/powerplay/Makefile
index 231785a9e24c..ec87b3430d12 100644
--- a/drivers/gpu/drm/amd/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/Makefile
@@ -35,7 +35,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(
include $(AMD_POWERPLAY)
-POWER_MGR = amd_powerplay.o
+POWER_MGR = amd_powerplay.o amdgpu_smu.o smu_v11_0.o vega20_ppt.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 3f73f7cd18b9..bea1587d352d 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -53,7 +53,7 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
mutex_init(&hwmgr->smu_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
- hwmgr->feature_mask = adev->powerplay.pp_feature;
+ hwmgr->feature_mask = adev->pm.pp_feature;
hwmgr->display_config = &adev->pm.pm_display_cfg;
adev->powerplay.pp_handle = hwmgr;
adev->powerplay.pp_funcs = &pp_dpm_funcs;
@@ -1304,7 +1304,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
pr_info_ratelimited("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1341,7 +1341,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1360,7 +1360,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
@@ -1379,7 +1379,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
pr_debug("%s was not implemented.\n", __func__);
- return -EINVAL;;
+ return -EINVAL;
}
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
new file mode 100644
index 000000000000..c058c784180e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -0,0 +1,1253 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "soc15_common.h"
+#include "smu_v11_0.h"
+#include "atom.h"
+#include "amd_pcie.h"
+
+int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
+ bool gate)
+{
+ int ret = 0;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ ret = smu_dpm_set_uvd_enable(smu, gate);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ ret = smu_dpm_set_vce_enable(smu, gate);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
+{
+ /* power states are not supported */
+ return POWER_STATE_TYPE_DEFAULT;
+}
+
+int smu_get_power_num_states(struct smu_context *smu,
+ struct pp_states_info *state_info)
+{
+ if (!state_info)
+ return -EINVAL;
+
+ /* power states are not supported */
+ memset(state_info, 0, sizeof(struct pp_states_info));
+ state_info->nums = 0;
+
+ return 0;
+}
+
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
+ *((uint32_t *)data) = smu->pstate_sclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
+ *((uint32_t *)data) = smu->pstate_mclk;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
+ ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+ *size = 8;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = NULL;
+ int ret = 0;
+ uint32_t table_index;
+
+ if (!table_data || table_id >= smu_table->table_count)
+ return -EINVAL;
+
+ table_index = (exarg << 16) | table_id;
+
+ table = &smu_table->tables[table_id];
+
+ if (drv2smu)
+ memcpy(table->cpu_addr, table_data, table->size);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow,
+ lower_32_bits(table->mc_address));
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, drv2smu ?
+ SMU_MSG_TransferTableDram2Smu :
+ SMU_MSG_TransferTableSmu2Dram,
+ table_index);
+ if (ret)
+ return ret;
+
+ if (!drv2smu)
+ memcpy(table_data, table->cpu_addr, table->size);
+
+ return ret;
+}
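
smu_update_table_with_arg() above stages the table in a GPU buffer and then drives the SMU through a fixed message sequence: publish the buffer address in two halves, then request a copy in the chosen direction. A compact userspace mock of that sequence is sketched below; the stubbed send_msg() and the enum values are illustrative, only the message names mirror the driver's SMU_MSG_* identifiers.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the real SMU message IDs; the numeric values are arbitrary. */
enum {
	MSG_SetDriverDramAddrHigh,
	MSG_SetDriverDramAddrLow,
	MSG_TransferTableDram2Smu,
	MSG_TransferTableSmu2Dram,
};

static const char * const msg_name[] = {
	"SetDriverDramAddrHigh",
	"SetDriverDramAddrLow",
	"TransferTableDram2Smu",
	"TransferTableSmu2Dram",
};

/* Stub for smu_send_smc_msg_with_param(): just trace the sequence. */
static int send_msg(int msg, uint32_t param)
{
	printf("%-24s 0x%08x\n", msg_name[msg], param);
	return 0;
}

static int update_table(uint64_t mc_address, uint16_t table_id,
			uint16_t exarg, int drv2smu)
{
	/* 1. Tell the SMU where the staging buffer lives. */
	send_msg(MSG_SetDriverDramAddrHigh, (uint32_t)(mc_address >> 32));
	send_msg(MSG_SetDriverDramAddrLow, (uint32_t)mc_address);

	/* 2. Trigger the copy; the argument packs the extended arg and table id. */
	return send_msg(drv2smu ? MSG_TransferTableDram2Smu
				: MSG_TransferTableSmu2Dram,
			((uint32_t)exarg << 16) | table_id);
}

int main(void)
{
	/* Pull a table from the SMU (smu-to-driver direction). */
	return update_table(0x0000008000100000ull, 2, 0, 0);
}
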
+
+bool is_support_sw_smu(struct amdgpu_device *adev)
+{
+ if (amdgpu_dpm != 1)
+ return false;
+
+ if (adev->asic_type >= CHIP_VEGA20 && adev->asic_type != CHIP_RAVEN)
+ return true;
+
+ return false;
+}
+
+int smu_sys_get_pp_table(struct smu_context *smu, void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
+ return -EINVAL;
+
+ if (smu_table->hardcode_pptable)
+ *table = smu_table->hardcode_pptable;
+ else
+ *table = smu_table->power_play_table;
+
+ return smu_table->power_play_table_size;
+}
+
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
+ int ret = 0;
+
+ if (header->usStructureSize != size) {
+ pr_err("pp table size not matched !\n");
+ return -EIO;
+ }
+
+ mutex_lock(&smu->mutex);
+ if (!smu_table->hardcode_pptable)
+ smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ if (!smu_table->hardcode_pptable) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ memcpy(smu_table->hardcode_pptable, buf, size);
+ smu_table->power_play_table = smu_table->hardcode_pptable;
+ smu_table->power_play_table_size = size;
+ mutex_unlock(&smu->mutex);
+
+ ret = smu_reset(smu);
+ if (ret)
+ pr_info("smu reset failed, ret = %d\n", ret);
+
+ return ret;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_feature_init_dpm(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t unallowed_feature_mask[SMU_FEATURE_MAX/32];
+
+ mutex_lock(&feature->mutex);
+ bitmap_fill(feature->allowed, SMU_FEATURE_MAX);
+ mutex_unlock(&feature->mutex);
+
+ ret = smu_get_unallowed_feature_mask(smu, unallowed_feature_mask,
+ SMU_FEATURE_MAX/32);
+ if (ret)
+ return ret;
+
+ mutex_lock(&feature->mutex);
+ bitmap_andnot(feature->allowed, feature->allowed,
+ (unsigned long *)unallowed_feature_mask,
+ feature->feature_num);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
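
smu_feature_init_dpm() starts from an all-allowed bitmap and clears whatever the ASIC backend reports as unallowed. With a single 64-bit word standing in for the kernel bitmap_*() helpers, the same computation can be sketched as follows (the feature bit numbers are arbitrary examples).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend there are 64 SMU features; the driver uses bitmap_*()
	 * helpers over SMU_FEATURE_MAX bits instead of a single word. */
	uint64_t allowed = ~0ull;				/* bitmap_fill() */
	uint64_t unallowed = (1ull << 3) | (1ull << 17);	/* from the backend */

	allowed &= ~unallowed;					/* bitmap_andnot() */

	printf("feature 3 allowed:  %d\n", (int)((allowed >> 3) & 1));
	printf("feature 17 allowed: %d\n", (int)((allowed >> 17) & 1));
	printf("feature 5 allowed:  %d\n", (int)((allowed >> 5) & 1));
	return 0;
}
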
+
+int smu_feature_is_enabled(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->enabled);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = smu_feature_update_enable_state(smu, feature_id, enable);
+ if (ret)
+ goto failed;
+
+ if (enable)
+ test_and_set_bit(feature_id, feature->enabled);
+ else
+ test_and_clear_bit(feature_id, feature->enabled);
+
+failed:
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_is_supported(struct smu_context *smu, int feature_id)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ ret = test_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+int smu_feature_set_supported(struct smu_context *smu, int feature_id,
+ bool enable)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+
+ WARN_ON(feature_id > feature->feature_num);
+
+ mutex_lock(&feature->mutex);
+ if (enable)
+ test_and_set_bit(feature_id, feature->supported);
+ else
+ test_and_clear_bit(feature_id, feature->supported);
+ mutex_unlock(&feature->mutex);
+
+ return ret;
+}
+
+static int smu_set_funcs(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
+ smu->od_enabled = true;
+ smu_v11_0_set_smu_funcs(smu);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smu_early_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ smu->adev = adev;
+ mutex_init(&smu->mutex);
+
+ return smu_set_funcs(adev);
+}
+
+static int smu_late_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ mutex_lock(&smu->mutex);
+ smu_handle_task(&adev->smu,
+ smu->smu_dpm.dpm_level,
+ AMD_PP_TASK_COMPLETE_INIT);
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint16_t data_start;
+
+ if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
+ size, frev, crev, &data_start))
+ return -EINVAL;
+
+ *addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;
+
+ return 0;
+}
+
+static int smu_initialize_pptable(struct smu_context *smu)
+{
+ /* TODO */
+ return 0;
+}
+
+static int smu_smc_table_sw_init(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_initialize_pptable(smu);
+ if (ret) {
+ pr_err("Failed to init smu_initialize_pptable!\n");
+ return ret;
+ }
+
+ /**
+ * Create smu_table structure, and init smc tables such as
+ * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
+ */
+ ret = smu_init_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to init smc tables!\n");
+ return ret;
+ }
+
+ /**
+ * Create smu_power_context structure, and allocate smu_dpm_context and
+ * context size to fill the smu_power_context data.
+ */
+ ret = smu_init_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_init_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_smc_table_sw_fini(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_fini_smc_tables(smu);
+ if (ret) {
+ pr_err("Failed to smu_fini_smc_tables!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_init(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ smu->pool_size = adev->pm.smu_prv_buffer_size;
+ smu->smu_feature.feature_num = SMU_FEATURE_MAX;
+ mutex_init(&smu->smu_feature.mutex);
+ bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
+ bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
+ smu->watermarks_bitmap = 0;
+ smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+ smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
+ smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+
+ smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+ smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
+ smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
+ smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
+ smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
+ smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
+ smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
+ smu->display_config = &adev->pm.pm_display_cfg;
+
+ smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ pr_err("Failed to load smu firmware!\n");
+ return ret;
+ }
+
+ ret = smu_smc_table_sw_init(smu);
+ if (ret) {
+ pr_err("Failed to sw init smc table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_sw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ int ret;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_smc_table_sw_fini(smu);
+ if (ret) {
+ pr_err("Failed to sw fini smc table!\n");
+ return ret;
+ }
+
+ ret = smu_fini_power(smu);
+ if (ret) {
+ pr_err("Failed to init smu_fini_power!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_init_fb_allocations(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+ int32_t ret = 0;
+
+ if (table_count <= 0)
+ return -EINVAL;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ ret = amdgpu_bo_create_kernel(adev,
+ tables[i].size,
+ tables[i].align,
+ tables[i].domain,
+ &tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ if (ret)
+ goto failed;
+ }
+
+ return 0;
+failed:
+ for (; i > 0; i--) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+
+ }
+ return ret;
+}
+
+static int smu_fini_fb_allocations(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+ uint32_t table_count = smu_table->table_count;
+ uint32_t i = 0;
+
+ if (table_count == 0 || tables == NULL)
+ return 0;
+
+ for (i = 0 ; i < table_count; i++) {
+ if (tables[i].size == 0)
+ continue;
+ amdgpu_bo_free_kernel(&tables[i].bo,
+ &tables[i].mc_address,
+ &tables[i].cpu_addr);
+ }
+
+ return 0;
+}
+
+static int smu_override_pcie_parameters(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
+ int ret;
+
+ if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
+ pcie_gen = 3;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ pcie_gen = 2;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
+ pcie_gen = 1;
+ else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
+ pcie_gen = 0;
+
+ /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
+ * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
+ * Bit 7:0: PCIE lane width, 1 to 7 corresponds to x1 to x32
+ */
+ if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
+ pcie_width = 6;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
+ pcie_width = 5;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
+ pcie_width = 4;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
+ pcie_width = 3;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
+ pcie_width = 2;
+ else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
+ pcie_width = 1;
+
+ smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_OverridePcieParameters,
+ smu_pcie_arg);
+ if (ret)
+ pr_err("[%s] Attempt to override pcie params failed!\n", __func__);
+ return ret;
+}
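
The argument passed with SMU_MSG_OverridePcieParameters packs three fields as described in the comment above. A tiny standalone check of that packing, using the GEN4/x16 case the function would select, might look like this (pack_pcie_arg() is a hypothetical helper for illustration, not a driver function).

#include <stdint.h>
#include <stdio.h>

/* Pack the OverridePcieParameters argument: bits 31:16 LCLK DPM level,
 * bits 15:8 PCIe gen code (0..3 = GEN1..GEN4), bits 7:0 lane-width code. */
static uint32_t pack_pcie_arg(uint32_t lclk_dpm, uint32_t gen, uint32_t width)
{
	return (lclk_dpm << 16) | (gen << 8) | width;
}

int main(void)
{
	/* GEN4-capable link at x16: gen code 3, width code 6, LCLK DPM1. */
	uint32_t arg = pack_pcie_arg(1, 3, 6);

	printf("smu_pcie_arg = 0x%08x\n", arg);		/* 0x00010306 */
	printf("gen code     = %u\n", (arg >> 8) & 0xff);
	printf("width code   = %u\n", arg & 0xff);
	return 0;
}
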
+
+static int smu_smc_table_hw_init(struct smu_context *smu,
+ bool initialize)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+
+ if (smu_is_dpm_running(smu) && adev->in_suspend) {
+ pr_info("dpm has been enabled\n");
+ return 0;
+ }
+
+ ret = smu_init_display(smu);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_read_pptable_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /* get boot_values from vbios to set revision, gfxclk, etc. */
+ ret = smu_get_vbios_bootup_values(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_clk_info_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * check the format_revision in the vbios against the pptable header
+ * version, and check that the structure size is not 0.
+ */
+ ret = smu_get_clk_info_from_vbios(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_check_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * allocate vram bos to store smc table contents.
+ */
+ ret = smu_init_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Parse pptable format and fill PPTable_t smc_pptable to
+ * smu_table_context structure. And read the smc_dpm_table from vbios,
+ * then fill it into smc_pptable.
+ */
+ ret = smu_parse_pptable(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send msg GetDriverIfVersion to check if the return value is equal
+ * with DRIVER_IF_VERSION of smc header.
+ */
+ ret = smu_check_fw_version(smu);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Copy pptable bo in the vram to smc with SMU MSGs such as
+ * SetDriverDramAddr and TransferTableDram2Smu.
+ */
+ ret = smu_write_pptable(smu);
+ if (ret)
+ return ret;
+
+ /* issue RunAfllBtc msg */
+ ret = smu_run_afll_btc(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_feature_set_allowed_mask(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_system_features_control(smu, true);
+ if (ret)
+ return ret;
+
+ ret = smu_override_pcie_parameters(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_notify_display_change(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set min deep sleep dce fclk with bootup value from vbios via
+ * SetMinDeepSleepDcefclk MSG.
+ */
+ ret = smu_set_min_dcef_deep_sleep(smu);
+ if (ret)
+ return ret;
+
+ /*
+ * Set the initial values (read from the vbios) in the dpm tables context,
+ * such as gfxclk, memclk and dcefclk, and enable the DPM feature for each
+ * type of clock.
+ */
+ if (initialize) {
+ ret = smu_populate_smc_pptable(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_init_max_sustainable_clocks(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_set_od8_default_settings(smu, initialize);
+ if (ret)
+ return ret;
+
+ if (initialize) {
+ ret = smu_populate_umd_state_clk(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_get_power_limit(smu, &smu->default_power_limit, false);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
+ */
+ ret = smu_set_tool_table_location(smu);
+
+ return ret;
+}
+
+/**
+ * smu_alloc_memory_pool - allocate memory pool in the system memory
+ *
+ * @smu: smu_context pointer
+ *
+ * This memory pool is used by the SMC; its location is reported to the SMC
+ * with the SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
+ *
+ * Returns 0 on success, error on failure.
+ */
+static int smu_alloc_memory_pool(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ uint64_t pool_size = smu->pool_size;
+ int ret = 0;
+
+ if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ memory_pool->size = pool_size;
+ memory_pool->align = PAGE_SIZE;
+ memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;
+
+ switch (pool_size) {
+ case SMU_MEMORY_POOL_SIZE_256_MB:
+ case SMU_MEMORY_POOL_SIZE_512_MB:
+ case SMU_MEMORY_POOL_SIZE_1_GB:
+ case SMU_MEMORY_POOL_SIZE_2_GB:
+ ret = amdgpu_bo_create_kernel(adev,
+ memory_pool->size,
+ memory_pool->align,
+ memory_pool->domain,
+ &memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_free_memory_pool(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+
+ if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
+ return ret;
+
+ amdgpu_bo_free_kernel(&memory_pool->bo,
+ &memory_pool->mc_address,
+ &memory_pool->cpu_addr);
+
+ memset(memory_pool, 0, sizeof(struct smu_table));
+
+ return ret;
+}
+
+static int smu_hw_init(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ ret = smu_load_microcode(smu);
+ if (ret)
+ return ret;
+ }
+
+ ret = smu_check_fw_status(smu);
+ if (ret) {
+ pr_err("SMC firmware status is not correct\n");
+ return ret;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_feature_init_dpm(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_smc_table_hw_init(smu, true);
+ if (ret)
+ goto failed;
+
+ ret = smu_alloc_memory_pool(smu);
+ if (ret)
+ goto failed;
+
+ /*
+ * Notify the SMC of the memory pool location with the
+ * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
+ */
+ ret = smu_notify_memory_pool_location(smu);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ adev->pm.dpm_enabled = true;
+
+ pr_info("SMU is initialized successfully!\n");
+
+ return 0;
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_hw_fini(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ kfree(table_context->driver_pptable);
+ table_context->driver_pptable = NULL;
+
+ kfree(table_context->max_sustainable_clocks);
+ table_context->max_sustainable_clocks = NULL;
+
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+
+ kfree(table_context->od_settings_min);
+ table_context->od_settings_min = NULL;
+
+ kfree(table_context->overdrive_table);
+ table_context->overdrive_table = NULL;
+
+ kfree(table_context->od8_settings);
+ table_context->od8_settings = NULL;
+
+ ret = smu_fini_fb_allocations(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_free_memory_pool(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int smu_reset(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_hw_fini(adev);
+ if (ret)
+ return ret;
+
+ ret = smu_hw_init(adev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_suspend(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ ret = smu_system_features_control(smu, false);
+ if (ret)
+ return ret;
+
+ smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
+
+ return 0;
+}
+
+static int smu_resume(void *handle)
+{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (!is_support_sw_smu(adev))
+ return -EINVAL;
+
+ pr_info("SMU is resuming...\n");
+
+ mutex_lock(&smu->mutex);
+
+ ret = smu_smc_table_hw_init(smu, false);
+ if (ret)
+ goto failed;
+
+ ret = smu_start_thermal_control(smu);
+ if (ret)
+ goto failed;
+
+ mutex_unlock(&smu->mutex);
+
+ pr_info("SMU is resumed successfully!\n");
+
+ return 0;
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+int smu_display_configuration_change(struct smu_context *smu,
+ const struct amd_pp_display_configuration *display_config)
+{
+ int index = 0;
+ int num_of_active_display = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ if (!display_config)
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_set_deep_sleep_dcefclk(smu,
+ display_config->min_dcef_deep_sleep_set_clk / 100);
+
+ for (index = 0; index < display_config->num_path_including_non_display; index++) {
+ if (display_config->displays[index].controller_id != 0)
+ num_of_active_display++;
+ }
+
+ smu_set_active_display_count(smu, num_of_active_display);
+
+ smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
+ display_config->cpu_cc6_disable,
+ display_config->cpu_pstate_disable,
+ display_config->nb_pstate_switch_disable);
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
+static int smu_get_clock_info(struct smu_context *smu,
+ struct smu_clock_info *clk_info,
+ enum smu_perf_level_designation designation)
+{
+ int ret;
+ struct smu_performance_level level = {0};
+
+ if (!clk_info)
+ return -EINVAL;
+
+ ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ ret = smu_get_perf_level(smu, designation, &level);
+ if (ret)
+ return -EINVAL;
+
+ clk_info->min_mem_clk = level.memory_clock;
+ clk_info->min_eng_clk = level.core_clock;
+ clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;
+
+ return 0;
+}
+
+int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks)
+{
+ struct amd_pp_simple_clock_info simple_clocks = {0};
+ struct smu_clock_info hw_clocks;
+ int ret = 0;
+
+ if (!is_support_sw_smu(smu->adev))
+ return -EINVAL;
+
+ mutex_lock(&smu->mutex);
+
+ smu_get_dal_power_level(smu, &simple_clocks);
+
+ if (smu->support_power_containment)
+ ret = smu_get_clock_info(smu, &hw_clocks,
+ PERF_LEVEL_POWER_CONTAINMENT);
+ else
+ ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);
+
+ if (ret) {
+ pr_err("Error in smu_get_clock_info\n");
+ goto failed;
+ }
+
+ clocks->min_engine_clock = hw_clocks.min_eng_clk;
+ clocks->max_engine_clock = hw_clocks.max_eng_clk;
+ clocks->min_memory_clock = hw_clocks.min_mem_clk;
+ clocks->max_memory_clock = hw_clocks.max_mem_clk;
+ clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
+ clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
+
+ if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
+ clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
+ clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
+ }
+
+failed:
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
+static int smu_set_clockgating_state(void *handle,
+ enum amd_clockgating_state state)
+{
+ return 0;
+}
+
+static int smu_set_powergating_state(void *handle,
+ enum amd_powergating_state state)
+{
+ return 0;
+}
+
+static int smu_enable_umd_pstate(void *handle,
+ enum amd_dpm_forced_level *level)
+{
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ struct smu_context *smu = (struct smu_context*)(handle);
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
+ /* enter umd pstate, save current level, disable gfx cg*/
+ if (*level & profile_mode_mask) {
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = true;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ }
+ } else {
+ /* exit umd pstate, restore level, enable gfx cg*/
+ if (!(*level & profile_mode_mask)) {
+ if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
+ *level = smu_dpm_ctx->saved_dpm_level;
+ smu_dpm_ctx->enable_umd_pstate = false;
+ amdgpu_device_ip_set_clockgating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(smu->adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+ }
+
+ return 0;
+}
+
+int smu_adjust_power_state_dynamic(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ bool skip_display_settings)
+{
+ int ret = 0;
+ int index = 0;
+ uint32_t sclk_mask, mclk_mask, soc_mask;
+ long workload;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!skip_display_settings) {
+ ret = smu_display_config_changed(smu);
+ if (ret) {
+ pr_err("Failed to change display config!");
+ return ret;
+ }
+ }
+
+ ret = smu_apply_clocks_adjust_rules(smu);
+ if (ret) {
+ pr_err("Failed to apply clocks adjust rules!");
+ return ret;
+ }
+
+ if (!skip_display_settings) {
+ ret = smu_notify_smc_dispaly_config(smu);
+ if (ret) {
+ pr_err("Failed to notify smc display config!");
+ return ret;
+ }
+ }
+
+ if (smu_dpm_ctx->dpm_level != level) {
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ ret = smu_force_dpm_limit_value(smu, true);
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ ret = smu_force_dpm_limit_value(smu, false);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ ret = smu_unforce_dpm_levels(smu);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ ret = smu_get_profiling_clk_mask(smu, level,
+ &sclk_mask,
+ &mclk_mask,
+ &soc_mask);
+ if (ret)
+ return ret;
+ smu_force_clk_levels(smu, PP_SCLK, 1 << sclk_mask);
+ smu_force_clk_levels(smu, PP_MCLK, 1 << mclk_mask);
+ break;
+
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ default:
+ break;
+ }
+
+ if (!ret)
+ smu_dpm_ctx->dpm_level = level;
+ }
+
+ if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ index = fls(smu->workload_mask);
+ index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
+ workload = smu->workload_setting[index];
+
+ if (smu->power_profile_mode != workload)
+ smu_set_power_profile_mode(smu, &workload, 0);
+ }
+
+ return ret;
+}
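
Outside manual mode, the code above picks the pending workload with the highest priority by taking the most significant set bit of workload_mask. A small userspace illustration of that selection follows; fls_demo() approximates the kernel's fls() and the string names stand in for the PP_SMC_POWER_PROFILE_* settings.

#include <stdint.h>
#include <stdio.h>

#define WORKLOAD_POLICY_MAX 7

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest
 * set bit, 0 if the word is zero. */
static int fls_demo(uint32_t x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static const char * const workload_setting[WORKLOAD_POLICY_MAX] = {
	"BOOTUP_DEFAULT", "FULLSCREEN3D", "POWERSAVING",
	"VIDEO", "VR", "COMPUTE", "CUSTOM",
};

int main(void)
{
	/* Bits follow the workload_prority[] mapping set up in smu_sw_init():
	 * bit 1 = FULLSCREEN3D, bit 5 = COMPUTE, and so on. */
	uint32_t workload_mask = (1u << 1) | (1u << 5);
	int index = fls_demo(workload_mask);

	index = (index > 0 && index <= WORKLOAD_POLICY_MAX) ? index - 1 : 0;
	printf("selected workload: %s\n", workload_setting[index]);	/* COMPUTE */
	return 0;
}
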
+
+int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id)
+{
+ int ret = 0;
+
+ switch (task_id) {
+ case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
+ ret = smu_pre_display_config_changed(smu);
+ if (ret)
+ return ret;
+ ret = smu_set_cpu_power_state(smu);
+ if (ret)
+ return ret;
+ ret = smu_adjust_power_state_dynamic(smu, level, false);
+ break;
+ case AMD_PP_TASK_COMPLETE_INIT:
+ case AMD_PP_TASK_READJUST_POWER_STATE:
+ ret = smu_adjust_power_state_dynamic(smu, level, true);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+const struct amd_ip_funcs smu_ip_funcs = {
+ .name = "smu",
+ .early_init = smu_early_init,
+ .late_init = smu_late_init,
+ .sw_init = smu_sw_init,
+ .sw_fini = smu_sw_fini,
+ .hw_init = smu_hw_init,
+ .hw_fini = smu_hw_fini,
+ .suspend = smu_suspend,
+ .resume = smu_resume,
+ .is_idle = NULL,
+ .check_soft_reset = NULL,
+ .wait_for_idle = NULL,
+ .soft_reset = NULL,
+ .set_clockgating_state = smu_set_clockgating_state,
+ .set_powergating_state = smu_set_powergating_state,
+ .enable_umd_pstate = smu_enable_umd_pstate,
+};
+
+const struct amdgpu_ip_block_version smu_v11_0_ip_block =
+{
+ .type = AMD_IP_BLOCK_TYPE_SMC,
+ .major = 11,
+ .minor = 0,
+ .rev = 0,
+ .funcs = &smu_ip_funcs,
+};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
index 0b3c6d1d52e4..cc63705920dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile
@@ -35,7 +35,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \
vega12_thermal.o \
pp_overdriver.o smu_helper.o \
vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \
- vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o
+ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \
+ vega12_baco.o smu9_baco.o
AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR))
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index c1c51c115e57..70f7f47a2fcf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -76,7 +76,7 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = NULL;
- int ret = -EINVAL;;
+ int ret = -EINVAL;
PHM_FUNC_CHECK(hwmgr);
adev = hwmgr->adev;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index 0ad8fe4a6277..9a595f7525e6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -35,6 +35,7 @@
#include "smu10_hwmgr.h"
#include "power_state.h"
#include "soc15_common.h"
+#include "smu10.h"
#define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
@@ -114,11 +115,6 @@ static int smu10_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
smu10_data->num_active_display = 0;
smu10_data->deep_sleep_dcefclk = 0;
- if (hwmgr->feature_mask & PP_GFXOFF_MASK)
- smu10_data->gfx_off_controled_by_driver = true;
- else
- smu10_data->gfx_off_controled_by_driver = false;
-
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SclkDeepSleep);
@@ -209,18 +205,13 @@ static int smu10_set_clock_limit(struct pp_hwmgr *hwmgr, const void *input)
return 0;
}
-static inline uint32_t convert_10k_to_mhz(uint32_t clock)
-{
- return (clock + 99) / 100;
-}
-
static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->need_min_deep_sleep_dcefclk &&
- smu10_data->deep_sleep_dcefclk != convert_10k_to_mhz(clock)) {
- smu10_data->deep_sleep_dcefclk = convert_10k_to_mhz(clock);
+ smu10_data->deep_sleep_dcefclk != clock) {
+ smu10_data->deep_sleep_dcefclk = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
smu10_data->deep_sleep_dcefclk);
@@ -233,8 +224,8 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->dcf_actual_hard_min_freq &&
- smu10_data->dcf_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->dcf_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->dcf_actual_hard_min_freq != clock) {
+ smu10_data->dcf_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinDcefclkByFreq,
smu10_data->dcf_actual_hard_min_freq);
@@ -247,8 +238,8 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
if (smu10_data->f_actual_hard_min_freq &&
- smu10_data->f_actual_hard_min_freq != convert_10k_to_mhz(clock)) {
- smu10_data->f_actual_hard_min_freq = convert_10k_to_mhz(clock);
+ smu10_data->f_actual_hard_min_freq != clock) {
+ smu10_data->f_actual_hard_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
smu10_data->f_actual_hard_min_freq);
@@ -330,9 +321,9 @@ static bool smu10_is_gfx_on(struct pp_hwmgr *hwmgr)
static int smu10_disable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver) {
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK) {
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_DisableGfxOff);
/* confirm gfx is back to "on" state */
@@ -350,9 +341,9 @@ static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
static int smu10_enable_gfx_off(struct pp_hwmgr *hwmgr)
{
- struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ struct amdgpu_device *adev = hwmgr->adev;
- if (smu10_data->gfx_off_controled_by_driver)
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_EnableGfxOff);
return 0;
@@ -577,7 +568,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
enum amd_dpm_forced_level level)
{
struct smu10_hwmgr *data = hwmgr->backend;
- struct amdgpu_device *adev = hwmgr->adev;
uint32_t min_sclk = hwmgr->display_config->min_core_set_clock;
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
@@ -586,11 +576,6 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
return 0;
}
- /* Disable UMDPSTATE support on rv2 temporarily */
- if ((adev->asic_type == CHIP_RAVEN) &&
- (adev->rev_id >= 8))
- return 0;
-
if (min_sclk < data->gfx_min_freq_limit)
min_sclk = data->gfx_min_freq_limit;
@@ -1205,6 +1190,94 @@ static void smu10_powergate_vcn(struct pp_hwmgr *hwmgr, bool bgate)
}
}
+static int conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu10_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
+{
+ uint32_t i, size = 0;
+ static const uint8_t
+ profile_mode_setting[6][4] = {{70, 60, 0, 0,},
+ {70, 60, 1, 3,},
+ {90, 60, 0, 0,},
+ {70, 60, 0, 0,},
+ {70, 90, 0, 0,},
+ {30, 60, 0, 6,},
+ };
+ static const char *profile_name[6] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE"};
+ static const char *title[6] = {"NUM",
+ "MODE_NAME",
+ "BUSY_SET_POINT",
+ "FPS",
+ "USE_RLC_BUSY",
+ "MIN_ACTIVE_LEVEL"};
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%s %16s %s %s %s %s\n",title[0],
+ title[1], title[2], title[3], title[4], title[5]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_COMPUTE; i++)
+ size += sprintf(buf + size, "%3d %14s%s: %14d %3d %10d %14d\n",
+ i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ",
+ profile_mode_setting[i][0], profile_mode_setting[i][1],
+ profile_mode_setting[i][2], profile_mode_setting[i][3]);
+
+ return size;
+}
+
+static int smu10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
+{
+ int workload_type = 0;
+
+ if (input[size] > PP_SMC_POWER_PROFILE_COMPUTE) {
+ pr_err("Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+ hwmgr->power_profile_mode = input[size];
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_ActiveProcessNotify,
+ 1 << workload_type);
+
+ return 0;
+}
+
+
static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.backend_init = smu10_hwmgr_backend_init,
.backend_fini = smu10_hwmgr_backend_fini,
@@ -1246,6 +1319,8 @@ static const struct pp_hwmgr_func smu10_hwmgr_funcs = {
.powergate_sdma = smu10_powergate_sdma,
.set_hard_min_dcefclk_by_freq = smu10_set_hard_min_dcefclk_by_freq,
.set_hard_min_fclk_by_freq = smu10_set_hard_min_fclk_by_freq,
+ .get_power_profile_mode = smu10_get_power_profile_mode,
+ .set_power_profile_mode = smu10_set_power_profile_mode,
};
int smu10_init_function_pointers(struct pp_hwmgr *hwmgr)
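The smu10 change above wires two new hooks into the hwmgr function table: get_power_profile_mode prints the fixed profile table, and set_power_profile_mode converts the selected profile into a PPLIB workload bit and notifies the SMC. A minimal caller sketch, assuming the usual pp_hwmgr_func dispatch; the helper name and locals below are hypothetical and not part of the patch:

/* Hypothetical helper, for illustration only: selects a profile through
 * the new smu10 hooks.  "size" counts the parameters preceding the
 * profile index, so a bare profile select passes size == 0.
 */
static int example_select_profile(struct pp_hwmgr *hwmgr, long profile)
{
	long input[1] = { profile };	/* input[size] holds the profile index */

	if (!hwmgr->hwmgr_func->set_power_profile_mode)
		return -EOPNOTSUPP;

	/* e.g. PP_SMC_POWER_PROFILE_VIDEO ends up as
	 * 1 << WORKLOAD_PPLIB_VIDEO_BIT in PPSMC_MSG_ActiveProcessNotify */
	return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, 0);
}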
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 83d3d935f3ac..048757e8f494 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -77,7 +77,7 @@
#define PCIE_BUS_CLK 10000
#define TCLK (PCIE_BUS_CLK / 10)
-static const struct profile_mode_setting smu7_profiling[7] =
+static struct profile_mode_setting smu7_profiling[7] =
{{0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 100, 30, 1, 0, 100, 10},
{1, 10, 0, 30, 0, 0, 0, 0},
@@ -4984,17 +4984,27 @@ static int smu7_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint
mode = input[size];
switch (mode) {
case PP_SMC_POWER_PROFILE_CUSTOM:
- if (size < 8)
+ if (size < 8 && size != 0)
return -EINVAL;
-
- tmp.bupdate_sclk = input[0];
- tmp.sclk_up_hyst = input[1];
- tmp.sclk_down_hyst = input[2];
- tmp.sclk_activity = input[3];
- tmp.bupdate_mclk = input[4];
- tmp.mclk_up_hyst = input[5];
- tmp.mclk_down_hyst = input[6];
- tmp.mclk_activity = input[7];
+ /* If only CUSTOM is passed in, use the saved values. Check
+ * that we actually have a CUSTOM profile by ensuring that
+ * the "use sclk" or the "use mclk" bits are set
+ */
+ tmp = smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM];
+ if (size == 0) {
+ if (tmp.bupdate_sclk == 0 && tmp.bupdate_mclk == 0)
+ return -EINVAL;
+ } else {
+ tmp.bupdate_sclk = input[0];
+ tmp.sclk_up_hyst = input[1];
+ tmp.sclk_down_hyst = input[2];
+ tmp.sclk_activity = input[3];
+ tmp.bupdate_mclk = input[4];
+ tmp.mclk_up_hyst = input[5];
+ tmp.mclk_down_hyst = input[6];
+ tmp.mclk_activity = input[7];
+ smu7_profiling[PP_SMC_POWER_PROFILE_CUSTOM] = tmp;
+ }
if (!smum_update_dpm_settings(hwmgr, &tmp)) {
memcpy(&data->current_profile_setting, &tmp, sizeof(struct profile_mode_setting));
hwmgr->power_profile_mode = mode;
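With the smu7 change, a CUSTOM request that carries no parameters (size == 0) reuses the values saved from the last full CUSTOM write instead of failing; the very first bare request is still rejected because bupdate_sclk and bupdate_mclk start out zero. A sketch of the two call shapes follows; the arrays are example values and the direct calls are for illustration only, since the real entry point is hwmgr_func->set_power_profile_mode:

/* Full CUSTOM write: 8 parameters followed by the profile index
 * (values are examples only).
 */
long full[9]  = { 1, 10, 0, 30, 1, 10, 0, 30, PP_SMC_POWER_PROFILE_CUSTOM };
/* Bare re-apply: no parameters, just the profile index. */
long reuse[1] = { PP_SMC_POWER_PROFILE_CUSTOM };

smu7_set_power_profile_mode(hwmgr, full, 8);	/* stores the settings */
smu7_set_power_profile_mode(hwmgr, reuse, 0);	/* re-applies the stored copy */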
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
new file mode 100644
index 000000000000..de0a37f7c632
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega10_inc.h"
+#include "smu9_baco.h"
+
+int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg, data;
+
+ *cap = false;
+ if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
+ return 0;
+
+ WREG32(0x12074, 0xFFF0003B);
+ data = RREG32(0x12075);
+
+ if (data == 0x1) {
+ reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
+
+ if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
+ *cap = true;
+ }
+
+ return 0;
+}
+
+int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
+ uint32_t reg;
+
+ reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
+
+ if (reg & BACO_CNTL__BACO_MODE_MASK)
+ /* gfx has already entered BACO state */
+ *state = BACO_STATE_IN;
+ else
+ *state = BACO_STATE_OUT;
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
new file mode 100644
index 000000000000..84e90f801ac3
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu9_baco.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU9_BACO_H__
+#define __SMU9_BACO_H__
+#include "hwmgr.h"
+#include "common_baco.h"
+
+extern int smu9_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
+extern int smu9_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
+
+#endif
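smu9_baco.c/.h factor the BACO capability and state queries out of vega10 so that vega12 can share them; the vega10 wiring is updated below and vega12 gains the same hooks further down. A usage sketch, assuming dispatch through the hwmgr function table as set up later in this patch (the calling context is hypothetical):

/* Illustration only: query capability and current state through the
 * shared SMU9 helpers before requesting BACO entry.
 */
bool baco_capable = false;
enum BACO_STATE baco_state;

hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, &baco_capable);
if (baco_capable) {
	hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, &baco_state);
	if (baco_state == BACO_STATE_OUT)
		hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, BACO_STATE_IN);
}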
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
index 7337be5602e4..d168af4a4d78 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.c
@@ -85,48 +85,11 @@ static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
{CMD_WRITE, SOC15_REG_ENTRY(NBIF, 0, mmBIOS_SCRATCH_7), 0, 0, 0, 0},
};
-int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg, data;
-
- *cap = false;
- if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO))
- return 0;
-
- WREG32(0x12074, 0xFFF0003B);
- data = RREG32(0x12075);
-
- if (data == 0x1) {
- reg = RREG32_SOC15(NBIF, 0, mmRCC_BIF_STRAP0);
-
- if (reg & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- *cap = true;
- }
-
- return 0;
-}
-
-int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
- uint32_t reg;
-
- reg = RREG32_SOC15(NBIF, 0, mmBACO_CNTL);
-
- if (reg & BACO_CNTL__BACO_MODE_MASK)
- /* gfx has already entered BACO state */
- *state = BACO_STATE_IN;
- else
- *state = BACO_STATE_OUT;
- return 0;
-}
-
int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
{
enum BACO_STATE cur_state;
- vega10_baco_get_state(hwmgr, &cur_state);
+ smu9_baco_get_state(hwmgr, &cur_state);
if (cur_state == state)
/* asic already in the target state */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
index f7a3ffa744b3..96d793f026a5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_baco.h
@@ -22,11 +22,8 @@
*/
#ifndef __VEGA10_BACO_H__
#define __VEGA10_BACO_H__
-#include "hwmgr.h"
-#include "common_baco.h"
+#include "smu9_baco.h"
-extern int vega10_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
-extern int vega10_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega10_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5c4f701939ea..384c37875cd0 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -1427,6 +1427,15 @@ static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
vega10_setup_default_pcie_table(hwmgr);
+ /* Zero out the saved copy of the CUSTOM profile
+ * This will be checked when trying to set the profile
+ * and will require that new values be passed in
+ */
+ data->custom_profile_mode[0] = 0;
+ data->custom_profile_mode[1] = 0;
+ data->custom_profile_mode[2] = 0;
+ data->custom_profile_mode[3] = 0;
+
/* save a copy of the default DPM table */
memcpy(&(data->golden_dpm_table), &(data->dpm_table),
sizeof(struct vega10_dpm_table));
@@ -4904,16 +4913,23 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
uint8_t FPS;
uint8_t use_rlc_busy;
uint8_t min_active_level;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
- 1<<hwmgr->power_profile_mode);
-
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size == 0 || size > 4)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ if (size != 0 && size != 4)
return -EINVAL;
+ /* If size = 0 and the CUSTOM profile has been set already
+ * then just apply the profile. The copy stored in the hwmgr
+ * is zeroed out on init
+ */
+ if (size == 0) {
+ if (data->custom_profile_mode[0] != 0)
+ goto out;
+ else
+ return -EINVAL;
+ }
+
data->custom_profile_mode[0] = busy_set_point = input[0];
data->custom_profile_mode[1] = FPS = input[1];
data->custom_profile_mode[2] = use_rlc_busy = input[2];
@@ -4924,6 +4940,11 @@ static int vega10_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
use_rlc_busy << 16 | min_active_level<<24);
}
+out:
+ smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
+ 1 << power_profile_mode);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
@@ -5170,8 +5191,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
.set_power_limit = vega10_set_power_limit,
.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
.get_performance_level = vega10_get_performance_level,
- .get_asic_baco_capability = vega10_baco_get_capability,
- .get_asic_baco_state = vega10_baco_get_state,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
.set_asic_baco_state = vega10_baco_set_state,
.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
.get_ppfeature_status = vega10_get_ppfeature_status,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
new file mode 100644
index 000000000000..9d8ca94a8f0c
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "amdgpu.h"
+#include "soc15.h"
+#include "soc15_hw_ip.h"
+#include "vega10_ip_offset.h"
+#include "soc15_common.h"
+#include "vega12_inc.h"
+#include "vega12_ppsmc.h"
+#include "vega12_baco.h"
+
+static const struct soc15_baco_cmd_entry pre_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmBIF_DOORBELL_CNTL_BASE_IDX, mmBIF_DOORBELL_CNTL, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN_MASK, BIF_DOORBELL_CNTL__DOORBELL_MONITOR_EN__SHIFT, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIF_FB_EN_BASE_IDX, mmBIF_FB_EN, 0, 0, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DSTATE_BYPASS_MASK, BACO_CNTL__BACO_DSTATE_BYPASS__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_RST_INTR_MASK_MASK, BACO_CNTL__BACO_RST_INTR_MASK__SHIFT, 0, 1 }
+};
+
+static const struct soc15_baco_cmd_entry enter_baco_tbl[] =
+{
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__SOC_DOMAIN_IDLE_MASK, THM_BACO_CNTL__SOC_DOMAIN_IDLE__SHIFT, 0xffffffff, 0x80000000 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 1 },
+ { CMD_DELAY_MS, 0, 0, 0, 5, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, BACO_CNTL__BACO_MODE__SHIFT, 0xffffffff, 0x100 }
+};
+
+static const struct soc15_baco_cmd_entry exit_baco_tbl[] =
+{
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0 },
+ { CMD_DELAY_MS, 0, 0, 0, 0, 0, 0, 10, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF_MASK, THM_BACO_CNTL__BACO_SOC_REFCLK_OFF__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ANA_ISO_EN_MASK, THM_BACO_CNTL__BACO_ANA_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_AEB_ISO_EN_MASK, THM_BACO_CNTL__BACO_AEB_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_ISO_EN_MASK, THM_BACO_CNTL__BACO_ISO_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_PWROKRAW_CNTL_MASK, THM_BACO_CNTL__BACO_PWROKRAW_CNTL__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SMNCLK_MUX_MASK, THM_BACO_CNTL__BACO_SMNCLK_MUX__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SOC_VDCI_RESET_MASK, THM_BACO_CNTL__BACO_SOC_VDCI_RESET__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, THM_BACO_CNTL__BACO_EXIT__SHIFT, 0, 1 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_RESET_EN_MASK, THM_BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_EXIT_MASK, 0, 0xffffffff, 0 },
+ { CMD_READMODIFYWRITE, THM_HWID, 0, mmTHM_BACO_CNTL_BASE_IDX, mmTHM_BACO_CNTL, THM_BACO_CNTL__BACO_SB_AXI_FENCE_MASK, THM_BACO_CNTL__BACO_SB_AXI_FENCE__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_DUMMY_EN_MASK, BACO_CNTL__BACO_DUMMY_EN__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_BIF_LCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_LCLK_SWITCH__SHIFT, 0, 0 },
+ { CMD_READMODIFYWRITE, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0 },
+ { CMD_WAITFOR, NBIF_HWID, 0, mmRCC_BACO_CNTL_MISC_BASE_IDX, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0 }
+};
+
+static const struct soc15_baco_cmd_entry clean_baco_tbl[] =
+{
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_6_BASE_IDX, mmBIOS_SCRATCH_6, 0, 0, 0, 0 },
+ { CMD_WRITE, NBIF_HWID, 0, mmBIOS_SCRATCH_7_BASE_IDX, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }
+};
+
+int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
+{
+ enum BACO_STATE cur_state;
+
+ smu9_baco_get_state(hwmgr, &cur_state);
+
+ if (cur_state == state)
+ /* asic already in the target state */
+ return 0;
+
+ if (state == BACO_STATE_IN) {
+ if (soc15_baco_program_registers(hwmgr, pre_baco_tbl,
+ ARRAY_SIZE(pre_baco_tbl))) {
+ if (smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0))
+ return -EINVAL;
+
+ if (soc15_baco_program_registers(hwmgr, enter_baco_tbl,
+ ARRAY_SIZE(enter_baco_tbl)))
+ return 0;
+ }
+ } else if (state == BACO_STATE_OUT) {
+ /* HW requires at least 20ms between regulator off and on */
+ msleep(20);
+ /* Execute Hardware BACO exit sequence */
+ if (soc15_baco_program_registers(hwmgr, exit_baco_tbl,
+ ARRAY_SIZE(exit_baco_tbl))) {
+ if (soc15_baco_program_registers(hwmgr, clean_baco_tbl,
+ ARRAY_SIZE(clean_baco_tbl)))
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
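vega12_baco_set_state drives the table-driven sequences above: entry programs pre_baco_tbl, sends PPSMC_MSG_EnterBaco and then enter_baco_tbl, while exit waits the required 20 ms before walking exit_baco_tbl and clean_baco_tbl. A sketch of the expected pairing (illustrative only; the surrounding reset plumbing is not shown):

/* Enter BACO: pre table + EnterBaco message + enter table */
if (vega12_baco_set_state(hwmgr, BACO_STATE_IN))
	return -EINVAL;

/* ... keep the ASIC powered down as long as required ... */

/* Exit BACO: exit table, then the clean table clears the BIOS scratch regs */
if (vega12_baco_set_state(hwmgr, BACO_STATE_OUT))
	return -EINVAL;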
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
new file mode 100644
index 000000000000..57b72e5a95ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_baco.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA12_BACO_H__
+#define __VEGA12_BACO_H__
+#include "smu9_baco.h"
+
+extern int vega12_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index bdb48e94eff6..707cd4b0357f 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -45,6 +45,7 @@
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
+#include "vega12_baco.h"
static int vega12_force_clock_level(struct pp_hwmgr *hwmgr,
@@ -2626,8 +2627,12 @@ static const struct pp_hwmgr_func vega12_hwmgr_funcs = {
.start_thermal_controller = vega12_start_thermal_controller,
.powergate_gfx = vega12_gfx_off_control,
.get_performance_level = vega12_get_performance_level,
+ .get_asic_baco_capability = smu9_baco_get_capability,
+ .get_asic_baco_state = smu9_baco_get_state,
+ .set_asic_baco_state = vega12_baco_set_state,
.get_ppfeature_status = vega12_get_ppfeature_status,
.set_ppfeature_status = vega12_set_ppfeature_status,
+
};
int vega12_hwmgr_init(struct pp_hwmgr *hwmgr)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
index 30b278c50222..e6d9e84059e1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_inc.h
@@ -35,5 +35,7 @@
#include "asic_reg/gc/gc_9_2_1_sh_mask.h"
#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_offset.h"
+#include "asic_reg/nbio/nbio_6_1_sh_mask.h"
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
index 5e8602a79b1c..df6ff9252401 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c
@@ -27,6 +27,7 @@
#include "vega20_inc.h"
#include "vega20_ppsmc.h"
#include "vega20_baco.h"
+#include "vega20_smumgr.h"
@@ -101,3 +102,14 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state)
return 0;
}
+
+int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr)
+{
+ int ret = 0;
+
+ ret = vega20_set_pptable_driver_address(hwmgr);
+ if (ret)
+ return ret;
+
+ return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_BacoWorkAroundFlushVDCI);
+}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
index 51c7f8392925..f06471e712dc 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.h
@@ -28,5 +28,6 @@
extern int vega20_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap);
extern int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state);
extern int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state);
+extern int vega20_baco_apply_vdci_flush_workaround(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 23b5b94a4939..9b9f87b84910 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -434,6 +434,7 @@ static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
hwmgr->platform_descriptor.clockStep.memoryClock = 500;
data->total_active_cus = adev->gfx.cu_info.number;
+ data->is_custom_profile_set = false;
return 0;
}
@@ -450,6 +451,7 @@ static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
ret = vega20_init_sclk_threshold(hwmgr);
@@ -457,7 +459,15 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
"Failed to init sclk threshold!",
return ret);
- return 0;
+ if (adev->in_baco_reset) {
+ adev->in_baco_reset = 0;
+
+ ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
+ if (ret)
+ pr_err("Failed to apply vega20 baco workaround!\n");
+ }
+
+ return ret;
}
/*
@@ -3450,7 +3460,18 @@ static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
return ;
data->vce_power_gated = bgate;
- vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ if (bgate) {
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ } else {
+ amdgpu_device_ip_set_powergating_state(hwmgr->adev,
+ AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ vega20_enable_disable_vce_dpm(hwmgr, !bgate);
+ }
+
}
static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
@@ -3826,16 +3847,19 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
{
DpmActivityMonitorCoeffInt_t activity_monitor;
int workload_type, result = 0;
+ uint32_t power_profile_mode = input[size];
- hwmgr->power_profile_mode = input[size];
-
- if (hwmgr->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
- pr_err("Invalid power profile mode %d\n", hwmgr->power_profile_mode);
+ if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %d\n", power_profile_mode);
return -EINVAL;
}
- if (hwmgr->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
- if (size < 10)
+ if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+ if (size == 0 && !data->is_custom_profile_set)
+ return -EINVAL;
+ if (size < 10 && size != 0)
return -EINVAL;
result = vega20_get_activity_monitor_coeff(hwmgr,
@@ -3845,6 +3869,13 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
"[SetPowerProfile] Failed to get activity monitor!",
return result);
+ /* If size==0, then we want to apply the already-configured
+ * CUSTOM profile again. Just apply it, since we checked its
+ * validity above
+ */
+ if (size == 0)
+ goto out;
+
switch (input[0]) {
case 0: /* Gfxclk */
activity_monitor.Gfx_FPS = input[1];
@@ -3895,17 +3926,21 @@ static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, ui
result = vega20_set_activity_monitor_coeff(hwmgr,
(uint8_t *)(&activity_monitor),
WORKLOAD_PPLIB_CUSTOM_BIT);
+ data->is_custom_profile_set = true;
PP_ASSERT_WITH_CODE(!result,
"[SetPowerProfile] Failed to set activity monitor!",
return result);
}
+out:
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type =
- conv_power_profile_to_pplib_workload(hwmgr->power_profile_mode);
+ conv_power_profile_to_pplib_workload(power_profile_mode);
smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
1 << workload_type);
+ hwmgr->power_profile_mode = power_profile_mode;
+
return 0;
}
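The vega20 setup change assumes some reset path sets adev->in_baco_reset before vega20_setup_asic_task() runs again; the task then clears the flag and issues the VDCI flush workaround exactly once. A sketch of that handshake (the reset-side code here is hypothetical and only illustrates the contract):

/* Reset side (hypothetical): flag that the next ASIC setup follows a
 * BACO reset so the SMC workaround gets applied.
 */
adev->in_baco_reset = 1;
/* ... perform the BACO reset ... */

/* vega20_setup_asic_task() then does, per the hunk above:
 *   adev->in_baco_reset = 0;
 *   vega20_baco_apply_vdci_flush_workaround(hwmgr);
 * which uploads the pptable driver address and sends
 * PPSMC_MSG_BacoWorkAroundFlushVDCI.
 */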
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index ac2a3118a0ae..2c3125f82b24 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -531,6 +531,8 @@ struct vega20_hwmgr {
bool pcie_parameters_override;
uint32_t pcie_gen_level1;
uint32_t pcie_width_level1;
+
+ bool is_custom_profile_set;
};
#define VEGA20_DPM2_NEAR_TDP_DEC 10
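The new amdgpu_smu.h below introduces struct smu_context with two hook tables (smu_funcs for common code, pptable_funcs for per-ASIC code) and a long series of smu_*() wrapper macros that invoke a hook only when it is implemented and otherwise evaluate to 0. A condensed sketch of that dispatch pattern; the example macro and caller are hypothetical:

/* Same shape as the smu_* wrappers in the header below: an optional
 * hook that defaults to success when a backend leaves it NULL.
 */
#define example_check_fw_status(smu) \
	((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)

static int example_hw_init(struct smu_context *smu)
{
	/* behaves like the wrappers below: skipped cleanly when unimplemented */
	return example_check_fw_status(smu);
}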
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
new file mode 100644
index 000000000000..c8b168b3413b
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -0,0 +1,770 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef __AMDGPU_SMU_H__
+#define __AMDGPU_SMU_H__
+
+#include "amdgpu.h"
+#include "kgd_pp_interface.h"
+#include "dm_pp_interface.h"
+
+struct smu_hw_power_state {
+ unsigned int magic;
+};
+
+struct smu_power_state;
+
+enum smu_state_ui_label {
+ SMU_STATE_UI_LABEL_NONE,
+ SMU_STATE_UI_LABEL_BATTERY,
+ SMU_STATE_UI_TABEL_MIDDLE_LOW,
+ SMU_STATE_UI_LABEL_BALLANCED,
+ SMU_STATE_UI_LABEL_MIDDLE_HIGHT,
+ SMU_STATE_UI_LABEL_PERFORMANCE,
+ SMU_STATE_UI_LABEL_BACO,
+};
+
+enum smu_state_classification_flag {
+ SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001,
+ SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004,
+ SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008,
+ SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080,
+ SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200,
+ SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400,
+ SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800,
+ SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000,
+ SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000,
+ SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000,
+ SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000,
+ SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000,
+ SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000,
+ SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000,
+};
+
+struct smu_state_classification_block {
+ enum smu_state_ui_label ui_label;
+ enum smu_state_classification_flag flags;
+ int bios_index;
+ bool temporary_state;
+ bool to_be_deleted;
+};
+
+struct smu_state_pcie_block {
+ unsigned int lanes;
+};
+
+enum smu_refreshrate_source {
+ SMU_REFRESHRATE_SOURCE_EDID,
+ SMU_REFRESHRATE_SOURCE_EXPLICIT
+};
+
+struct smu_state_display_block {
+ bool disable_frame_modulation;
+ bool limit_refreshrate;
+ enum smu_refreshrate_source refreshrate_source;
+ int explicit_refreshrate;
+ int edid_refreshrate_index;
+ bool enable_vari_bright;
+};
+
+struct smu_state_memroy_block {
+ bool dll_off;
+ uint8_t m3arb;
+ uint8_t unused[3];
+};
+
+struct smu_state_software_algorithm_block {
+ bool disable_load_balancing;
+ bool enable_sleep_for_timestamps;
+};
+
+struct smu_temperature_range {
+ int min;
+ int max;
+};
+
+struct smu_state_validation_block {
+ bool single_display_only;
+ bool disallow_on_dc;
+ uint8_t supported_power_levels;
+};
+
+struct smu_uvd_clocks {
+ uint32_t vclk;
+ uint32_t dclk;
+};
+
+/**
+ * Structure to hold a SMU Power State.
+ */
+struct smu_power_state {
+ uint32_t id;
+ struct list_head ordered_list;
+ struct list_head all_states_list;
+
+ struct smu_state_classification_block classification;
+ struct smu_state_validation_block validation;
+ struct smu_state_pcie_block pcie;
+ struct smu_state_display_block display;
+ struct smu_state_memroy_block memory;
+ struct smu_temperature_range temperatures;
+ struct smu_state_software_algorithm_block software;
+ struct smu_uvd_clocks uvd_clocks;
+ struct smu_hw_power_state hardware;
+};
+
+enum smu_message_type
+{
+ SMU_MSG_TestMessage = 0,
+ SMU_MSG_GetSmuVersion,
+ SMU_MSG_GetDriverIfVersion,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ SMU_MSG_SetAllowedFeaturesMaskHigh,
+ SMU_MSG_EnableAllSmuFeatures,
+ SMU_MSG_DisableAllSmuFeatures,
+ SMU_MSG_EnableSmuFeaturesLow,
+ SMU_MSG_EnableSmuFeaturesHigh,
+ SMU_MSG_DisableSmuFeaturesLow,
+ SMU_MSG_DisableSmuFeaturesHigh,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ SMU_MSG_SetWorkloadMask,
+ SMU_MSG_SetPptLimit,
+ SMU_MSG_SetDriverDramAddrHigh,
+ SMU_MSG_SetDriverDramAddrLow,
+ SMU_MSG_SetToolsDramAddrHigh,
+ SMU_MSG_SetToolsDramAddrLow,
+ SMU_MSG_TransferTableSmu2Dram,
+ SMU_MSG_TransferTableDram2Smu,
+ SMU_MSG_UseDefaultPPTable,
+ SMU_MSG_UseBackupPPTable,
+ SMU_MSG_RunBtc,
+ SMU_MSG_RequestI2CBus,
+ SMU_MSG_ReleaseI2CBus,
+ SMU_MSG_SetFloorSocVoltage,
+ SMU_MSG_SoftReset,
+ SMU_MSG_StartBacoMonitor,
+ SMU_MSG_CancelBacoMonitor,
+ SMU_MSG_EnterBaco,
+ SMU_MSG_SetSoftMinByFreq,
+ SMU_MSG_SetSoftMaxByFreq,
+ SMU_MSG_SetHardMinByFreq,
+ SMU_MSG_SetHardMaxByFreq,
+ SMU_MSG_GetMinDpmFreq,
+ SMU_MSG_GetMaxDpmFreq,
+ SMU_MSG_GetDpmFreqByIndex,
+ SMU_MSG_GetDpmClockFreq,
+ SMU_MSG_GetSsVoltageByDpm,
+ SMU_MSG_SetMemoryChannelConfig,
+ SMU_MSG_SetGeminiMode,
+ SMU_MSG_SetGeminiApertureHigh,
+ SMU_MSG_SetGeminiApertureLow,
+ SMU_MSG_SetMinLinkDpmByIndex,
+ SMU_MSG_OverridePcieParameters,
+ SMU_MSG_OverDriveSetPercentage,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ SMU_MSG_ReenableAcDcInterrupt,
+ SMU_MSG_NotifyPowerSource,
+ SMU_MSG_SetUclkFastSwitch,
+ SMU_MSG_SetUclkDownHyst,
+ SMU_MSG_GfxDeviceDriverReset,
+ SMU_MSG_GetCurrentRpm,
+ SMU_MSG_SetVideoFps,
+ SMU_MSG_SetTjMax,
+ SMU_MSG_SetFanTemperatureTarget,
+ SMU_MSG_PrepareMp1ForUnload,
+ SMU_MSG_DramLogSetDramAddrHigh,
+ SMU_MSG_DramLogSetDramAddrLow,
+ SMU_MSG_DramLogSetDramSize,
+ SMU_MSG_SetFanMaxRpm,
+ SMU_MSG_SetFanMinPwm,
+ SMU_MSG_ConfigureGfxDidt,
+ SMU_MSG_NumOfDisplays,
+ SMU_MSG_RemoveMargins,
+ SMU_MSG_ReadSerialNumTop32,
+ SMU_MSG_ReadSerialNumBottom32,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ SMU_MSG_WaflTest,
+ SMU_MSG_SetFclkGfxClkRatio,
+ SMU_MSG_AllowGfxOff,
+ SMU_MSG_DisallowGfxOff,
+ SMU_MSG_GetPptLimit,
+ SMU_MSG_GetDcModeMaxDpmFreq,
+ SMU_MSG_GetDebugData,
+ SMU_MSG_SetXgmiMode,
+ SMU_MSG_RunAfllBtc,
+ SMU_MSG_ExitBaco,
+ SMU_MSG_PrepareMp1ForReset,
+ SMU_MSG_PrepareMp1ForShutdown,
+ SMU_MSG_SetMGpuFanBoostLimitRpm,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ SMU_MSG_MAX_COUNT,
+};
+
+enum smu_memory_pool_size
+{
+ SMU_MEMORY_POOL_SIZE_ZERO = 0,
+ SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000,
+ SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000,
+ SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000,
+ SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000,
+};
+
+#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
+ do { \
+ tables[table_id].size = s; \
+ tables[table_id].align = a; \
+ tables[table_id].domain = d; \
+ } while (0)
+
+struct smu_table {
+ uint64_t size;
+ uint32_t align;
+ uint8_t domain;
+ uint64_t mc_address;
+ void *cpu_addr;
+ struct amdgpu_bo *bo;
+};
+
+enum smu_perf_level_designation {
+ PERF_LEVEL_ACTIVITY,
+ PERF_LEVEL_POWER_CONTAINMENT,
+};
+
+struct smu_performance_level {
+ uint32_t core_clock;
+ uint32_t memory_clock;
+ uint32_t vddc;
+ uint32_t vddci;
+ uint32_t non_local_mem_freq;
+ uint32_t non_local_mem_width;
+};
+
+struct smu_clock_info {
+ uint32_t min_mem_clk;
+ uint32_t max_mem_clk;
+ uint32_t min_eng_clk;
+ uint32_t max_eng_clk;
+ uint32_t min_bus_bandwidth;
+ uint32_t max_bus_bandwidth;
+};
+
+struct smu_bios_boot_up_values
+{
+ uint32_t revision;
+ uint32_t gfxclk;
+ uint32_t uclk;
+ uint32_t socclk;
+ uint32_t dcefclk;
+ uint32_t eclk;
+ uint32_t vclk;
+ uint32_t dclk;
+ uint16_t vddc;
+ uint16_t vddci;
+ uint16_t mvddc;
+ uint16_t vdd_gfx;
+ uint8_t cooling_id;
+ uint32_t pp_table_id;
+};
+
+struct smu_table_context
+{
+ void *power_play_table;
+ uint32_t power_play_table_size;
+ void *hardcode_pptable;
+
+ void *max_sustainable_clocks;
+ struct smu_bios_boot_up_values boot_values;
+ void *driver_pptable;
+ struct smu_table *tables;
+ uint32_t table_count;
+ struct smu_table memory_pool;
+ uint16_t software_shutdown_temp;
+ uint8_t thermal_controller_type;
+ uint16_t TDPODLimit;
+
+ uint8_t *od_feature_capabilities;
+ uint32_t *od_settings_max;
+ uint32_t *od_settings_min;
+ void *overdrive_table;
+ void *od8_settings;
+ bool od_gfxclk_update;
+ bool od_memclk_update;
+};
+
+struct smu_dpm_context {
+ uint32_t dpm_context_size;
+ void *dpm_context;
+ void *golden_dpm_context;
+ bool enable_umd_pstate;
+ enum amd_dpm_forced_level dpm_level;
+ enum amd_dpm_forced_level saved_dpm_level;
+ enum amd_dpm_forced_level requested_dpm_level;
+ struct smu_power_state *dpm_request_power_state;
+ struct smu_power_state *dpm_current_power_state;
+ struct mclock_latency_table *mclk_latency_table;
+};
+
+struct smu_power_context {
+ void *power_context;
+ uint32_t power_context_size;
+};
+
+
+#define SMU_FEATURE_MAX (64)
+struct smu_feature
+{
+ uint32_t feature_num;
+ DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
+ DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
+ struct mutex mutex;
+};
+
+struct smu_clocks {
+ uint32_t engine_clock;
+ uint32_t memory_clock;
+ uint32_t bus_bandwidth;
+ uint32_t engine_clock_in_sr;
+ uint32_t dcef_clock;
+ uint32_t dcef_clock_in_sr;
+};
+
+#define MAX_REGULAR_DPM_NUM 16
+struct mclk_latency_entries {
+ uint32_t frequency;
+ uint32_t latency;
+};
+struct mclock_latency_table {
+ uint32_t count;
+ struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM];
+};
+
+#define WORKLOAD_POLICY_MAX 7
+struct smu_context
+{
+ struct amdgpu_device *adev;
+
+ const struct smu_funcs *funcs;
+ const struct pptable_funcs *ppt_funcs;
+ struct mutex mutex;
+ uint64_t pool_size;
+
+ struct smu_table_context smu_table;
+ struct smu_dpm_context smu_dpm;
+ struct smu_power_context smu_power;
+ struct smu_feature smu_feature;
+ struct amd_pp_display_configuration *display_config;
+
+ uint32_t pstate_sclk;
+ uint32_t pstate_mclk;
+
+ bool od_enabled;
+ uint32_t power_limit;
+ uint32_t default_power_limit;
+
+ bool support_power_containment;
+ bool disable_watermark;
+
+#define WATERMARKS_EXIST (1 << 0)
+#define WATERMARKS_LOADED (1 << 1)
+ uint32_t watermarks_bitmap;
+
+ uint32_t workload_mask;
+ uint32_t workload_prority[WORKLOAD_POLICY_MAX];
+ uint32_t workload_setting[WORKLOAD_POLICY_MAX];
+ uint32_t power_profile_mode;
+ uint32_t default_power_profile_mode;
+
+ uint32_t smc_if_version;
+};
+
+struct pptable_funcs {
+ int (*alloc_dpm_context)(struct smu_context *smu);
+ int (*store_powerplay_table)(struct smu_context *smu);
+ int (*check_powerplay_table)(struct smu_context *smu);
+ int (*append_powerplay_table)(struct smu_context *smu);
+ int (*get_smu_msg_index)(struct smu_context *smu, uint32_t index);
+ int (*run_afll_btc)(struct smu_context *smu);
+ int (*get_unallowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu);
+ int (*set_default_dpm_table)(struct smu_context *smu);
+ int (*set_power_state)(struct smu_context *smu);
+ int (*populate_umd_state_clk)(struct smu_context *smu);
+ int (*print_clk_levels)(struct smu_context *smu, enum pp_clock_type type, char *buf);
+ int (*force_clk_levels)(struct smu_context *smu, enum pp_clock_type type, uint32_t mask);
+ int (*set_default_od8_settings)(struct smu_context *smu);
+ int (*update_specified_od8_value)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*get_od_percentage)(struct smu_context *smu, enum pp_clock_type type);
+ int (*set_od_percentage)(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value);
+ int (*od_edit_dpm_table)(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size);
+ int (*get_clock_by_type_with_latency)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_latency
+ *clocks);
+ int (*get_clock_by_type_with_voltage)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct
+ pp_clock_levels_with_voltage
+ *clocks);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ enum amd_dpm_forced_level (*get_performance_level)(struct smu_context *smu);
+ int (*force_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level);
+ int (*pre_display_config_changed)(struct smu_context *smu);
+ int (*display_config_changed)(struct smu_context *smu);
+ int (*apply_clocks_adjust_rules)(struct smu_context *smu);
+ int (*notify_smc_dispaly_config)(struct smu_context *smu);
+ int (*force_dpm_limit_value)(struct smu_context *smu, bool highest);
+ int (*unforce_dpm_levels)(struct smu_context *smu);
+ int (*upload_dpm_level)(struct smu_context *smu, bool max,
+ uint32_t feature_mask);
+ int (*get_profiling_clk_mask)(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask);
+ int (*set_cpu_power_state)(struct smu_context *smu);
+};
+
+struct smu_funcs
+{
+ int (*init_microcode)(struct smu_context *smu);
+ int (*init_smc_tables)(struct smu_context *smu);
+ int (*fini_smc_tables)(struct smu_context *smu);
+ int (*init_power)(struct smu_context *smu);
+ int (*fini_power)(struct smu_context *smu);
+ int (*load_microcode)(struct smu_context *smu);
+ int (*check_fw_status)(struct smu_context *smu);
+ int (*read_pptable_from_vbios)(struct smu_context *smu);
+ int (*get_vbios_bootup_values)(struct smu_context *smu);
+ int (*get_clk_info_from_vbios)(struct smu_context *smu);
+ int (*check_pptable)(struct smu_context *smu);
+ int (*parse_pptable)(struct smu_context *smu);
+ int (*populate_smc_pptable)(struct smu_context *smu);
+ int (*check_fw_version)(struct smu_context *smu);
+ int (*write_pptable)(struct smu_context *smu);
+ int (*set_min_dcef_deep_sleep)(struct smu_context *smu);
+ int (*set_tool_table_location)(struct smu_context *smu);
+ int (*notify_memory_pool_location)(struct smu_context *smu);
+ int (*write_watermarks_table)(struct smu_context *smu);
+ int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
+ int (*system_features_control)(struct smu_context *smu, bool en);
+ int (*send_smc_msg)(struct smu_context *smu, uint16_t msg);
+ int (*send_smc_msg_with_param)(struct smu_context *smu, uint16_t msg, uint32_t param);
+ int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ int (*init_display)(struct smu_context *smu);
+ int (*set_allowed_mask)(struct smu_context *smu);
+ int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ bool (*is_dpm_running)(struct smu_context *smu);
+ int (*update_feature_enable_state)(struct smu_context *smu, uint32_t feature_id, bool enabled);
+ int (*notify_display_change)(struct smu_context *smu);
+ int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool def);
+ int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+ int (*get_current_clk_freq)(struct smu_context *smu, uint32_t clk_id, uint32_t *value);
+ int (*init_max_sustainable_clocks)(struct smu_context *smu);
+ int (*start_thermal_control)(struct smu_context *smu);
+ int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+ int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk);
+ int (*set_active_display_count)(struct smu_context *smu, uint32_t count);
+ int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time,
+ bool cc6_disable, bool pstate_disable,
+ bool pstate_switch_disable);
+ int (*get_clock_by_type)(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+ int (*get_max_high_clocks)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*display_clock_voltage_request)(struct smu_context *smu, struct
+ pp_display_clock_request
+ *clock_req);
+ int (*get_dal_power_level)(struct smu_context *smu,
+ struct amd_pp_simple_clock_info *clocks);
+ int (*get_perf_level)(struct smu_context *smu,
+ enum smu_perf_level_designation designation,
+ struct smu_performance_level *level);
+ int (*get_current_shallow_sleep_clocks)(struct smu_context *smu,
+ struct smu_clock_info *clocks);
+ int (*notify_smu_enable_pwe)(struct smu_context *smu);
+ int (*set_watermarks_for_clock_ranges)(struct smu_context *smu,
+ struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges);
+ int (*set_od8_default_settings)(struct smu_context *smu,
+ bool initialize);
+ int (*conv_power_profile_to_pplib_workload)(int power_profile);
+ int (*get_power_profile_mode)(struct smu_context *smu, char *buf);
+ int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size);
+ int (*update_od8_settings)(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value);
+ int (*dpm_set_uvd_enable)(struct smu_context *smu, bool enable);
+ int (*dpm_set_vce_enable)(struct smu_context *smu, bool enable);
+ uint32_t (*get_sclk)(struct smu_context *smu, bool low);
+ uint32_t (*get_mclk)(struct smu_context *smu, bool low);
+ int (*get_current_rpm)(struct smu_context *smu, uint32_t *speed);
+ uint32_t (*get_fan_control_mode)(struct smu_context *smu);
+ int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode);
+ int (*get_fan_speed_percent)(struct smu_context *smu, uint32_t *speed);
+ int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed);
+ int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed);
+ int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate);
+
+};
+
+#define smu_init_microcode(smu) \
+ ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0)
+#define smu_init_smc_tables(smu) \
+ ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0)
+#define smu_fini_smc_tables(smu) \
+ ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0)
+#define smu_init_power(smu) \
+ ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0)
+#define smu_fini_power(smu) \
+ ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0)
+#define smu_load_microcode(smu) \
+ ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0)
+#define smu_check_fw_status(smu) \
+ ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0)
+#define smu_read_pptable_from_vbios(smu) \
+ ((smu)->funcs->read_pptable_from_vbios ? (smu)->funcs->read_pptable_from_vbios((smu)) : 0)
+#define smu_get_vbios_bootup_values(smu) \
+ ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0)
+#define smu_get_clk_info_from_vbios(smu) \
+ ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0)
+#define smu_check_pptable(smu) \
+ ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0)
+#define smu_parse_pptable(smu) \
+ ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0)
+#define smu_populate_smc_pptable(smu) \
+ ((smu)->funcs->populate_smc_pptable ? (smu)->funcs->populate_smc_pptable((smu)) : 0)
+#define smu_check_fw_version(smu) \
+ ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0)
+#define smu_write_pptable(smu) \
+ ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0)
+#define smu_set_min_dcef_deep_sleep(smu) \
+ ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0)
+#define smu_set_tool_table_location(smu) \
+ ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0)
+#define smu_notify_memory_pool_location(smu) \
+ ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0)
+#define smu_write_watermarks_table(smu) \
+ ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0)
+#define smu_set_last_dcef_min_deep_sleep_clk(smu) \
+ ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0)
+#define smu_system_features_control(smu, en) \
+ ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0)
+#define smu_init_max_sustainable_clocks(smu) \
+ ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0)
+#define smu_set_od8_default_settings(smu, initialize) \
+ ((smu)->funcs->set_od8_default_settings ? (smu)->funcs->set_od8_default_settings((smu), (initialize)) : 0)
+#define smu_update_od8_settings(smu, index, value) \
+ ((smu)->funcs->update_od8_settings ? (smu)->funcs->update_od8_settings((smu), (index), (value)) : 0)
+#define smu_get_current_rpm(smu, speed) \
+ ((smu)->funcs->get_current_rpm ? (smu)->funcs->get_current_rpm((smu), (speed)) : 0)
+#define smu_set_fan_speed_rpm(smu, speed) \
+ ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0)
+#define smu_send_smc_msg(smu, msg) \
+ ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0)
+#define smu_send_smc_msg_with_param(smu, msg, param) \
+ ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
+#define smu_read_smc_arg(smu, arg) \
+ ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0)
+#define smu_alloc_dpm_context(smu) \
+ ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
+#define smu_init_display(smu) \
+ ((smu)->funcs->init_display ? (smu)->funcs->init_display((smu)) : 0)
+#define smu_feature_set_allowed_mask(smu) \
+ ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0)
+#define smu_feature_get_enabled_mask(smu, mask, num) \
+ ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0)
+#define smu_is_dpm_running(smu) \
+ ((smu)->funcs->is_dpm_running ? (smu)->funcs->is_dpm_running((smu)) : 0)
+#define smu_feature_update_enable_state(smu, feature_id, enabled) \
+ ((smu)->funcs->update_feature_enable_state? (smu)->funcs->update_feature_enable_state((smu), (feature_id), (enabled)) : 0)
+#define smu_notify_display_change(smu) \
+ ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0)
+#define smu_store_powerplay_table(smu) \
+ ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0)
+#define smu_check_powerplay_table(smu) \
+ ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0)
+#define smu_append_powerplay_table(smu) \
+ ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0)
+#define smu_set_default_dpm_table(smu) \
+ ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0)
+#define smu_populate_umd_state_clk(smu) \
+ ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0)
+#define smu_set_default_od8_settings(smu) \
+ ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0)
+#define smu_update_specified_od8_value(smu, index, value) \
+ ((smu)->ppt_funcs->update_specified_od8_value ? (smu)->ppt_funcs->update_specified_od8_value((smu), (index), (value)) : 0)
+#define smu_get_power_limit(smu, limit, def) \
+ ((smu)->funcs->get_power_limit ? (smu)->funcs->get_power_limit((smu), (limit), (def)) : 0)
+#define smu_set_power_limit(smu, limit) \
+ ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0)
+#define smu_get_current_clk_freq(smu, clk_id, value) \
+ ((smu)->funcs->get_current_clk_freq ? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0)
+#define smu_print_clk_levels(smu, type, buf) \
+ ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (type), (buf)) : 0)
+#define smu_force_clk_levels(smu, type, level) \
+ ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (type), (level)) : 0)
+#define smu_get_od_percentage(smu, type) \
+ ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0)
+#define smu_set_od_percentage(smu, type, value) \
+ ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0)
+#define smu_od_edit_dpm_table(smu, type, input, size) \
+ ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0)
+#define smu_start_thermal_control(smu) \
+ ((smu)->funcs->start_thermal_control ? (smu)->funcs->start_thermal_control((smu)) : 0)
+#define smu_read_sensor(smu, sensor, data, size) \
+ ((smu)->funcs->read_sensor ? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : 0)
+#define smu_get_power_profile_mode(smu, buf) \
+ ((smu)->funcs->get_power_profile_mode ? (smu)->funcs->get_power_profile_mode((smu), (buf)) : 0)
+#define smu_set_power_profile_mode(smu, param, param_size) \
+ ((smu)->funcs->set_power_profile_mode ? (smu)->funcs->set_power_profile_mode((smu), (param), (param_size)) : 0)
+#define smu_get_performance_level(smu) \
+ ((smu)->ppt_funcs->get_performance_level ? (smu)->ppt_funcs->get_performance_level((smu)) : 0)
+#define smu_force_performance_level(smu, level) \
+ ((smu)->ppt_funcs->force_performance_level ? (smu)->ppt_funcs->force_performance_level((smu), (level)) : 0)
+#define smu_pre_display_config_changed(smu) \
+ ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0)
+#define smu_display_config_changed(smu) \
+ ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0)
+#define smu_apply_clocks_adjust_rules(smu) \
+ ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0)
+#define smu_notify_smc_dispaly_config(smu) \
+ ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0)
+#define smu_force_dpm_limit_value(smu, highest) \
+ ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0)
+#define smu_unforce_dpm_levels(smu) \
+ ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0)
+#define smu_upload_dpm_level(smu, max, feature_mask) \
+ ((smu)->ppt_funcs->upload_dpm_level ? (smu)->ppt_funcs->upload_dpm_level((smu), (max), (feature_mask)) : 0)
+#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \
+ ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0)
+#define smu_set_cpu_power_state(smu) \
+ ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0)
+#define smu_get_fan_control_mode(smu) \
+ ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0)
+#define smu_set_fan_control_mode(smu, value) \
+ ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0)
+#define smu_get_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->get_fan_speed_percent ? (smu)->funcs->get_fan_speed_percent((smu), (speed)) : 0)
+#define smu_set_fan_speed_percent(smu, speed) \
+ ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0)
+
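+/*
+ * Unlike the wrappers above, the following also check that ppt_funcs itself
+ * is non-NULL before dereferencing it.
+ */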
+#define smu_msg_get_index(smu, msg) \
+ ((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_smu_msg_index ? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL)
+#define smu_run_afll_btc(smu) \
+ ((smu)->ppt_funcs ? ((smu)->ppt_funcs->run_afll_btc ? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0)
+#define smu_get_unallowed_feature_mask(smu, feature_mask, num) \
+ ((smu)->ppt_funcs ? ((smu)->ppt_funcs->get_unallowed_feature_mask ? (smu)->ppt_funcs->get_unallowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0)
+#define smu_set_deep_sleep_dcefclk(smu, clk) \
+ ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0)
+#define smu_set_active_display_count(smu, count) \
+ ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0)
+#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \
+ ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0)
+#define smu_get_clock_by_type(smu, type, clocks) \
+ ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0)
+#define smu_get_max_high_clocks(smu, clocks) \
+ ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0)
+#define smu_get_clock_by_type_with_latency(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (type), (clocks)) : 0)
+#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \
+ ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0)
+#define smu_display_clock_voltage_request(smu, clock_req) \
+ ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0)
+#define smu_get_dal_power_level(smu, clocks) \
+ ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0)
+#define smu_get_perf_level(smu, designation, level) \
+ ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0)
+#define smu_get_current_shallow_sleep_clocks(smu, clocks) \
+ ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0)
+#define smu_notify_smu_enable_pwe(smu) \
+ ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0)
+#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \
+ ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0)
+#define smu_dpm_set_uvd_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_uvd_enable ? (smu)->funcs->dpm_set_uvd_enable((smu), (enable)) : 0)
+#define smu_dpm_set_vce_enable(smu, enable) \
+ ((smu)->funcs->dpm_set_vce_enable ? (smu)->funcs->dpm_set_vce_enable((smu), (enable)) : 0)
+#define smu_get_sclk(smu, low) \
+ ((smu)->funcs->get_sclk ? (smu)->funcs->get_sclk((smu), (low)) : 0)
+#define smu_get_mclk(smu, low) \
+ ((smu)->funcs->get_mclk ? (smu)->funcs->get_mclk((smu), (low)) : 0)
+#define smu_set_xgmi_pstate(smu, pstate) \
+ ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0)
+
+
+extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
+ uint16_t *size, uint8_t *frev, uint8_t *crev,
+ uint8_t **addr);
+
+extern const struct amd_ip_funcs smu_ip_funcs;
+
+extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
+extern int smu_feature_init_dpm(struct smu_context *smu);
+
+extern int smu_feature_is_enabled(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_enabled(struct smu_context *smu, int feature_id, bool enable);
+extern int smu_feature_is_supported(struct smu_context *smu, int feature_id);
+extern int smu_feature_set_supported(struct smu_context *smu, int feature_id, bool enable);
+
+int smu_update_table_with_arg(struct smu_context *smu, uint16_t table_id, uint16_t exarg,
+ void *table_data, bool drv2smu);
+#define smu_update_table(smu, table_id, table_data, drv2smu) \
+ smu_update_table_with_arg((smu), (table_id), 0, (table_data), (drv2smu))
+
+bool is_support_sw_smu(struct amdgpu_device *adev);
+int smu_reset(struct smu_context *smu);
+int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
+ void *data, uint32_t *size);
+int smu_sys_get_pp_table(struct smu_context *smu, void **table);
+int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size);
+int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info);
+enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu);
+
+/* smu to display interface */
+extern int smu_display_configuration_change(struct smu_context *smu, const
+ struct amd_pp_display_configuration
+ *display_config);
+extern int smu_get_current_clocks(struct smu_context *smu,
+ struct amd_pp_clock_info *clocks);
+extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate);
+extern int smu_handle_task(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ enum amd_pp_task task_id);
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
index a2991fa2e6f8..90879e4092a3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/rv_ppsmc.h
@@ -85,7 +85,6 @@
#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36
#define PPSMC_Message_Count 0x37
-
typedef uint16_t PPSMC_Result;
typedef int PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
index 9e837a5014c5..b96520528240 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu10.h
@@ -136,12 +136,14 @@
#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT)
/* Workload bits */
-#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0
-#define WORKLOAD_PPLIB_VIDEO_BIT 2
-#define WORKLOAD_PPLIB_VR_BIT 3
-#define WORKLOAD_PPLIB_COMPUTE_BIT 4
-#define WORKLOAD_PPLIB_CUSTOM_BIT 5
-#define WORKLOAD_PPLIB_COUNT 6
+#define WORKLOAD_DEFAULT_BIT 0
+#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1
+#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2
+#define WORKLOAD_PPLIB_VIDEO_BIT 3
+#define WORKLOAD_PPLIB_VR_BIT 4
+#define WORKLOAD_PPLIB_COMPUTE_BIT 5
+#define WORKLOAD_PPLIB_CUSTOM_BIT 6
+#define WORKLOAD_PPLIB_COUNT 7
typedef struct {
/* MP1_EXT_SCRATCH0 */
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
new file mode 100644
index 000000000000..aa8d81f4111e
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V11_0_H__
+#define __SMU_V11_0_H__
+
+#include "amdgpu_smu.h"
+
+/* MP Apertures */
+#define MP0_Public 0x03800000
+#define MP0_SRAM 0x03900000
+#define MP1_Public 0x03b00000
+#define MP1_SRAM 0x03c00004
+
+/* address block */
+#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP0_FW_INTF 0x30101c0
+#define smnMP1_PUB_CTRL 0x3010b14
+
+struct smu_11_0_max_sustainable_clocks {
+ uint32_t display_clock;
+ uint32_t phy_clock;
+ uint32_t pixel_clock;
+ uint32_t uclock;
+ uint32_t dcef_clock;
+ uint32_t soc_clock;
+};
+
+struct smu_11_0_dpm_table {
+ uint32_t min; /* MHz */
+ uint32_t max; /* MHz */
+};
+
+struct smu_11_0_dpm_tables {
+ struct smu_11_0_dpm_table soc_table;
+ struct smu_11_0_dpm_table gfx_table;
+ struct smu_11_0_dpm_table uclk_table;
+ struct smu_11_0_dpm_table eclk_table;
+ struct smu_11_0_dpm_table vclk_table;
+ struct smu_11_0_dpm_table dclk_table;
+ struct smu_11_0_dpm_table dcef_table;
+ struct smu_11_0_dpm_table pixel_table;
+ struct smu_11_0_dpm_table display_table;
+ struct smu_11_0_dpm_table phy_table;
+ struct smu_11_0_dpm_table fclk_table;
+};
+
+struct smu_11_0_dpm_context {
+ struct smu_11_0_dpm_tables dpm_tables;
+ uint32_t workload_policy_mask;
+ uint32_t dcef_min_ds_clk;
+};
+
+enum smu_11_0_power_state {
+ SMU_11_0_POWER_STATE__D0 = 0,
+ SMU_11_0_POWER_STATE__D1,
+ SMU_11_0_POWER_STATE__D3, /* Sleep */
+ SMU_11_0_POWER_STATE__D4, /* Hibernate */
+ SMU_11_0_POWER_STATE__D5, /* Power off */
+};
+
+struct smu_11_0_power_context {
+ uint32_t power_source;
+ uint8_t in_power_limit_boost_mode;
+ enum smu_11_0_power_state power_state;
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
new file mode 100644
index 000000000000..f466f624ad32
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU_V11_0_PPSMC_H
+#define SMU_V11_0_PPSMC_H
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+// Message Definitions:
+// BASIC
+#define PPSMC_MSG_TestMessage 0x1
+#define PPSMC_MSG_GetSmuVersion 0x2
+#define PPSMC_MSG_GetDriverIfVersion 0x3
+#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4
+#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5
+#define PPSMC_MSG_EnableAllSmuFeatures 0x6
+#define PPSMC_MSG_DisableAllSmuFeatures 0x7
+#define PPSMC_MSG_EnableSmuFeaturesLow 0x8
+#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9
+#define PPSMC_MSG_DisableSmuFeaturesLow 0xA
+#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB
+#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC
+#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD
+#define PPSMC_MSG_SetDriverDramAddrHigh 0xE
+#define PPSMC_MSG_SetDriverDramAddrLow 0xF
+#define PPSMC_MSG_SetToolsDramAddrHigh 0x10
+#define PPSMC_MSG_SetToolsDramAddrLow 0x11
+#define PPSMC_MSG_TransferTableSmu2Dram 0x12
+#define PPSMC_MSG_TransferTableDram2Smu 0x13
+#define PPSMC_MSG_UseDefaultPPTable 0x14
+#define PPSMC_MSG_UseBackupPPTable 0x15
+#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16
+#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17
+
+//BACO/BAMACO/BOMACO
+#define PPSMC_MSG_EnterBaco 0x18
+#define PPSMC_MSG_ExitBaco 0x19
+
+//DPM
+#define PPSMC_MSG_SetSoftMinByFreq 0x1A
+#define PPSMC_MSG_SetSoftMaxByFreq 0x1B
+#define PPSMC_MSG_SetHardMinByFreq 0x1C
+#define PPSMC_MSG_SetHardMaxByFreq 0x1D
+#define PPSMC_MSG_GetMinDpmFreq 0x1E
+#define PPSMC_MSG_GetMaxDpmFreq 0x1F
+#define PPSMC_MSG_GetDpmFreqByIndex 0x20
+#define PPSMC_MSG_OverridePcieParameters 0x21
+#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x22
+#define PPSMC_MSG_SetWorkloadMask 0x23
+#define PPSMC_MSG_SetUclkFastSwitch 0x24
+#define PPSMC_MSG_GetAvfsVoltageByDpm 0x25
+#define PPSMC_MSG_SetVideoFps 0x26
+#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x27
+
+//Power Gating
+#define PPSMC_MSG_AllowGfxOff 0x28
+#define PPSMC_MSG_DisallowGfxOff 0x29
+#define PPSMC_MSG_PowerUpVcn 0x2A
+#define PPSMC_MSG_PowerDownVcn 0x2B
+#define PPSMC_MSG_PowerUpJpeg 0x2C
+#define PPSMC_MSG_PowerDownJpeg 0x2D
+//reserve 0x2A to 0x2F for PG harvesting TBD
+
+//I2C Interface
+#define PPSMC_RequestI2cTransaction 0x30
+
+//Resets
+#define PPSMC_MSG_SoftReset 0x31 //FIXME Need confirmation from driver
+#define PPSMC_MSG_PrepareMp1ForUnload 0x32
+#define PPSMC_MSG_PrepareMp1ForReset 0x33
+#define PPSMC_MSG_PrepareMp1ForShutdown 0x34
+
+//ACDC Power Source
+#define PPSMC_MSG_SetPptLimit 0x35
+#define PPSMC_MSG_GetPptLimit 0x36
+#define PPSMC_MSG_ReenableAcDcInterrupt 0x37
+#define PPSMC_MSG_NotifyPowerSource 0x38
+//#define PPSMC_MSG_GfxDeviceDriverReset 0x39 //FIXME mode1 and 2 resets will go directly go PSP
+
+//BTC
+#define PPSMC_MSG_RunBtc 0x3A
+
+//Debug
+#define PPSMC_MSG_DramLogSetDramAddrHigh 0x3B
+#define PPSMC_MSG_DramLogSetDramAddrLow 0x3C
+#define PPSMC_MSG_DramLogSetDramSize 0x3D
+#define PPSMC_MSG_GetDebugData 0x3E
+
+//Others
+#define PPSMC_MSG_ConfigureGfxDidt 0x3F
+#define PPSMC_MSG_NumOfDisplays 0x40
+
+#define PPSMC_MSG_SetMemoryChannelConfig 0x41
+#define PPSMC_MSG_SetGeminiMode 0x42
+#define PPSMC_MSG_SetGeminiApertureHigh 0x43
+#define PPSMC_MSG_SetGeminiApertureLow 0x44
+
+#define PPSMC_Message_Count 0x45
+
+typedef uint32_t PPSMC_Result;
+typedef uint32_t PPSMC_Msg;
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
new file mode 100644
index 000000000000..92c65b80bde2
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef SMU_11_0_PPTABLE_H
+#define SMU_11_0_PPTABLE_H
+
+
+#define SMU_11_0_TABLE_FORMAT_REVISION 12
+
+//// POWERPLAYTABLE::ulPlatformCaps
+#define SMU_11_0_PP_PLATFORM_CAP_POWERPLAY 0x1
+#define SMU_11_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2
+#define SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC 0x4
+#define SMU_11_0_PP_PLATFORM_CAP_BACO 0x8
+#define SMU_11_0_PP_PLATFORM_CAP_MACO 0x10
+#define SMU_11_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20
+
+// SMU_11_0_PP_THERMALCONTROLLER - Thermal Controller Type
+#define SMU_11_0_PP_THERMALCONTROLLER_NONE 0
+
+#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
+#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+
+enum SMU_11_0_ODFEATURE_ID {
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_COUNT = 14,
+};
+#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
+
+enum SMU_11_0_ODSETTING_ID {
+ SMU_11_0_ODSETTING_GFXCLKFMAX = 0,
+ SMU_11_0_ODSETTING_GFXCLKFMIN,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ SMU_11_0_ODSETTING_UCLKFMAX,
+ SMU_11_0_ODSETTING_POWERPERCENTAGE,
+ SMU_11_0_ODSETTING_FANRPMMIN,
+ SMU_11_0_ODSETTING_FANRPMACOUSTICLIMIT,
+ SMU_11_0_ODSETTING_FANTARGETTEMPERATURE,
+ SMU_11_0_ODSETTING_OPERATINGTEMPMAX,
+ SMU_11_0_ODSETTING_ACTIMING,
+ SMU_11_0_ODSETTING_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODSETTING_AUTOUVENGINE,
+ SMU_11_0_ODSETTING_AUTOOCENGINE,
+ SMU_11_0_ODSETTING_AUTOOCMEMORY,
+ SMU_11_0_ODSETTING_COUNT,
+};
+#define SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings
+
+struct smu_11_0_overdrive_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t feature_count; //Total number of supported features
+ uint32_t setting_count; //Total number of supported settings
+ uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags
+ uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings
+ uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings
+} __attribute__((packed));
+
+enum SMU_11_0_PPCLOCK_ID {
+ SMU_11_0_PPCLOCK_GFXCLK = 0,
+ SMU_11_0_PPCLOCK_VCLK,
+ SMU_11_0_PPCLOCK_DCLK,
+ SMU_11_0_PPCLOCK_ECLK,
+ SMU_11_0_PPCLOCK_SOCCLK,
+ SMU_11_0_PPCLOCK_UCLK,
+ SMU_11_0_PPCLOCK_DCEFCLK,
+ SMU_11_0_PPCLOCK_DISPCLK,
+ SMU_11_0_PPCLOCK_PIXCLK,
+ SMU_11_0_PPCLOCK_PHYCLK,
+ SMU_11_0_PPCLOCK_COUNT,
+};
+#define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks
+
+struct smu_11_0_power_saving_clock_table
+{
+ uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION
+ uint8_t reserve[3]; //Zero filled field reserved for future use
+ uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT
+ uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz
+ uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz
+} __attribute__((packed));
+
+struct smu_11_0_powerplay_table
+{
+ struct atom_common_table_header header;
+ uint8_t table_revision;
+ uint32_t table_size; //Driver portion table size. The offset to smc_pptable including header size
+ uint32_t golden_pp_id;
+ uint32_t golden_revision;
+ uint16_t format_id;
+ uint32_t platform_caps; //POWERPLAYTABLE::ulPlatformCaps
+
+ uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER
+
+ uint16_t small_power_limit1;
+ uint16_t small_power_limit2;
+ uint16_t boost_power_limit;
+ uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning.
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning.
+ uint16_t software_shutdown_temp;
+
+ uint16_t reserve[6]; //Zero filled field reserved for future use
+
+ struct smu_11_0_power_saving_clock_table power_saving_clock;
+ struct smu_11_0_overdrive_table overdrive_table;
+
+ PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h
+} __attribute__((packed));
+
+#endif
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
index 4f63a736ea0e..a0883038f3c3 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h
@@ -119,7 +119,8 @@
#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A
#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D
#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F
-#define PPSMC_Message_Count 0x60
+#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60
+#define PPSMC_Message_Count 0x61
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
new file mode 100644
index 000000000000..92903a4cc4d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -0,0 +1,1977 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "vega20_ppt.h"
+#include "pp_thermal.h"
+
+#include "asic_reg/thm/thm_11_0_2_offset.h"
+#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
+#include "asic_reg/mp/mp_9_0_offset.h"
+#include "asic_reg/mp/mp_9_0_sh_mask.h"
+#include "asic_reg/nbio/nbio_7_4_offset.h"
+#include "asic_reg/smuio/smuio_9_0_offset.h"
+#include "asic_reg/smuio/smuio_9_0_sh_mask.h"
+
+MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
+
+#define SMU11_TOOL_SIZE 0x19000
+#define SMU11_THERMAL_MINIMUM_ALERT_TEMP 0
+#define SMU11_THERMAL_MAXIMUM_ALERT_TEMP 255
+
+#define SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES 1000
+#define SMU11_VOLTAGE_SCALE 4
+
+#define SMC_DPM_FEATURE (FEATURE_DPM_PREFETCHER_MASK | \
+ FEATURE_DPM_GFXCLK_MASK | \
+ FEATURE_DPM_UCLK_MASK | \
+ FEATURE_DPM_SOCCLK_MASK | \
+ FEATURE_DPM_UVD_MASK | \
+ FEATURE_DPM_VCE_MASK | \
+ FEATURE_DPM_MP0CLK_MASK | \
+ FEATURE_DPM_LINK_MASK | \
+ FEATURE_DPM_DCEFCLK_MASK)
+
+static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
+ uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ return 0;
+}
+
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ return 0;
+}
+
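+/*
+ * Poll MP1_SMN_C2PMSG_90 until the SMU posts a response; a value of 0x1
+ * means the last message succeeded, any other value is treated as an error.
+ */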
+static int smu_v11_0_wait_for_response(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t cur_value, i;
+
+ for (i = 0; i < adev->usec_timeout; i++) {
+ cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* timeout means wrong logic */
+ if (i == adev->usec_timeout)
+ return -ETIME;
+
+ return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO;
+}
+
+static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ smu_v11_0_wait_for_response(smu);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x\n", index,
+ ret);
+
+ return ret;
+
+}
+
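+/*
+ * Message-with-parameter protocol: wait for any outstanding response, clear
+ * C2PMSG_90, write the argument to C2PMSG_82 and the message index to
+ * C2PMSG_66, then wait for the SMU to post the result in C2PMSG_90.
+ */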
+static int
+smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg,
+ uint32_t param)
+{
+
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0, index = 0;
+
+ index = smu_msg_get_index(smu, msg);
+ if (index < 0)
+ return index;
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x, param 0x%x\n",
+ index, ret, param);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+
+ smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
+
+ ret = smu_v11_0_wait_for_response(smu);
+ if (ret)
+ pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+
+ return ret;
+}
+
+static int smu_v11_0_init_microcode(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ const char *chip_name;
+ char fw_name[30];
+ int err = 0;
+ const struct smc_firmware_header_v1_0 *hdr;
+ const struct common_firmware_header *header;
+ struct amdgpu_firmware_info *ucode = NULL;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ chip_name = "vega20";
+ break;
+ default:
+ BUG();
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
+
+ err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+ err = amdgpu_ucode_validate(adev->pm.fw);
+ if (err)
+ goto out;
+
+ hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
+ amdgpu_ucode_print_smc_hdr(&hdr->header);
+ adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
+ ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
+ ucode->fw = adev->pm.fw;
+ header = (const struct common_firmware_header *)ucode->fw->data;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
+ }
+
+out:
+ if (err) {
+ DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
+ fw_name);
+ release_firmware(adev->pm.fw);
+ adev->pm.fw = NULL;
+ }
+ return err;
+}
+
+static int smu_v11_0_load_microcode(struct smu_context *smu)
+{
+ return 0;
+}
+
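+/*
+ * MP1 firmware is considered ready once it reports the INTERRUPTS_ENABLED
+ * flag in its firmware-flags register.
+ */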
+static int smu_v11_0_check_fw_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t mp1_fw_flags;
+
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+
+ if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
+ MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
+ return 0;
+
+ return -EIO;
+}
+
+static int smu_v11_0_check_fw_version(struct smu_context *smu)
+{
+ uint32_t smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
+ if (ret)
+ goto err;
+
+ ret = smu_read_smc_arg(smu, &smu_version);
+ if (ret)
+ goto err;
+
+ if (smu_version != smu->smc_if_version)
+ ret = -EINVAL;
+err:
+ return ret;
+}
+
+static int smu_v11_0_read_pptable_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ void *table;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ powerplayinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&table);
+ if (ret)
+ return ret;
+
+ if (!smu->smu_table.power_play_table)
+ smu->smu_table.power_play_table = table;
+ if (!smu->smu_table.power_play_table_size)
+ smu->smu_table.power_play_table_size = size;
+
+ return 0;
+}
+
+static int smu_v11_0_init_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
+ return -EINVAL;
+
+ return smu_alloc_dpm_context(smu);
+}
+
+static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_dpm->dpm_context);
+ kfree(smu_dpm->golden_dpm_context);
+ kfree(smu_dpm->dpm_current_power_state);
+ kfree(smu_dpm->dpm_request_power_state);
+ smu_dpm->dpm_context = NULL;
+ smu_dpm->golden_dpm_context = NULL;
+ smu_dpm->dpm_context_size = 0;
+ smu_dpm->dpm_current_power_state = NULL;
+ smu_dpm->dpm_request_power_state = NULL;
+
+ return 0;
+}
+
+static int smu_v11_0_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = NULL;
+ int ret = 0;
+
+ if (smu_table->tables || smu_table->table_count != 0)
+ return -EINVAL;
+
+ tables = kcalloc(TABLE_COUNT, sizeof(struct smu_table), GFP_KERNEL);
+ if (!tables)
+ return -ENOMEM;
+
+ smu_table->tables = tables;
+ smu_table->table_count = TABLE_COUNT;
+
+ SMU_TABLE_INIT(tables, TABLE_PPTABLE, sizeof(PPTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, TABLE_ACTIVITY_MONITOR_COEFF,
+ sizeof(DpmActivityMonitorCoeffInt_t),
+ PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM);
+
+ ret = smu_v11_0_init_dpm_context(smu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int smu_v11_0_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ int ret = 0;
+
+ if (!smu_table->tables || smu_table->table_count == 0)
+ return -EINVAL;
+
+ kfree(smu_table->tables);
+ smu_table->tables = NULL;
+ smu_table->table_count = 0;
+
+ ret = smu_v11_0_fini_dpm_context(smu);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int smu_v11_0_init_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (smu_power->power_context || smu_power->power_context_size != 0)
+ return -EINVAL;
+
+ smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
+ GFP_KERNEL);
+ if (!smu_power->power_context)
+ return -ENOMEM;
+ smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);
+
+ return 0;
+}
+
+static int smu_v11_0_fini_power(struct smu_context *smu)
+{
+ struct smu_power_context *smu_power = &smu->smu_power;
+
+ if (!smu_power->power_context || smu_power->power_context_size == 0)
+ return -EINVAL;
+
+ kfree(smu_power->power_context);
+ smu_power->power_context = NULL;
+ smu_power->power_context_size = 0;
+
+ return 0;
+}
+
+int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
+{
+ int ret, index;
+ uint16_t size;
+ uint8_t frev, crev;
+ struct atom_common_table_header *header;
+ struct atom_firmware_info_v3_3 *v_3_3;
+ struct atom_firmware_info_v3_1 *v_3_1;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ firmwareinfo);
+
+ ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
+ (uint8_t **)&header);
+ if (ret)
+ return ret;
+
+ if (header->format_revision != 3) {
+ pr_err("unknown atom_firmware_info version! for smu11\n");
+ return -EINVAL;
+ }
+
+ switch (header->content_revision) {
+ case 0:
+ case 1:
+ case 2:
+ v_3_1 = (struct atom_firmware_info_v3_1 *)header;
+ smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = 0;
+ break;
+ case 3:
+ default:
+ v_3_3 = (struct atom_firmware_info_v3_3 *)header;
+ smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
+ smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
+ smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
+ smu->smu_table.boot_values.socclk = 0;
+ smu->smu_table.boot_values.dcefclk = 0;
+ smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
+ smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
+ smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
+ smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
+ smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
+ smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ }
+
+ return 0;
+}
+
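+/*
+ * Query the bootup SOCCLK, DCEFCLK, ECLK, VCLK and DCLK from the VBIOS via
+ * the getsmuclockinfo command table; the returned frequencies (in Hz) are
+ * converted to the 10 kHz units used by the other boot values.
+ */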
+static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
+{
+ int ret, index;
+ struct amdgpu_device *adev = smu->adev;
+ struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
+ struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
+
+ input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_ECLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_VCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ memset(&input, 0, sizeof(input));
+ input.clk_id = SMU11_SYSPLL0_DCLK_ID;
+ input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
+ index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
+ getsmuclockinfo);
+
+ ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
+ (uint32_t *)&input);
+ if (ret)
+ return -EINVAL;
+
+ output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
+ smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
+
+ return 0;
+}
+
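+/*
+ * Report the driver-allocated memory pool to the SMU: the CPU virtual
+ * address goes through SetSystemVirtualDramAddr{High,Low}, while the GPU
+ * (MC) address and size go through the DramLog* messages.
+ */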
+static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *memory_pool = &smu_table->memory_pool;
+ int ret = 0;
+ uint64_t address;
+ uint32_t address_low, address_high;
+
+ if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
+ return ret;
+
+ address = (uintptr_t)memory_pool->cpu_addr;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetSystemVirtualDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+
+ address = memory_pool->mc_address;
+ address_high = (uint32_t)upper_32_bits(address);
+ address_low = (uint32_t)lower_32_bits(address);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
+ address_high);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
+ address_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
+ (uint32_t)memory_pool->size);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int smu_v11_0_check_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_check_powerplay_table(smu);
+ return ret;
+}
+
+static int smu_v11_0_parse_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (table_context->driver_pptable)
+ return -EINVAL;
+
+ table_context->driver_pptable = kzalloc(sizeof(PPTable_t), GFP_KERNEL);
+
+ if (!table_context->driver_pptable)
+ return -ENOMEM;
+
+ ret = smu_store_powerplay_table(smu);
+ if (ret)
+ return -EINVAL;
+
+ ret = smu_append_powerplay_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
+{
+ int ret;
+
+ ret = smu_set_default_dpm_table(smu);
+
+ return ret;
+}
+
+static int smu_v11_0_write_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret = 0;
+
+ ret = smu_update_table(smu, TABLE_PPTABLE, table_context->driver_pptable, true);
+
+ return ret;
+}
+
+static int smu_v11_0_write_watermarks_table(struct smu_context *smu)
+{
+ return smu_update_table(smu, TABLE_WATERMARKS,
+ smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr, true);
+}
+
+static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ if (ret)
+ pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
+
+ return ret;
+}
+
+static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ if (!table_context)
+ return -EINVAL;
+
+ return smu_set_deep_sleep_dcefclk(smu,
+ table_context->boot_values.dcefclk / 100);
+}
+
+static int smu_v11_0_set_tool_table_location(struct smu_context *smu)
+{
+ int ret = 0;
+ struct smu_table *tool_table = &smu->smu_table.tables[TABLE_PMSTATUSLOG];
+
+ if (tool_table->mc_address) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrHigh,
+ upper_32_bits(tool_table->mc_address));
+ if (!ret)
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetToolsDramAddrLow,
+ lower_32_bits(tool_table->mc_address));
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_init_display(struct smu_context *smu)
+{
+ int ret = 0;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ return ret;
+}
+
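+/*
+ * The 64-bit feature mask is split across two 32-bit SMU messages
+ * (Low/High), so toggling a single feature only touches the half that
+ * contains its bit.
+ */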
+static int smu_v11_0_update_feature_enable_state(struct smu_context *smu, uint32_t feature_id, bool enabled)
+{
+ uint32_t feature_low = 0, feature_high = 0;
+ int ret = 0;
+
+ if (feature_id >= 64)
+ return -EINVAL;
+
+ if (feature_id < 32)
+ feature_low = (1 << feature_id);
+ else
+ feature_high = (1 << (feature_id - 32));
+
+ if (enabled) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
+ feature_low);
+ if (ret)
+ return ret;
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
+ feature_high);
+ if (ret)
+ return ret;
+
+ }
+
+ return ret;
+}
+
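+/*
+ * Push the driver's 64-bit "allowed" feature bitmap to the SMU as two
+ * 32-bit halves.
+ */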
+static int smu_v11_0_set_allowed_mask(struct smu_context *smu)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ int ret = 0;
+ uint32_t feature_mask[2];
+
+ mutex_lock(&feature->mutex);
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
+ feature_mask[1]);
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0]);
+ if (ret)
+ goto failed;
+
+failed:
+ mutex_unlock(&feature->mutex);
+ return ret;
+}
+
+static int smu_v11_0_get_enabled_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ uint32_t feature_mask_high = 0, feature_mask_low = 0;
+ int ret = 0;
+
+ if (!feature_mask || num < 2)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_high);
+ if (ret)
+ return ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
+ if (ret)
+ return ret;
+ ret = smu_read_smc_arg(smu, &feature_mask_low);
+ if (ret)
+ return ret;
+
+ feature_mask[0] = feature_mask_low;
+ feature_mask[1] = feature_mask_high;
+
+ return ret;
+}
+
+static bool smu_v11_0_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint32_t feature_mask[2];
+ unsigned long feature_enabled;
+ ret = smu_v11_0_get_enabled_mask(smu, feature_mask, 2);
+ feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
+ ((uint64_t)feature_mask[1] << 32));
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v11_0_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ struct smu_feature *feature = &smu->smu_feature;
+ uint32_t feature_mask[2];
+ int ret = 0;
+
+ ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures));
+ if (ret)
+ return ret;
+ ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
+ if (ret)
+ return ret;
+
+ bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
+ feature->feature_num);
+ bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
+ feature->feature_num);
+
+ return ret;
+}
+
+static int smu_v11_0_notify_display_change(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT))
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+
+ return ret;
+}
+
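+/*
+ * Query the DC-mode maximum DPM frequency for the given clock; if the SMU
+ * reports 0 (no DC limit), fall back to the AC-mode maximum.
+ */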
+static int
+smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
+ PPCLK_e clock_select)
+{
+ int ret = 0;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+ if (ret)
+ return ret;
+
+ if (*clock != 0)
+ return 0;
+
+ /* if DC limit is zero, return AC limit */
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ clock_select << 16);
+ if (ret) {
+ pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
+ return ret;
+ }
+
+ ret = smu_read_smc_arg(smu, clock);
+
+ return ret;
+}
+
+static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
+{
+ struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
+ int ret = 0;
+
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ GFP_KERNEL);
+ if (!max_sustainable_clocks)
+ return -ENOMEM;
+
+ smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
+
+ max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
+ max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
+ max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
+ max_sustainable_clocks->display_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
+ max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->uclock),
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max UCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->soc_clock),
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max SOCCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->dcef_clock),
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DCEFCLK from SMC!",
+ __func__);
+ return ret;
+ }
+
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->display_clock),
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max DISPCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->phy_clock),
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PHYCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ ret = smu_v11_0_get_max_sustainable_clock(smu,
+ &(max_sustainable_clocks->pixel_clock),
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[%s] failed to get max PIXCLK from SMC!",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
+ max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;
+
+ return 0;
+}
+
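+/*
+ * With overdrive enabled, the default power limit is scaled up by the TDP
+ * OD limit, which is expressed as a percentage.
+ */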
+static int smu_v11_0_get_power_limit(struct smu_context *smu,
+ uint32_t *limit,
+ bool get_default)
+{
+ int ret = 0;
+
+ if (get_default) {
+ mutex_lock(&smu->mutex);
+ *limit = smu->default_power_limit;
+ if (smu->od_enabled) {
+ *limit *= (100 + smu->smu_table.TDPODLimit);
+ *limit /= 100;
+ }
+ mutex_unlock(&smu->mutex);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
+ POWER_SOURCE_AC << 16);
+ if (ret) {
+ pr_err("[%s] get PPT limit failed!", __func__);
+ return ret;
+ }
+ smu_read_smc_arg(smu, limit);
+ smu->power_limit = *limit;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+{
+ uint32_t max_power_limit;
+ int ret = 0;
+
+ if (n == 0)
+ n = smu->default_power_limit;
+
+ max_power_limit = smu->default_power_limit;
+
+ if (smu->od_enabled) {
+ max_power_limit *= (100 + smu->smu_table.TDPODLimit);
+ max_power_limit /= 100;
+ }
+
+ if (n > max_power_limit)
+ return -EINVAL;
+
+ if (smu_feature_is_enabled(smu, FEATURE_PPT_BIT))
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ if (ret) {
+ pr_err("[%s] Set power limit Failed!", __func__);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, uint32_t clk_id, uint32_t *value)
+{
+ int ret = 0;
+ uint32_t freq;
+
+ if (clk_id >= PPCLK_COUNT || !value)
+ return -EINVAL;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmClockFreq, (clk_id << 16));
+ if (ret)
+ return ret;
+
+ ret = smu_read_smc_arg(smu, &freq);
+ if (ret)
+ return ret;
+
+ freq *= 100;
+ *value = freq;
+
+ return ret;
+}
+
+static int smu_v11_0_get_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ memcpy(range, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));
+
+ range->max = smu->smu_table.software_shutdown_temp *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ return 0;
+}
+
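+/*
+ * Program the thermal alert high/low trip points into THM_THERMAL_INT_CTRL;
+ * the register fields are expressed in degrees Celsius.
+ */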
+static int smu_v11_0_set_thermal_range(struct smu_context *smu,
+ struct PP_TemperatureRange *range)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int low = SMU11_THERMAL_MINIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ int high = SMU11_THERMAL_MAXIMUM_ALERT_TEMP *
+ PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ uint32_t val;
+
+ if (low < range->min)
+ low = range->min;
+ if (high > range->max)
+ high = range->max;
+
+ if (low > high)
+ return -EINVAL;
+
+ val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
+ val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
+
+ return 0;
+}
+
+static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val = 0;
+
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
+ val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
+
+ WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);
+
+ return 0;
+}
+
+static int smu_v11_0_set_thermal_fan_table(struct smu_context *smu)
+{
+ int ret;
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature);
+
+ return ret;
+}
+
+static int smu_v11_0_start_thermal_control(struct smu_context *smu)
+{
+ int ret = 0;
+ struct PP_TemperatureRange range;
+ struct amdgpu_device *adev = smu->adev;
+
+ smu_v11_0_get_thermal_range(smu, &range);
+
+ if (smu->smu_table.thermal_controller_type) {
+ ret = smu_v11_0_set_thermal_range(smu, &range);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_enable_thermal_alert(smu);
+ if (ret)
+ return ret;
+ ret = smu_v11_0_set_thermal_fan_table(smu);
+ if (ret)
+ return ret;
+ }
+
+ adev->pm.dpm.thermal.min_temp = range.min;
+ adev->pm.dpm.thermal.max_temp = range.max;
+
+ return ret;
+}
+
+static int smu_v11_0_get_current_activity_percent(struct smu_context *smu,
+ uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.AverageGfxActivity;
+
+ return 0;
+}
+
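+/*
+ * CTF_TEMP is a 9-bit field in degrees Celsius; scale it to the
+ * millidegree units expected by the sensor interface.
+ */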
+static int smu_v11_0_thermal_get_temperature(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t temp = 0;
+
+ if (!value)
+ return -EINVAL;
+
+ temp = RREG32_SOC15(THM, 0, mmCG_MULT_THERMAL_STATUS);
+ temp = (temp & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
+ CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
+
+ temp = temp & 0x1ff;
+ temp *= SMU11_TEMPERATURE_UNITS_PER_CENTIGRADES;
+
+ *value = temp;
+
+ return 0;
+}
+
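+/*
+ * CurrSocketPower is reported in watts; shift left by 8 to return it in
+ * the 8.8 fixed-point format used by the GPU power sensor.
+ */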
+static int smu_v11_0_get_gpu_power(struct smu_context *smu, uint32_t *value)
+{
+ int ret = 0;
+ SmuMetrics_t metrics;
+
+ if (!value)
+ return -EINVAL;
+
+ ret = smu_update_table(smu, TABLE_SMU_METRICS, (void *)&metrics, false);
+ if (ret)
+ return ret;
+
+ *value = metrics.CurrSocketPower << 8;
+
+ return 0;
+}
+
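+/*
+ * SVI2 VID decoding: VID 0 corresponds to 1.55 V and each step subtracts
+ * 6.25 mV; the intermediate math is in 0.25 mV units and SMU11_VOLTAGE_SCALE
+ * converts the result back to mV.
+ */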
+static uint16_t convert_to_vddc(uint8_t vid)
+{
+ return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
+}
+
+static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t vdd = 0, val_vid = 0;
+
+ if (!value)
+ return -EINVAL;
+ val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
+ SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
+
+ vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);
+
+ *value = vdd;
+
+ return 0;
+}
+
+static int smu_v11_0_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+ int ret = 0;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v11_0_get_current_activity_percent(smu,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_TEMP:
+ ret = smu_v11_0_thermal_get_temperature(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v11_0_get_gpu_power(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_UVD_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VCE_POWER:
+ *(uint32_t *)data = smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT) ? 1 : 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
+ *(uint32_t *)data = 0;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
+ *(uint32_t *)data = pptable->FanMaximumRpm;
+ *size = 4;
+ break;
+ default:
+ ret = smu_common_read_sensor(smu, sensor, data, size);
+ break;
+ }
+
+ if (ret)
+ *size = 0;
+
+ return ret;
+}
+
+static int
+smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
+ struct pp_display_clock_request
+ *clock_req)
+{
+ enum amd_pp_clock_type clk_type = clock_req->clock_type;
+ int ret = 0;
+ PPCLK_e clk_select = 0;
+ uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ switch (clk_type) {
+ case amd_pp_dcef_clock:
+ clk_select = PPCLK_DCEFCLK;
+ break;
+ case amd_pp_disp_clock:
+ clk_select = PPCLK_DISPCLK;
+ break;
+ case amd_pp_pixel_clock:
+ clk_select = PPCLK_PIXCLK;
+ break;
+ case amd_pp_phy_clock:
+ clk_select = PPCLK_PHYCLK;
+ break;
+ default:
+ pr_info("[%s] Invalid Clock Type!", __func__);
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto failed;
+
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
+ (clk_select << 16) | clk_freq);
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v11_0_set_watermarks_table(struct smu_context *smu,
+ Watermarks_t *table, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int i;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_wm_dmif_sets > 4 ||
+ clock_ranges->num_wm_mcif_sets > 4)
+ return -EINVAL;
+
+ for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) {
+ table->WatermarkRow[1][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[1][i].WmSetting = (uint8_t)
+ clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
+ }
+
+ for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) {
+ table->WatermarkRow[0][i].MinClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxClock =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MinUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].MaxUclk =
+ cpu_to_le16((uint16_t)
+ (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
+ 1000));
+ table->WatermarkRow[0][i].WmSetting = (uint8_t)
+ clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
+ }
+
+ return 0;
+}
+
+static int
+smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct
+ dm_pp_wm_sets_with_clock_ranges_soc15
+ *clock_ranges)
+{
+ int ret = 0;
+ struct smu_table *watermarks = &smu->smu_table.tables[TABLE_WATERMARKS];
+ Watermarks_t *table = watermarks->cpu_addr;
+
+ if (!smu->disable_watermark &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_v11_0_set_watermarks_table(smu, table, clock_ranges);
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_get_clock_ranges(struct smu_context *smu,
+ uint32_t *clock,
+ PPCLK_e clock_select,
+ bool max)
+{
+ int ret;
+
+ *clock = 0;
+ if (max) {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get max clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ } else {
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq,
+ (clock_select << 16));
+ if (ret) {
+ pr_err("[GetClockRanges] Failed to get min clock from SMC!\n");
+ return ret;
+ }
+ smu_read_smc_arg(smu, clock);
+ }
+
+ return 0;
+}
+
+static uint32_t smu_v11_0_dpm_get_sclk(struct smu_context *smu, bool low)
+{
+ uint32_t gfx_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ pr_err("[GetSclks]: gfxclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, false);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get min PPCLK_GFXCLK\n");
+ return ret;
+ }
+ } else {
+ ret = smu_v11_0_get_clock_ranges(smu, &gfx_clk, PPCLK_GFXCLK, true);
+ if (ret) {
+ pr_err("[GetSclks]: fail to get max PPCLK_GFXCLK\n");
+ return ret;
+ }
+ }
+
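+ /* SMC reports the clock in MHz; the dpm interface expects 10 kHz units */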
+ return (gfx_clk * 100);
+}
+
+static uint32_t smu_v11_0_dpm_get_mclk(struct smu_context *smu, bool low)
+{
+ uint32_t mem_clk;
+ int ret;
+
+ if (!smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ pr_err("[GetMclks]: memclk dpm not enabled!\n");
+ return -EPERM;
+ }
+
+ if (low) {
+ ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, false);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get min PPCLK_UCLK\n");
+ return ret;
+ }
+ } else {
+ ret = smu_v11_0_get_clock_ranges(smu, &mem_clk, PPCLK_UCLK, true);
+ if (ret) {
+ pr_err("[GetMclks]: fail to get max PPCLK_UCLK\n");
+ return ret;
+ }
+ }
+
+ return (mem_clk * 100);
+}
+
+static int smu_v11_0_set_od8_default_settings(struct smu_context *smu,
+ bool initialize)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ if (initialize) {
+ if (table_context->overdrive_table)
+ return -EINVAL;
+
+ table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL);
+
+ if (!table_context->overdrive_table)
+ return -ENOMEM;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_set_default_od8_settings(smu);
+ }
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_conv_power_profile_to_pplib_workload(int power_profile)
+{
+ int pplib_workload = 0;
+
+ switch (power_profile) {
+ case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
+ pplib_workload = WORKLOAD_DEFAULT_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
+ pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_POWERSAVING:
+ pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VIDEO:
+ pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_VR:
+ pplib_workload = WORKLOAD_PPLIB_VR_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_COMPUTE:
+ pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
+ break;
+ case PP_SMC_POWER_PROFILE_CUSTOM:
+ pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
+ break;
+ }
+
+ return pplib_workload;
+}
+
+static int smu_v11_0_get_power_profile_mode(struct smu_context *smu, char *buf)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ uint32_t i, size = 0;
+ uint16_t workload_type = 0;
+ static const char *profile_name[] = {
+ "BOOTUP_DEFAULT",
+ "3D_FULL_SCREEN",
+ "POWER_SAVING",
+ "VIDEO",
+ "VR",
+ "COMPUTE",
+ "CUSTOM"};
+ static const char *title[] = {
+ "PROFILE_INDEX(NAME)",
+ "CLOCK_TYPE(NAME)",
+ "FPS",
+ "UseRlcBusy",
+ "MinActiveFreqType",
+ "MinActiveFreq",
+ "BoosterFreqType",
+ "BoosterFreq",
+ "PD_Data_limit_c",
+ "PD_Data_error_coeff",
+ "PD_Data_error_rate_coeff"};
+ int result = 0;
+
+ if (!buf)
+ return -EINVAL;
+
+ size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
+ title[0], title[1], title[2], title[3], title[4], title[5],
+ title[6], title[7], title[8], title[9], title[10]);
+
+ for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type = smu_v11_0_conv_power_profile_to_pplib_workload(i);
+ result = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ workload_type, &activity_monitor, false);
+ if (result) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return result;
+ }
+
+ size += sprintf(buf + size, "%2d %14s%s:\n",
+ i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 0,
+ "GFXCLK",
+ activity_monitor.Gfx_FPS,
+ activity_monitor.Gfx_UseRlcBusy,
+ activity_monitor.Gfx_MinActiveFreqType,
+ activity_monitor.Gfx_MinActiveFreq,
+ activity_monitor.Gfx_BoosterFreqType,
+ activity_monitor.Gfx_BoosterFreq,
+ activity_monitor.Gfx_PD_Data_limit_c,
+ activity_monitor.Gfx_PD_Data_error_coeff,
+ activity_monitor.Gfx_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 1,
+ "SOCCLK",
+ activity_monitor.Soc_FPS,
+ activity_monitor.Soc_UseRlcBusy,
+ activity_monitor.Soc_MinActiveFreqType,
+ activity_monitor.Soc_MinActiveFreq,
+ activity_monitor.Soc_BoosterFreqType,
+ activity_monitor.Soc_BoosterFreq,
+ activity_monitor.Soc_PD_Data_limit_c,
+ activity_monitor.Soc_PD_Data_error_coeff,
+ activity_monitor.Soc_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 2,
+ "UCLK",
+ activity_monitor.Mem_FPS,
+ activity_monitor.Mem_UseRlcBusy,
+ activity_monitor.Mem_MinActiveFreqType,
+ activity_monitor.Mem_MinActiveFreq,
+ activity_monitor.Mem_BoosterFreqType,
+ activity_monitor.Mem_BoosterFreq,
+ activity_monitor.Mem_PD_Data_limit_c,
+ activity_monitor.Mem_PD_Data_error_coeff,
+ activity_monitor.Mem_PD_Data_error_rate_coeff);
+
+ size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
+ " ",
+ 3,
+ "FCLK",
+ activity_monitor.Fclk_FPS,
+ activity_monitor.Fclk_UseRlcBusy,
+ activity_monitor.Fclk_MinActiveFreqType,
+ activity_monitor.Fclk_MinActiveFreq,
+ activity_monitor.Fclk_BoosterFreqType,
+ activity_monitor.Fclk_BoosterFreq,
+ activity_monitor.Fclk_PD_Data_limit_c,
+ activity_monitor.Fclk_PD_Data_error_coeff,
+ activity_monitor.Fclk_PD_Data_error_rate_coeff);
+ }
+
+ return size;
+}
+
+static int smu_v11_0_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
+{
+ DpmActivityMonitorCoeffInt_t activity_monitor;
+ int workload_type = 0, ret = 0;
+
+ if (input[size] > PP_SMC_POWER_PROFILE_CUSTOM) {
+ pr_err("Invalid power profile mode %ld\n", input[size]);
+ return -EINVAL;
+ }
+
+ smu->power_profile_mode = input[size];
+
+ if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, false);
+ if (ret) {
+ pr_err("[%s] Failed to get activity monitor!", __func__);
+ return ret;
+ }
+
+ switch (input[0]) {
+ case 0: /* Gfxclk */
+ activity_monitor.Gfx_FPS = input[1];
+ activity_monitor.Gfx_UseRlcBusy = input[2];
+ activity_monitor.Gfx_MinActiveFreqType = input[3];
+ activity_monitor.Gfx_MinActiveFreq = input[4];
+ activity_monitor.Gfx_BoosterFreqType = input[5];
+ activity_monitor.Gfx_BoosterFreq = input[6];
+ activity_monitor.Gfx_PD_Data_limit_c = input[7];
+ activity_monitor.Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 1: /* Socclk */
+ activity_monitor.Soc_FPS = input[1];
+ activity_monitor.Soc_UseRlcBusy = input[2];
+ activity_monitor.Soc_MinActiveFreqType = input[3];
+ activity_monitor.Soc_MinActiveFreq = input[4];
+ activity_monitor.Soc_BoosterFreqType = input[5];
+ activity_monitor.Soc_BoosterFreq = input[6];
+ activity_monitor.Soc_PD_Data_limit_c = input[7];
+ activity_monitor.Soc_PD_Data_error_coeff = input[8];
+ activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 2: /* Uclk */
+ activity_monitor.Mem_FPS = input[1];
+ activity_monitor.Mem_UseRlcBusy = input[2];
+ activity_monitor.Mem_MinActiveFreqType = input[3];
+ activity_monitor.Mem_MinActiveFreq = input[4];
+ activity_monitor.Mem_BoosterFreqType = input[5];
+ activity_monitor.Mem_BoosterFreq = input[6];
+ activity_monitor.Mem_PD_Data_limit_c = input[7];
+ activity_monitor.Mem_PD_Data_error_coeff = input[8];
+ activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ break;
+ case 3: /* Fclk */
+ activity_monitor.Fclk_FPS = input[1];
+ activity_monitor.Fclk_UseRlcBusy = input[2];
+ activity_monitor.Fclk_MinActiveFreqType = input[3];
+ activity_monitor.Fclk_MinActiveFreq = input[4];
+ activity_monitor.Fclk_BoosterFreqType = input[5];
+ activity_monitor.Fclk_BoosterFreq = input[6];
+ activity_monitor.Fclk_PD_Data_limit_c = input[7];
+ activity_monitor.Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ break;
+ }
+
+ ret = smu_update_table_with_arg(smu, TABLE_ACTIVITY_MONITOR_COEFF,
+ WORKLOAD_PPLIB_CUSTOM_BIT, &activity_monitor, true);
+ if (ret) {
+ pr_err("[%s] Failed to set activity monitor!", __func__);
+ return ret;
+ }
+ }
+
+ /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
+ workload_type =
+ smu_v11_0_conv_power_profile_to_pplib_workload(smu->power_profile_mode);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ 1 << workload_type);
+
+ return ret;
+}
+
+static int smu_v11_0_update_od8_settings(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ smu_update_specified_od8_value(smu, index, value);
+
+ ret = smu_update_table(smu, TABLE_OVERDRIVE,
+ table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int smu_v11_0_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
+{
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_UVD_BIT))
+ return 0;
+
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT))
+ return 0;
+
+ return smu_feature_set_enabled(smu, FEATURE_DPM_UVD_BIT, enable);
+}
+
+static int smu_v11_0_dpm_set_vce_enable(struct smu_context *smu, bool enable)
+{
+ if (!smu_feature_is_supported(smu, FEATURE_DPM_VCE_BIT))
+ return 0;
+
+ if (enable == smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT))
+ return 0;
+
+ return smu_feature_set_enabled(smu, FEATURE_DPM_VCE_BIT, enable);
+}
+
+static int smu_v11_0_get_current_rpm(struct smu_context *smu,
+ uint32_t *current_rpm)
+{
+ int ret;
+
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+
+ if (ret) {
+ pr_err("Attempt to get current RPM from SMC Failed!\n");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, current_rpm);
+
+ return 0;
+}
+
+static uint32_t
+smu_v11_0_get_fan_control_mode(struct smu_context *smu)
+{
+ if (!smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT))
+ return AMD_FAN_CTRL_MANUAL;
+ else
+ return AMD_FAN_CTRL_AUTO;
+}
+
+static int
+smu_v11_0_get_fan_speed_percent(struct smu_context *smu,
+ uint32_t *speed)
+{
+ int ret = 0;
+ uint32_t percent = 0;
+ uint32_t current_rpm;
+ PPTable_t *pptable = smu->smu_table.driver_pptable;
+
+ ret = smu_v11_0_get_current_rpm(smu, &current_rpm);
+ percent = current_rpm * 100 / pptable->FanMaximumRpm;
+ *speed = percent > 100 ? 100 : percent;
+
+ return ret;
+}
+
+static int
+smu_v11_0_smc_fan_control(struct smu_context *smu, bool start)
+{
+ int ret = 0;
+
+ if (!smu_feature_is_supported(smu, FEATURE_FAN_CONTROL_BIT))
+ return 0;
+
+ ret = smu_feature_set_enabled(smu, FEATURE_FAN_CONTROL_BIT, start);
+ if (ret)
+ pr_err("[%s]%s smc FAN CONTROL feature failed!",
+ __func__, (start ? "Start" : "Stop"));
+
+ return ret;
+}
+
+static int
+smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, TMIN, 0));
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
+ CG_FDO_CTRL2, FDO_PWM_MODE, mode));
+
+ return 0;
+}
+
+static int
+smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t duty100;
+ uint32_t duty;
+ uint64_t tmp64;
+ bool stop = false;
+
+ if (speed > 100)
+ speed = 100;
+
+ if (smu_v11_0_smc_fan_control(smu, stop))
+ return -EINVAL;
+
+ duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
+ CG_FDO_CTRL1, FMAX_DUTY100);
+ if (!duty100)
+ return -EINVAL;
+
+ tmp64 = (uint64_t)speed * duty100;
+ do_div(tmp64, 100);
+ duty = (uint32_t)tmp64;
+
+ WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
+ CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));
+
+ return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
+}
+
+static int
+smu_v11_0_set_fan_control_mode(struct smu_context *smu,
+ uint32_t mode)
+{
+ int ret = 0;
+ bool start = true;
+ bool stop = false;
+
+ switch (mode) {
+ case AMD_FAN_CTRL_NONE:
+ ret = smu_v11_0_set_fan_speed_percent(smu, 100);
+ break;
+ case AMD_FAN_CTRL_MANUAL:
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ break;
+ case AMD_FAN_CTRL_AUTO:
+ ret = smu_v11_0_smc_fan_control(smu, start);
+ break;
+ default:
+ break;
+ }
+
+ if (ret) {
+ pr_err("[%s]Set fan control mode failed!", __func__);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
+ uint32_t speed)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret;
+ uint32_t tach_period, crystal_clock_freq;
+ bool stop = false;
+
+ if (!speed)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ ret = smu_v11_0_smc_fan_control(smu, stop);
+ if (ret)
+ goto set_fan_speed_rpm_failed;
+
+ crystal_clock_freq = amdgpu_asic_get_xclk(adev);
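+ /* derive the tachometer period for the target RPM from the reference (xclk) clock */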
+ tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
+ WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
+ REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
+ CG_TACH_CTRL, TARGET_PERIOD,
+ tach_period));
+
+ ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
+
+set_fan_speed_rpm_failed:
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
+ uint32_t pstate)
+{
+ int ret = 0;
+
+ mutex_lock(&(smu->mutex));
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetXgmiMode,
+ pstate ? XGMI_STATE_D0 : XGMI_STATE_D3);
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
+static const struct smu_funcs smu_v11_0_funcs = {
+ .init_microcode = smu_v11_0_init_microcode,
+ .load_microcode = smu_v11_0_load_microcode,
+ .check_fw_status = smu_v11_0_check_fw_status,
+ .check_fw_version = smu_v11_0_check_fw_version,
+ .send_smc_msg = smu_v11_0_send_msg,
+ .send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
+ .read_smc_arg = smu_v11_0_read_arg,
+ .read_pptable_from_vbios = smu_v11_0_read_pptable_from_vbios,
+ .init_smc_tables = smu_v11_0_init_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
+ .init_power = smu_v11_0_init_power,
+ .fini_power = smu_v11_0_fini_power,
+ .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
+ .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios,
+ .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
+ .check_pptable = smu_v11_0_check_pptable,
+ .parse_pptable = smu_v11_0_parse_pptable,
+ .populate_smc_pptable = smu_v11_0_populate_smc_pptable,
+ .write_pptable = smu_v11_0_write_pptable,
+ .write_watermarks_table = smu_v11_0_write_watermarks_table,
+ .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep,
+ .set_tool_table_location = smu_v11_0_set_tool_table_location,
+ .init_display = smu_v11_0_init_display,
+ .set_allowed_mask = smu_v11_0_set_allowed_mask,
+ .get_enabled_mask = smu_v11_0_get_enabled_mask,
+ .is_dpm_running = smu_v11_0_is_dpm_running,
+ .system_features_control = smu_v11_0_system_features_control,
+ .update_feature_enable_state = smu_v11_0_update_feature_enable_state,
+ .notify_display_change = smu_v11_0_notify_display_change,
+ .get_power_limit = smu_v11_0_get_power_limit,
+ .set_power_limit = smu_v11_0_set_power_limit,
+ .get_current_clk_freq = smu_v11_0_get_current_clk_freq,
+ .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks,
+ .start_thermal_control = smu_v11_0_start_thermal_control,
+ .read_sensor = smu_v11_0_read_sensor,
+ .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk,
+ .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request,
+ .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges,
+ .get_sclk = smu_v11_0_dpm_get_sclk,
+ .get_mclk = smu_v11_0_dpm_get_mclk,
+ .set_od8_default_settings = smu_v11_0_set_od8_default_settings,
+ .conv_power_profile_to_pplib_workload = smu_v11_0_conv_power_profile_to_pplib_workload,
+ .get_power_profile_mode = smu_v11_0_get_power_profile_mode,
+ .set_power_profile_mode = smu_v11_0_set_power_profile_mode,
+ .update_od8_settings = smu_v11_0_update_od8_settings,
+ .dpm_set_uvd_enable = smu_v11_0_dpm_set_uvd_enable,
+ .dpm_set_vce_enable = smu_v11_0_dpm_set_vce_enable,
+ .get_current_rpm = smu_v11_0_get_current_rpm,
+ .get_fan_control_mode = smu_v11_0_get_fan_control_mode,
+ .set_fan_control_mode = smu_v11_0_set_fan_control_mode,
+ .get_fan_speed_percent = smu_v11_0_get_fan_speed_percent,
+ .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent,
+ .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm,
+ .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate,
+};
+
+void smu_v11_0_set_smu_funcs(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->funcs = &smu_v11_0_funcs;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ vega20_set_ppt_funcs(smu);
+ break;
+ default:
+ pr_warn("Unknown asic for smu11\n");
+ }
+}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
index d111dd4e03d7..6d11076a79ba 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
@@ -212,6 +212,10 @@ static int smu10_start_smu(struct pp_hwmgr *hwmgr)
hwmgr->smu_version = smu10_read_arg_from_smc(hwmgr);
adev->pm.fw_version = hwmgr->smu_version >> 8;
+ if (adev->rev_id < 0x8 && adev->pdev->device != 0x15d8 &&
+ adev->pm.fw_version < 0x1e45)
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
if (smu10_verify_smc_interface(hwmgr))
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index ba00744c3413..f301a73f6df1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -367,6 +367,26 @@ static int vega20_set_tools_address(struct pp_hwmgr *hwmgr)
return ret;
}
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_smumgr *priv =
+ (struct vega20_smumgr *)(hwmgr->smu_backend);
+ int ret = 0;
+
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrHigh,
+ upper_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr High Failed!",
+ return ret);
+ PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetDriverDramAddrLow,
+ lower_32_bits(priv->smu_tables.entry[TABLE_PPTABLE].mc_addr))) == 0,
+ "[SetPPtabeDriverAddress] Attempt to Set Dram Addr Low Failed!",
+ return ret);
+
+ return ret;
+}
+
static int vega20_smu_init(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
index 77349c3f0162..ec953ab13e87 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.h
@@ -55,6 +55,7 @@ int vega20_set_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr,
uint8_t *table, uint16_t workload_type);
+int vega20_set_pptable_driver_address(struct pp_hwmgr *hwmgr);
#endif
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
new file mode 100644
index 000000000000..8fafcbdb1dfd
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -0,0 +1,2413 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "pp_debug.h"
+#include <linux/firmware.h>
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "atomfirmware.h"
+#include "amdgpu_atomfirmware.h"
+#include "smu_v11_0.h"
+#include "smu11_driver_if.h"
+#include "soc15_common.h"
+#include "atom.h"
+#include "power_state.h"
+#include "vega20_ppt.h"
+#include "vega20_pptable.h"
+#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_sh_mask.h"
+
+#define smnPCIE_LC_SPEED_CNTL 0x11140290
+#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+
+#define MSG_MAP(msg) \
+ [SMU_MSG_##msg] = PPSMC_MSG_##msg
+
+static int vega20_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage),
+ MSG_MAP(GetSmuVersion),
+ MSG_MAP(GetDriverIfVersion),
+ MSG_MAP(SetAllowedFeaturesMaskLow),
+ MSG_MAP(SetAllowedFeaturesMaskHigh),
+ MSG_MAP(EnableAllSmuFeatures),
+ MSG_MAP(DisableAllSmuFeatures),
+ MSG_MAP(EnableSmuFeaturesLow),
+ MSG_MAP(EnableSmuFeaturesHigh),
+ MSG_MAP(DisableSmuFeaturesLow),
+ MSG_MAP(DisableSmuFeaturesHigh),
+ MSG_MAP(GetEnabledSmuFeaturesLow),
+ MSG_MAP(GetEnabledSmuFeaturesHigh),
+ MSG_MAP(SetWorkloadMask),
+ MSG_MAP(SetPptLimit),
+ MSG_MAP(SetDriverDramAddrHigh),
+ MSG_MAP(SetDriverDramAddrLow),
+ MSG_MAP(SetToolsDramAddrHigh),
+ MSG_MAP(SetToolsDramAddrLow),
+ MSG_MAP(TransferTableSmu2Dram),
+ MSG_MAP(TransferTableDram2Smu),
+ MSG_MAP(UseDefaultPPTable),
+ MSG_MAP(UseBackupPPTable),
+ MSG_MAP(RunBtc),
+ MSG_MAP(RequestI2CBus),
+ MSG_MAP(ReleaseI2CBus),
+ MSG_MAP(SetFloorSocVoltage),
+ MSG_MAP(SoftReset),
+ MSG_MAP(StartBacoMonitor),
+ MSG_MAP(CancelBacoMonitor),
+ MSG_MAP(EnterBaco),
+ MSG_MAP(SetSoftMinByFreq),
+ MSG_MAP(SetSoftMaxByFreq),
+ MSG_MAP(SetHardMinByFreq),
+ MSG_MAP(SetHardMaxByFreq),
+ MSG_MAP(GetMinDpmFreq),
+ MSG_MAP(GetMaxDpmFreq),
+ MSG_MAP(GetDpmFreqByIndex),
+ MSG_MAP(GetDpmClockFreq),
+ MSG_MAP(GetSsVoltageByDpm),
+ MSG_MAP(SetMemoryChannelConfig),
+ MSG_MAP(SetGeminiMode),
+ MSG_MAP(SetGeminiApertureHigh),
+ MSG_MAP(SetGeminiApertureLow),
+ MSG_MAP(SetMinLinkDpmByIndex),
+ MSG_MAP(OverridePcieParameters),
+ MSG_MAP(OverDriveSetPercentage),
+ MSG_MAP(SetMinDeepSleepDcefclk),
+ MSG_MAP(ReenableAcDcInterrupt),
+ MSG_MAP(NotifyPowerSource),
+ MSG_MAP(SetUclkFastSwitch),
+ MSG_MAP(SetUclkDownHyst),
+ MSG_MAP(GetCurrentRpm),
+ MSG_MAP(SetVideoFps),
+ MSG_MAP(SetTjMax),
+ MSG_MAP(SetFanTemperatureTarget),
+ MSG_MAP(PrepareMp1ForUnload),
+ MSG_MAP(DramLogSetDramAddrHigh),
+ MSG_MAP(DramLogSetDramAddrLow),
+ MSG_MAP(DramLogSetDramSize),
+ MSG_MAP(SetFanMaxRpm),
+ MSG_MAP(SetFanMinPwm),
+ MSG_MAP(ConfigureGfxDidt),
+ MSG_MAP(NumOfDisplays),
+ MSG_MAP(RemoveMargins),
+ MSG_MAP(ReadSerialNumTop32),
+ MSG_MAP(ReadSerialNumBottom32),
+ MSG_MAP(SetSystemVirtualDramAddrHigh),
+ MSG_MAP(SetSystemVirtualDramAddrLow),
+ MSG_MAP(WaflTest),
+ MSG_MAP(SetFclkGfxClkRatio),
+ MSG_MAP(AllowGfxOff),
+ MSG_MAP(DisallowGfxOff),
+ MSG_MAP(GetPptLimit),
+ MSG_MAP(GetDcModeMaxDpmFreq),
+ MSG_MAP(GetDebugData),
+ MSG_MAP(SetXgmiMode),
+ MSG_MAP(RunAfllBtc),
+ MSG_MAP(ExitBaco),
+ MSG_MAP(PrepareMp1ForReset),
+ MSG_MAP(PrepareMp1ForShutdown),
+ MSG_MAP(SetMGpuFanBoostLimitRpm),
+ MSG_MAP(GetAVFSVoltageByDpm),
+};
+
+static int vega20_get_smu_msg_index(struct smu_context *smc, uint32_t index)
+{
+ int val;
+
+ if (index >= SMU_MSG_MAX_COUNT)
+ return -EINVAL;
+
+ val = vega20_message_map[index];
+ if (val > PPSMC_Message_Count)
+ return -EINVAL;
+
+ return val;
+}
+
+static int vega20_allocate_dpm_context(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+
+ if (smu_dpm->dpm_context)
+ return -EINVAL;
+
+ smu_dpm->dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_context)
+ return -ENOMEM;
+
+ if (smu_dpm->golden_dpm_context)
+ return -EINVAL;
+
+ smu_dpm->golden_dpm_context = kzalloc(sizeof(struct vega20_dpm_table),
+ GFP_KERNEL);
+ if (!smu_dpm->golden_dpm_context)
+ return -ENOMEM;
+
+ smu_dpm->dpm_context_size = sizeof(struct vega20_dpm_table);
+
+ smu_dpm->dpm_current_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_current_power_state)
+ return -ENOMEM;
+
+ smu_dpm->dpm_request_power_state = kzalloc(sizeof(struct smu_power_state),
+ GFP_KERNEL);
+ if (!smu_dpm->dpm_request_power_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int vega20_setup_od8_information(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ uint32_t od_feature_count, od_feature_array_size,
+ od_setting_count, od_setting_array_size;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->OverDrive8Table.ucODTableRevision == 1) {
+ /* Setup correct ODFeatureCount, and store ODFeatureArray from
+ * powerplay table to od_feature_capabilities */
+ od_feature_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount) >
+ ATOM_VEGA20_ODFEATURE_COUNT) ?
+ ATOM_VEGA20_ODFEATURE_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODFeatureCount);
+
+ od_feature_array_size = sizeof(uint8_t) * od_feature_count;
+
+ if (table_context->od_feature_capabilities)
+ return -EINVAL;
+
+ table_context->od_feature_capabilities = kmemdup(&powerplay_table->OverDrive8Table.ODFeatureCapabilities,
+ od_feature_array_size,
+ GFP_KERNEL);
+ if (!table_context->od_feature_capabilities)
+ return -ENOMEM;
+
+ /* Setup correct ODSettingCount, and store ODSettingArray from
+ * powerplay table to od_settings_max and od_setting_min */
+ od_setting_count =
+ (le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount) >
+ ATOM_VEGA20_ODSETTING_COUNT) ?
+ ATOM_VEGA20_ODSETTING_COUNT :
+ le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingCount);
+
+ od_setting_array_size = sizeof(uint32_t) * od_setting_count;
+
+ if (table_context->od_settings_max)
+ return -EINVAL;
+
+ table_context->od_settings_max = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMax,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_max) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ return -ENOMEM;
+ }
+
+ if (table_context->od_settings_min)
+ return -EINVAL;
+
+ table_context->od_settings_min = kmemdup(&powerplay_table->OverDrive8Table.ODSettingsMin,
+ od_setting_array_size,
+ GFP_KERNEL);
+
+ if (!table_context->od_settings_min) {
+ kfree(table_context->od_feature_capabilities);
+ table_context->od_feature_capabilities = NULL;
+ kfree(table_context->od_settings_max);
+ table_context->od_settings_max = NULL;
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
+static int vega20_store_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+ int ret;
+
+ if (!table_context->power_play_table)
+ return -EINVAL;
+
+ powerplay_table = table_context->power_play_table;
+
+ memcpy(table_context->driver_pptable, &powerplay_table->smcPPTable,
+ sizeof(PPTable_t));
+
+ table_context->software_shutdown_temp = powerplay_table->usSoftwareShutdownTemp;
+ table_context->thermal_controller_type = powerplay_table->ucThermalControllerType;
+ table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]);
+
+ ret = vega20_setup_od8_information(smu);
+
+ return ret;
+}
+
+static int vega20_append_powerplay_table(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ struct atom_smc_dpm_info_v4_4 *smc_dpm_table;
+ int index, i, ret;
+
+ index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
+ smc_dpm_info);
+
+ ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
+ (uint8_t **)&smc_dpm_table);
+ if (ret)
+ return ret;
+
+ smc_pptable->MaxVoltageStepGfx = smc_dpm_table->maxvoltagestepgfx;
+ smc_pptable->MaxVoltageStepSoc = smc_dpm_table->maxvoltagestepsoc;
+
+ smc_pptable->VddGfxVrMapping = smc_dpm_table->vddgfxvrmapping;
+ smc_pptable->VddSocVrMapping = smc_dpm_table->vddsocvrmapping;
+ smc_pptable->VddMem0VrMapping = smc_dpm_table->vddmem0vrmapping;
+ smc_pptable->VddMem1VrMapping = smc_dpm_table->vddmem1vrmapping;
+
+ smc_pptable->GfxUlvPhaseSheddingMask = smc_dpm_table->gfxulvphasesheddingmask;
+ smc_pptable->SocUlvPhaseSheddingMask = smc_dpm_table->soculvphasesheddingmask;
+ smc_pptable->ExternalSensorPresent = smc_dpm_table->externalsensorpresent;
+
+ smc_pptable->GfxMaxCurrent = smc_dpm_table->gfxmaxcurrent;
+ smc_pptable->GfxOffset = smc_dpm_table->gfxoffset;
+ smc_pptable->Padding_TelemetryGfx = smc_dpm_table->padding_telemetrygfx;
+
+ smc_pptable->SocMaxCurrent = smc_dpm_table->socmaxcurrent;
+ smc_pptable->SocOffset = smc_dpm_table->socoffset;
+ smc_pptable->Padding_TelemetrySoc = smc_dpm_table->padding_telemetrysoc;
+
+ smc_pptable->Mem0MaxCurrent = smc_dpm_table->mem0maxcurrent;
+ smc_pptable->Mem0Offset = smc_dpm_table->mem0offset;
+ smc_pptable->Padding_TelemetryMem0 = smc_dpm_table->padding_telemetrymem0;
+
+ smc_pptable->Mem1MaxCurrent = smc_dpm_table->mem1maxcurrent;
+ smc_pptable->Mem1Offset = smc_dpm_table->mem1offset;
+ smc_pptable->Padding_TelemetryMem1 = smc_dpm_table->padding_telemetrymem1;
+
+ smc_pptable->AcDcGpio = smc_dpm_table->acdcgpio;
+ smc_pptable->AcDcPolarity = smc_dpm_table->acdcpolarity;
+ smc_pptable->VR0HotGpio = smc_dpm_table->vr0hotgpio;
+ smc_pptable->VR0HotPolarity = smc_dpm_table->vr0hotpolarity;
+
+ smc_pptable->VR1HotGpio = smc_dpm_table->vr1hotgpio;
+ smc_pptable->VR1HotPolarity = smc_dpm_table->vr1hotpolarity;
+ smc_pptable->Padding1 = smc_dpm_table->padding1;
+ smc_pptable->Padding2 = smc_dpm_table->padding2;
+
+ smc_pptable->LedPin0 = smc_dpm_table->ledpin0;
+ smc_pptable->LedPin1 = smc_dpm_table->ledpin1;
+ smc_pptable->LedPin2 = smc_dpm_table->ledpin2;
+
+ smc_pptable->PllGfxclkSpreadEnabled = smc_dpm_table->pllgfxclkspreadenabled;
+ smc_pptable->PllGfxclkSpreadPercent = smc_dpm_table->pllgfxclkspreadpercent;
+ smc_pptable->PllGfxclkSpreadFreq = smc_dpm_table->pllgfxclkspreadfreq;
+
+ smc_pptable->UclkSpreadEnabled = 0;
+ smc_pptable->UclkSpreadPercent = smc_dpm_table->uclkspreadpercent;
+ smc_pptable->UclkSpreadFreq = smc_dpm_table->uclkspreadfreq;
+
+ smc_pptable->FclkSpreadEnabled = smc_dpm_table->fclkspreadenabled;
+ smc_pptable->FclkSpreadPercent = smc_dpm_table->fclkspreadpercent;
+ smc_pptable->FclkSpreadFreq = smc_dpm_table->fclkspreadfreq;
+
+ smc_pptable->FllGfxclkSpreadEnabled = smc_dpm_table->fllgfxclkspreadenabled;
+ smc_pptable->FllGfxclkSpreadPercent = smc_dpm_table->fllgfxclkspreadpercent;
+ smc_pptable->FllGfxclkSpreadFreq = smc_dpm_table->fllgfxclkspreadfreq;
+
+ for (i = 0; i < I2C_CONTROLLER_NAME_COUNT; i++) {
+ smc_pptable->I2cControllers[i].Enabled =
+ smc_dpm_table->i2ccontrollers[i].enabled;
+ smc_pptable->I2cControllers[i].SlaveAddress =
+ smc_dpm_table->i2ccontrollers[i].slaveaddress;
+ smc_pptable->I2cControllers[i].ControllerPort =
+ smc_dpm_table->i2ccontrollers[i].controllerport;
+ smc_pptable->I2cControllers[i].ThermalThrottler =
+ smc_dpm_table->i2ccontrollers[i].thermalthrottler;
+ smc_pptable->I2cControllers[i].I2cProtocol =
+ smc_dpm_table->i2ccontrollers[i].i2cprotocol;
+ smc_pptable->I2cControllers[i].I2cSpeed =
+ smc_dpm_table->i2ccontrollers[i].i2cspeed;
+ }
+
+ return 0;
+}
+
+static int vega20_check_powerplay_table(struct smu_context *smu)
+{
+ ATOM_Vega20_POWERPLAYTABLE *powerplay_table = NULL;
+ struct smu_table_context *table_context = &smu->smu_table;
+
+ powerplay_table = table_context->power_play_table;
+
+ if (powerplay_table->sHeader.format_revision < ATOM_VEGA20_TABLE_REVISION_VEGA20) {
+ pr_err("Unsupported PPTable format!");
+ return -EINVAL;
+ }
+
+ if (!powerplay_table->sHeader.structuresize) {
+ pr_err("Invalid PowerPlay Table!");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vega20_run_btc_afll(struct smu_context *smu)
+{
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+}
+
+static int
+vega20_get_unallowed_feature_mask(struct smu_context *smu,
+ uint32_t *feature_mask, uint32_t num)
+{
+ if (num > 2)
+ return -EINVAL;
+
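+ /* bits set here mark SMU features the driver does not allow to be enabled */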
+ feature_mask[0] = 0xE0041C00;
+ feature_mask[1] = 0xFFFFFFFE; /* bit32~bit63 is Unsupported */
+
+ return 0;
+}
+
+static enum
+amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu)
+{
+ enum amd_pm_state_type pm_type;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context ||
+ !smu_dpm_ctx->dpm_current_power_state)
+ return -EINVAL;
+
+ mutex_lock(&(smu->mutex));
+ switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) {
+ case SMU_STATE_UI_LABEL_BATTERY:
+ pm_type = POWER_STATE_TYPE_BATTERY;
+ break;
+ case SMU_STATE_UI_LABEL_BALLANCED:
+ pm_type = POWER_STATE_TYPE_BALANCED;
+ break;
+ case SMU_STATE_UI_LABEL_PERFORMANCE:
+ pm_type = POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ default:
+ if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT)
+ pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
+ else
+ pm_type = POWER_STATE_TYPE_DEFAULT;
+ break;
+ }
+ mutex_unlock(&(smu->mutex));
+
+ return pm_type;
+}
+
+static int
+vega20_set_single_dpm_table(struct smu_context *smu,
+ struct vega20_single_dpm_table *single_dpm_table,
+ PPCLK_e clk_id)
+{
+ int ret = 0;
+ uint32_t i, num_of_levels = 0, clk;
+
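+ /* index 0xFF asks the SMU for the number of DPM levels of this clock */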
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | 0xFF));
+ if (ret) {
+ pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, &num_of_levels);
+ if (!num_of_levels) {
+ pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
+ return -EINVAL;
+ }
+
+ single_dpm_table->count = num_of_levels;
+
+ for (i = 0; i < num_of_levels; i++) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetDpmFreqByIndex,
+ (clk_id << 16 | i));
+ if (ret) {
+ pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
+ return ret;
+ }
+ smu_read_smc_arg(smu, &clk);
+ if (!clk) {
+ pr_err("[GetDpmFreqByIndex] clk value is invalid!");
+ return -EINVAL;
+ }
+ single_dpm_table->dpm_levels[i].value = clk;
+ single_dpm_table->dpm_levels[i].enabled = true;
+ }
+ return 0;
+}
+
+static void vega20_init_single_dpm_state(struct vega20_dpm_state *dpm_state)
+{
+ dpm_state->soft_min_level = 0x0;
+ dpm_state->soft_max_level = 0xffff;
+ dpm_state->hard_min_level = 0x0;
+ dpm_state->hard_max_level = 0xffff;
+}
+
+static int vega20_set_default_dpm_table(struct smu_context *smu)
+{
+ int ret;
+
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ /* socclk */
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_SOCCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get socclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* gfxclk */
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get gfxclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* memclk */
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_UCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get memclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* eclk */
+ single_dpm_table = &(dpm_table->eclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_VCE_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_ECLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get eclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.eclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* vclk */
+ single_dpm_table = &(dpm_table->vclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_VCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get vclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dclk */
+ single_dpm_table = &(dpm_table->dclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UVD_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table, PPCLK_DCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dcefclk */
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DCEFCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dcefclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dcefclk / 100;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* pixclk */
+ single_dpm_table = &(dpm_table->pixel_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PIXCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get pixclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* dispclk */
+ single_dpm_table = &(dpm_table->display_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_DISPCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get dispclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* phyclk */
+ single_dpm_table = &(dpm_table->phy_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_PHYCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get phyclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ /* fclk */
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_FCLK);
+ if (ret) {
+ pr_err("[SetupDefaultDpmTable] failed to get fclk dpm levels!");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 0;
+ }
+ vega20_init_single_dpm_state(&(single_dpm_table->dpm_state));
+
+ memcpy(smu_dpm->golden_dpm_context, dpm_table,
+ sizeof(struct vega20_dpm_table));
+
+ return 0;
+}
+
+static int vega20_populate_umd_state_clk(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *gfx_table = NULL;
+ struct vega20_single_dpm_table *mem_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+ gfx_table = &(dpm_table->gfx_table);
+ mem_table = &(dpm_table->mem_table);
+
+ smu->pstate_sclk = gfx_table->dpm_levels[0].value;
+ smu->pstate_mclk = mem_table->dpm_levels[0].value;
+
+ if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
+ smu->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ smu->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ smu->pstate_sclk = smu->pstate_sclk * 100;
+ smu->pstate_mclk = smu->pstate_mclk * 100;
+
+ return 0;
+}
+
+static int vega20_get_clk_table(struct smu_context *smu,
+ struct pp_clock_levels_with_latency *clocks,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int i, count;
+
+ count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
+ clocks->num_levels = count;
+
+ for (i = 0; i < count; i++) {
+ clocks->data[i].clocks_in_khz =
+ dpm_table->dpm_levels[i].value * 1000;
+ clocks->data[i].latency_in_us = 0;
+ }
+
+ return 0;
+}
+
+static int vega20_print_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, char *buf)
+{
+ int i, now, size = 0;
+ int ret = 0;
+ uint32_t gen_speed, lane_width;
+ struct amdgpu_device *adev = smu->adev;
+ struct pp_clock_levels_with_latency clocks;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_table_context *table_context = &smu->smu_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_GFXCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current gfx clk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get gfx clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n", i,
+ clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_MCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_UCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current mclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_SOCCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_SOCCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current socclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get socclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10)
+ ? "*" : "");
+ break;
+
+ case PP_FCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_FCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current fclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->fclk_table);
+ for (i = 0; i < single_dpm_table->count; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, single_dpm_table->dpm_levels[i].value,
+ (single_dpm_table->dpm_levels[i].value == now / 100)
+ ? "*" : "");
+ break;
+
+ case PP_DCEFCLK:
+ ret = smu_get_current_clk_freq(smu, PPCLK_DCEFCLK, &now);
+ if (ret) {
+ pr_err("Attempt to get current dcefclk Failed!");
+ return ret;
+ }
+
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get dcefclk levels Failed!");
+ return ret;
+ }
+
+ for (i = 0; i < clocks.num_levels; i++)
+ size += sprintf(buf + size, "%d: %uMhz %s\n",
+ i, clocks.data[i].clocks_in_khz / 1000,
+ (clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
+ break;
+
+ case PP_PCIE:
+ gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
+ PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
+ >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
+ lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
+ PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
+ >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
+ for (i = 0; i < NUM_LINK_LEVELS; i++)
+ size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
+ (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," :
+ (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," :
+ (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "",
+ (pptable->PcieLaneCount[i] == 1) ? "x1" :
+ (pptable->PcieLaneCount[i] == 2) ? "x2" :
+ (pptable->PcieLaneCount[i] == 3) ? "x4" :
+ (pptable->PcieLaneCount[i] == 4) ? "x8" :
+ (pptable->PcieLaneCount[i] == 5) ? "x12" :
+ (pptable->PcieLaneCount[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == pptable->PcieGenSpeed[i]) &&
+ (lane_width == pptable->PcieLaneCount[i]) ?
+ "*" : "");
+ break;
+
+ case OD_SCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
+ size += sprintf(buf + size, "0: %10uMhz\n",
+ od_table->GfxclkFmin);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->GfxclkFmax);
+ }
+
+ break;
+
+ case OD_MCLK:
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_MCLK");
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ od_table->UclkFmax);
+ }
+
+ break;
+
+ case OD_VDDC_CURVE:
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
+ size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq1,
+ od_table->GfxclkVolt1 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq2,
+ od_table->GfxclkVolt2 / VOLTAGE_SCALE);
+ size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
+ od_table->GfxclkFreq3,
+ od_table->GfxclkVolt3 / VOLTAGE_SCALE);
+ }
+
+ break;
+
+ case OD_RANGE:
+ size = sprintf(buf, "%s:\n", "OD_RANGE");
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id) {
+ size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels Failed!");
+ return ret;
+ }
+
+ size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ }
+
+ if (od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].max_value);
+ size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ return size;
+}
+
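+/*
+ * Push the cached soft min/max levels (hard min for DCEFCLK) to the SMC for
+ * every clock domain selected by @feature_mask whose DPM feature is enabled.
+ * @max chooses between the SetSoftMaxByFreq and SetSoftMinByFreq messages.
+ */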
+static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
+ uint32_t feature_mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t freq;
+ int ret = 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
+ single_dpm_table = &(dpm_table->gfx_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s gfxclk!\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_UCLK_MASK)) {
+ single_dpm_table = &(dpm_table->mem_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_UCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s memclk!\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
+ single_dpm_table = &(dpm_table->soc_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s socclk!\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_FCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_FCLK_MASK)) {
+ single_dpm_table = &(dpm_table->fclk_table);
+ freq = max ? single_dpm_table->dpm_state.soft_max_level :
+ single_dpm_table->dpm_state.soft_min_level;
+ ret = smu_send_smc_msg_with_param(smu,
+ (max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
+ (PPCLK_FCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set soft %s fclk!\n",
+ max ? "max" : "min");
+ return ret;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
+ single_dpm_table = &(dpm_table->dcef_table);
+ freq = single_dpm_table->dpm_state.hard_min_level;
+ if (!max) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ if (ret) {
+ pr_err("Failed to set hard min dcefclk!\n");
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
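+/*
+ * Clamp a clock domain to the level range encoded in @mask (lowest set bit
+ * becomes the soft min, highest set bit the soft max) and push the new
+ * limits to the SMC; DCEFCLK takes a hard min and PCIE a min link level
+ * instead. Only allowed in manual DPM mode.
+ */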
+static int vega20_force_clk_levels(struct smu_context *smu,
+ enum pp_clock_type type, uint32_t mask)
+{
+ struct vega20_dpm_table *dpm_table;
+ struct vega20_single_dpm_table *single_dpm_table;
+ uint32_t soft_min_level, soft_max_level, hard_min_level;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ int ret = 0;
+
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
+ pr_info("force clock level is for dpm manual mode only.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&(smu->mutex));
+
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ dpm_table = smu->smu_dpm.dpm_context;
+
+ switch (type) {
+ case PP_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_SOCCLK:
+ single_dpm_table = &(dpm_table->soc_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_FCLK:
+ single_dpm_table = &(dpm_table->fclk_table);
+
+ if (soft_max_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ soft_max_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.soft_min_level =
+ single_dpm_table->dpm_levels[soft_min_level].value;
+ single_dpm_table->dpm_state.soft_max_level =
+ single_dpm_table->dpm_levels[soft_max_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK);
+ if (ret) {
+ pr_err("Failed to upload boot level to lowest!\n");
+ break;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload dpm max level to highest!\n");
+
+ break;
+
+ case PP_DCEFCLK:
+ hard_min_level = soft_min_level;
+ single_dpm_table = &(dpm_table->dcef_table);
+
+ if (hard_min_level >= single_dpm_table->count) {
+ pr_err("Clock level specified %d is over max allowed %d\n",
+ hard_min_level, single_dpm_table->count - 1);
+ ret = -EINVAL;
+ break;
+ }
+
+ single_dpm_table->dpm_state.hard_min_level =
+ single_dpm_table->dpm_levels[hard_min_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, FEATURE_DPM_DCEFCLK_MASK);
+ if (ret)
+ pr_err("Failed to upload boot level to lowest!\n");
+
+ break;
+
+ case PP_PCIE:
+ if (soft_min_level >= NUM_LINK_LEVELS ||
+ soft_max_level >= NUM_LINK_LEVELS) {
+ ret = -EINVAL;
+ break;
+ }
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ if (ret)
+ pr_err("Failed to set min link dpm level!\n");
+
+ break;
+
+ default:
+ break;
+ }
+
+ mutex_unlock(&(smu->mutex));
+ return ret;
+}
+
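+/*
+ * Map an amd_pp clock type to its DPM table and export the table's levels
+ * into the caller's pp_clock_levels_with_latency structure.
+ */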
+static int vega20_get_clock_by_type_with_latency(struct smu_context *smu,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ int ret;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ mutex_lock(&smu->mutex);
+
+ switch (type) {
+ case amd_pp_sys_clock:
+ single_dpm_table = &(dpm_table->gfx_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_mem_clock:
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_dcef_clock:
+ single_dpm_table = &(dpm_table->dcef_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ case amd_pp_soc_clock:
+ single_dpm_table = &(dpm_table->soc_table);
+ ret = vega20_get_clk_table(smu, clocks, single_dpm_table);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&smu->mutex);
+ return ret;
+}
+
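+/*
+ * Query the SMU for the AVFS base voltage of GFXCLK at @freq and convert it
+ * from SMU units to mV via VOLTAGE_SCALE.
+ */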
+static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
+ uint32_t *voltage,
+ uint32_t freq)
+{
+ int ret;
+
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_GetAVFSVoltageByDpm,
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ if (ret) {
+ pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
+ return ret;
+ }
+
+ smu_read_smc_arg(smu, voltage);
+ *voltage = *voltage / VOLTAGE_SCALE;
+
+ return 0;
+}
+
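+/*
+ * Build the default OD8 settings array from the overdrive table and the
+ * min/max ranges advertised by the powerplay table. A setting is only
+ * exposed when its OD feature capability bit is set and its range is sane;
+ * everything else is left zeroed.
+ */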
+static int vega20_set_default_od8_setttings(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table = (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings = NULL;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ int i, ret;
+
+ if (table_context->od8_settings)
+ return -EINVAL;
+
+ table_context->od8_settings = kzalloc(sizeof(struct vega20_od8_settings), GFP_KERNEL);
+
+ if (!table_context->od8_settings)
+ return -ENOMEM;
+
+ od8_settings = (struct vega20_od8_settings *)table_context->od8_settings;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_GFXCLK_FMIN])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
+ OD8_GFXCLK_LIMITS;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
+ od_table->GfxclkFmin;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
+ od_table->GfxclkFmax;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
+ smc_pptable->MinVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
+ smc_pptable->MaxVoltageGfx / VOLTAGE_SCALE) &&
+ (table_context->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] <=
+ table_context->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3])) {
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
+ OD8_GFXCLK_CURVE;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
+ OD8_GFXCLK_CURVE;
+
+ od_table->GfxclkFreq1 = od_table->GfxclkFmin;
+ od_table->GfxclkFreq2 = (od_table->GfxclkFmin + od_table->GfxclkFmax) / 2;
+ od_table->GfxclkFreq3 = od_table->GfxclkFmax;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
+ od_table->GfxclkFreq1;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
+ od_table->GfxclkFreq2;
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
+ od_table->GfxclkFreq3;
+
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value,
+ od_table->GfxclkFreq1);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0;
+ od_table->GfxclkVolt1 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value,
+ od_table->GfxclkFreq2);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0;
+ od_table->GfxclkVolt2 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
+ * VOLTAGE_SCALE;
+ ret = vega20_overdrive_get_gfx_clk_base_voltage(smu,
+ &od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value,
+ od_table->GfxclkFreq3);
+ if (ret)
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0;
+ od_table->GfxclkVolt3 =
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
+ * VOLTAGE_SCALE;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
+ table_context->od_settings_min[OD8_SETTING_UCLK_FMAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id =
+ OD8_UCLK_MAX;
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
+ od_table->UclkFmax;
+ }
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100) {
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id =
+ OD8_POWER_LIMIT;
+ od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
+ od_table->OverDrivePct;
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_FAN_CONTROL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
+ OD8_ACOUSTIC_LIMIT_SCLK;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
+ od_table->FanMaximumRpm;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_MIN_SPEED])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
+ OD8_FAN_SPEED_MIN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
+ od_table->FanMinimumPwm * smc_pptable->FanMaximumRpm / 100;
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_THERMAL_BIT)) {
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
+ table_context->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP])) {
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
+ OD8_TEMPERATURE_FAN;
+ od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
+ od_table->FanTargetTemperature;
+ }
+
+ if (table_context->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
+ (table_context->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
+ table_context->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX])) {
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
+ OD8_TEMPERATURE_SYSTEM;
+ od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
+ od_table->MaxOpTemp;
+ }
+ }
+
+ for (i = 0; i < OD8_SETTING_COUNT; i++) {
+ if (od8_settings->od8_settings_array[i].feature_id) {
+ od8_settings->od8_settings_array[i].min_value =
+ table_context->od_settings_min[i];
+ od8_settings->od8_settings_array[i].max_value =
+ table_context->od_settings_max[i];
+ od8_settings->od8_settings_array[i].current_value =
+ od8_settings->od8_settings_array[i].default_value;
+ } else {
+ od8_settings->od8_settings_array[i].min_value = 0;
+ od8_settings->od8_settings_array[i].max_value = 0;
+ od8_settings->od8_settings_array[i].current_value = 0;
+ }
+ }
+
+ return 0;
+}
+
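+/*
+ * Report the overdrive offset of the highest sclk/mclk level as a percentage
+ * of the golden (default) highest level.
+ */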
+static int vega20_get_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ int value, golden_value;
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ value = single_dpm_table->dpm_levels[single_dpm_table->count - 1].value;
+ golden_value = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
+ value -= golden_value;
+ value = DIV_ROUND_UP(value * 100, golden_value);
+
+ return value;
+}
+
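+/*
+ * Pick the gfxclk/mclk/socclk level indices to use for the UMD pstate
+ * profiling modes (standard, min sclk, min mclk and peak).
+ */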
+static int
+vega20_get_profiling_clk_mask(struct smu_context *smu,
+ enum amd_dpm_forced_level level,
+ uint32_t *sclk_mask,
+ uint32_t *mclk_mask,
+ uint32_t *soc_mask)
+{
+ struct vega20_dpm_table *dpm_table = (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *gfx_dpm_table;
+ struct vega20_single_dpm_table *mem_dpm_table;
+ struct vega20_single_dpm_table *soc_dpm_table;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ gfx_dpm_table = &dpm_table->gfx_table;
+ mem_dpm_table = &dpm_table->mem_table;
+ soc_dpm_table = &dpm_table->soc_table;
+
+ *sclk_mask = 0;
+ *mclk_mask = 0;
+ *soc_mask = 0;
+
+ if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
+ mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
+ soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
+ *sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
+ *mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
+ *soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
+ }
+
+ if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ *sclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ *mclk_mask = 0;
+ } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ *sclk_mask = gfx_dpm_table->count - 1;
+ *mclk_mask = mem_dpm_table->count - 1;
+ *soc_mask = soc_dpm_table->count - 1;
+ }
+
+ return 0;
+}
+
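+/* Pin UCLK to its highest DPM level by raising its hard min frequency. */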
+static int
+vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
+ struct vega20_single_dpm_table *dpm_table)
+{
+ int ret = 0;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ if (dpm_table->count <= 0) {
+ pr_err("[%s] Dpm table has no entry!", __func__);
+ return -EINVAL;
+ }
+
+ if (dpm_table->count > NUM_UCLK_DPM_LEVELS) {
+ pr_err("[%s] Dpm table has too many entries!", __func__);
+ return -EINVAL;
+ }
+
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int vega20_pre_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+
+ if (!smu->smu_dpm.dpm_context)
+ return -EINVAL;
+
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = vega20_set_uclk_to_highest_dpm_level(smu,
+ &dpm_table->mem_table);
+ if (ret)
+ pr_err("Failed to set uclk to highest dpm level");
+ return ret;
+}
+
+static int vega20_display_config_changed(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (!smu->funcs)
+ return -EINVAL;
+
+ if (!smu->smu_dpm.dpm_context ||
+ !smu->smu_table.tables ||
+ !smu->smu_table.tables[TABLE_WATERMARKS].cpu_addr)
+ return -EINVAL;
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu->funcs->write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_feature_is_supported(smu, FEATURE_DPM_SOCCLK_BIT)) {
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_NumOfDisplays,
+ smu->display_config->num_display);
+ }
+
+ return ret;
+}
+
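+/*
+ * Recompute the soft/hard limits of each DPM table from the current forced
+ * level and display configuration: apply the UMD pstate levels, honour DAL's
+ * UCLK hard min and restrict mclk switching for latency-sensitive
+ * multi-display setups.
+ */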
+static int vega20_apply_clocks_adjust_rules(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+ struct vega20_dpm_table *dpm_ctx = (struct vega20_dpm_table *)(smu_dpm_ctx->dpm_context);
+ struct vega20_single_dpm_table *dpm_table;
+ bool vblank_too_short = false;
+ bool disable_mclk_switching;
+ uint32_t i, latency;
+
+ disable_mclk_switching = ((1 < smu->display_config->num_display) &&
+ !smu->display_config->multi_monitor_in_sync) || vblank_too_short;
+ latency = smu->display_config->dce_tolerable_mclk_in_active_latency;
+
+ /* gfxclk */
+ dpm_table = &(dpm_ctx->gfx_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* memclk */
+ dpm_table = &(dpm_ctx->mem_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* honour DAL's UCLK Hardmin */
+ if (dpm_table->dpm_state.hard_min_level < (smu->display_config->min_mem_set_clock / 100))
+ dpm_table->dpm_state.hard_min_level = smu->display_config->min_mem_set_clock / 100;
+
+ /* Hardmin is dependent on displayconfig */
+ if (disable_mclk_switching) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ for (i = 0; i < smu_dpm_ctx->mclk_latency_table->count - 1; i++) {
+ if (smu_dpm_ctx->mclk_latency_table->entries[i].latency <= latency) {
+ if (dpm_table->dpm_levels[i].value >= (smu->display_config->min_mem_set_clock / 100)) {
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
+ break;
+ }
+ }
+ }
+ }
+
+ if (smu->display_config->nb_pstate_switch_disable)
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ /* vclk */
+ dpm_table = &(dpm_ctx->vclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* dclk */
+ dpm_table = &(dpm_ctx->dclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* socclk */
+ dpm_table = &(dpm_ctx->soc_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+
+ /* eclk */
+ dpm_table = &(dpm_ctx->eclk_table);
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
+ dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+
+ if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
+ }
+
+ if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
+ dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
+ }
+ return 0;
+}
+
+static int
+vega20_notify_smc_dispaly_config(struct smu_context *smu)
+{
+ struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context;
+ struct vega20_single_dpm_table *memtable = &dpm_table->mem_table;
+ struct smu_clocks min_clocks = {0};
+ struct pp_display_clock_request clock_req;
+ int ret = 0;
+
+ min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
+ min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
+ min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
+
+ if (smu_feature_is_supported(smu, FEATURE_DPM_DCEFCLK_BIT)) {
+ clock_req.clock_type = amd_pp_dcef_clock;
+ clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
+ if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) {
+ if (smu_feature_is_supported(smu, FEATURE_DS_DCEFCLK_BIT)) {
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetMinDeepSleepDcefclk,
+ min_clocks.dcef_clock_in_sr/100);
+ if (ret) {
+ pr_err("Attempt to set divider for DCEFCLK failed!");
+ return ret;
+ }
+ }
+ } else {
+ pr_info("Attempt to set Hard Min for DCEFCLK failed!");
+ }
+ }
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT)) {
+ memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetHardMinByFreq,
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ if (ret) {
+ pr_err("[%s] Set hard min uclk failed!", __func__);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
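+/* Return the index of the lowest enabled level, enabling level 0 if none is. */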
+static uint32_t vega20_find_lowest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ uint32_t i;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i >= table->count) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static uint32_t vega20_find_highest_dpm_level(struct vega20_single_dpm_table *table)
+{
+ int i = 0;
+
+ if (!table) {
+ pr_err("[%s] DPM Table does not exist!", __func__);
+ return 0;
+ }
+ if (table->count <= 0) {
+ pr_err("[%s] DPM Table has no entry!", __func__);
+ return 0;
+ }
+ if (table->count > MAX_REGULAR_DPM_NUMBER) {
+ pr_err("[%s] DPM Table has too many entries!", __func__);
+ return MAX_REGULAR_DPM_NUMBER - 1;
+ }
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (table->dpm_levels[i].enabled)
+ break;
+ }
+ if (i < 0) {
+ i = 0;
+ table->dpm_levels[i].enabled = true;
+ }
+
+ return i;
+}
+
+static int vega20_force_dpm_limit_value(struct smu_context *smu, bool highest)
+{
+ uint32_t soft_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_level].value;
+
+ if (highest)
+ soft_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ else
+ soft_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_level].value;
+
+ ret = vega20_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload boot level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ ret = vega20_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload dpm max level to %s!\n",
+ highest ? "highest" : "lowest");
+ return ret;
+ }
+
+ return ret;
+}
+
+static int vega20_unforce_dpm_levels(struct smu_context *smu)
+{
+ uint32_t soft_min_level, soft_max_level;
+ int ret = 0;
+ struct vega20_dpm_table *dpm_table =
+ (struct vega20_dpm_table *)smu->smu_dpm.dpm_context;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->gfx_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->gfx_table));
+ dpm_table->gfx_table.dpm_state.soft_min_level =
+ dpm_table->gfx_table.dpm_levels[soft_min_level].value;
+ dpm_table->gfx_table.dpm_state.soft_max_level =
+ dpm_table->gfx_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->mem_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->mem_table));
+ dpm_table->mem_table.dpm_state.soft_min_level =
+ dpm_table->mem_table.dpm_levels[soft_min_level].value;
+ dpm_table->mem_table.dpm_state.soft_max_level =
+ dpm_table->mem_table.dpm_levels[soft_max_level].value;
+
+ soft_min_level = vega20_find_lowest_dpm_level(&(dpm_table->soc_table));
+ soft_max_level = vega20_find_highest_dpm_level(&(dpm_table->soc_table));
+ dpm_table->soc_table.dpm_state.soft_min_level =
+ dpm_table->soc_table.dpm_levels[soft_min_level].value;
+ dpm_table->soc_table.dpm_state.soft_max_level =
+ dpm_table->soc_table.dpm_levels[soft_max_level].value;
+
+ ret = smu_upload_dpm_level(smu, false, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Bootup Levels!");
+ return ret;
+ }
+
+ ret = smu_upload_dpm_level(smu, true, 0xFFFFFFFF);
+ if (ret) {
+ pr_err("Failed to upload DPM Max Levels!");
+ return ret;
+ }
+
+ return ret;
+}
+
+static enum amd_dpm_forced_level vega20_get_performance_level(struct smu_context *smu)
+{
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ if (smu_dpm_ctx->dpm_level != smu_dpm_ctx->saved_dpm_level) {
+ mutex_lock(&(smu->mutex));
+ smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
+ mutex_unlock(&(smu->mutex));
+ }
+ return smu_dpm_ctx->dpm_level;
+}
+
+static int
+vega20_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
+{
+ int ret = 0;
+ int i;
+ struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
+
+ if (!smu_dpm_ctx->dpm_context)
+ return -EINVAL;
+
+ for (i = 0; i < smu->adev->num_ip_blocks; i++) {
+ if (smu->adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)
+ break;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ smu->adev->ip_blocks[i].version->funcs->enable_umd_pstate(smu, &level);
+ ret = smu_handle_task(smu, level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+ mutex_unlock(&smu->mutex);
+
+ return ret;
+}
+
+static int vega20_update_specified_od8_value(struct smu_context *smu,
+ uint32_t index,
+ uint32_t value)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+
+ switch (index) {
+ case OD8_SETTING_GFXCLK_FMIN:
+ od_table->GfxclkFmin = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->GfxclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ1:
+ od_table->GfxclkFreq1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE1:
+ od_table->GfxclkVolt1 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ2:
+ od_table->GfxclkFreq2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE2:
+ od_table->GfxclkVolt2 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_FREQ3:
+ od_table->GfxclkFreq3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_GFXCLK_VOLTAGE3:
+ od_table->GfxclkVolt3 = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_UCLK_FMAX:
+ if (value < od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].min_value ||
+ value > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value)
+ return -EINVAL;
+ od_table->UclkFmax = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_POWER_PERCENTAGE:
+ od_table->OverDrivePct = (int16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
+ od_table->FanMaximumRpm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_MIN_SPEED:
+ od_table->FanMinimumPwm = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_FAN_TARGET_TEMP:
+ od_table->FanTargetTemperature = (uint16_t)value;
+ break;
+
+ case OD8_SETTING_OPERATING_TEMP_MAX:
+ od_table->MaxOpTemp = (uint16_t)value;
+ break;
+ }
+
+ return 0;
+}
+
+static int vega20_set_od_percentage(struct smu_context *smu,
+ enum pp_clock_type type,
+ uint32_t value)
+{
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_dpm_table *golden_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_single_dpm_table *golden_dpm_table;
+ uint32_t od_clk, index;
+ int ret = 0;
+ int feature_enabled;
+ PPCLK_e clk_id;
+
+ mutex_lock(&(smu->mutex));
+
+ dpm_table = smu_dpm->dpm_context;
+ golden_table = smu_dpm->golden_dpm_context;
+
+ switch (type) {
+ case OD_SCLK:
+ single_dpm_table = &(dpm_table->gfx_table);
+ golden_dpm_table = &(golden_table->gfx_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT);
+ clk_id = PPCLK_GFXCLK;
+ index = OD8_SETTING_GFXCLK_FMAX;
+ break;
+ case OD_MCLK:
+ single_dpm_table = &(dpm_table->mem_table);
+ golden_dpm_table = &(golden_table->mem_table);
+ feature_enabled = smu_feature_is_enabled(smu, FEATURE_DPM_UCLK_BIT);
+ clk_id = PPCLK_UCLK;
+ index = OD8_SETTING_UCLK_FMAX;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret)
+ goto set_od_failed;
+
+ od_clk = golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value * value;
+ od_clk /= 100;
+ od_clk += golden_dpm_table->dpm_levels[golden_dpm_table->count - 1].value;
+
+ ret = smu_update_od8_settings(smu, index, od_clk);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to set od clk!\n");
+ goto set_od_failed;
+ }
+
+ if (feature_enabled) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ clk_id);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ goto set_od_failed;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+
+set_od_failed:
+ mutex_unlock(&(smu->mutex));
+
+ return ret;
+}
+
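+/*
+ * Handle the PP_OD_* edit/restore/commit commands from the pp_od_clk_voltage
+ * interface: validate user supplied clock/voltage points against the OD8
+ * ranges, patch the overdrive table and, on commit, upload it to the SMU and
+ * refresh the gfxclk DPM table.
+ */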
+static int vega20_odn_edit_dpm_table(struct smu_context *smu,
+ enum PP_OD_DPM_TABLE_COMMAND type,
+ long *input, uint32_t size)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)(table_context->overdrive_table);
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct vega20_dpm_table *dpm_table = NULL;
+ struct vega20_single_dpm_table *single_dpm_table;
+ struct vega20_od8_settings *od8_settings =
+ (struct vega20_od8_settings *)table_context->od8_settings;
+ struct pp_clock_levels_with_latency clocks;
+ int32_t input_index, input_clk, input_vol, i;
+ int od8_id;
+ int ret = 0;
+
+ dpm_table = smu_dpm->dpm_context;
+
+ if (!input) {
+ pr_warn("NULL user input for clock and voltage\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
+ pr_info("Sclk min/max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n", size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 0 && input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+ pr_info("Support min/max sclk frequency setting only, indexed by 0/1\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].min_value,
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 0 && od_table->GfxclkFmin != input_clk) {
+ od_table->GfxclkFmin = input_clk;
+ table_context->od_gfxclk_update = true;
+ } else if (input_index == 1 && od_table->GfxclkFmax != input_clk) {
+ od_table->GfxclkFmax = input_clk;
+ table_context->od_gfxclk_update = true;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_MCLK_VDDC_TABLE:
+ if (!od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id) {
+ pr_info("Mclk max frequency overdrive not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ single_dpm_table = &(dpm_table->mem_table);
+ ret = vega20_get_clk_table(smu, &clocks, single_dpm_table);
+ if (ret) {
+ pr_err("Attempt to get memory clk levels failed!");
+ return ret;
+ }
+
+ for (i = 0; i < size; i += 2) {
+ if (i + 2 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+
+ if (input_index != 1) {
+ pr_info("Invalid index %d\n", input_index);
+ pr_info("Support max Mclk frequency setting only, indexed by 1\n");
+ return -EINVAL;
+ }
+
+ if (input_clk < clocks.data[0].clocks_in_khz / 1000 ||
+ input_clk > od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ clocks.data[0].clocks_in_khz / 1000,
+ od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].max_value);
+ return -EINVAL;
+ }
+
+ if (input_index == 1 && od_table->UclkFmax != input_clk) {
+ table_context->od_gfxclk_update = true;
+ od_table->UclkFmax = input_clk;
+ }
+ }
+
+ break;
+
+ case PP_OD_EDIT_VDDC_CURVE:
+ if (!(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
+ od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
+ pr_info("Voltage curve calibration not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ for (i = 0; i < size; i += 3) {
+ if (i + 3 > size) {
+ pr_info("invalid number of input parameters %d\n",
+ size);
+ return -EINVAL;
+ }
+
+ input_index = input[i];
+ input_clk = input[i + 1];
+ input_vol = input[i + 2];
+
+ if (input_index > 2) {
+ pr_info("Setting for point %d is not supported\n",
+ input_index + 1);
+ pr_info("Three supported points, indexed by 0, 1, 2\n");
+ return -EINVAL;
+ }
+
+ od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
+ if (input_clk < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_clk > od8_settings->od8_settings_array[od8_id].max_value) {
+ pr_info("clock freq %d is not within allowed range [%d - %d]\n",
+ input_clk,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
+ if (input_vol < od8_settings->od8_settings_array[od8_id].min_value ||
+ input_vol > od8_settings->od8_settings_array[od8_id].max_value) {
+ pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
+ input_vol,
+ od8_settings->od8_settings_array[od8_id].min_value,
+ od8_settings->od8_settings_array[od8_id].max_value);
+ return -EINVAL;
+ }
+
+ switch (input_index) {
+ case 0:
+ od_table->GfxclkFreq1 = input_clk;
+ od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 1:
+ od_table->GfxclkFreq2 = input_clk;
+ od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
+ break;
+ case 2:
+ od_table->GfxclkFreq3 = input_clk;
+ od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ break;
+
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, false);
+ if (ret) {
+ pr_err("Failed to export over drive table!\n");
+ return ret;
+ }
+
+ break;
+
+ case PP_OD_COMMIT_DPM_TABLE:
+ ret = smu_update_table(smu, TABLE_OVERDRIVE, table_context->overdrive_table, true);
+ if (ret) {
+ pr_err("Failed to import over drive table!\n");
+ return ret;
+ }
+
+ /* retrieve updated gfxclk table */
+ if (table_context->od_gfxclk_update) {
+ table_context->od_gfxclk_update = false;
+ single_dpm_table = &(dpm_table->gfx_table);
+
+ if (smu_feature_is_enabled(smu, FEATURE_DPM_GFXCLK_BIT)) {
+ ret = vega20_set_single_dpm_table(smu, single_dpm_table,
+ PPCLK_GFXCLK);
+ if (ret) {
+ pr_err("[Setoverdrive] failed to refresh dpm table!\n");
+ return ret;
+ }
+ } else {
+ single_dpm_table->count = 1;
+ single_dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
+ }
+ }
+
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (type == PP_OD_COMMIT_DPM_TABLE) {
+ mutex_lock(&(smu->mutex));
+ ret = smu_handle_task(smu, smu_dpm->dpm_level,
+ AMD_PP_TASK_READJUST_POWER_STATE);
+ mutex_unlock(&(smu->mutex));
+ }
+
+ return ret;
+}
+
+static const struct pptable_funcs vega20_ppt_funcs = {
+ .alloc_dpm_context = vega20_allocate_dpm_context,
+ .store_powerplay_table = vega20_store_powerplay_table,
+ .check_powerplay_table = vega20_check_powerplay_table,
+ .append_powerplay_table = vega20_append_powerplay_table,
+ .get_smu_msg_index = vega20_get_smu_msg_index,
+ .run_afll_btc = vega20_run_btc_afll,
+ .get_unallowed_feature_mask = vega20_get_unallowed_feature_mask,
+ .get_current_power_state = vega20_get_current_power_state,
+ .set_default_dpm_table = vega20_set_default_dpm_table,
+ .set_power_state = NULL,
+ .populate_umd_state_clk = vega20_populate_umd_state_clk,
+ .print_clk_levels = vega20_print_clk_levels,
+ .force_clk_levels = vega20_force_clk_levels,
+ .get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
+ .set_default_od8_settings = vega20_set_default_od8_setttings,
+ .get_od_percentage = vega20_get_od_percentage,
+ .get_performance_level = vega20_get_performance_level,
+ .force_performance_level = vega20_force_performance_level,
+ .update_specified_od8_value = vega20_update_specified_od8_value,
+ .set_od_percentage = vega20_set_od_percentage,
+ .od_edit_dpm_table = vega20_odn_edit_dpm_table,
+ .pre_display_config_changed = vega20_pre_display_config_changed,
+ .display_config_changed = vega20_display_config_changed,
+ .apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
+ .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config,
+ .force_dpm_limit_value = vega20_force_dpm_limit_value,
+ .unforce_dpm_levels = vega20_unforce_dpm_levels,
+ .upload_dpm_level = vega20_upload_dpm_level,
+ .get_profiling_clk_mask = vega20_get_profiling_clk_mask,
+};
+
+void vega20_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &vega20_ppt_funcs;
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION;
+}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.h b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
new file mode 100644
index 000000000000..5a0d2af63173
--- /dev/null
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __VEGA20_PPT_H__
+#define __VEGA20_PPT_H__
+
+#define VEGA20_UMD_PSTATE_GFXCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_SOCCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_MCLK_LEVEL 0x2
+#define VEGA20_UMD_PSTATE_UVDCLK_LEVEL 0x3
+#define VEGA20_UMD_PSTATE_VCEMCLK_LEVEL 0x3
+
+#define MAX_REGULAR_DPM_NUMBER 16
+#define MAX_PCIE_CONF 2
+
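+/*
+ * OverDriveTable_t voltage fields are stored in 1/4 mV units; divide by
+ * VOLTAGE_SCALE to convert to mV.
+ */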
+#define VOLTAGE_SCALE 4
+#define AVFS_CURVE 0
+#define OD8_HOTCURVE_TEMPERATURE 85
+
+struct vega20_dpm_level {
+ bool enabled;
+ uint32_t value;
+ uint32_t param1;
+};
+
+struct vega20_dpm_state {
+ uint32_t soft_min_level;
+ uint32_t soft_max_level;
+ uint32_t hard_min_level;
+ uint32_t hard_max_level;
+};
+
+struct vega20_single_dpm_table {
+ uint32_t count;
+ struct vega20_dpm_state dpm_state;
+ struct vega20_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct vega20_pcie_table {
+ uint16_t count;
+ uint8_t pcie_gen[MAX_PCIE_CONF];
+ uint8_t pcie_lane[MAX_PCIE_CONF];
+ uint32_t lclk[MAX_PCIE_CONF];
+};
+
+struct vega20_dpm_table {
+ struct vega20_single_dpm_table soc_table;
+ struct vega20_single_dpm_table gfx_table;
+ struct vega20_single_dpm_table mem_table;
+ struct vega20_single_dpm_table eclk_table;
+ struct vega20_single_dpm_table vclk_table;
+ struct vega20_single_dpm_table dclk_table;
+ struct vega20_single_dpm_table dcef_table;
+ struct vega20_single_dpm_table pixel_table;
+ struct vega20_single_dpm_table display_table;
+ struct vega20_single_dpm_table phy_table;
+ struct vega20_single_dpm_table fclk_table;
+ struct vega20_pcie_table pcie_table;
+};
+
+enum OD8_FEATURE_ID
+{
+ OD8_GFXCLK_LIMITS = 1 << 0,
+ OD8_GFXCLK_CURVE = 1 << 1,
+ OD8_UCLK_MAX = 1 << 2,
+ OD8_POWER_LIMIT = 1 << 3,
+ OD8_ACOUSTIC_LIMIT_SCLK = 1 << 4, //FanMaximumRpm
+ OD8_FAN_SPEED_MIN = 1 << 5, //FanMinimumPwm
+ OD8_TEMPERATURE_FAN = 1 << 6, //FanTargetTemperature
+ OD8_TEMPERATURE_SYSTEM = 1 << 7, //MaxOpTemp
+ OD8_MEMORY_TIMING_TUNE = 1 << 8,
+ OD8_FAN_ZERO_RPM_CONTROL = 1 << 9
+};
+
+enum OD8_SETTING_ID
+{
+ OD8_SETTING_GFXCLK_FMIN = 0,
+ OD8_SETTING_GFXCLK_FMAX,
+ OD8_SETTING_GFXCLK_FREQ1,
+ OD8_SETTING_GFXCLK_VOLTAGE1,
+ OD8_SETTING_GFXCLK_FREQ2,
+ OD8_SETTING_GFXCLK_VOLTAGE2,
+ OD8_SETTING_GFXCLK_FREQ3,
+ OD8_SETTING_GFXCLK_VOLTAGE3,
+ OD8_SETTING_UCLK_FMAX,
+ OD8_SETTING_POWER_PERCENTAGE,
+ OD8_SETTING_FAN_ACOUSTIC_LIMIT,
+ OD8_SETTING_FAN_MIN_SPEED,
+ OD8_SETTING_FAN_TARGET_TEMP,
+ OD8_SETTING_OPERATING_TEMP_MAX,
+ OD8_SETTING_AC_TIMING,
+ OD8_SETTING_FAN_ZERO_RPM_CONTROL,
+ OD8_SETTING_COUNT
+};
+
+struct vega20_od8_single_setting {
+ uint32_t feature_id;
+ int32_t min_value;
+ int32_t max_value;
+ int32_t current_value;
+ int32_t default_value;
+};
+
+struct vega20_od8_settings {
+ struct vega20_od8_single_setting od8_settings_array[OD8_SETTING_COUNT];
+};
+
+extern void vega20_set_ppt_funcs(struct smu_context *smu);
+
+#endif
diff --git a/drivers/gpu/drm/arm/display/include/malidp_product.h b/drivers/gpu/drm/arm/display/include/malidp_product.h
index b35fc5db866b..1053b11352eb 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_product.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_product.h
@@ -20,4 +20,16 @@
/* Mali-display product IDs */
#define MALIDP_D71_PRODUCT_ID 0x0071
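+/*
+ * Packed hardware configuration id: per-pipeline resource counts and the
+ * maximum line size, overlaid on a single 32-bit value.
+ */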
+union komeda_config_id {
+ struct {
+ __u32 max_line_sz:16,
+ n_pipelines:2,
+ n_scalers:2, /* number of scalers per pipeline */
+ n_layers:3, /* number of layers per pipeline */
+ n_richs:3, /* number of rich layers per pipeline */
+ reserved_bits:6;
+ };
+ __u32 value;
+};
+
#endif /* _MALIDP_PRODUCT_H_ */
diff --git a/drivers/gpu/drm/arm/display/include/malidp_utils.h b/drivers/gpu/drm/arm/display/include/malidp_utils.h
index 63cc47cefcf8..8cfd91196e15 100644
--- a/drivers/gpu/drm/arm/display/include/malidp_utils.h
+++ b/drivers/gpu/drm/arm/display/include/malidp_utils.h
@@ -7,10 +7,41 @@
#ifndef _MALIDP_UTILS_
#define _MALIDP_UTILS_
+#include <linux/delay.h>
+
#define has_bit(nr, mask) (BIT(nr) & (mask))
#define has_bits(bits, mask) (((bits) & (mask)) == (bits))
#define dp_for_each_set_bit(bit, mask) \
for_each_set_bit((bit), ((unsigned long *)&(mask)), sizeof(mask) * 8)
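+
+/*
+ * Poll @__cond up to @__tries times, sleeping usleep_range(__min_range,
+ * __max_range) between checks; evaluates to the number of tries left,
+ * with 0 indicating a timeout.
+ */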
+#define dp_wait_cond(__cond, __tries, __min_range, __max_range) \
+({ \
+ int num_tries = __tries; \
+ while (!(__cond) && (num_tries > 0)) { \
+ usleep_range(__min_range, __max_range); \
+ if (__cond) \
+ break; \
+ num_tries--; \
+ } \
+ num_tries; \
+})
+
+/* the restriction of range is [start, end] */
+struct malidp_range {
+ u32 start;
+ u32 end;
+};
+
+static inline void set_range(struct malidp_range *rg, u32 start, u32 end)
+{
+ rg->start = start;
+ rg->end = end;
+}
+
+static inline bool in_range(struct malidp_range *rg, u32 v)
+{
+ return (v >= rg->start) && (v <= rg->end);
+}
+
#endif /* _MALIDP_UTILS_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile
index 1b875e5dc0f6..412eeba8c39f 100644
--- a/drivers/gpu/drm/arm/display/komeda/Makefile
+++ b/drivers/gpu/drm/arm/display/komeda/Makefile
@@ -1,14 +1,15 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := \
- -I$(src)/../include \
- -I$(src)
+ -I $(srctree)/$(src)/../include \
+ -I $(srctree)/$(src)
komeda-y := \
komeda_drv.o \
komeda_dev.o \
komeda_format_caps.o \
komeda_pipeline.o \
+ komeda_pipeline_state.o \
komeda_framebuffer.o \
komeda_kms.o \
komeda_crtc.o \
@@ -16,6 +17,7 @@ komeda-y := \
komeda_private_obj.o
komeda-y += \
- d71/d71_dev.o
+ d71/d71_dev.o \
+ d71/d71_component.o
obj-$(CONFIG_DRM_KOMEDA) += komeda.o
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
new file mode 100644
index 000000000000..031e5f305a3c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c
@@ -0,0 +1,685 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
+#include "komeda_kms.h"
+#include "malidp_io.h"
+#include "komeda_framebuffer.h"
+
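+/* Decode a D71 block_info id into its pipeline index and komeda component id. */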
+static void get_resources_id(u32 hw_id, u32 *pipe_id, u32 *comp_id)
+{
+ u32 id = BLOCK_INFO_BLK_ID(hw_id);
+ u32 pipe = id;
+
+ switch (BLOCK_INFO_BLK_TYPE(hw_id)) {
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ id = KOMEDA_COMPONENT_WB_LAYER;
+ break;
+ case D71_BLK_TYPE_CU_SPLITTER:
+ id = KOMEDA_COMPONENT_SPLITTER;
+ break;
+ case D71_BLK_TYPE_CU_SCALER:
+ pipe = id / D71_PIPELINE_MAX_SCALERS;
+ id %= D71_PIPELINE_MAX_SCALERS;
+ id += KOMEDA_COMPONENT_SCALER0;
+ break;
+ case D71_BLK_TYPE_CU:
+ id += KOMEDA_COMPONENT_COMPIZ0;
+ break;
+ case D71_BLK_TYPE_LPU_LAYER:
+ pipe = id / D71_PIPELINE_MAX_LAYERS;
+ id %= D71_PIPELINE_MAX_LAYERS;
+ id += KOMEDA_COMPONENT_LAYER0;
+ break;
+ case D71_BLK_TYPE_DOU_IPS:
+ id += KOMEDA_COMPONENT_IPS0;
+ break;
+ case D71_BLK_TYPE_CU_MERGER:
+ id = KOMEDA_COMPONENT_MERGER;
+ break;
+ case D71_BLK_TYPE_DOU:
+ id = KOMEDA_COMPONENT_TIMING_CTRLR;
+ break;
+ default:
+ id = 0xFFFFFFFF;
+ }
+
+ if (comp_id)
+ *comp_id = id;
+
+ if (pipe_id)
+ *pipe_id = pipe;
+}
+
+static u32 get_valid_inputs(struct block_header *blk)
+{
+ u32 valid_inputs = 0, comp_id;
+ int i;
+
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++) {
+ get_resources_id(blk->input_ids[i], NULL, &comp_id);
+ if (comp_id == 0xFFFFFFFF)
+ continue;
+ valid_inputs |= BIT(comp_id);
+ }
+
+ return valid_inputs;
+}
+
+static void get_values_from_reg(void __iomem *reg, u32 offset,
+ u32 count, u32 *val)
+{
+ u32 i, addr;
+
+ for (i = 0; i < count; i++) {
+ addr = offset + (i << 2);
+		/* 0xA4 is a write-only register */
+ if (addr != 0xA4)
+ val[i] = malidp_read32(reg, addr);
+ else
+ val[i] = 0xDEADDEAD;
+ }
+}
+
+static void dump_block_header(struct seq_file *sf, void __iomem *reg)
+{
+ struct block_header hdr;
+ u32 i, n_input, n_output;
+
+ d71_read_block_header(reg, &hdr);
+ seq_printf(sf, "BLOCK_INFO:\t\t0x%X\n", hdr.block_info);
+ seq_printf(sf, "PIPELINE_INFO:\t\t0x%X\n", hdr.pipeline_info);
+
+ n_output = PIPELINE_INFO_N_OUTPUTS(hdr.pipeline_info);
+ n_input = PIPELINE_INFO_N_VALID_INPUTS(hdr.pipeline_info);
+
+ for (i = 0; i < n_input; i++)
+ seq_printf(sf, "VALID_INPUT_ID%u:\t0x%X\n",
+ i, hdr.input_ids[i]);
+
+ for (i = 0; i < n_output; i++)
+ seq_printf(sf, "OUTPUT_ID%u:\t\t0x%X\n",
+ i, hdr.output_ids[i]);
+}
+
+static u32 to_rot_ctrl(u32 rot)
+{
+ u32 lr_ctrl = 0;
+
+ switch (rot & DRM_MODE_ROTATE_MASK) {
+ case DRM_MODE_ROTATE_0:
+ lr_ctrl |= L_ROT(L_ROT_R0);
+ break;
+ case DRM_MODE_ROTATE_90:
+ lr_ctrl |= L_ROT(L_ROT_R90);
+ break;
+ case DRM_MODE_ROTATE_180:
+ lr_ctrl |= L_ROT(L_ROT_R180);
+ break;
+ case DRM_MODE_ROTATE_270:
+ lr_ctrl |= L_ROT(L_ROT_R270);
+ break;
+ }
+
+ if (rot & DRM_MODE_REFLECT_X)
+ lr_ctrl |= L_HFLIP;
+ if (rot & DRM_MODE_REFLECT_Y)
+ lr_ctrl |= L_VFLIP;
+
+ return lr_ctrl;
+}
+
+static inline u32 to_d71_input_id(struct komeda_component_output *output)
+{
+ struct komeda_component *comp = output->component;
+
+ return comp ? (comp->hw_id + output->output_port) : 0;
+}
+
+static void d71_layer_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, L_EN, 0);
+}
+
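+/*
+ * Program the layer per-plane registers: each plane owns a group of
+ * LAYER_PER_PLANE_REGS (4) consecutive registers, so plane i's pointer and
+ * stride registers sit at BLK_P0_* + i * LAYER_PER_PLANE_REGS * 4. Only the
+ * strides of the first two planes are programmed.
+ */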
+static void d71_layer_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(state);
+ struct drm_plane_state *plane_st = state->plane->state;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ u32 __iomem *reg = c->reg;
+ u32 ctrl_mask = L_EN | L_ROT(L_ROT_R270) | L_HFLIP | L_VFLIP | L_TBU_EN;
+ u32 ctrl = L_EN | to_rot_ctrl(st->rot);
+ int i;
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ malidp_write32(reg,
+ BLK_P0_PTR_LOW + i * LAYER_PER_PLANE_REGS * 4,
+ lower_32_bits(st->addr[i]));
+ malidp_write32(reg,
+ BLK_P0_PTR_HIGH + i * LAYER_PER_PLANE_REGS * 4,
+ upper_32_bits(st->addr[i]));
+ if (i >= 2)
+ break;
+
+ malidp_write32(reg,
+ BLK_P0_STRIDE + i * LAYER_PER_PLANE_REGS * 4,
+ fb->pitches[i] & 0xFFFF);
+ }
+
+ malidp_write32(reg, LAYER_FMT, kfb->format_caps->hw_id);
+ malidp_write32(reg, BLK_IN_SIZE, HV_SIZE(st->hsize, st->vsize));
+
+ malidp_write32_mask(reg, BLK_CONTROL, ctrl_mask, ctrl);
+}
+
+static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[15], i;
+ bool rich, rgb2rgb;
+ char *prefix;
+
+ get_values_from_reg(c->reg, LAYER_INFO, 1, &v[14]);
+ if (v[14] & 0x1) {
+ rich = true;
+ prefix = "LR_";
+ } else {
+ rich = false;
+ prefix = "LS_";
+ }
+
+ rgb2rgb = !!(v[14] & L_INFO_CM);
+
+ dump_block_header(sf, c->reg);
+
+ seq_printf(sf, "%sLAYER_INFO:\t\t0x%X\n", prefix, v[14]);
+
+ get_values_from_reg(c->reg, 0xD0, 1, v);
+ seq_printf(sf, "%sCONTROL:\t\t0x%X\n", prefix, v[0]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0xD4, 1, v);
+ seq_printf(sf, "LR_RICH_CONTROL:\t0x%X\n", v[0]);
+ }
+ get_values_from_reg(c->reg, 0xD8, 4, v);
+ seq_printf(sf, "%sFORMAT:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sIT_COEFFTAB:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sIN_SIZE:\t\t0x%X\n", prefix, v[2]);
+ seq_printf(sf, "%sPALPHA:\t\t0x%X\n", prefix, v[3]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "%sP0_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP0_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sP0_STRIDE:\t\t0x%X\n", prefix, v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 2, v);
+ seq_printf(sf, "%sP1_PTR_LOW:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sP1_PTR_HIGH:\t\t0x%X\n", prefix, v[1]);
+ if (rich) {
+ get_values_from_reg(c->reg, 0x118, 1, v);
+ seq_printf(sf, "LR_P1_STRIDE:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0x120, 2, v);
+ seq_printf(sf, "LR_P2_PTR_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "LR_P2_PTR_HIGH:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LR_YUV_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ if (rgb2rgb) {
+ get_values_from_reg(c->reg, LAYER_RGB_RGB_COEFF0, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "LS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+ }
+
+ get_values_from_reg(c->reg, 0x160, 3, v);
+ seq_printf(sf, "%sAD_CONTROL:\t\t0x%X\n", prefix, v[0]);
+ seq_printf(sf, "%sAD_H_CROP:\t\t0x%X\n", prefix, v[1]);
+ seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]);
+}
+
+static struct komeda_component_funcs d71_layer_funcs = {
+ .update = d71_layer_update,
+ .disable = d71_layer_disable,
+ .dump_register = d71_layer_dump,
+};
+
+static int d71_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_layer *layer;
+ u32 pipe_id, layer_id, layer_info;
+
+ get_resources_id(blk->block_info, &pipe_id, &layer_id);
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*layer),
+ layer_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_layer_funcs, 0,
+ get_valid_inputs(blk),
+ 1, reg, "LPU%d_LAYER%d", pipe_id, layer_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add layer component\n");
+ return PTR_ERR(c);
+ }
+
+ layer = to_layer(c);
+ layer_info = malidp_read32(reg, LAYER_INFO);
+
+ if (layer_info & L_INFO_RF)
+ layer->layer_type = KOMEDA_FMT_RICH_LAYER;
+ else
+ layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER;
+
+ set_range(&layer->hsize_in, 4, d71->max_line_size);
+ set_range(&layer->vsize_in, 4, d71->max_vsize);
+
+ malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP);
+
+ layer->supported_rots = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK;
+
+ return 0;
+}
+
+static int d71_wb_layer_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ DRM_DEBUG("Detect D71_Wb_Layer.\n");
+
+ return 0;
+}
+
+static void d71_component_disable(struct komeda_component *c)
+{
+ u32 __iomem *reg = c->reg;
+ u32 i;
+
+ malidp_write32(reg, BLK_CONTROL, 0);
+
+ for (i = 0; i < c->max_active_inputs; i++)
+ malidp_write32(reg, BLK_INPUT_ID0 + (i << 2), 0);
+}
+
+static void compiz_enable_input(u32 __iomem *id_reg,
+ u32 __iomem *cfg_reg,
+ u32 input_hw_id,
+ struct komeda_compiz_input_cfg *cin)
+{
+ u32 ctrl = CU_INPUT_CTRL_EN;
+ u8 blend = cin->pixel_blend_mode;
+
+ if (blend == DRM_MODE_BLEND_PIXEL_NONE)
+ ctrl |= CU_INPUT_CTRL_PAD;
+ else if (blend == DRM_MODE_BLEND_PREMULTI)
+ ctrl |= CU_INPUT_CTRL_PMUL;
+
+ ctrl |= CU_INPUT_CTRL_ALPHA(cin->layer_alpha);
+
+ malidp_write32(id_reg, BLK_INPUT_ID0, input_hw_id);
+
+ malidp_write32(cfg_reg, CU_INPUT0_SIZE,
+ HV_SIZE(cin->hsize, cin->vsize));
+ malidp_write32(cfg_reg, CU_INPUT0_OFFSET,
+ HV_OFFSET(cin->hoffset, cin->voffset));
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, ctrl);
+}
+
+static void d71_compiz_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_compiz_state *st = to_compiz_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 __iomem *id_reg, *cfg_reg;
+ u32 index, input_hw_id;
+
+ for_each_changed_input(state, index) {
+ id_reg = reg + index;
+ cfg_reg = reg + index * CU_PER_INPUT_REGS;
+ input_hw_id = to_d71_input_id(&state->inputs[index]);
+ if (state->active_inputs & BIT(index)) {
+ compiz_enable_input(id_reg, cfg_reg,
+ input_hw_id, &st->cins[index]);
+ } else {
+ malidp_write32(id_reg, BLK_INPUT_ID0, 0);
+ malidp_write32(cfg_reg, CU_INPUT0_CONTROL, 0);
+ }
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_compiz_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 5, v);
+ for (i = 0; i < 5; i++)
+ seq_printf(sf, "CU_INPUT_ID%u:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0xA0, 5, v);
+ seq_printf(sf, "CU_IRQ_RAW_STATUS:\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_IRQ_CLEAR:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "CU_IRQ_MASK:\t\t0x%X\n", v[2]);
+ seq_printf(sf, "CU_IRQ_STATUS:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "CU_STATUS:\t\t0x%X\n", v[4]);
+
+ get_values_from_reg(c->reg, 0xD0, 2, v);
+ seq_printf(sf, "CU_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_SIZE:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xDC, 1, v);
+ seq_printf(sf, "CU_BG_COLOR:\t\t0x%X\n", v[0]);
+
+ for (i = 0, v[4] = 0xE0; i < 5; i++, v[4] += 0x10) {
+ get_values_from_reg(c->reg, v[4], 3, v);
+ seq_printf(sf, "CU_INPUT%u_SIZE:\t\t0x%X\n", i, v[0]);
+ seq_printf(sf, "CU_INPUT%u_OFFSET:\t0x%X\n", i, v[1]);
+ seq_printf(sf, "CU_INPUT%u_CONTROL:\t0x%X\n", i, v[2]);
+ }
+
+ get_values_from_reg(c->reg, 0x130, 2, v);
+ seq_printf(sf, "CU_USER_LOW:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "CU_USER_HIGH:\t\t0x%X\n", v[1]);
+}
+
+static struct komeda_component_funcs d71_compiz_funcs = {
+ .update = d71_compiz_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_compiz_dump,
+};
+
+static int d71_compiz_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_compiz *compiz;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*compiz),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_compiz_funcs,
+ CU_NUM_INPUT_IDS, get_valid_inputs(blk),
+ CU_NUM_OUTPUT_IDS, reg,
+ "CU%d", pipe_id);
+ if (IS_ERR(c))
+ return PTR_ERR(c);
+
+ compiz = to_compiz(c);
+
+ set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size);
+ set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize);
+
+ return 0;
+}
+
+static void d71_improc_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct komeda_improc_state *st = to_improc_st(state);
+ u32 __iomem *reg = c->reg;
+ u32 index, input_hw_id;
+
+ for_each_changed_input(state, index) {
+ input_hw_id = state->active_inputs & BIT(index) ?
+ to_d71_input_id(&state->inputs[index]) : 0;
+ malidp_write32(reg, BLK_INPUT_ID0 + index * 4, input_hw_id);
+ }
+
+ malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize));
+}
+
+static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf)
+{
+ u32 v[12], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0x80, 2, v);
+ seq_printf(sf, "IPS_INPUT_ID0:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_INPUT_ID1:\t\t0x%X\n", v[1]);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "IPS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 3, v);
+ seq_printf(sf, "IPS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "IPS_SIZE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "IPS_DEPTH:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x130, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_RGB_COEFF%u:\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x170, 12, v);
+ for (i = 0; i < 12; i++)
+ seq_printf(sf, "IPS_RGB_YUV_COEFF%u:\t0x%X\n", i, v[i]);
+}
+
+static struct komeda_component_funcs d71_improc_funcs = {
+ .update = d71_improc_update,
+ .disable = d71_component_disable,
+ .dump_register = d71_improc_dump,
+};
+
+static int d71_improc_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_improc *improc;
+ u32 pipe_id, comp_id, value;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*improc),
+ comp_id,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_improc_funcs, IPS_NUM_INPUT_IDS,
+ get_valid_inputs(blk),
+ IPS_NUM_OUTPUT_IDS, reg, "DOU%d_IPS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add improc component\n");
+ return PTR_ERR(c);
+ }
+
+ improc = to_improc(c);
+ improc->supported_color_depths = BIT(8) | BIT(10);
+ improc->supported_color_formats = DRM_COLOR_FORMAT_RGB444 |
+ DRM_COLOR_FORMAT_YCRCB444 |
+ DRM_COLOR_FORMAT_YCRCB422;
+ value = malidp_read32(reg, BLK_INFO);
+ if (value & IPS_INFO_CHD420)
+ improc->supported_color_formats |= DRM_COLOR_FORMAT_YCRCB420;
+
+ improc->supports_csc = true;
+ improc->supports_gamma = true;
+
+ return 0;
+}
+
+static void d71_timing_ctrlr_disable(struct komeda_component *c)
+{
+ malidp_write32_mask(c->reg, BLK_CONTROL, BS_CTRL_EN, 0);
+}
+
+static void d71_timing_ctrlr_update(struct komeda_component *c,
+ struct komeda_component_state *state)
+{
+ struct drm_crtc_state *crtc_st = state->crtc->state;
+ u32 __iomem *reg = c->reg;
+ struct videomode vm;
+ u32 value;
+
+ drm_display_mode_to_videomode(&crtc_st->adjusted_mode, &vm);
+
+ malidp_write32(reg, BS_ACTIVESIZE, HV_SIZE(vm.hactive, vm.vactive));
+ malidp_write32(reg, BS_HINTERVALS, BS_H_INTVALS(vm.hfront_porch,
+ vm.hback_porch));
+ malidp_write32(reg, BS_VINTERVALS, BS_V_INTVALS(vm.vfront_porch,
+ vm.vback_porch));
+
+ value = BS_SYNC_VSW(vm.vsync_len) | BS_SYNC_HSW(vm.hsync_len);
+ value |= vm.flags & DISPLAY_FLAGS_VSYNC_HIGH ? BS_SYNC_VSP : 0;
+ value |= vm.flags & DISPLAY_FLAGS_HSYNC_HIGH ? BS_SYNC_HSP : 0;
+ malidp_write32(reg, BS_SYNC, value);
+
+ malidp_write32(reg, BS_PROG_LINE, D71_DEFAULT_PREPRETCH_LINE - 1);
+ malidp_write32(reg, BS_PREFETCH_LINE, D71_DEFAULT_PREPRETCH_LINE);
+
+ /* configure bs control register */
+ value = BS_CTRL_EN | BS_CTRL_VM;
+
+ malidp_write32(reg, BLK_CONTROL, value);
+}
+
+static void d71_timing_ctrlr_dump(struct komeda_component *c,
+ struct seq_file *sf)
+{
+ u32 v[8], i;
+
+ dump_block_header(sf, c->reg);
+
+ get_values_from_reg(c->reg, 0xC0, 1, v);
+ seq_printf(sf, "BS_INFO:\t\t0x%X\n", v[0]);
+
+ get_values_from_reg(c->reg, 0xD0, 8, v);
+ seq_printf(sf, "BS_CONTROL:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_PROG_LINE:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_PREFETCH_LINE:\t0x%X\n", v[2]);
+ seq_printf(sf, "BS_BG_COLOR:\t\t0x%X\n", v[3]);
+ seq_printf(sf, "BS_ACTIVESIZE:\t\t0x%X\n", v[4]);
+ seq_printf(sf, "BS_HINTERVALS:\t\t0x%X\n", v[5]);
+ seq_printf(sf, "BS_VINTERVALS:\t\t0x%X\n", v[6]);
+ seq_printf(sf, "BS_SYNC:\t\t0x%X\n", v[7]);
+
+ get_values_from_reg(c->reg, 0x100, 3, v);
+ seq_printf(sf, "BS_DRIFT_TO:\t\t0x%X\n", v[0]);
+ seq_printf(sf, "BS_FRAME_TO:\t\t0x%X\n", v[1]);
+ seq_printf(sf, "BS_TE_TO:\t\t0x%X\n", v[2]);
+
+ get_values_from_reg(c->reg, 0x110, 3, v);
+ for (i = 0; i < 3; i++)
+ seq_printf(sf, "BS_T%u_INTERVAL:\t\t0x%X\n", i, v[i]);
+
+ get_values_from_reg(c->reg, 0x120, 5, v);
+ for (i = 0; i < 2; i++) {
+ seq_printf(sf, "BS_CRC%u_LOW:\t\t0x%X\n", i, v[i << 1]);
+ seq_printf(sf, "BS_CRC%u_HIGH:\t\t0x%X\n", i, v[(i << 1) + 1]);
+ }
+ seq_printf(sf, "BS_USER:\t\t0x%X\n", v[4]);
+}
+
+static struct komeda_component_funcs d71_timing_ctrlr_funcs = {
+ .update = d71_timing_ctrlr_update,
+ .disable = d71_timing_ctrlr_disable,
+ .dump_register = d71_timing_ctrlr_dump,
+};
+
+static int d71_timing_ctrlr_init(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct komeda_component *c;
+ struct komeda_timing_ctrlr *ctrlr;
+ u32 pipe_id, comp_id;
+
+ get_resources_id(blk->block_info, &pipe_id, &comp_id);
+
+ c = komeda_component_add(&d71->pipes[pipe_id]->base, sizeof(*ctrlr),
+ KOMEDA_COMPONENT_TIMING_CTRLR,
+ BLOCK_INFO_INPUT_ID(blk->block_info),
+ &d71_timing_ctrlr_funcs,
+ 1, BIT(KOMEDA_COMPONENT_IPS0 + pipe_id),
+ BS_NUM_OUTPUT_IDS, reg, "DOU%d_BS", pipe_id);
+ if (IS_ERR(c)) {
+ DRM_ERROR("Failed to add display_ctrl component\n");
+ return PTR_ERR(c);
+ }
+
+ ctrlr = to_ctrlr(c);
+
+ ctrlr->supports_dual_link = true;
+
+ return 0;
+}
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg)
+{
+ struct d71_pipeline *pipe;
+ int blk_id = BLOCK_INFO_BLK_ID(blk->block_info);
+
+ int err = 0;
+
+ switch (BLOCK_INFO_BLK_TYPE(blk->block_info)) {
+ case D71_BLK_TYPE_GCU:
+ break;
+
+ case D71_BLK_TYPE_LPU:
+ pipe = d71->pipes[blk_id];
+ pipe->lpu_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_LPU_LAYER:
+ err = d71_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_LPU_WB_LAYER:
+ err = d71_wb_layer_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU:
+ pipe = d71->pipes[blk_id];
+ pipe->cu_addr = reg;
+ err = d71_compiz_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_CU_SPLITTER:
+ case D71_BLK_TYPE_CU_SCALER:
+ case D71_BLK_TYPE_CU_MERGER:
+ break;
+
+ case D71_BLK_TYPE_DOU:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_IPS:
+ err = d71_improc_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_DOU_FT_COEFF:
+ pipe = d71->pipes[blk_id];
+ pipe->dou_ft_coeff_addr = reg;
+ break;
+
+ case D71_BLK_TYPE_DOU_BS:
+ err = d71_timing_ctrlr_init(d71, blk, reg);
+ break;
+
+ case D71_BLK_TYPE_GLB_LT_COEFF:
+ break;
+
+ case D71_BLK_TYPE_GLB_SCL_COEFF:
+ d71->glb_scl_coeff_addr[blk_id] = reg;
+ break;
+
+ default:
+ DRM_ERROR("Unknown block (block_info: 0x%x) is found\n",
+ blk->block_info);
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
index edbf9daa1545..34506ef7ad40 100644
--- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c
@@ -4,13 +4,425 @@
* Author: James.Qian.Wang <james.qian.wang@arm.com>
*
*/
+
+#include <drm/drm_print.h>
+#include "d71_dev.h"
#include "malidp_io.h"
-#include "komeda_dev.h"
+
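+/*
+ * Each get_*_event() helper below follows the same pattern: read the block's
+ * raw IRQ status, translate it into KOMEDA_EVENT_ and KOMEDA_ERR_ bits,
+ * clear any sticky error bits in the status register, and finally clear the
+ * raw IRQ status.
+ */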
+static u64 get_lpu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->lpu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & LPU_IRQ_IBSY)
+ evts |= KOMEDA_EVENT_IBSY;
+ if (raw_status & LPU_IRQ_EOW)
+ evts |= KOMEDA_EVENT_EOW;
+
+ if (raw_status & (LPU_IRQ_ERR | LPU_IRQ_IBSY)) {
+ u32 restore = 0, tbu_status;
+		/* Check for errors in the LPU status */
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & LPU_STATUS_AXIE) {
+ restore |= LPU_STATUS_AXIE;
+ evts |= KOMEDA_ERR_AXIE;
+ }
+ if (status & LPU_STATUS_ACE0) {
+ restore |= LPU_STATUS_ACE0;
+ evts |= KOMEDA_ERR_ACE0;
+ }
+ if (status & LPU_STATUS_ACE1) {
+ restore |= LPU_STATUS_ACE1;
+ evts |= KOMEDA_ERR_ACE1;
+ }
+ if (status & LPU_STATUS_ACE2) {
+ restore |= LPU_STATUS_ACE2;
+ evts |= KOMEDA_ERR_ACE2;
+ }
+ if (status & LPU_STATUS_ACE3) {
+ restore |= LPU_STATUS_ACE3;
+ evts |= KOMEDA_ERR_ACE3;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+
+ restore = 0;
+		/* Check for errors in the TBU status */
+ tbu_status = malidp_read32(reg, LPU_TBU_STATUS);
+ if (tbu_status & LPU_TBU_STATUS_TCF) {
+ restore |= LPU_TBU_STATUS_TCF;
+ evts |= KOMEDA_ERR_TCF;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTNG) {
+ restore |= LPU_TBU_STATUS_TTNG;
+ evts |= KOMEDA_ERR_TTNG;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TITR) {
+ restore |= LPU_TBU_STATUS_TITR;
+ evts |= KOMEDA_ERR_TITR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TEMR) {
+ restore |= LPU_TBU_STATUS_TEMR;
+ evts |= KOMEDA_ERR_TEMR;
+ }
+ if (tbu_status & LPU_TBU_STATUS_TTF) {
+ restore |= LPU_TBU_STATUS_TTF;
+ evts |= KOMEDA_ERR_TTF;
+ }
+ if (restore != 0)
+ malidp_write32_mask(reg, LPU_TBU_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_cu_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->cu_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & CU_IRQ_OVR)
+ evts |= KOMEDA_EVENT_OVR;
+
+ if (raw_status & (CU_IRQ_ERR | CU_IRQ_OVR)) {
+ status = malidp_read32(reg, BLK_STATUS) & 0x7FFFFFFF;
+ if (status & CU_STATUS_CPE)
+ evts |= KOMEDA_ERR_CPE;
+ if (status & CU_STATUS_ZME)
+ evts |= KOMEDA_ERR_ZME;
+ if (status & CU_STATUS_CFGE)
+ evts |= KOMEDA_ERR_CFGE;
+ if (status)
+ malidp_write32_mask(reg, BLK_STATUS, status, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+
+ return evts;
+}
+
+static u64 get_dou_event(struct d71_pipeline *d71_pipeline)
+{
+ u32 __iomem *reg = d71_pipeline->dou_addr;
+ u32 status, raw_status;
+ u64 evts = 0ULL;
+
+ raw_status = malidp_read32(reg, BLK_IRQ_RAW_STATUS);
+ if (raw_status & DOU_IRQ_PL0)
+ evts |= KOMEDA_EVENT_VSYNC;
+ if (raw_status & DOU_IRQ_UND)
+ evts |= KOMEDA_EVENT_URUN;
+
+ if (raw_status & (DOU_IRQ_ERR | DOU_IRQ_UND)) {
+ u32 restore = 0;
+
+ status = malidp_read32(reg, BLK_STATUS);
+ if (status & DOU_STATUS_DRIFTTO) {
+ restore |= DOU_STATUS_DRIFTTO;
+ evts |= KOMEDA_ERR_DRIFTTO;
+ }
+ if (status & DOU_STATUS_FRAMETO) {
+ restore |= DOU_STATUS_FRAMETO;
+ evts |= KOMEDA_ERR_FRAMETO;
+ }
+ if (status & DOU_STATUS_TETO) {
+ restore |= DOU_STATUS_TETO;
+ evts |= KOMEDA_ERR_TETO;
+ }
+ if (status & DOU_STATUS_CSCE) {
+ restore |= DOU_STATUS_CSCE;
+ evts |= KOMEDA_ERR_CSCE;
+ }
+
+ if (restore != 0)
+ malidp_write32_mask(reg, BLK_STATUS, restore, 0);
+ }
+
+ malidp_write32(reg, BLK_IRQ_CLEAR, raw_status);
+ return evts;
+}
+
+static u64 get_pipeline_event(struct d71_pipeline *d71_pipeline, u32 gcu_status)
+{
+	u64 evts = 0ULL;
+
+ if (gcu_status & (GLB_IRQ_STATUS_LPU0 | GLB_IRQ_STATUS_LPU1))
+ evts |= get_lpu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_CU0 | GLB_IRQ_STATUS_CU1))
+ evts |= get_cu_event(d71_pipeline);
+
+ if (gcu_status & (GLB_IRQ_STATUS_DOU0 | GLB_IRQ_STATUS_DOU1))
+ evts |= get_dou_event(d71_pipeline);
+
+ return evts;
+}
+
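+/*
+ * Top-level IRQ handler: read the global IRQ status once, let the GCU and
+ * every flagged pipeline translate and clear their own interrupt sources,
+ * then report the collected events back to the core via komeda_events.
+ */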
+static irqreturn_t
+d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 status, gcu_status, raw_status;
+
+ gcu_status = malidp_read32(d71->gcu_addr, GLB_IRQ_STATUS);
+
+ if (gcu_status & GLB_IRQ_STATUS_GCU) {
+ raw_status = malidp_read32(d71->gcu_addr, BLK_IRQ_RAW_STATUS);
+ if (raw_status & GCU_IRQ_CVAL0)
+ evts->pipes[0] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_CVAL1)
+ evts->pipes[1] |= KOMEDA_EVENT_FLIP;
+ if (raw_status & GCU_IRQ_ERR) {
+ status = malidp_read32(d71->gcu_addr, BLK_STATUS);
+ if (status & GCU_STATUS_MERR) {
+ evts->global |= KOMEDA_ERR_MERR;
+ malidp_write32_mask(d71->gcu_addr, BLK_STATUS,
+ GCU_STATUS_MERR, 0);
+ }
+ }
+
+ malidp_write32(d71->gcu_addr, BLK_IRQ_CLEAR, raw_status);
+ }
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE0)
+ evts->pipes[0] |= get_pipeline_event(d71->pipes[0], gcu_status);
+
+ if (gcu_status & GLB_IRQ_STATUS_PIPE1)
+ evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status);
+
+ return gcu_status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \
+ GCU_IRQ_MODE | GCU_IRQ_ERR)
+#define ENABLED_LPU_IRQS (LPU_IRQ_IBSY | LPU_IRQ_ERR | LPU_IRQ_EOW)
+#define ENABLED_CU_IRQS (CU_IRQ_OVR | CU_IRQ_ERR)
+#define ENABLED_DOU_IRQS (DOU_IRQ_UND | DOU_IRQ_ERR)
+
+static int d71_enable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK,
+ ENABLED_GCU_IRQS, ENABLED_GCU_IRQS);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, ENABLED_CU_IRQS);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, ENABLED_LPU_IRQS);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, ENABLED_DOU_IRQS);
+ }
+ return 0;
+}
+
+static int d71_disable_irq(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe;
+ u32 i;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_IRQ_MASK, ENABLED_GCU_IRQS, 0);
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = d71->pipes[i];
+ malidp_write32_mask(pipe->cu_addr, BLK_IRQ_MASK,
+ ENABLED_CU_IRQS, 0);
+ malidp_write32_mask(pipe->lpu_addr, BLK_IRQ_MASK,
+ ENABLED_LPU_IRQS, 0);
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ ENABLED_DOU_IRQS, 0);
+ }
+ return 0;
+}
+
+static void d71_on_off_vblank(struct komeda_dev *mdev, int master_pipe, bool on)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ struct d71_pipeline *pipe = d71->pipes[master_pipe];
+
+ malidp_write32_mask(pipe->dou_addr, BLK_IRQ_MASK,
+ DOU_IRQ_PL0, on ? DOU_IRQ_PL0 : 0);
+}
+
+static int to_d71_opmode(int core_mode)
+{
+ switch (core_mode) {
+ case KOMEDA_MODE_DISP0:
+ return DO0_ACTIVE_MODE;
+ case KOMEDA_MODE_DISP1:
+ return DO1_ACTIVE_MODE;
+ case KOMEDA_MODE_DUAL_DISP:
+ return DO01_ACTIVE_MODE;
+ case KOMEDA_MODE_INACTIVE:
+ return INACTIVE_MODE;
+ default:
+ WARN(1, "Unknown operation mode");
+ return INACTIVE_MODE;
+ }
+}
+
+static int d71_change_opmode(struct komeda_dev *mdev, int new_mode)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 opmode = to_d71_opmode(new_mode);
+ int ret;
+
+ malidp_write32_mask(d71->gcu_addr, BLK_CONTROL, 0x7, opmode);
+
+ ret = dp_wait_cond(((malidp_read32(d71->gcu_addr, BLK_CONTROL) & 0x7) == opmode),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+static void d71_flush(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+ u32 reg_offset = (master_pipe == 0) ?
+ GCU_CONFIG_VALID0 : GCU_CONFIG_VALID1;
+
+ malidp_write32(d71->gcu_addr, reg_offset, GCU_CONFIG_CVAL);
+}
+
+static int d71_reset(struct d71_dev *d71)
+{
+ u32 __iomem *gcu = d71->gcu_addr;
+ int ret;
+
+ malidp_write32_mask(gcu, BLK_CONTROL,
+ GCU_CONTROL_SRST, GCU_CONTROL_SRST);
+
+ ret = dp_wait_cond(!(malidp_read32(gcu, BLK_CONTROL) & GCU_CONTROL_SRST),
+ 100, 1000, 10000);
+
+ return ret > 0 ? 0 : -ETIMEDOUT;
+}
+
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk)
+{
+ int i;
+
+ blk->block_info = malidp_read32(reg, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(blk->block_info) == D71_BLK_TYPE_RESERVED)
+ return;
+
+ blk->pipeline_info = malidp_read32(reg, BLK_PIPELINE_INFO);
+
+ /* get valid input and output ids */
+ for (i = 0; i < PIPELINE_INFO_N_VALID_INPUTS(blk->pipeline_info); i++)
+ blk->input_ids[i] = malidp_read32(reg + i, BLK_VALID_INPUT_ID0);
+ for (i = 0; i < PIPELINE_INFO_N_OUTPUTS(blk->pipeline_info); i++)
+ blk->output_ids[i] = malidp_read32(reg + i, BLK_OUTPUT_ID0);
+}
+
+static void d71_cleanup(struct komeda_dev *mdev)
+{
+ struct d71_dev *d71 = mdev->chip_data;
+
+ if (!d71)
+ return;
+
+ devm_kfree(mdev->dev, d71);
+ mdev->chip_data = NULL;
+}
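+
+/*
+ * Enumerate the D71 resources: reset the core, read the global configuration
+ * from the GCU and PERIPH blocks, create the pipelines, then walk the
+ * register blocks (each D71_BLOCK_SIZE apart) and probe every non-reserved
+ * block.
+ */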
static int d71_enum_resources(struct komeda_dev *mdev)
{
- /* TODO add enum resources */
- return -1;
+ struct d71_dev *d71;
+ struct komeda_pipeline *pipe;
+ struct block_header blk;
+ u32 __iomem *blk_base;
+ u32 i, value, offset;
+ int err;
+
+ d71 = devm_kzalloc(mdev->dev, sizeof(*d71), GFP_KERNEL);
+ if (!d71)
+ return -ENOMEM;
+
+ mdev->chip_data = d71;
+ d71->mdev = mdev;
+ d71->gcu_addr = mdev->reg_base;
+ d71->periph_addr = mdev->reg_base + (D71_BLOCK_OFFSET_PERIPH >> 2);
+
+ err = d71_reset(d71);
+ if (err) {
+ DRM_ERROR("Fail to reset d71 device.\n");
+ goto err_cleanup;
+ }
+
+ /* probe GCU */
+ value = malidp_read32(d71->gcu_addr, GLB_CORE_INFO);
+ d71->num_blocks = value & 0xFF;
+ d71->num_pipelines = (value >> 8) & 0x7;
+
+ if (d71->num_pipelines > D71_MAX_PIPELINE) {
+ DRM_ERROR("d71 supports %d pipelines, but got: %d.\n",
+ D71_MAX_PIPELINE, d71->num_pipelines);
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ /* probe PERIPH */
+ value = malidp_read32(d71->periph_addr, BLK_BLOCK_INFO);
+ if (BLOCK_INFO_BLK_TYPE(value) != D71_BLK_TYPE_PERIPH) {
+ DRM_ERROR("access blk periph but got blk: %d.\n",
+ BLOCK_INFO_BLK_TYPE(value));
+ err = -EINVAL;
+ goto err_cleanup;
+ }
+
+ value = malidp_read32(d71->periph_addr, PERIPH_CONFIGURATION_ID);
+
+ d71->max_line_size = value & PERIPH_MAX_LINE_SIZE ? 4096 : 2048;
+ d71->max_vsize = 4096;
+ d71->num_rich_layers = value & PERIPH_NUM_RICH_LAYERS ? 2 : 1;
+ d71->supports_dual_link = value & PERIPH_SPLIT_EN ? true : false;
+ d71->integrates_tbu = value & PERIPH_TBU_EN ? true : false;
+
+ for (i = 0; i < d71->num_pipelines; i++) {
+ pipe = komeda_pipeline_add(mdev, sizeof(struct d71_pipeline),
+ NULL);
+ if (IS_ERR(pipe)) {
+ err = PTR_ERR(pipe);
+ goto err_cleanup;
+ }
+ d71->pipes[i] = to_d71_pipeline(pipe);
+ }
+
+	/* loop over the register blocks and probe them */
+ i = 2; /* exclude GCU and PERIPH */
+ offset = D71_BLOCK_SIZE; /* skip GCU */
+ while (i < d71->num_blocks) {
+ blk_base = mdev->reg_base + (offset >> 2);
+
+ d71_read_block_header(blk_base, &blk);
+ if (BLOCK_INFO_BLK_TYPE(blk.block_info) != D71_BLK_TYPE_RESERVED) {
+ err = d71_probe_block(d71, &blk, blk_base);
+ if (err)
+ goto err_cleanup;
+ i++;
+ }
+
+ offset += D71_BLOCK_SIZE;
+ }
+
+ DRM_DEBUG("total %d (out of %d) blocks are found.\n",
+ i, d71->num_blocks);
+
+ return 0;
+
+err_cleanup:
+ d71_cleanup(mdev);
+ return err;
}
#define __HW_ID(__group, __format) \
@@ -93,19 +505,22 @@ static void d71_init_fmt_tbl(struct komeda_dev *mdev)
static struct komeda_dev_funcs d71_chip_funcs = {
.init_format_table = d71_init_fmt_tbl,
.enum_resources = d71_enum_resources,
- .cleanup = NULL,
+ .cleanup = d71_cleanup,
+ .irq_handler = d71_irq_handler,
+ .enable_irq = d71_enable_irq,
+ .disable_irq = d71_disable_irq,
+ .on_off_vblank = d71_on_off_vblank,
+ .change_opmode = d71_change_opmode,
+ .flush = d71_flush,
};
-#define GLB_ARCH_ID 0x000
-#define GLB_CORE_ID 0x004
-#define GLB_CORE_INFO 0x008
-
struct komeda_dev_funcs *
d71_identify(u32 __iomem *reg_base, struct komeda_chip_info *chip)
{
chip->arch_id = malidp_read32(reg_base, GLB_ARCH_ID);
chip->core_id = malidp_read32(reg_base, GLB_CORE_ID);
chip->core_info = malidp_read32(reg_base, GLB_CORE_INFO);
+ chip->bus_width = D71_BUS_WIDTH_16_BYTES;
return &d71_chip_funcs;
}
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
new file mode 100644
index 000000000000..7465c57d9774
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_DEV_H_
+#define _D71_DEV_H_
+
+#include "komeda_dev.h"
+#include "komeda_pipeline.h"
+#include "d71_regs.h"
+
+struct d71_pipeline {
+ struct komeda_pipeline base;
+
+ /* d71 private pipeline blocks */
+ u32 __iomem *lpu_addr;
+ u32 __iomem *cu_addr;
+ u32 __iomem *dou_addr;
+ u32 __iomem *dou_ft_coeff_addr; /* forward transform coeffs table */
+};
+
+struct d71_dev {
+ struct komeda_dev *mdev;
+
+ int num_blocks;
+ int num_pipelines;
+ int num_rich_layers;
+ u32 max_line_size;
+ u32 max_vsize;
+ u32 supports_dual_link : 1;
+ u32 integrates_tbu : 1;
+
+ /* global register blocks */
+ u32 __iomem *gcu_addr;
+ /* scaling coeffs table */
+ u32 __iomem *glb_scl_coeff_addr[D71_MAX_GLB_SCL_COEFF];
+ u32 __iomem *periph_addr;
+
+ struct d71_pipeline *pipes[D71_MAX_PIPELINE];
+};
+
+#define to_d71_pipeline(x) container_of(x, struct d71_pipeline, base)
+
+int d71_probe_block(struct d71_dev *d71,
+ struct block_header *blk, u32 __iomem *reg);
+void d71_read_block_header(u32 __iomem *reg, struct block_header *blk);
+
+#endif /* !_D71_DEV_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
new file mode 100644
index 000000000000..2d5e6d00b42c
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h
@@ -0,0 +1,530 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+#ifndef _D71_REG_H_
+#define _D71_REG_H_
+
+/* Common block registers offset */
+#define BLK_BLOCK_INFO 0x000
+#define BLK_PIPELINE_INFO 0x004
+#define BLK_VALID_INPUT_ID0 0x020
+#define BLK_OUTPUT_ID0 0x060
+#define BLK_INPUT_ID0 0x080
+#define BLK_IRQ_RAW_STATUS 0x0A0
+#define BLK_IRQ_CLEAR 0x0A4
+#define BLK_IRQ_MASK 0x0A8
+#define BLK_IRQ_STATUS 0x0AC
+#define BLK_STATUS 0x0B0
+#define BLK_INFO 0x0C0
+#define BLK_CONTROL 0x0D0
+#define BLK_SIZE 0x0D4
+#define BLK_IN_SIZE 0x0E0
+
+#define BLK_P0_PTR_LOW 0x100
+#define BLK_P0_PTR_HIGH 0x104
+#define BLK_P0_STRIDE 0x108
+#define BLK_P1_PTR_LOW 0x110
+#define BLK_P1_PTR_HIGH 0x114
+#define BLK_P1_STRIDE 0x118
+#define BLK_P2_PTR_LOW 0x120
+#define BLK_P2_PTR_HIGH 0x124
+
+#define BLOCK_INFO_N_SUBBLKS(x) ((x) & 0x000F)
+#define BLOCK_INFO_BLK_ID(x) (((x) & 0x00F0) >> 4)
+#define BLOCK_INFO_BLK_TYPE(x) (((x) & 0xFF00) >> 8)
+#define BLOCK_INFO_INPUT_ID(x) ((x) & 0xFFF0)
+#define BLOCK_INFO_TYPE_ID(x) (((x) & 0x0FF0) >> 4)
+
+#define PIPELINE_INFO_N_OUTPUTS(x) ((x) & 0x000F)
+#define PIPELINE_INFO_N_VALID_INPUTS(x) (((x) & 0x0F00) >> 8)
+
+/* Common block control register bits */
+#define BLK_CTRL_EN BIT(0)
+/* Common size macro */
+#define HV_SIZE(h, v) (((h) & 0x1FFF) + (((v) & 0x1FFF) << 16))
+#define HV_OFFSET(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+#define HV_CROP(h, v) (((h) & 0xFFF) + (((v) & 0xFFF) << 16))
+
+/* AD_CONTROL register */
+#define AD_CONTROL 0x160
+
+/* AD_CONTROL register bits */
+#define AD_AEN BIT(0)
+#define AD_YT BIT(1)
+#define AD_BS BIT(2)
+#define AD_WB BIT(3)
+#define AD_TH BIT(4)
+
+/* Global Control Unit */
+#define GLB_ARCH_ID 0x000
+#define GLB_CORE_ID 0x004
+#define GLB_CORE_INFO 0x008
+#define GLB_IRQ_STATUS 0x010
+
+#define GCU_CONFIG_VALID0 0x0D4
+#define GCU_CONFIG_VALID1 0x0D8
+
+/* GCU_CONTROL_BITS */
+#define GCU_CONTROL_MODE(x) ((x) & 0x7)
+#define GCU_CONTROL_SRST BIT(16)
+
+/* GCU opmode */
+#define INACTIVE_MODE 0
+#define TBU_CONNECT_MODE 1
+#define TBU_DISCONNECT_MODE 2
+#define DO0_ACTIVE_MODE 3
+#define DO1_ACTIVE_MODE 4
+#define DO01_ACTIVE_MODE 5
+
+/* GLB_IRQ_STATUS bits */
+#define GLB_IRQ_STATUS_GCU BIT(0)
+#define GLB_IRQ_STATUS_LPU0 BIT(8)
+#define GLB_IRQ_STATUS_LPU1 BIT(9)
+#define GLB_IRQ_STATUS_ATU0 BIT(10)
+#define GLB_IRQ_STATUS_ATU1 BIT(11)
+#define GLB_IRQ_STATUS_ATU2 BIT(12)
+#define GLB_IRQ_STATUS_ATU3 BIT(13)
+#define GLB_IRQ_STATUS_CU0 BIT(16)
+#define GLB_IRQ_STATUS_CU1 BIT(17)
+#define GLB_IRQ_STATUS_DOU0 BIT(24)
+#define GLB_IRQ_STATUS_DOU1 BIT(25)
+
+#define GLB_IRQ_STATUS_PIPE0 (GLB_IRQ_STATUS_LPU0 |\
+ GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_CU0 |\
+ GLB_IRQ_STATUS_DOU0)
+
+#define GLB_IRQ_STATUS_PIPE1 (GLB_IRQ_STATUS_LPU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3 |\
+ GLB_IRQ_STATUS_CU1 |\
+ GLB_IRQ_STATUS_DOU1)
+
+#define GLB_IRQ_STATUS_ATU (GLB_IRQ_STATUS_ATU0 |\
+ GLB_IRQ_STATUS_ATU1 |\
+ GLB_IRQ_STATUS_ATU2 |\
+ GLB_IRQ_STATUS_ATU3)
+
+/* GCU_IRQ_BITS */
+#define GCU_IRQ_CVAL0 BIT(0)
+#define GCU_IRQ_CVAL1 BIT(1)
+#define GCU_IRQ_MODE BIT(4)
+#define GCU_IRQ_ERR BIT(11)
+
+/* GCU_STATUS_BITS */
+#define GCU_STATUS_MODE(x) ((x) & 0x7)
+#define GCU_STATUS_MERR BIT(4)
+#define GCU_STATUS_TCS0 BIT(8)
+#define GCU_STATUS_TCS1 BIT(9)
+#define GCU_STATUS_ACTIVE BIT(31)
+
+/* GCU_CONFIG_VALIDx BITS */
+#define GCU_CONFIG_CVAL BIT(0)
+
+/* PERIPHERAL registers */
+#define PERIPH_MAX_LINE_SIZE BIT(0)
+#define PERIPH_NUM_RICH_LAYERS BIT(4)
+#define PERIPH_SPLIT_EN BIT(8)
+#define PERIPH_TBU_EN BIT(12)
+#define PERIPH_AFBC_DMA_EN BIT(16)
+#define PERIPH_CONFIGURATION_ID 0x1D4
+
+/* LPU register */
+#define LPU_TBU_STATUS 0x0B4
+#define LPU_RAXI_CONTROL 0x0D0
+#define LPU_WAXI_CONTROL 0x0D4
+#define LPU_TBU_CONTROL 0x0D8
+
+/* LPU_xAXI_CONTROL_BITS */
+#define TO_RAXI_AOUTSTDCAPB(x) (x)
+#define TO_RAXI_BOUTSTDCAPB(x) ((x) << 8)
+#define TO_RAXI_BEN(x) ((x) << 15)
+#define TO_xAXI_BURSTLEN(x) ((x) << 16)
+#define TO_xAXI_AxQOS(x) ((x) << 24)
+#define TO_xAXI_ORD(x) ((x) << 31)
+#define TO_WAXI_OUTSTDCAPB(x) (x)
+
+#define RAXI_AOUTSTDCAPB_MASK 0x7F
+#define RAXI_BOUTSTDCAPB_MASK 0x7F00
+#define RAXI_BEN_MASK BIT(15)
+#define xAXI_BURSTLEN_MASK 0x3F0000
+#define xAXI_AxQOS_MASK 0xF000000
+#define xAXI_ORD_MASK BIT(31)
+#define WAXI_OUTSTDCAPB_MASK 0x3F
+
+/* LPU_TBU_CONTROL BITS */
+#define TO_TBU_DOUTSTDCAPB(x) (x)
+#define TBU_DOUTSTDCAPB_MASK 0x3F
+
+/* LPU_IRQ_BITS */
+#define LPU_IRQ_IBSY BIT(10)
+#define LPU_IRQ_ERR BIT(11)
+#define LPU_IRQ_EOW BIT(12)
+#define LPU_IRQ_PL0 BIT(13)
+
+/* LPU_STATUS_BITS */
+#define LPU_STATUS_AXIED(x) ((x) & 0xF)
+#define LPU_STATUS_AXIE BIT(4)
+#define LPU_STATUS_AXIRP BIT(5)
+#define LPU_STATUS_AXIWP BIT(6)
+#define LPU_STATUS_ACE0 BIT(16)
+#define LPU_STATUS_ACE1 BIT(17)
+#define LPU_STATUS_ACE2 BIT(18)
+#define LPU_STATUS_ACE3 BIT(19)
+#define LPU_STATUS_ACTIVE BIT(31)
+
+#define AXIEID_MASK 0xF
+#define AXIE_MASK LPU_STATUS_AXIE
+#define AXIRP_MASK LPU_STATUS_AXIRP
+#define AXIWP_MASK LPU_STATUS_AXIWP
+
+#define FROM_AXIEID(reg) ((reg) & AXIEID_MASK)
+#define TO_AXIE(x) ((x) << 4)
+#define FROM_AXIRP(reg) (((reg) & AXIRP_MASK) >> 5)
+#define FROM_AXIWP(reg) (((reg) & AXIWP_MASK) >> 6)
+
+/* LPU_TBU_STATUS_BITS */
+#define LPU_TBU_STATUS_TCF BIT(1)
+#define LPU_TBU_STATUS_TTNG BIT(2)
+#define LPU_TBU_STATUS_TITR BIT(8)
+#define LPU_TBU_STATUS_TEMR BIT(16)
+#define LPU_TBU_STATUS_TTF BIT(31)
+
+/* LPU_TBU_CONTROL BITS */
+#define LPU_TBU_CTRL_TLBPEN BIT(16)
+
+/* CROSSBAR CONTROL BITS */
+#define CBU_INPUT_CTRL_EN BIT(0)
+#define CBU_NUM_INPUT_IDS 5
+#define CBU_NUM_OUTPUT_IDS 5
+
+/* CU register */
+#define CU_BG_COLOR 0x0DC
+#define CU_INPUT0_SIZE 0x0E0
+#define CU_INPUT0_OFFSET 0x0E4
+#define CU_INPUT0_CONTROL 0x0E8
+#define CU_INPUT1_SIZE 0x0F0
+#define CU_INPUT1_OFFSET 0x0F4
+#define CU_INPUT1_CONTROL 0x0F8
+#define CU_INPUT2_SIZE 0x100
+#define CU_INPUT2_OFFSET 0x104
+#define CU_INPUT2_CONTROL 0x108
+#define CU_INPUT3_SIZE 0x110
+#define CU_INPUT3_OFFSET 0x114
+#define CU_INPUT3_CONTROL 0x118
+#define CU_INPUT4_SIZE 0x120
+#define CU_INPUT4_OFFSET 0x124
+#define CU_INPUT4_CONTROL 0x128
+
+#define CU_PER_INPUT_REGS 4
+
+#define CU_NUM_INPUT_IDS 5
+#define CU_NUM_OUTPUT_IDS 1
+
+/* CU control register bits */
+#define CU_CTRL_COPROC BIT(0)
+
+/* CU_IRQ_BITS */
+#define CU_IRQ_OVR BIT(9)
+#define CU_IRQ_ERR BIT(11)
+
+/* CU_STATUS_BITS */
+#define CU_STATUS_CPE BIT(0)
+#define CU_STATUS_ZME BIT(1)
+#define CU_STATUS_CFGE BIT(2)
+#define CU_STATUS_ACTIVE BIT(31)
+
+/* CU input control register bits */
+#define CU_INPUT_CTRL_EN BIT(0)
+#define CU_INPUT_CTRL_PAD BIT(1)
+#define CU_INPUT_CTRL_PMUL BIT(2)
+#define CU_INPUT_CTRL_ALPHA(x) (((x) & 0xFF) << 8)
+
+/* DOU register */
+
+/* DOU_IRQ_BITS */
+#define DOU_IRQ_UND BIT(8)
+#define DOU_IRQ_ERR BIT(11)
+#define DOU_IRQ_PL0 BIT(13)
+#define DOU_IRQ_PL1 BIT(14)
+
+/* DOU_STATUS_BITS */
+#define DOU_STATUS_DRIFTTO BIT(0)
+#define DOU_STATUS_FRAMETO BIT(1)
+#define DOU_STATUS_TETO BIT(2)
+#define DOU_STATUS_CSCE BIT(8)
+#define DOU_STATUS_ACTIVE BIT(31)
+
+/* Layer registers */
+#define LAYER_INFO 0x0C0
+#define LAYER_R_CONTROL 0x0D4
+#define LAYER_FMT 0x0D8
+#define LAYER_LT_COEFFTAB 0x0DC
+#define LAYER_PALPHA 0x0E4
+
+#define LAYER_YUV_RGB_COEFF0 0x130
+
+#define LAYER_AD_H_CROP 0x164
+#define LAYER_AD_V_CROP 0x168
+
+#define LAYER_RGB_RGB_COEFF0 0x170
+
+/* L_CONTROL_BITS */
+#define L_EN BIT(0)
+#define L_IT BIT(4)
+#define L_R2R BIT(5)
+#define L_FT BIT(6)
+#define L_ROT(x) (((x) & 3) << 8)
+#define L_HFLIP BIT(10)
+#define L_VFLIP BIT(11)
+#define L_TBU_EN BIT(16)
+#define L_A_RCACHE(x) (((x) & 0xF) << 28)
+#define L_ROT_R0 0
+#define L_ROT_R90 1
+#define L_ROT_R180 2
+#define L_ROT_R270 3
+
+/* LAYER_R_CONTROL BITS */
+#define LR_CHI422_BILINEAR 0
+#define LR_CHI422_REPLICATION 1
+#define LR_CHI420_JPEG (0 << 2)
+#define LR_CHI420_MPEG (1 << 2)
+
+#define L_ITSEL(x) ((x) & 0xFFF)
+#define L_FTSEL(x) (((x) & 0xFFF) << 16)
+
+#define LAYER_PER_PLANE_REGS 4
+
+/* Layer_WR registers */
+#define LAYER_WR_PROG_LINE 0x0D4
+#define LAYER_WR_FORMAT 0x0D8
+
+/* Layer_WR control bits */
+#define LW_OFM BIT(4)
+#define LW_LALPHA(x) (((x) & 0xFF) << 8)
+#define LW_A_WCACHE(x) (((x) & 0xF) << 28)
+#define LW_TBU_EN BIT(16)
+
+#define AxCACHE_MASK 0xF0000000
+
+/* Layer AXI R/W cache setting */
+#define AxCACHE_B BIT(0) /* Bufferable */
+#define AxCACHE_M BIT(1) /* Modifiable */
+#define AxCACHE_RA BIT(2) /* Read-Allocate */
+#define AxCACHE_WA BIT(3) /* Write-Allocate */
+
+/* Layer info bits */
+#define L_INFO_RF BIT(0)
+#define L_INFO_CM BIT(1)
+#define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7)
+
+/* Scaler registers */
+#define SC_COEFFTAB 0x0DC
+#define SC_OUT_SIZE 0x0E4
+#define SC_H_CROP 0x0E8
+#define SC_V_CROP 0x0EC
+#define SC_H_INIT_PH 0x0F0
+#define SC_H_DELTA_PH 0x0F4
+#define SC_V_INIT_PH 0x0F8
+#define SC_V_DELTA_PH 0x0FC
+#define SC_ENH_LIMITS 0x130
+#define SC_ENH_COEFF0 0x134
+
+#define SC_MAX_ENH_COEFF 9
+
+/* SC_CTRL_BITS */
+#define SC_CTRL_SCL BIT(0)
+#define SC_CTRL_LS BIT(1)
+#define SC_CTRL_AP BIT(4)
+#define SC_CTRL_IENH BIT(8)
+#define SC_CTRL_RGBSM BIT(16)
+#define SC_CTRL_ASM BIT(17)
+
+#define SC_VTSEL(vtal) ((vtal) << 16)
+
+#define SC_NUM_INPUTS_IDS 1
+#define SC_NUM_OUTPUTS_IDS 1
+
+#define MG_NUM_INPUTS_IDS 2
+#define MG_NUM_OUTPUTS_IDS 1
+
+/* Merger registers */
+#define MG_INPUT_ID0 BLK_INPUT_ID0
+#define MG_INPUT_ID1 (MG_INPUT_ID0 + 4)
+#define MG_SIZE BLK_SIZE
+
+/* Splitter registers */
+#define SP_OVERLAP_SIZE 0xD8
+
+/* Backend registers */
+#define BS_INFO 0x0C0
+#define BS_PROG_LINE 0x0D4
+#define BS_PREFETCH_LINE 0x0D8
+#define BS_BG_COLOR 0x0DC
+#define BS_ACTIVESIZE 0x0E0
+#define BS_HINTERVALS 0x0E4
+#define BS_VINTERVALS 0x0E8
+#define BS_SYNC 0x0EC
+#define BS_DRIFT_TO 0x100
+#define BS_FRAME_TO 0x104
+#define BS_TE_TO 0x108
+#define BS_T0_INTERVAL 0x110
+#define BS_T1_INTERVAL 0x114
+#define BS_T2_INTERVAL 0x118
+#define BS_CRC0_LOW 0x120
+#define BS_CRC0_HIGH 0x124
+#define BS_CRC1_LOW 0x128
+#define BS_CRC1_HIGH 0x12C
+#define BS_USER 0x130
+
+/* BS control register bits */
+#define BS_CTRL_EN BIT(0)
+#define BS_CTRL_VM BIT(1)
+#define BS_CTRL_BM BIT(2)
+#define BS_CTRL_HMASK BIT(4)
+#define BS_CTRL_VD BIT(5)
+#define BS_CTRL_TE BIT(8)
+#define BS_CTRL_TS BIT(9)
+#define BS_CTRL_TM BIT(12)
+#define BS_CTRL_DL BIT(16)
+#define BS_CTRL_SBS BIT(17)
+#define BS_CTRL_CRC BIT(18)
+#define BS_CTRL_PM BIT(20)
+
+/* BS active size/intervals */
+#define BS_H_INTVALS(hfp, hbp) (((hfp) & 0xFFF) + (((hbp) & 0x3FF) << 16))
+#define BS_V_INTVALS(vfp, vbp) (((vfp) & 0x3FFF) + (((vbp) & 0xFF) << 16))
+
+/* BS_SYNC bits */
+#define BS_SYNC_HSW(x) ((x) & 0x3FF)
+#define BS_SYNC_HSP BIT(12)
+#define BS_SYNC_VSW(x) (((x) & 0xFF) << 16)
+#define BS_SYNC_VSP BIT(28)
+
+#define BS_NUM_INPUT_IDS 0
+#define BS_NUM_OUTPUT_IDS 0
+
+/* Image process registers */
+#define IPS_DEPTH 0x0D8
+#define IPS_RGB_RGB_COEFF0 0x130
+#define IPS_RGB_YUV_COEFF0 0x170
+
+#define IPS_DEPTH_MARK 0xF
+
+/* IPS control register bits */
+#define IPS_CTRL_RGB BIT(0)
+#define IPS_CTRL_FT BIT(4)
+#define IPS_CTRL_YUV BIT(8)
+#define IPS_CTRL_CHD422 BIT(9)
+#define IPS_CTRL_CHD420 BIT(10)
+#define IPS_CTRL_LPF BIT(11)
+#define IPS_CTRL_DITH BIT(12)
+#define IPS_CTRL_CLAMP BIT(16)
+#define IPS_CTRL_SBS BIT(17)
+
+/* IPS info register bits */
+#define IPS_INFO_CHD420 BIT(10)
+
+#define IPS_NUM_INPUT_IDS 2
+#define IPS_NUM_OUTPUT_IDS 1
+
+/* FT_COEFF block registers */
+#define FT_COEFF0 0x80
+#define GLB_IT_COEFF 0x80
+
+/* GLB_SC_COEFF registers */
+#define GLB_SC_COEFF_ADDR 0x0080
+#define GLB_SC_COEFF_DATA 0x0084
+#define GLB_LT_COEFF_DATA 0x0080
+
+#define GLB_SC_COEFF_MAX_NUM 1024
+#define GLB_LT_COEFF_NUM 65
+/* GLB_SC_ADDR */
+#define SC_COEFF_R_ADDR BIT(18)
+#define SC_COEFF_G_ADDR BIT(17)
+#define SC_COEFF_B_ADDR BIT(16)
+
+#define SC_COEFF_DATA(x, y) (((y) & 0xFFFF) | (((x) & 0xFFFF) << 16))
+
+enum d71_blk_type {
+ D71_BLK_TYPE_GCU = 0x00,
+ D71_BLK_TYPE_LPU = 0x01,
+ D71_BLK_TYPE_CU = 0x02,
+ D71_BLK_TYPE_DOU = 0x03,
+ D71_BLK_TYPE_AEU = 0x04,
+ D71_BLK_TYPE_GLB_LT_COEFF = 0x05,
+ D71_BLK_TYPE_GLB_SCL_COEFF = 0x06, /* SH/SV scaler coeff */
+ D71_BLK_TYPE_GLB_SC_COEFF = 0x07,
+ D71_BLK_TYPE_PERIPH = 0x08,
+ D71_BLK_TYPE_LPU_TRUSTED = 0x09,
+ D71_BLK_TYPE_AEU_TRUSTED = 0x0A,
+ D71_BLK_TYPE_LPU_LAYER = 0x10,
+ D71_BLK_TYPE_LPU_WB_LAYER = 0x11,
+ D71_BLK_TYPE_CU_SPLITTER = 0x20,
+ D71_BLK_TYPE_CU_SCALER = 0x21,
+ D71_BLK_TYPE_CU_MERGER = 0x22,
+ D71_BLK_TYPE_DOU_IPS = 0x30,
+ D71_BLK_TYPE_DOU_BS = 0x31,
+ D71_BLK_TYPE_DOU_FT_COEFF = 0x32,
+ D71_BLK_TYPE_AEU_DS = 0x40,
+ D71_BLK_TYPE_AEU_AES = 0x41,
+ D71_BLK_TYPE_RESERVED = 0xFF
+};
+
+/* Constant of components */
+#define D71_MAX_PIPELINE 2
+#define D71_PIPELINE_MAX_SCALERS 2
+#define D71_PIPELINE_MAX_LAYERS 4
+
+#define D71_MAX_GLB_IT_COEFF 3
+#define D71_MAX_GLB_SCL_COEFF 4
+
+#define D71_MAX_LAYERS_PER_LPU 4
+#define D71_BLOCK_MAX_INPUT 9
+#define D71_BLOCK_MAX_OUTPUT 5
+#define D71_MAX_SC_PER_CU 2
+
+#define D71_BLOCK_OFFSET_PERIPH 0xFE00
+#define D71_BLOCK_SIZE 0x0200
+
+#define D71_DEFAULT_PREPRETCH_LINE 5
+#define D71_BUS_WIDTH_16_BYTES 16
+
+#define D71_MIN_LINE_SIZE 64
+#define D71_MIN_VERTICAL_SIZE 64
+#define D71_SC_MIN_LIN_SIZE 4
+#define D71_SC_MIN_VERTICAL_SIZE 4
+#define D71_SC_MAX_LIN_SIZE 2048
+#define D71_SC_MAX_VERTICAL_SIZE 4096
+
+#define D71_SC_MAX_UPSCALING 64
+#define D71_SC_MAX_DOWNSCALING 6
+#define D71_SC_SPLIT_OVERLAP 8
+#define D71_SC_ENH_SPLIT_OVERLAP 1
+
+#define D71_MG_MIN_MERGED_SIZE 4
+#define D71_MG_MAX_MERGED_HSIZE 4032
+#define D71_MG_MAX_MERGED_VSIZE 4096
+
+#define D71_PALPHA_DEF_MAP 0xFFAA5500
+#define D71_LAYER_CONTROL_DEFAULT 0x30000000
+#define D71_WB_LAYER_CONTROL_DEFAULT 0x3000FF00
+#define D71_BS_CONTROL_DEFAULT 0x00000002
+
+struct block_header {
+ u32 block_info;
+ u32 pipeline_info;
+ u32 input_ids[D71_BLOCK_MAX_INPUT];
+ u32 output_ids[D71_BLOCK_MAX_OUTPUT];
+};
+
+static inline u32 get_block_type(struct block_header *blk)
+{
+ return BLOCK_INFO_BLK_TYPE(blk->block_info);
+}
+
+#endif /* !_D71_REG_H_ */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
index 3ca5718aa0c2..62fad59f5a6a 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c
@@ -18,10 +18,415 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
-struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+/**
+ * komeda_crtc_atomic_check - build display output data flow
+ * @crtc: DRM crtc
+ * @state: the crtc state object
+ *
+ * crtc_atomic_check is the final check stage, so besides building a display
+ * data pipeline according to the crtc_state, it also needs to release or
+ * disable the unclaimed pipeline resources.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
+ int err;
+
+ if (state->active) {
+ err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
+ if (err)
+ return err;
+ }
+
+ /* release unclaimed pipeline resources */
+ err = komeda_release_unclaimed_resources(kcrtc->master, kcrtc_st);
+ if (err)
+ return err;
+
+ return 0;
+}
+
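+/* For now the main engine clock (mclk) simply follows the pixel clock rate */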
+static u32 komeda_calc_mclk(struct komeda_crtc_state *kcrtc_st)
+{
+ unsigned long mclk = kcrtc_st->base.adjusted_mode.clock * 1000;
+
+ return mclk;
+}
+
+/* Activating a crtc mainly needs two parts of preparation:
+ * 1. adjust the display operation mode.
+ * 2. enable the needed clocks.
+ */
+static int
+komeda_crtc_prepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(kcrtc->base.state);
+ unsigned long pxlclk_rate = kcrtc_st->base.adjusted_mode.clock * 1000;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode | BIT(master->id);
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+ DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+ /* Only need to enable mclk on single display mode, but no need to
+ * enable mclk it on dual display mode, since the dual mode always
+ * switch from single display mode, the mclk already enabled, no need
+ * to enable it again.
+ */
+ if (new_mode != KOMEDA_MODE_DUAL_DISP) {
+ err = clk_set_rate(mdev->mclk, komeda_calc_mclk(kcrtc_st));
+ if (err)
+ DRM_ERROR("failed to set mclk.\n");
+ err = clk_prepare_enable(mdev->mclk);
+ if (err)
+ DRM_ERROR("failed to enable mclk.\n");
+ }
+
+ err = clk_prepare_enable(master->aclk);
+ if (err)
+ DRM_ERROR("failed to enable axi clk for pipe%d.\n", master->id);
+ err = clk_set_rate(master->pxlclk, pxlclk_rate);
+ if (err)
+ DRM_ERROR("failed to set pxlclk for pipe%d\n", master->id);
+ err = clk_prepare_enable(master->pxlclk);
+ if (err)
+ DRM_ERROR("failed to enable pxl clk for pipe%d.\n", master->id);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+static int
+komeda_crtc_unprepare(struct komeda_crtc *kcrtc)
+{
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ u32 new_mode;
+ int err;
+
+ mutex_lock(&mdev->lock);
+
+ new_mode = mdev->dpmode & (~BIT(master->id));
+
+ if (WARN_ON(new_mode == mdev->dpmode)) {
+ err = 0;
+ goto unlock;
+ }
+
+ err = mdev->funcs->change_opmode(mdev, new_mode);
+ if (err) {
+ DRM_ERROR("failed to change opmode: 0x%x -> 0x%x.\n,",
+ mdev->dpmode, new_mode);
+ goto unlock;
+ }
+
+ mdev->dpmode = new_mode;
+
+ clk_disable_unprepare(master->pxlclk);
+ clk_disable_unprepare(master->aclk);
+ if (new_mode == KOMEDA_MODE_INACTIVE)
+ clk_disable_unprepare(mdev->mclk);
+
+unlock:
+ mutex_unlock(&mdev->lock);
+
+ return err;
+}
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts)
+{
+ struct drm_crtc *crtc = &kcrtc->base;
+ u32 events = evts->pipes[kcrtc->master->id];
+
+ if (events & KOMEDA_EVENT_VSYNC)
+ drm_crtc_handle_vblank(crtc);
+
+	/* will be handled together with the writeback support */
+ if (events & KOMEDA_EVENT_EOW)
+ DRM_DEBUG("EOW.\n");
+
+ if (events & KOMEDA_EVENT_FLIP) {
+ unsigned long flags;
+ struct drm_pending_vblank_event *event;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (kcrtc->disable_done) {
+ complete_all(kcrtc->disable_done);
+ kcrtc->disable_done = NULL;
+ } else if (crtc->state->event) {
+ event = crtc->state->event;
+ /*
+ * Consume event before notifying drm core that flip
+ * happened.
+ */
+ crtc->state->event = NULL;
+ drm_crtc_send_vblank_event(crtc, event);
+ } else {
+ DRM_WARN("CRTC[%d]: FLIP happen but no pending commit.\n",
+ drm_crtc_index(&kcrtc->base));
+ }
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void
+komeda_crtc_do_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc->state);
+ struct komeda_dev *mdev = kcrtc->base.dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_FLUSH: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ kcrtc_st->active_pipes, kcrtc_st->affected_pipes);
+
+ /* step 1: update the pipeline/component state to HW */
+ if (has_bit(master->id, kcrtc_st->affected_pipes))
+ komeda_pipeline_update(master, old->state);
+
+ /* step 2: notify the HW to kickoff the update */
+ mdev->funcs->flush(mdev, master->id, kcrtc_st->active_pipes);
+}
+
+static void
+komeda_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ komeda_crtc_prepare(to_kcrtc(crtc));
+ drm_crtc_vblank_on(crtc);
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static void
+komeda_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_crtc_state *old_st = to_kcrtc_st(old);
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_pipeline *master = kcrtc->master;
+ struct completion *disable_done = &crtc->state->commit->flip_done;
+ struct completion temp;
+ int timeout;
+
+ DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n",
+ drm_crtc_index(crtc),
+ old_st->active_pipes, old_st->affected_pipes);
+
+ if (has_bit(master->id, old_st->active_pipes))
+ komeda_pipeline_disable(master, old->state);
+
+	/* crtc_disable has two scenarios, depending on the state->active switch:
+	 * 1. active -> inactive
+	 *    This commit is a disable commit, and it is finished (done) once
+	 *    the disable operation completes, so in this case we can directly
+	 *    use crtc->state->event to track the HW disable operation.
+	 * 2. active -> active
+	 *    The crtc->commit is not a disable but a modeset operation on an
+	 *    active crtc. Such a commit is actually completed by three DRM
+	 *    operations:
+	 *        crtc_disable, update_planes(crtc_flush), crtc_enable
+	 *    so in this case crtc->commit covers the whole process; we cannot
+	 *    use it to trace only the disable, and instead need a temporary
+	 *    flip_done for tracing the disable, while crtc->state->event is
+	 *    used for the crtc_enable operation.
+	 * That is also why the modeset commit is skipped in
+	 * komeda_crtc_atomic_flush().
+ */
+ if (crtc->state->active) {
+ struct komeda_pipeline_state *pipe_st;
+ /* clear the old active_comps to zero */
+ pipe_st = komeda_pipeline_get_old_state(master, old->state);
+ pipe_st->active_comps = 0;
+
+ init_completion(&temp);
+ kcrtc->disable_done = &temp;
+ disable_done = &temp;
+ }
+
+ mdev->funcs->flush(mdev, master->id, 0);
+
+	/* wait for the disable to take effect */
+ timeout = wait_for_completion_timeout(disable_done, HZ);
+ if (timeout == 0) {
+ DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id);
+ if (crtc->state->active) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ kcrtc->disable_done = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+ }
+
+ drm_crtc_vblank_off(crtc);
+ komeda_crtc_unprepare(kcrtc);
+}
+
+static void
+komeda_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old)
+{
+ /* commit with modeset will be handled in enable/disable */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ komeda_crtc_do_flush(crtc, old);
+}
+
+static enum drm_mode_status
+komeda_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *m)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk, pxlclk;
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+	/* main clock/AXI clk must be faster than pxlclk */
+ mode_clk = m->clock * 1000;
+ pxlclk = clk_round_rate(master->pxlclk, mode_clk);
+ if (pxlclk != mode_clk) {
+ DRM_DEBUG_ATOMIC("pxlclk doesn't support %ld Hz\n", mode_clk);
+
+ return MODE_NOCLOCK;
+ }
+
+ if (clk_round_rate(mdev->mclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("mclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ if (clk_round_rate(master->aclk, mode_clk) < pxlclk) {
+ DRM_DEBUG_ATOMIC("aclk can't satisfy the requirement of %s-clk: %ld.\n",
+ m->name, pxlclk);
+
+ return MODE_CLOCK_HIGH;
+ }
+
+ return MODE_OK;
+}
+
+static bool komeda_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *m,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+ struct komeda_pipeline *master = kcrtc->master;
+ long mode_clk = m->clock * 1000;
+
+ adjusted_mode->clock = clk_round_rate(master->pxlclk, mode_clk) / 1000;
+
+ return true;
+}
+
+static struct drm_crtc_helper_funcs komeda_crtc_helper_funcs = {
+ .atomic_check = komeda_crtc_atomic_check,
+ .atomic_flush = komeda_crtc_atomic_flush,
+ .atomic_enable = komeda_crtc_atomic_enable,
+ .atomic_disable = komeda_crtc_atomic_disable,
+ .mode_valid = komeda_crtc_mode_valid,
+ .mode_fixup = komeda_crtc_mode_fixup,
};
+static void komeda_crtc_reset(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *state;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(to_kcrtc_st(crtc->state));
+ crtc->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ crtc->state = &state->base;
+ crtc->state->crtc = crtc;
+ }
+}
+
+static struct drm_crtc_state *
+komeda_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+ struct komeda_crtc_state *old = to_kcrtc_st(crtc->state);
+ struct komeda_crtc_state *new;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &new->base);
+
+ new->affected_pipes = old->active_pipes;
+
+ return &new->base;
+}
+
+static void komeda_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ __drm_atomic_helper_crtc_destroy_state(state);
+ kfree(to_kcrtc_st(state));
+}
+
+static int komeda_crtc_vblank_enable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, true);
+ return 0;
+}
+
+static void komeda_crtc_vblank_disable(struct drm_crtc *crtc)
+{
+ struct komeda_dev *mdev = crtc->dev->dev_private;
+ struct komeda_crtc *kcrtc = to_kcrtc(crtc);
+
+ mdev->funcs->on_off_vblank(mdev, kcrtc->master->id, false);
+}
+
static const struct drm_crtc_funcs komeda_crtc_funcs = {
+ .gamma_set = drm_atomic_helper_legacy_gamma_set,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = komeda_crtc_reset,
+ .atomic_duplicate_state = komeda_crtc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_crtc_atomic_destroy_state,
+ .enable_vblank = komeda_crtc_vblank_enable,
+ .disable_vblank = komeda_crtc_vblank_disable,
};
int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms,
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 70e9bb7fa30c..ca3599e4a4d3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -8,11 +8,99 @@
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif
#include <drm/drm_print.h>
#include "komeda_dev.h"
+#ifdef CONFIG_DEBUG_FS
+static int komeda_register_show(struct seq_file *sf, void *x)
+{
+ struct komeda_dev *mdev = sf->private;
+ int i;
+
+ if (mdev->funcs->dump_register)
+ mdev->funcs->dump_register(mdev, sf);
+
+ for (i = 0; i < mdev->n_pipelines; i++)
+ komeda_pipeline_dump_register(mdev->pipelines[i], sf);
+
+ return 0;
+}
+
+static int komeda_register_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, komeda_register_show, inode->i_private);
+}
+
+static const struct file_operations komeda_register_fops = {
+ .owner = THIS_MODULE,
+ .open = komeda_register_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void komeda_debugfs_init(struct komeda_dev *mdev)
+{
+ if (!debugfs_initialized())
+ return;
+
+ mdev->debugfs_root = debugfs_create_dir("komeda", NULL);
+ if (IS_ERR_OR_NULL(mdev->debugfs_root))
+ return;
+
+ debugfs_create_file("register", 0444, mdev->debugfs_root,
+ mdev, &komeda_register_fops);
+}
+#endif
+
+static ssize_t
+core_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
+}
+static DEVICE_ATTR_RO(core_id);
+
+static ssize_t
+config_id_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct komeda_dev *mdev = dev_to_mdev(dev);
+ struct komeda_pipeline *pipe = mdev->pipelines[0];
+ union komeda_config_id config_id;
+ int i;
+
+ memset(&config_id, 0, sizeof(config_id));
+
+ config_id.max_line_sz = pipe->layers[0]->hsize_in.end;
+ config_id.n_pipelines = mdev->n_pipelines;
+ config_id.n_scalers = pipe->n_scalers;
+ config_id.n_layers = pipe->n_layers;
+ config_id.n_richs = 0;
+ for (i = 0; i < pipe->n_layers; i++) {
+ if (pipe->layers[i]->layer_type == KOMEDA_FMT_RICH_LAYER)
+ config_id.n_richs++;
+ }
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", config_id.value);
+}
+static DEVICE_ATTR_RO(config_id);
+
+static struct attribute *komeda_sysfs_entries[] = {
+ &dev_attr_core_id.attr,
+ &dev_attr_config_id.attr,
+ NULL,
+};
+
+static struct attribute_group komeda_sysfs_attr_group = {
+ .attrs = komeda_sysfs_entries,
+};
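core_id and config_id are exposed as read-only sysfs attributes on the komeda device. A hedged userspace sketch of reading one of them; the sysfs path is an assumption here, since the real one depends on the platform device / DT node name:

/* Hedged userspace sketch: read the komeda core_id attribute. The device
 * path is an assumption; substitute the actual platform device directory.
 */
#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f = fopen("/sys/devices/platform/c0000000.display/core_id", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("core_id: %s", buf);
	fclose(f);
	return 0;
}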
+
static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
{
struct komeda_pipeline *pipe;
@@ -53,6 +141,7 @@ static int komeda_parse_pipe_dt(struct komeda_dev *mdev, struct device_node *np)
static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
{
+ struct platform_device *pdev = to_platform_device(dev);
struct device_node *child, *np = dev->of_node;
struct clk *clk;
int ret;
@@ -62,6 +151,11 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
return PTR_ERR(clk);
mdev->mclk = clk;
+ mdev->irq = platform_get_irq(pdev, 0);
+ if (mdev->irq < 0) {
+ DRM_ERROR("could not get IRQ number.\n");
+ return mdev->irq;
+ }
for_each_available_child_of_node(np, child) {
if (of_node_cmp(child->name, "pipeline") == 0) {
@@ -99,6 +193,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
if (!mdev)
return ERR_PTR(-ENOMEM);
+ mutex_init(&mdev->lock);
+
mdev->dev = dev;
mdev->reg_base = devm_ioremap_resource(dev, io_res);
if (IS_ERR(mdev->reg_base)) {
@@ -147,6 +243,22 @@ struct komeda_dev *komeda_dev_create(struct device *dev)
goto err_cleanup;
}
+ err = komeda_assemble_pipelines(mdev);
+ if (err) {
+ DRM_ERROR("assemble display pipelines failed.\n");
+ goto err_cleanup;
+ }
+
+ err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group);
+ if (err) {
+ DRM_ERROR("create sysfs group failed.\n");
+ goto err_cleanup;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ komeda_debugfs_init(mdev);
+#endif
+
return mdev;
err_cleanup:
@@ -160,6 +272,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev)
struct komeda_dev_funcs *funcs = mdev->funcs;
int i;
+ sysfs_remove_group(&dev->kobj, &komeda_sysfs_attr_group);
+
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(mdev->debugfs_root);
+#endif
+
for (i = 0; i < mdev->n_pipelines; i++) {
komeda_pipeline_destroy(mdev, mdev->pipelines[i]);
mdev->pipelines[i] = NULL;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
index 0f77dead6a23..29e03c4e1ffc 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h
@@ -13,6 +13,33 @@
#include "malidp_product.h"
#include "komeda_format_caps.h"
+#define KOMEDA_EVENT_VSYNC BIT_ULL(0)
+#define KOMEDA_EVENT_FLIP BIT_ULL(1)
+#define KOMEDA_EVENT_URUN BIT_ULL(2)
+#define KOMEDA_EVENT_IBSY BIT_ULL(3)
+#define KOMEDA_EVENT_OVR BIT_ULL(4)
+#define KOMEDA_EVENT_EOW BIT_ULL(5)
+#define KOMEDA_EVENT_MODE BIT_ULL(6)
+
+#define KOMEDA_ERR_TETO BIT_ULL(14)
+#define KOMEDA_ERR_TEMR BIT_ULL(15)
+#define KOMEDA_ERR_TITR BIT_ULL(16)
+#define KOMEDA_ERR_CPE BIT_ULL(17)
+#define KOMEDA_ERR_CFGE BIT_ULL(18)
+#define KOMEDA_ERR_AXIE BIT_ULL(19)
+#define KOMEDA_ERR_ACE0 BIT_ULL(20)
+#define KOMEDA_ERR_ACE1 BIT_ULL(21)
+#define KOMEDA_ERR_ACE2 BIT_ULL(22)
+#define KOMEDA_ERR_ACE3 BIT_ULL(23)
+#define KOMEDA_ERR_DRIFTTO BIT_ULL(24)
+#define KOMEDA_ERR_FRAMETO BIT_ULL(25)
+#define KOMEDA_ERR_CSCE BIT_ULL(26)
+#define KOMEDA_ERR_ZME BIT_ULL(27)
+#define KOMEDA_ERR_MERR BIT_ULL(28)
+#define KOMEDA_ERR_TCF BIT_ULL(29)
+#define KOMEDA_ERR_TTNG BIT_ULL(30)
+#define KOMEDA_ERR_TTF BIT_ULL(31)
+
/* malidp device id */
enum {
MALI_D71 = 0,
@@ -39,6 +66,11 @@ struct komeda_product_data {
struct komeda_dev;
+struct komeda_events {
+ u64 global;
+ u64 pipes[KOMEDA_MAX_PIPELINES];
+};
+
/**
* struct komeda_dev_funcs
*
@@ -60,6 +92,49 @@ struct komeda_dev_funcs {
int (*enum_resources)(struct komeda_dev *mdev);
/** @cleanup: call to chip to cleanup komeda_dev->chip data */
void (*cleanup)(struct komeda_dev *mdev);
+ /**
+ * @irq_handler:
+ *
+ * for CORE to get the HW event from the CHIP when interrupt happened.
+ */
+ irqreturn_t (*irq_handler)(struct komeda_dev *mdev,
+ struct komeda_events *events);
+ /** @enable_irq: enable irq */
+ int (*enable_irq)(struct komeda_dev *mdev);
+ /** @disable_irq: disable irq */
+ int (*disable_irq)(struct komeda_dev *mdev);
+ /** @on_off_vblank: notify HW to on/off vblank */
+ void (*on_off_vblank)(struct komeda_dev *mdev,
+ int master_pipe, bool on);
+
+ /** @dump_register: Optional, dump registers to seq_file */
+ void (*dump_register)(struct komeda_dev *mdev, struct seq_file *seq);
+ /**
+ * @change_opmode:
+ *
+ * Notify HW to switch to a new display operation mode.
+ */
+ int (*change_opmode)(struct komeda_dev *mdev, int new_mode);
+ /** @flush: Notify the HW to flush or kickoff the update */
+ void (*flush)(struct komeda_dev *mdev,
+ int master_pipe, u32 active_pipes);
+};
+
+/*
+ * DISPLAY_MODE describes how many displays are enabled; the value is passed
+ * to the CHIP by &komeda_dev_funcs->change_opmode(), so the CHIP can assign
+ * pipeline resources according to this usage hint.
+ * - KOMEDA_MODE_DISP0: Only one display enabled, pipeline-0 works as master.
+ * - KOMEDA_MODE_DISP1: Only one display enabled, pipeline-1 works as master.
+ * - KOMEDA_MODE_DUAL_DISP: Dual display mode, both displays are enabled.
+ * D71 also supports assigning two pipelines to a single display in mode
+ * KOMEDA_MODE_DISP0/DISP1.
+ */
+enum {
+ KOMEDA_MODE_INACTIVE = 0,
+ KOMEDA_MODE_DISP0 = BIT(0),
+ KOMEDA_MODE_DISP1 = BIT(1),
+ KOMEDA_MODE_DUAL_DISP = KOMEDA_MODE_DISP0 | KOMEDA_MODE_DISP1,
};
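Each CHIP backend receives one of these hints through change_opmode() so it can redistribute pipeline resources before the new configuration is flushed. A minimal sketch of how a caller might derive the hint from the set of active pipelines (the helper below is an illustration, not a function added by this patch):

/* Hedged sketch (illustrative helper, not part of this patch): map a
 * bitmask of active pipelines (BIT(pipe->id)) to a DISPLAY_MODE hint
 * suitable for komeda_dev_funcs->change_opmode().
 */
static int example_compute_opmode(u32 active_pipes)
{
	switch (active_pipes) {
	case BIT(0):
		return KOMEDA_MODE_DISP0;
	case BIT(1):
		return KOMEDA_MODE_DISP1;
	case BIT(0) | BIT(1):
		return KOMEDA_MODE_DUAL_DISP;
	default:
		return KOMEDA_MODE_INACTIVE;
	}
}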
/**
@@ -70,18 +145,31 @@ struct komeda_dev_funcs {
* control-abilites of device.
*/
struct komeda_dev {
+ /** @dev: the base device structure */
struct device *dev;
+ /** @reg_base: the base address of komeda io space */
u32 __iomem *reg_base;
+ /** @chip: the basic chip information */
struct komeda_chip_info chip;
/** @fmt_tbl: initialized by &komeda_dev_funcs->init_format_table */
struct komeda_format_caps_table fmt_tbl;
/** @pclk: APB clock for register access */
struct clk *pclk;
- /** @mck: HW main engine clk */
+ /** @mclk: HW main engine clk */
struct clk *mclk;
+ /** @irq: irq number */
+ int irq;
+
+ /** @lock: used to protect dpmode */
+ struct mutex lock;
+ /** @dpmode: current display mode */
+ u32 dpmode;
+
+ /** @n_pipelines: the number of pipelines in @pipelines */
int n_pipelines;
+ /** @pipelines: the komeda pipelines */
struct komeda_pipeline *pipelines[KOMEDA_MAX_PIPELINES];
/** @funcs: chip funcs to access to HW */
@@ -93,6 +181,9 @@ struct komeda_dev {
* destroyed by &komeda_dev_funcs.cleanup()
*/
void *chip_data;
+
+ /** @debugfs_root: root directory of komeda debugfs */
+ struct dentry *debugfs_root;
};
static inline bool
@@ -107,4 +198,6 @@ d71_identify(u32 __iomem *reg, struct komeda_chip_info *chip);
struct komeda_dev *komeda_dev_create(struct device *dev);
void komeda_dev_destroy(struct komeda_dev *mdev);
+struct komeda_dev *dev_to_mdev(struct device *dev);
+
#endif /*_KOMEDA_DEV_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
index 2bdd189b041d..cfa5068d9d1e 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c
@@ -17,6 +17,13 @@ struct komeda_drv {
struct komeda_kms_dev *kms;
};
+struct komeda_dev *dev_to_mdev(struct device *dev)
+{
+ struct komeda_drv *mdrv = dev_get_drvdata(dev);
+
+ return mdrv ? mdrv->mdev : NULL;
+}
+
static void komeda_unbind(struct device *dev)
{
struct komeda_drv *mdrv = dev_get_drvdata(dev);
@@ -120,7 +127,7 @@ static const struct komeda_product_data komeda_products[] = {
},
};
-const struct of_device_id komeda_of_match[] = {
+static const struct of_device_id komeda_of_match[] = {
{ .compatible = "arm,mali-d71", .data = &komeda_products[MALI_D71], },
{},
};
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
index 0de2e4a2afd2..ea2fe190c1e3 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h
@@ -10,11 +10,16 @@
#include <drm/drm_framebuffer.h>
#include "komeda_format_caps.h"
-/** struct komeda_fb - entend drm_framebuffer with komeda attribute */
+/**
+ * struct komeda_fb - Extending drm_framebuffer with komeda attributes
+ */
struct komeda_fb {
/** @base: &drm_framebuffer */
struct drm_framebuffer base;
- /* @format_caps: &komeda_format_caps */
+ /**
+ * @format_caps:
+ * extends drm_format_info with komeda specific information
+ */
const struct komeda_format_caps *format_caps;
/** @aligned_w: aligned frame buffer width */
u32 aligned_w;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 47a58ab20434..86f6542afb40 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -13,6 +13,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include "komeda_dev.h"
@@ -25,18 +26,39 @@ static int komeda_gem_cma_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- u32 alignment = 16; /* TODO get alignment from dev */
+ struct komeda_dev *mdev = dev->dev_private;
+ u32 pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
- args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8),
- alignment);
+ args->pitch = ALIGN(pitch, mdev->chip.bus_width);
return drm_gem_cma_dumb_create_internal(file, dev, args);
}
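With the chip's bus width used as the alignment, the dumb-buffer pitch is simply the byte width of one line rounded up to that value. A worked example with illustrative numbers:

/* Worked example of the pitch computation above (illustrative values):
 *   width = 1920, bpp = 32 -> byte width = DIV_ROUND_UP(1920 * 32, 8) = 7680
 *     chip.bus_width = 16  -> pitch = ALIGN(7680, 16) = 7680
 *   width = 1366, bpp = 32 -> byte width = 5464, ALIGN(5464, 16) = 5472
 */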
+static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct komeda_dev *mdev = drm->dev_private;
+ struct komeda_kms_dev *kms = to_kdev(drm);
+ struct komeda_events evts;
+ irqreturn_t status;
+ u32 i;
+
+ /* Call into the CHIP to recognize events */
+ memset(&evts, 0, sizeof(evts));
+ status = mdev->funcs->irq_handler(mdev, &evts);
+
+ /* Notify the crtc to handle the events */
+ for (i = 0; i < kms->n_crtcs; i++)
+ komeda_crtc_handle_event(&kms->crtcs[i], &evts);
+
+ return status;
+}
+
static struct drm_driver komeda_kms_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_HAVE_IRQ,
.lastclose = drm_fb_helper_lastclose,
+ .irq_handler = komeda_kms_irq_handler,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = komeda_gem_cma_dumb_create,
@@ -78,9 +100,37 @@ static const struct drm_mode_config_helper_funcs komeda_mode_config_helpers = {
.atomic_commit_tail = komeda_kms_commit_tail,
};
+static int komeda_kms_check(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_st, *new_crtc_st;
+ int i, err;
+
+ err = drm_atomic_helper_check_modeset(dev, state);
+ if (err)
+ return err;
+
+ /* komeda needs to re-calculate the resource assignment in every commit,
+ * so all affected planes (even unchanged ones) need to be added to the
+ * drm_atomic_state.
+ */
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_st, new_crtc_st, i) {
+ err = drm_atomic_add_affected_planes(state, crtc);
+ if (err)
+ return err;
+ }
+
+ err = drm_atomic_helper_check_planes(dev, state);
+ if (err)
+ return err;
+
+ return 0;
+}
+
static const struct drm_mode_config_funcs komeda_mode_config_funcs = {
.fb_create = komeda_fb_create,
- .atomic_check = drm_atomic_helper_check,
+ .atomic_check = komeda_kms_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -144,14 +194,25 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev)
drm_mode_config_reset(drm);
- err = drm_dev_register(drm, 0);
+ err = drm_irq_install(drm, mdev->irq);
if (err)
goto cleanup_mode_config;
+ err = mdev->funcs->enable_irq(mdev);
+ if (err)
+ goto uninstall_irq;
+
+ err = drm_dev_register(drm, 0);
+ if (err)
+ goto uninstall_irq;
+
return kms;
+uninstall_irq:
+ drm_irq_uninstall(drm);
cleanup_mode_config:
drm_mode_config_cleanup(drm);
+ komeda_kms_cleanup_private_objs(kms);
free_kms:
kfree(kms);
return ERR_PTR(err);
@@ -162,9 +223,11 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
struct drm_device *drm = &kms->base;
struct komeda_dev *mdev = drm->dev_private;
+ mdev->funcs->disable_irq(mdev);
drm_dev_unregister(drm);
+ drm_irq_uninstall(drm);
component_unbind_all(mdev->dev, drm);
- komeda_kms_cleanup_private_objs(mdev);
+ komeda_kms_cleanup_private_objs(kms);
drm_mode_config_cleanup(drm);
drm->dev_private = NULL;
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
index 874e9c9f0749..ac3d9209b4d9 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h
@@ -12,8 +12,12 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_writeback.h>
+#include <video/videomode.h>
+#include <video/display_timing.h>
-/** struct komeda_plane - komeda instance of drm_plane */
+/**
+ * struct komeda_plane - komeda instance of drm_plane
+ */
struct komeda_plane {
/** @base: &drm_plane */
struct drm_plane base;
@@ -68,9 +72,14 @@ struct komeda_crtc {
* merge into the master.
*/
struct komeda_pipeline *slave;
+
+ /** @disable_done: this flip_done is for tracking the disable */
+ struct completion *disable_done;
};
-/** struct komeda_crtc_state */
+/**
+ * struct komeda_crtc_state
+ */
struct komeda_crtc_state {
/** @base: &drm_crtc_state */
struct drm_crtc_state base;
@@ -78,7 +87,15 @@ struct komeda_crtc_state {
/* private properties */
/* computed state which are used by validate/check */
+ /**
+ * @affected_pipes:
+ * the affected pipelines in one display instance
+ */
u32 affected_pipes;
+ /**
+ * @active_pipes:
+ * the active pipelines in one display instance
+ */
u32 active_pipes;
};
@@ -106,7 +123,10 @@ int komeda_kms_add_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_planes(struct komeda_kms_dev *kms, struct komeda_dev *mdev);
int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev);
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev);
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms);
+
+void komeda_crtc_handle_event(struct komeda_crtc *kcrtc,
+ struct komeda_events *evts);
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
void komeda_kms_detach(struct komeda_kms_dev *kms);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index f1908e9ef128..c379439c6194 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -19,17 +19,17 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
if (mdev->n_pipelines + 1 > KOMEDA_MAX_PIPELINES) {
DRM_ERROR("Exceed max support %d pipelines.\n",
KOMEDA_MAX_PIPELINES);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
if (size < sizeof(*pipe)) {
DRM_ERROR("Request pipeline size too small.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
pipe = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!pipe)
- return NULL;
+ return ERR_PTR(-ENOMEM);
pipe->mdev = mdev;
pipe->id = mdev->n_pipelines;
@@ -62,7 +62,7 @@ void komeda_pipeline_destroy(struct komeda_dev *mdev,
devm_kfree(mdev->dev, pipe);
}
-struct komeda_component **
+static struct komeda_component **
komeda_pipeline_get_component_pos(struct komeda_pipeline *pipe, int id)
{
struct komeda_dev *mdev = pipe->mdev;
@@ -142,32 +142,32 @@ komeda_component_add(struct komeda_pipeline *pipe,
if (max_active_inputs > KOMEDA_COMPONENT_N_INPUTS) {
WARN(1, "please large KOMEDA_COMPONENT_N_INPUTS to %d.\n",
max_active_inputs);
- return NULL;
+ return ERR_PTR(-ENOSPC);
}
pos = komeda_pipeline_get_component_pos(pipe, id);
if (!pos || (*pos))
- return NULL;
+ return ERR_PTR(-EINVAL);
if (has_bit(id, KOMEDA_PIPELINE_LAYERS)) {
idx = id - KOMEDA_COMPONENT_LAYER0;
num = &pipe->n_layers;
if (idx != pipe->n_layers) {
DRM_ERROR("please add Layer by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
} else if (has_bit(id, KOMEDA_PIPELINE_SCALERS)) {
idx = id - KOMEDA_COMPONENT_SCALER0;
num = &pipe->n_scalers;
if (idx != pipe->n_scalers) {
DRM_ERROR("please add Scaler by id sequence.\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
}
c = devm_kzalloc(pipe->mdev->dev, comp_sz, GFP_KERNEL);
if (!c)
- return NULL;
+ return ERR_PTR(-ENOMEM);
c->id = id;
c->hw_id = hw_id;
@@ -200,3 +200,98 @@ void komeda_component_destroy(struct komeda_dev *mdev,
{
devm_kfree(mdev->dev, c);
}
+
+static void komeda_component_dump(struct komeda_component *c)
+{
+ if (!c)
+ return;
+
+ DRM_DEBUG(" %s: ID %d-0x%08lx.\n",
+ c->name, c->id, BIT(c->id));
+ DRM_DEBUG(" max_active_inputs:%d, supported_inputs: 0x%08x.\n",
+ c->max_active_inputs, c->supported_inputs);
+ DRM_DEBUG(" max_active_outputs:%d, supported_outputs: 0x%08x.\n",
+ c->max_active_outputs, c->supported_outputs);
+}
+
+static void komeda_pipeline_dump(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ DRM_INFO("Pipeline-%d: n_layers: %d, n_scalers: %d, output: %s\n",
+ pipe->id, pipe->n_layers, pipe->n_scalers,
+ pipe->of_output_dev ? pipe->of_output_dev->full_name : "none");
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_dump(c);
+ }
+}
+
+static void komeda_component_verify_inputs(struct komeda_component *c)
+{
+ struct komeda_pipeline *pipe = c->pipeline;
+ struct komeda_component *input;
+ int id;
+
+ dp_for_each_set_bit(id, c->supported_inputs) {
+ input = komeda_pipeline_get_component(pipe, id);
+ if (!input) {
+ c->supported_inputs &= ~(BIT(id));
+ DRM_WARN("Can not find input(ID-%d) for component: %s.\n",
+ id, c->name);
+ continue;
+ }
+
+ input->supported_outputs |= BIT(c->id);
+ }
+}
+
+static void komeda_pipeline_assemble(struct komeda_pipeline *pipe)
+{
+ struct komeda_component *c;
+ int id;
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ komeda_component_verify_inputs(c);
+ }
+}
+
+int komeda_assemble_pipelines(struct komeda_dev *mdev)
+{
+ struct komeda_pipeline *pipe;
+ int i;
+
+ for (i = 0; i < mdev->n_pipelines; i++) {
+ pipe = mdev->pipelines[i];
+
+ komeda_pipeline_assemble(pipe);
+ komeda_pipeline_dump(pipe);
+ }
+
+ return 0;
+}
+
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf)
+{
+ struct komeda_component *c;
+ u32 id;
+
+ seq_printf(sf, "\n======== Pipeline-%d ==========\n", pipe->id);
+
+ if (pipe->funcs && pipe->funcs->dump_register)
+ pipe->funcs->dump_register(pipe, sf);
+
+ dp_for_each_set_bit(id, pipe->avail_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ seq_printf(sf, "\n------%s------\n", c->name);
+ if (c->funcs->dump_register)
+ c->funcs->dump_register(c, sf);
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
index 8c950bc8ae96..b1f813a349a4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h
@@ -90,32 +90,35 @@ struct komeda_component {
u32 __iomem *reg;
/** @id: component id */
u32 id;
- /** @hw_ic: component hw id,
- * which is initialized by chip and used by chip only
+ /**
+ * @hw_id: component hw id,
+ * which is initialized by chip and used by chip only
*/
u32 hw_id;
/**
* @max_active_inputs:
- * @max_active_outpus:
+ * @max_active_outputs:
*
- * maximum number of inputs/outputs that can be active in the same time
+ * maximum number of inputs/outputs that can be active at the same time
* Note:
* the number isn't the bit number of @supported_inputs or
* @supported_outputs, but may be less than it, since component may not
* support enabling all @supported_inputs/outputs at the same time.
*/
u8 max_active_inputs;
+ /** @max_active_outputs: maximum number of outputs active at the same time */
u8 max_active_outputs;
/**
* @supported_inputs:
* @supported_outputs:
*
- * bitmask of BIT(component->id) for the supported inputs/outputs
+ * bitmask of BIT(component->id) for the supported inputs/outputs,
* describes the possibilities of how a component is linked into a
* pipeline.
*/
u32 supported_inputs;
+ /** @supported_outputs: bitmask of supported output component ids */
u32 supported_outputs;
/**
@@ -134,7 +137,8 @@ struct komeda_component {
struct komeda_component_output {
/** @component: indicate which component the data comes from */
struct komeda_component *component;
- /** @output_port:
+ /**
+ * @output_port:
* the output port of the &komeda_component_output.component
*/
u8 output_port;
@@ -150,11 +154,12 @@ struct komeda_component_output {
struct komeda_component_state {
/** @obj: tracking component_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @component: backpointer to the component */
struct komeda_component *component;
/**
* @binding_user:
- * currently bound user, the user can be crtc/plane/wb_conn, which is
- * valid decided by @component and @inputs
+ * currently bound user; the user can be @crtc, @plane or @wb_conn, and
+ * which one is valid is decided by @component and @inputs
*
* - Layer: its user always is plane.
* - compiz/improc/timing_ctrlr: the user is crtc.
@@ -162,20 +167,24 @@ struct komeda_component_state {
* - scaler: plane when input is layer, wb_conn if input is compiz.
*/
union {
+ /** @crtc: backpointer for user crtc */
struct drm_crtc *crtc;
+ /** @plane: backpointer for user plane */
struct drm_plane *plane;
+ /** @wb_conn: backpointer for user wb_connector */
struct drm_connector *wb_conn;
void *binding_user;
};
+
/**
* @active_inputs:
*
* active_inputs is bitmask of @inputs index
*
- * - active_inputs = changed_active_inputs + unchanged_active_inputs
- * - affected_inputs = old->active_inputs + new->active_inputs;
+ * - active_inputs = changed_active_inputs | unchanged_active_inputs
+ * - affected_inputs = old->active_inputs | new->active_inputs;
* - disabling_inputs = affected_inputs ^ active_inputs;
- * - changed_inputs = disabling_inputs + changed_active_inputs;
+ * - changed_inputs = disabling_inputs | changed_active_inputs;
*
* NOTE:
* changed_inputs doesn't include all active_input but only
@@ -183,7 +192,9 @@ struct komeda_component_state {
* level for dirty update.
*/
u16 active_inputs;
+ /** @changed_active_inputs: bitmask of the changed @active_inputs */
u16 changed_active_inputs;
+ /** @affected_inputs: bitmask for affected @inputs */
u16 affected_inputs;
/**
* @inputs:
@@ -204,57 +215,96 @@ static inline u16 component_changed_inputs(struct komeda_component_state *st)
return component_disabling_inputs(st) | st->changed_active_inputs;
}
+#define for_each_changed_input(st, i) \
+ for ((i) = 0; (i) < (st)->component->max_active_inputs; (i)++) \
+ if (has_bit((i), component_changed_inputs(st)))
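The masks documented on &komeda_component_state are plain bit arithmetic over @inputs indices. A small worked example, assuming the old state had inputs 0 and 1 active and the new state activates inputs 1 and 2 with input 1 unchanged:

/* Worked example (illustrative values):
 *   old active_inputs                 = 0b011  (inputs 0 and 1)
 *   new active_inputs                 = 0b110  (inputs 1 and 2)
 *   affected_inputs  = old | new      = 0b111
 *   disabling_inputs = affected ^ new = 0b001  (input 0 is being disabled)
 *   changed_active_inputs             = 0b100  (input 2 is newly enabled)
 *   changed_inputs = disabling | changed_active = 0b101
 */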
+
#define to_comp(__c) (((__c) == NULL) ? NULL : &((__c)->base))
#define to_cpos(__c) ((struct komeda_component **)&(__c))
-/* these structures are going to be filled in in uture patches */
struct komeda_layer {
struct komeda_component base;
- /* layer specific features and caps */
- int layer_type; /* RICH, SIMPLE or WB */
+ /* accepted h/v input range before rotation */
+ struct malidp_range hsize_in, vsize_in;
+ u32 layer_type; /* RICH, SIMPLE or WB */
+ u32 supported_rots;
};
struct komeda_layer_state {
struct komeda_component_state base;
/* layer specific configuration state */
+ u16 hsize, vsize;
+ u32 rot;
+ dma_addr_t addr[3];
};
-struct komeda_compiz {
+struct komeda_scaler {
struct komeda_component base;
- /* compiz specific features and caps */
+ /* scaler features and caps */
};
-struct komeda_compiz_state {
+struct komeda_scaler_state {
struct komeda_component_state base;
- /* compiz specific configuration state */
};
-struct komeda_scaler {
+struct komeda_compiz {
struct komeda_component base;
- /* scaler features and caps */
+ struct malidp_range hsize, vsize;
};
-struct komeda_scaler_state {
+struct komeda_compiz_input_cfg {
+ u16 hsize, vsize;
+ u16 hoffset, voffset;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
+struct komeda_compiz_state {
struct komeda_component_state base;
+ /* composition size */
+ u16 hsize, vsize;
+ struct komeda_compiz_input_cfg cins[KOMEDA_COMPONENT_N_INPUTS];
};
struct komeda_improc {
struct komeda_component base;
+ u32 supported_color_formats; /* DRM_RGB/YUV444/YUV420 */
+ u32 supported_color_depths; /* BIT(8) | BIT(10) */
+ u8 supports_degamma : 1;
+ u8 supports_csc : 1;
+ u8 supports_gamma : 1;
};
struct komeda_improc_state {
struct komeda_component_state base;
+ u16 hsize, vsize;
};
/* display timing controller */
struct komeda_timing_ctrlr {
struct komeda_component base;
+ u8 supports_dual_link : 1;
};
struct komeda_timing_ctrlr_state {
struct komeda_component_state base;
};
+/* Why define a separate structure instead of using plane_state directly?
+ * 1. Komeda supports layer_split, which means a plane_state can be split and
+ * handled by two layers, each layer handling only half of the plane image.
+ * 2. To fix up the user properties according to the HW's capabilities, e.g.
+ * the user sets rotation to R180, but the HW only supports REFLECT_X+Y;
+ * the rot here is the value after drm_rotation_simplify().
+ */
+struct komeda_data_flow_cfg {
+ struct komeda_component_output input;
+ u16 in_x, in_y, in_w, in_h;
+ u32 out_x, out_y, out_w, out_h;
+ u32 rot;
+ int blending_zorder;
+ u8 pixel_blend_mode, layer_alpha;
+};
+
/** struct komeda_pipeline_funcs */
struct komeda_pipeline_funcs {
/* dump_register: Optional, dump registers to seq_file */
@@ -280,14 +330,23 @@ struct komeda_pipeline {
int id;
/** @avail_comps: available components mask of pipeline */
u32 avail_comps;
+ /** @n_layers: the number of layers in @layers */
int n_layers;
+ /** @layers: the pipeline layers */
struct komeda_layer *layers[KOMEDA_PIPELINE_MAX_LAYERS];
+ /** @n_scalers: the number of scalers in @scalers */
int n_scalers;
+ /** @scalers: the pipeline scalers */
struct komeda_scaler *scalers[KOMEDA_PIPELINE_MAX_SCALERS];
+ /** @compiz: compositor */
struct komeda_compiz *compiz;
+ /** @wb_layer: writeback layer */
struct komeda_layer *wb_layer;
+ /** @improc: post image processor */
struct komeda_improc *improc;
+ /** @ctrlr: timing controller */
struct komeda_timing_ctrlr *ctrlr;
+ /** @funcs: chip pipeline functions */
struct komeda_pipeline_funcs *funcs; /* private pipeline functions */
/** @of_node: pipeline dt node */
@@ -308,6 +367,7 @@ struct komeda_pipeline {
struct komeda_pipeline_state {
/** @obj: tracking pipeline_state by drm_atomic_state */
struct drm_private_state obj;
+ /** @pipe: backpointer to the pipeline */
struct komeda_pipeline *pipe;
/** @crtc: currently bound crtc */
struct drm_crtc *crtc;
@@ -340,10 +400,13 @@ komeda_pipeline_add(struct komeda_dev *mdev, size_t size,
struct komeda_pipeline_funcs *funcs);
void komeda_pipeline_destroy(struct komeda_dev *mdev,
struct komeda_pipeline *pipe);
-
+int komeda_assemble_pipelines(struct komeda_dev *mdev);
struct komeda_component *
komeda_pipeline_get_component(struct komeda_pipeline *pipe, int id);
+void komeda_pipeline_dump_register(struct komeda_pipeline *pipe,
+ struct seq_file *sf);
+
/* component APIs */
struct komeda_component *
komeda_component_add(struct komeda_pipeline *pipe,
@@ -356,4 +419,26 @@ komeda_component_add(struct komeda_pipeline *pipe,
void komeda_component_destroy(struct komeda_dev *mdev,
struct komeda_component *c);
+struct komeda_plane_state;
+struct komeda_crtc_state;
+struct komeda_crtc;
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow);
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st);
+
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st);
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state);
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state);
+
#endif /* _KOMEDA_PIPELINE_H_*/
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
new file mode 100644
index 000000000000..36570d7dad61
--- /dev/null
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -0,0 +1,610 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * (C) COPYRIGHT 2018 ARM Limited. All rights reserved.
+ * Author: James.Qian.Wang <james.qian.wang@arm.com>
+ *
+ */
+
+#include <drm/drm_print.h>
+#include <linux/clk.h>
+#include "komeda_dev.h"
+#include "komeda_kms.h"
+#include "komeda_pipeline.h"
+#include "komeda_framebuffer.h"
+
+static inline bool is_switching_user(void *old, void *new)
+{
+ if (!old || !new)
+ return false;
+
+ return old != new;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_private_obj_state(state, &pipe->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_pipe_st(priv_st);
+}
+
+struct komeda_pipeline_state *
+komeda_pipeline_get_old_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+static struct komeda_pipeline_state *
+komeda_pipeline_get_new_state(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_new_private_obj_state(state, &pipe->obj);
+ if (priv_st)
+ return priv_to_pipe_st(priv_st);
+ return NULL;
+}
+
+/* Assign pipeline for crtc */
+static struct komeda_pipeline_state *
+komeda_pipeline_get_state_and_set_crtc(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *st;
+
+ st = komeda_pipeline_get_state(pipe, state);
+ if (IS_ERR(st))
+ return st;
+
+ if (is_switching_user(crtc, st->crtc)) {
+ DRM_DEBUG_ATOMIC("CRTC%d required pipeline%d is busy.\n",
+ drm_crtc_index(crtc), pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ /* a pipeline can only be disabled when it is free or unused */
+ if (!crtc && st->active_comps) {
+ DRM_DEBUG_ATOMIC("Disabling a busy pipeline:%d.\n", pipe->id);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->crtc = crtc;
+
+ if (crtc) {
+ struct komeda_crtc_state *kcrtc_st;
+
+ kcrtc_st = to_kcrtc_st(drm_atomic_get_new_crtc_state(state,
+ crtc));
+
+ kcrtc_st->active_pipes |= BIT(pipe->id);
+ kcrtc_st->affected_pipes |= BIT(pipe->id);
+ }
+ return st;
+}
+
+static struct komeda_component_state *
+komeda_component_get_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ WARN_ON(!drm_modeset_is_locked(&c->pipeline->obj.lock));
+
+ priv_st = drm_atomic_get_private_obj_state(state, &c->obj);
+ if (IS_ERR(priv_st))
+ return ERR_CAST(priv_st);
+
+ return priv_to_comp_st(priv_st);
+}
+
+static struct komeda_component_state *
+komeda_component_get_old_state(struct komeda_component *c,
+ struct drm_atomic_state *state)
+{
+ struct drm_private_state *priv_st;
+
+ priv_st = drm_atomic_get_old_private_obj_state(state, &c->obj);
+ if (priv_st)
+ return priv_to_comp_st(priv_st);
+ return NULL;
+}
+
+/**
+ * komeda_component_get_state_and_set_user()
+ *
+ * @c: component to get state and set user
+ * @state: global atomic state
+ * @user: direct user, the binding user
+ * @crtc: the CRTC user, the big boss :)
+ *
+ * This function accepts two users:
+ * - The direct user: can be a plane/crtc/wb_connector, depending on the component
+ * - The big boss (CRTC)
+ * The CRTC is the big boss (the final user), because all component resources
+ * will eventually be assigned to a CRTC; e.g. a layer is bound to a kms_plane,
+ * but the kms_plane is in turn bound to a CRTC.
+ *
+ * The big boss (CRTC) is for pipeline assignment, since a &komeda_component
+ * isn't independent and cannot be assigned to a CRTC freely: it belongs to a
+ * specific pipeline, only the pipeline can be shared between crtcs, and the
+ * pipeline as a whole (including all its internal components) is assigned to
+ * a specific CRTC.
+ *
+ * So when setting a user on a komeda_component, first check the status of
+ * component->pipeline to see if the pipeline is available on this specific
+ * CRTC. If the pipeline is busy (assigned to another CRTC), then even if the
+ * required component is free, it still cannot be assigned to the direct user.
+ */
+static struct komeda_component_state *
+komeda_component_get_state_and_set_user(struct komeda_component *c,
+ struct drm_atomic_state *state,
+ void *user,
+ struct drm_crtc *crtc)
+{
+ struct komeda_pipeline_state *pipe_st;
+ struct komeda_component_state *st;
+
+ /* First check if the pipeline is available */
+ pipe_st = komeda_pipeline_get_state_and_set_crtc(c->pipeline,
+ state, crtc);
+ if (IS_ERR(pipe_st))
+ return ERR_CAST(pipe_st);
+
+ st = komeda_component_get_state(c, state);
+ if (IS_ERR(st))
+ return st;
+
+ /* check if the component has been occupied */
+ if (is_switching_user(user, st->binding_user)) {
+ DRM_DEBUG_ATOMIC("required %s is busy.\n", c->name);
+ return ERR_PTR(-EBUSY);
+ }
+
+ st->binding_user = user;
+ /* mark the component as active if user is valid */
+ if (st->binding_user)
+ pipe_st->active_comps |= BIT(c->id);
+
+ return st;
+}
+
+static void
+komeda_component_add_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ WARN_ON((idx < 0 || idx >= c->max_active_inputs));
+
+ /* inputs[i] is only valid when it is active. So if an input[i] is a
+ * newly enabled input which switches from disabled to enabled, then the
+ * old inputs[i] is undefined (NOT zeroed); we cannot rely on memcmp,
+ * but must directly mark it changed.
+ */
+ if (!has_bit(idx, state->affected_inputs) ||
+ memcmp(&state->inputs[idx], input, sizeof(*input))) {
+ memcpy(&state->inputs[idx], input, sizeof(*input));
+ state->changed_active_inputs |= BIT(idx);
+ }
+ state->active_inputs |= BIT(idx);
+ state->affected_inputs |= BIT(idx);
+}
+
+static int
+komeda_component_check_input(struct komeda_component_state *state,
+ struct komeda_component_output *input,
+ int idx)
+{
+ struct komeda_component *c = state->component;
+
+ if ((idx < 0) || (idx >= c->max_active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s invalid input id: %d.\n", c->name, idx);
+ return -EINVAL;
+ }
+
+ if (has_bit(idx, state->active_inputs)) {
+ DRM_DEBUG_ATOMIC("%s required input_id: %d has been occupied already.\n",
+ c->name, idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+komeda_component_set_output(struct komeda_component_output *output,
+ struct komeda_component *comp,
+ u8 output_port)
+{
+ output->component = comp;
+ output->output_port = output_port;
+}
+
+static int
+komeda_component_validate_private(struct komeda_component *c,
+ struct komeda_component_state *st)
+{
+ int err;
+
+ if (!c->funcs->validate)
+ return 0;
+
+ err = c->funcs->validate(c, st);
+ if (err)
+ DRM_DEBUG_ATOMIC("%s validate private failed.\n", c->name);
+
+ return err;
+}
+
+static int
+komeda_layer_check_cfg(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ if (!in_range(&layer->hsize_in, dflow->in_w)) {
+ DRM_DEBUG_ATOMIC("src_w: %d is out of range.\n", dflow->in_w);
+ return -EINVAL;
+ }
+
+ if (!in_range(&layer->vsize_in, dflow->in_h)) {
+ DRM_DEBUG_ATOMIC("src_h: %d is out of range.\n", dflow->in_h);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+komeda_layer_validate(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane_state *plane_st = &kplane_st->base;
+ struct drm_framebuffer *fb = plane_st->fb;
+ struct komeda_fb *kfb = to_kfb(fb);
+ struct komeda_component_state *c_st;
+ struct komeda_layer_state *st;
+ int i, err;
+
+ err = komeda_layer_check_cfg(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ c_st = komeda_component_get_state_and_set_user(&layer->base,
+ plane_st->state, plane_st->plane, plane_st->crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_layer_st(c_st);
+
+ st->rot = dflow->rot;
+ st->hsize = kfb->aligned_w;
+ st->vsize = kfb->aligned_h;
+
+ for (i = 0; i < fb->format->num_planes; i++)
+ st->addr[i] = komeda_fb_get_pixel_addr(kfb, dflow->in_x,
+ dflow->in_y, i);
+
+ err = komeda_component_validate_private(&layer->base, c_st);
+ if (err)
+ return err;
+
+ /* update the data flow for the next stage */
+ komeda_component_set_output(&dflow->input, &layer->base, 0);
+
+ return 0;
+}
+
+static void pipeline_composition_size(struct komeda_crtc_state *kcrtc_st,
+ u16 *hsize, u16 *vsize)
+{
+ struct drm_display_mode *m = &kcrtc_st->base.adjusted_mode;
+
+ if (hsize)
+ *hsize = m->hdisplay;
+ if (vsize)
+ *vsize = m->vdisplay;
+}
+
+static int
+komeda_compiz_set_input(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_component_state *c_st, *old_st;
+ struct komeda_compiz_input_cfg *cin;
+ u16 compiz_w, compiz_h;
+ int idx = dflow->blending_zorder;
+
+ pipeline_composition_size(kcrtc_st, &compiz_w, &compiz_h);
+ /* check display rect */
+ if ((dflow->out_x + dflow->out_w > compiz_w) ||
+ (dflow->out_y + dflow->out_h > compiz_h) ||
+ dflow->out_w == 0 || dflow->out_h == 0) {
+ DRM_DEBUG_ATOMIC("invalid disp rect [x=%d, y=%d, w=%d, h=%d]\n",
+ dflow->out_x, dflow->out_y,
+ dflow->out_w, dflow->out_h);
+ return -EINVAL;
+ }
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base, drm_st,
+ kcrtc_st->base.crtc, kcrtc_st->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ if (komeda_component_check_input(c_st, &dflow->input, idx))
+ return -EINVAL;
+
+ cin = &(to_compiz_st(c_st)->cins[idx]);
+
+ cin->hsize = dflow->out_w;
+ cin->vsize = dflow->out_h;
+ cin->hoffset = dflow->out_x;
+ cin->voffset = dflow->out_y;
+ cin->pixel_blend_mode = dflow->pixel_blend_mode;
+ cin->layer_alpha = dflow->layer_alpha;
+
+ old_st = komeda_component_get_old_state(&compiz->base, drm_st);
+ WARN_ON(!old_st);
+
+ /* compare with old to check if this input has been changed */
+ if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+ c_st->changed_active_inputs |= BIT(idx);
+
+ komeda_component_add_input(c_st, &dflow->input, idx);
+
+ return 0;
+}
+
+static int
+komeda_compiz_validate(struct komeda_compiz *compiz,
+ struct komeda_crtc_state *state,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct komeda_component_state *c_st;
+ struct komeda_compiz_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&compiz->base,
+ state->base.state, state->base.crtc, state->base.crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_compiz_st(c_st);
+
+ pipeline_composition_size(state, &st->hsize, &st->vsize);
+
+ komeda_component_set_output(&dflow->input, &compiz->base, 0);
+
+ /* the compiz output dflow will be fed to the next pipeline stage, so
+ * prepare the data flow configuration for the next stage
+ */
+ if (dflow) {
+ dflow->in_w = st->hsize;
+ dflow->in_h = st->vsize;
+ dflow->out_w = dflow->in_w;
+ dflow->out_h = dflow->in_h;
+ /* the output data of compiz doesn't have alpha; it can only be used
+ * as the bottom layer when blending it with master layers
+ */
+ dflow->pixel_blend_mode = DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = 0xFF;
+ dflow->blending_zorder = 0;
+ }
+
+ return 0;
+}
+
+static int
+komeda_improc_validate(struct komeda_improc *improc,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_component_state *c_st;
+ struct komeda_improc_state *st;
+
+ c_st = komeda_component_get_state_and_set_user(&improc->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_improc_st(c_st);
+
+ st->hsize = dflow->in_w;
+ st->vsize = dflow->in_h;
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &improc->base, 0);
+
+ return 0;
+}
+
+static int
+komeda_timing_ctrlr_validate(struct komeda_timing_ctrlr *ctrlr,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_crtc *crtc = kcrtc_st->base.crtc;
+ struct komeda_timing_ctrlr_state *st;
+ struct komeda_component_state *c_st;
+
+ c_st = komeda_component_get_state_and_set_user(&ctrlr->base,
+ kcrtc_st->base.state, crtc, crtc);
+ if (IS_ERR(c_st))
+ return PTR_ERR(c_st);
+
+ st = to_ctrlr_st(c_st);
+
+ komeda_component_add_input(&st->base, &dflow->input, 0);
+ komeda_component_set_output(&dflow->input, &ctrlr->base, 0);
+
+ return 0;
+}
+
+int komeda_build_layer_data_flow(struct komeda_layer *layer,
+ struct komeda_plane_state *kplane_st,
+ struct komeda_crtc_state *kcrtc_st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_plane *plane = kplane_st->base.plane;
+ struct komeda_pipeline *pipe = layer->base.pipeline;
+ int err;
+
+ DRM_DEBUG_ATOMIC("%s handling [PLANE:%d:%s]: src[x/y:%d/%d, w/h:%d/%d] disp[x/y:%d/%d, w/h:%d/%d]",
+ layer->base.name, plane->base.id, plane->name,
+ dflow->in_x, dflow->in_y, dflow->in_w, dflow->in_h,
+ dflow->out_x, dflow->out_y, dflow->out_w, dflow->out_h);
+
+ err = komeda_layer_validate(layer, kplane_st, dflow);
+ if (err)
+ return err;
+
+ err = komeda_compiz_set_input(pipe->compiz, kcrtc_st, dflow);
+
+ return err;
+}
+
+/* build display output data flow, the data path is:
+ * compiz -> improc -> timing_ctrlr
+ */
+int komeda_build_display_data_flow(struct komeda_crtc *kcrtc,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct komeda_pipeline *master = kcrtc->master;
+ struct komeda_data_flow_cfg m_dflow; /* master data flow */
+ int err;
+
+ memset(&m_dflow, 0, sizeof(m_dflow));
+
+ err = komeda_compiz_validate(master->compiz, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_improc_validate(master->improc, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_validate(master->ctrlr, kcrtc_st, &m_dflow);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void
+komeda_pipeline_unbound_components(struct komeda_pipeline *pipe,
+ struct komeda_pipeline_state *new)
+{
+ struct drm_atomic_state *drm_st = new->obj.state;
+ struct komeda_pipeline_state *old = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_component_state *c_st;
+ struct komeda_component *c;
+ u32 disabling_comps, id;
+
+ WARN_ON(!old);
+
+ disabling_comps = (~new->active_comps) & old->active_comps;
+
+ /* unbind all disabling components */
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = komeda_component_get_state_and_set_user(c,
+ drm_st, NULL, new->crtc);
+ WARN_ON(IS_ERR(c_st));
+ }
+}
+
+/* release unclaimed pipeline resources */
+int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe,
+ struct komeda_crtc_state *kcrtc_st)
+{
+ struct drm_atomic_state *drm_st = kcrtc_st->base.state;
+ struct komeda_pipeline_state *st;
+
+ /* ignore the pipeline which is not affected */
+ if (!pipe || !has_bit(pipe->id, kcrtc_st->affected_pipes))
+ return 0;
+
+ if (has_bit(pipe->id, kcrtc_st->active_pipes))
+ st = komeda_pipeline_get_new_state(pipe, drm_st);
+ else
+ st = komeda_pipeline_get_state_and_set_crtc(pipe, drm_st, NULL);
+
+ if (WARN_ON(IS_ERR_OR_NULL(st)))
+ return -EINVAL;
+
+ komeda_pipeline_unbound_components(pipe, st);
+
+ return 0;
+}
+
+void komeda_pipeline_disable(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ struct komeda_component_state *c_st;
+ u32 id, disabling_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ disabling_comps = old->active_comps;
+ DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n",
+ pipe->id, disabling_comps);
+
+ dp_for_each_set_bit(id, disabling_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+ c_st = priv_to_comp_st(c->obj.state);
+
+ /*
+ * If we disabled a component then all active_inputs should be
+ * put in the list of changed_active_inputs, so they get
+ * re-enabled.
+ * This usually happens during a modeset when the pipeline is
+ * first disabled and then the actual state gets committed
+ * again.
+ */
+ c_st->changed_active_inputs |= c_st->active_inputs;
+
+ c->funcs->disable(c);
+ }
+}
+
+void komeda_pipeline_update(struct komeda_pipeline *pipe,
+ struct drm_atomic_state *old_state)
+{
+ struct komeda_pipeline_state *new = priv_to_pipe_st(pipe->obj.state);
+ struct komeda_pipeline_state *old;
+ struct komeda_component *c;
+ u32 id, changed_comps = 0;
+
+ old = komeda_pipeline_get_old_state(pipe, old_state);
+
+ changed_comps = new->active_comps | old->active_comps;
+
+ DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, changed: 0x%x.\n",
+ pipe->id, new->active_comps, changed_comps);
+
+ dp_for_each_set_bit(id, changed_comps) {
+ c = komeda_pipeline_get_component(pipe, id);
+
+ if (new->active_comps & BIT(c->id))
+ c->funcs->update(c, priv_to_comp_st(c->obj.state));
+ else
+ c->funcs->disable(c);
+ }
+}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
index 0a4953a9a909..07ed0cc1bc44 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_plane.c
@@ -7,10 +7,96 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+#include <drm/drm_print.h>
#include "komeda_dev.h"
#include "komeda_kms.h"
+static int
+komeda_plane_init_data_flow(struct drm_plane_state *st,
+ struct komeda_data_flow_cfg *dflow)
+{
+ struct drm_framebuffer *fb = st->fb;
+
+ memset(dflow, 0, sizeof(*dflow));
+
+ dflow->blending_zorder = st->zpos;
+
+ /* if format doesn't have alpha, fix blend mode to PIXEL_NONE */
+ dflow->pixel_blend_mode = fb->format->has_alpha ?
+ st->pixel_blend_mode : DRM_MODE_BLEND_PIXEL_NONE;
+ dflow->layer_alpha = st->alpha >> 8;
+
+ dflow->out_x = st->crtc_x;
+ dflow->out_y = st->crtc_y;
+ dflow->out_w = st->crtc_w;
+ dflow->out_h = st->crtc_h;
+
+ dflow->in_x = st->src_x >> 16;
+ dflow->in_y = st->src_y >> 16;
+ dflow->in_w = st->src_w >> 16;
+ dflow->in_h = st->src_h >> 16;
+
+ return 0;
+}
+
+/**
+ * komeda_plane_atomic_check - build input data flow
+ * @plane: DRM plane
+ * @state: the plane state object
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+static int
+komeda_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct komeda_plane *kplane = to_kplane(plane);
+ struct komeda_plane_state *kplane_st = to_kplane_st(state);
+ struct komeda_layer *layer = kplane->layer;
+ struct drm_crtc_state *crtc_st;
+ struct komeda_crtc *kcrtc;
+ struct komeda_crtc_state *kcrtc_st;
+ struct komeda_data_flow_cfg dflow;
+ int err;
+
+ if (!state->crtc || !state->fb)
+ return 0;
+
+ crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (!crtc_st->enable) {
+ DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
+ return -EINVAL;
+ }
+
+ /* crtc is inactive, skip the resource assignment */
+ if (!crtc_st->active)
+ return 0;
+
+ kcrtc = to_kcrtc(state->crtc);
+ kcrtc_st = to_kcrtc_st(crtc_st);
+
+ err = komeda_plane_init_data_flow(state, &dflow);
+ if (err)
+ return err;
+
+ err = komeda_build_layer_data_flow(layer, kplane_st, kcrtc_st, &dflow);
+
+ return err;
+}
+
+/* The plane doesn't represent real HW, so there is no HW update for the
+ * plane itself; komeda handles all the HW updates in crtc->atomic_flush.
+ */
+static void
+komeda_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+}
+
static const struct drm_plane_helper_funcs komeda_plane_helper_funcs = {
+ .atomic_check = komeda_plane_atomic_check,
+ .atomic_update = komeda_plane_atomic_update,
};
static void komeda_plane_destroy(struct drm_plane *plane)
@@ -20,7 +106,60 @@ static void komeda_plane_destroy(struct drm_plane *plane)
kfree(to_kplane(plane));
}
+static void komeda_plane_reset(struct drm_plane *plane)
+{
+ struct komeda_plane_state *state;
+ struct komeda_plane *kplane = to_kplane(plane);
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+
+ kfree(plane->state);
+ plane->state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->base.pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+ state->base.alpha = DRM_BLEND_ALPHA_OPAQUE;
+ state->base.zpos = kplane->layer->base.id;
+ plane->state = &state->base;
+ plane->state->plane = plane;
+ }
+}
+
+static struct drm_plane_state *
+komeda_plane_atomic_duplicate_state(struct drm_plane *plane)
+{
+ struct komeda_plane_state *new;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (!new)
+ return NULL;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &new->base);
+
+ return &new->base;
+}
+
+static void
+komeda_plane_atomic_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(to_kplane_st(state));
+}
+
static const struct drm_plane_funcs komeda_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = komeda_plane_destroy,
+ .reset = komeda_plane_reset,
+ .atomic_duplicate_state = komeda_plane_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_plane_atomic_destroy_state,
};
/* for komeda, which is pipeline can be share between crtcs */
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
index f1c9e3fefa86..a54878cbd6e4 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_private_obj.c
@@ -7,6 +7,188 @@
#include "komeda_dev.h"
#include "komeda_kms.h"
+static void
+komeda_component_state_reset(struct komeda_component_state *st)
+{
+ st->binding_user = NULL;
+ st->affected_inputs = st->active_inputs;
+ st->active_inputs = 0;
+ st->changed_active_inputs = 0;
+}
+
+static struct drm_private_state *
+komeda_layer_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_layer_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_layer_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct komeda_layer_state *st = to_layer_st(priv_to_comp_st(state));
+
+ kfree(st);
+}
+
+static const struct drm_private_state_funcs komeda_layer_obj_funcs = {
+ .atomic_duplicate_state = komeda_layer_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_layer_atomic_destroy_state,
+};
+
+static int komeda_layer_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_layer *layer)
+{
+ struct komeda_layer_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &layer->base;
+ drm_atomic_private_obj_init(&kms->base, &layer->base.obj, &st->base.obj,
+ &komeda_layer_obj_funcs);
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_compiz_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_compiz_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_compiz_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_compiz_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_compiz_obj_funcs = {
+ .atomic_duplicate_state = komeda_compiz_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_compiz_atomic_destroy_state,
+};
+
+static int komeda_compiz_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_compiz *compiz)
+{
+ struct komeda_compiz_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &compiz->base;
+ drm_atomic_private_obj_init(&kms->base, &compiz->base.obj, &st->base.obj,
+ &komeda_compiz_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_improc_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_improc_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_improc_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_improc_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_improc_obj_funcs = {
+ .atomic_duplicate_state = komeda_improc_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_improc_atomic_destroy_state,
+};
+
+static int komeda_improc_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_improc *improc)
+{
+ struct komeda_improc_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &improc->base;
+ drm_atomic_private_obj_init(&kms->base, &improc->base.obj, &st->base.obj,
+ &komeda_improc_obj_funcs);
+
+ return 0;
+}
+
+static struct drm_private_state *
+komeda_timing_ctrlr_atomic_duplicate_state(struct drm_private_obj *obj)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kmemdup(obj->state, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return NULL;
+
+ komeda_component_state_reset(&st->base);
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &st->base.obj);
+
+ return &st->base.obj;
+}
+
+static void
+komeda_timing_ctrlr_atomic_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ kfree(to_ctrlr_st(priv_to_comp_st(state)));
+}
+
+static const struct drm_private_state_funcs komeda_timing_ctrlr_obj_funcs = {
+ .atomic_duplicate_state = komeda_timing_ctrlr_atomic_duplicate_state,
+ .atomic_destroy_state = komeda_timing_ctrlr_atomic_destroy_state,
+};
+
+static int komeda_timing_ctrlr_obj_add(struct komeda_kms_dev *kms,
+ struct komeda_timing_ctrlr *ctrlr)
+{
+ struct komeda_timing_ctrlr_state *st;
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ st->base.component = &ctrlr->base;
+ drm_atomic_private_obj_init(&kms->base, &ctrlr->base.obj, &st->base.obj,
+ &komeda_timing_ctrlr_obj_funcs);
+
+ return 0;
+}
+
static struct drm_private_state *
komeda_pipeline_atomic_duplicate_state(struct drm_private_obj *obj)
{
@@ -55,7 +237,7 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
struct komeda_dev *mdev)
{
struct komeda_pipeline *pipe;
- int i, err;
+ int i, j, err;
for (i = 0; i < mdev->n_pipelines; i++) {
pipe = mdev->pipelines[i];
@@ -64,25 +246,33 @@ int komeda_kms_add_private_objs(struct komeda_kms_dev *kms,
if (err)
return err;
- /* Add component */
+ for (j = 0; j < pipe->n_layers; j++) {
+ err = komeda_layer_obj_add(kms, pipe->layers[j]);
+ if (err)
+ return err;
+ }
+
+ err = komeda_compiz_obj_add(kms, pipe->compiz);
+ if (err)
+ return err;
+
+ err = komeda_improc_obj_add(kms, pipe->improc);
+ if (err)
+ return err;
+
+ err = komeda_timing_ctrlr_obj_add(kms, pipe->ctrlr);
+ if (err)
+ return err;
}
return 0;
}
-void komeda_kms_cleanup_private_objs(struct komeda_dev *mdev)
+void komeda_kms_cleanup_private_objs(struct komeda_kms_dev *kms)
{
- struct komeda_pipeline *pipe;
- struct komeda_component *c;
- int i, id;
+ struct drm_mode_config *config = &kms->base.mode_config;
+ struct drm_private_obj *obj, *next;
- for (i = 0; i < mdev->n_pipelines; i++) {
- pipe = mdev->pipelines[i];
- dp_for_each_set_bit(id, pipe->avail_comps) {
- c = komeda_pipeline_get_component(pipe, id);
-
- drm_atomic_private_obj_fini(&c->obj);
- }
- drm_atomic_private_obj_fini(&pipe->obj);
- }
+ list_for_each_entry_safe(obj, next, &config->privobj_list, head)
+ drm_atomic_private_obj_fini(obj);
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index ab50ad06e271..21725c9b9f5e 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -264,37 +264,17 @@ static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
- const struct drm_format_info *info;
-
- if ((mode_cmd->modifier[0] >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
- DRM_DEBUG_KMS("Unknown modifier (not Arm)\n");
- return false;
- }
-
- if (mode_cmd->modifier[0] &
- ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
- DRM_DEBUG_KMS("Unsupported modifiers\n");
- return false;
- }
-
- info = drm_get_format_info(dev, mode_cmd);
- if (!info) {
- DRM_DEBUG_KMS("Unable to get the format information\n");
+ if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]) == false)
return false;
- }
-
- if (info->num_planes != 1) {
- DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
- return false;
- }
if (mode_cmd->offsets[0] != 0) {
DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
return false;
}
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
return false;
@@ -318,9 +298,10 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
struct drm_gem_object *objs = NULL;
u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
u32 afbc_superblock_width = 0, afbc_size = 0;
+ int bpp = 0;
- switch (mode_cmd->modifier[0] & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) {
- case AFBC_FORMAT_MOD_BLOCK_SIZE_16x16:
+ switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
+ case AFBC_SIZE_16X16:
afbc_superblock_height = 16;
afbc_superblock_width = 16;
break;
@@ -334,15 +315,19 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
n_superblocks = (mode_cmd->width / afbc_superblock_width) *
(mode_cmd->height / afbc_superblock_height);
- afbc_superblock_size = info->cpp[0] * afbc_superblock_width *
- afbc_superblock_height;
+ bpp = malidp_format_get_bpp(info->format);
+
+ afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
+ / BITS_PER_BYTE;
afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);
- if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) {
- DRM_DEBUG_KMS("Invalid value of pitch (=%u) should be same as width (=%u) * cpp (=%u)\n",
- mode_cmd->pitches[0], mode_cmd->width, info->cpp[0]);
+ if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
+ DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
+ "should be same as width (=%u) * bpp (=%u)\n",
+ (mode_cmd->pitches[0] * BITS_PER_BYTE),
+ mode_cmd->width, bpp);
return false;
}
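The check above amounts to: one header per superblock (the headers aligned as a group), plus one aligned payload per superblock, and a pitch that matches width * bpp exactly. A standalone sketch of the same arithmetic with assumed values, a 640x480 DRM_FORMAT_YUV420_8BIT buffer at 12 bpp, with AFBC_HEADER_SIZE and AFBC_SUPERBLK_ALIGNMENT assumed to be 16 and 128 bytes:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* assumed example: 640x480 DRM_FORMAT_YUV420_8BIT, 16x16 superblocks */
	unsigned int width = 640, height = 480, bpp = 12;
	unsigned int sb_w = 16, sb_h = 16;
	unsigned int hdr_size = 16, sb_align = 128;	/* assumed AFBC constants */
	unsigned int n_superblocks = (width / sb_w) * (height / sb_h);	/* 1200 */
	unsigned int sb_payload = (bpp * sb_w * sb_h) / 8;		/* 384 bytes */
	unsigned int afbc_size;

	/* all the headers first, aligned as a group, then one aligned payload each */
	afbc_size = ALIGN_UP(n_superblocks * hdr_size, sb_align);	/* 19200 */
	afbc_size += n_superblocks * ALIGN_UP(sb_payload, sb_align);	/* 480000 total */

	/* the single plane's pitch must satisfy pitch * 8 == width * bpp */
	printf("superblocks=%u payload=%u total=%u bytes, pitch=%u\n",
	       n_superblocks, sb_payload, afbc_size, width * bpp / 8);
	return 0;
}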
@@ -406,6 +391,7 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.max_height = hwdev->max_line_size;
drm->mode_config.funcs = &malidp_mode_config_funcs;
drm->mode_config.helper_private = &malidp_mode_config_helpers;
+ drm->mode_config.allow_fb_modifiers = true;
ret = malidp_crtc_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
index b76c86f18a56..019a682b2716 100644
--- a/drivers/gpu/drm/arm/malidp_drv.h
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -90,6 +90,12 @@ struct malidp_crtc_state {
int malidp_de_planes_init(struct drm_device *drm);
int malidp_crtc_init(struct drm_device *drm);
+bool malidp_hw_format_is_linear_only(u32 format);
+bool malidp_hw_format_is_afbc_only(u32 format);
+
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier);
+
#ifdef CONFIG_DEBUG_FS
void malidp_error(struct malidp_drm *malidp,
struct malidp_error_stats *error_stats, u32 status,
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index b9bed1138fa3..8df12e9a33bb 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -49,11 +49,19 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
{ DRM_FORMAT_NV12, DE_VIDEO1 | SE_MEMWRITE, 14 },
{ DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1, 16 },
+ /* These are supported with AFBC only */
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1, 14 },
+ { DRM_FORMAT_VUY888, DE_VIDEO1, 16 },
+ { DRM_FORMAT_VUY101010, DE_VIDEO1, 17 },
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1, 18 }
};
#define MALIDP_ID(__group, __format) \
((((__group) & 0x7) << 3) | ((__format) & 0x7))
+#define AFBC_YUV_422_FORMAT_ID MALIDP_ID(5, 1)
+
#define MALIDP_COMMON_FORMATS \
/* fourcc, layers supporting the format, internal id */ \
{ DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(0, 0) }, \
@@ -74,11 +82,25 @@ static const struct malidp_format_id malidp500_de_formats[] = {
{ DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
{ DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
{ DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XYUV8888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) },\
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY888, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 0) }, \
{ DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
+ /* This is only supported with linear modifier */ \
{ DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
{ DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2 | SE_MEMWRITE, MALIDP_ID(5, 6) }, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_8BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
{ DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }, \
- { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}
+ /* This is only supported with linear modifier */ \
+ { DRM_FORMAT_XVYU2101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_VUY101010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 0)}, \
+ { DRM_FORMAT_X0L2, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 6)}, \
+ /* This is only supported with AFBC modifier */ \
+ { DRM_FORMAT_YUV420_10BIT, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}, \
+ { DRM_FORMAT_P010, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(6, 7)}
static const struct malidp_format_id malidp550_de_formats[] = {
MALIDP_COMMON_FORMATS,
@@ -94,11 +116,14 @@ static const struct malidp_layer malidp500_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP500_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP500_DE_LV_AD_CTRL },
{ DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG1_AD_CTRL },
{ DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP500_DE_LG2_AD_CTRL },
};
static const struct malidp_layer malidp550_layers[] = {
@@ -106,13 +131,16 @@ static const struct malidp_layer malidp550_layers[] = {
* yuv2rgb matrix offset, mmu control register offset, rotation_features
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
- MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY },
+ MALIDP_DE_LG_STRIDE, 0, 0, ROTATE_ANY,
+ MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
- MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY },
+ MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB, 0, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
- MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE },
+ MALIDP550_DE_LS_R1_STRIDE, 0, 0, ROTATE_NONE, 0 },
};
static const struct malidp_layer malidp650_layers[] = {
@@ -122,16 +150,44 @@ static const struct malidp_layer malidp650_layers[] = {
*/
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV1_AD_CTRL },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE,
MALIDP_DE_LG_STRIDE, 0, MALIDP650_DE_LG_MMU_CTRL,
- ROTATE_COMPRESSED },
+ ROTATE_COMPRESSED, MALIDP550_DE_LG_AD_CTRL },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE,
MALIDP_DE_LV_STRIDE0, MALIDP550_LV_YUV2RGB,
- MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY },
+ MALIDP650_DE_LV_MMU_CTRL, ROTATE_ANY,
+ MALIDP550_DE_LV2_AD_CTRL },
{ DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE,
MALIDP550_DE_LS_R1_STRIDE, 0, MALIDP650_DE_LS_MMU_CTRL,
- ROTATE_NONE },
+ ROTATE_NONE, 0 },
+};
+
+const u64 malidp_format_modifiers[] = {
+ /* All RGB formats (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR),
+
+ /* All RGB formats > 16bpp (except XRGB, RGBX, XBGR, BGRX) */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_YTR | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* All 8 or 10 bit YUV 444 formats. */
+ /* In DP550, the 10 bit YUV 420 format is also supported */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE | AFBC_SPLIT),
+
+ /* YUV 420, 422 P1 8 bit and YUV 444 8 bit/10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16),
+
+ /* YUV 420, 422 P1 8, 10 bit formats */
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR | AFBC_SPARSE),
+ DRM_FORMAT_MOD_ARM_AFBC(AFBC_SIZE_16X16 | AFBC_CBR),
+
+ /* All formats */
+ DRM_FORMAT_MOD_LINEAR,
+
+ DRM_FORMAT_MOD_INVALID
};
#define SE_N_SCALING_COEFFS 96
@@ -324,14 +380,39 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+int malidp_format_get_bpp(u32 fmt)
+{
+ int bpp = drm_format_plane_cpp(fmt, 0) * 8;
+
+ if (bpp == 0) {
+ switch (fmt) {
+ case DRM_FORMAT_VUY101010:
+ bpp = 30;
+ break;
+ case DRM_FORMAT_YUV420_10BIT:
+ bpp = 15;
+ break;
+ case DRM_FORMAT_YUV420_8BIT:
+ bpp = 12;
+ break;
+ default:
+ bpp = 0;
+ }
+ }
+
+ return bpp;
+}
+
+static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
{
/*
* Each layer needs enough rotation memory to fit 8 lines
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
- return w * drm_format_plane_cpp(fmt, 0) * 8;
+ int bpp = malidp_format_get_bpp(fmt);
+
+ return w * bpp;
}
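In other words, 8 lines of pixel data is w * (bpp / 8) * 8, which collapses to w * bpp bytes once bpp is expressed in bits per pixel, so switching from drm_format_plane_cpp() to malidp_format_get_bpp() keeps the result identical for the existing formats while also covering the AFBC-only ones. A minimal sketch with an assumed 1920-pixel-wide RGB565 layer:

#include <stdio.h>

int main(void)
{
	unsigned int w = 1920;				/* assumed rotated width */
	unsigned int bpp = 16;				/* e.g. DRM_FORMAT_RGB565 */
	unsigned int bytes_per_line = w * bpp / 8;	/* 3840 */
	unsigned int rotmem = bytes_per_line * 8;	/* 30720 == w * bpp */

	printf("rotation memory for 8 lines: %u bytes\n", rotmem);
	return 0;
}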
static void malidp500_se_write_pp_coefftab(struct malidp_hw_device *hwdev,
@@ -609,9 +690,9 @@ static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *
malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
}
-static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
+static int malidpx50_get_bytes_per_column(u32 fmt)
{
- u32 bytes_per_col;
+ u32 bytes_per_column;
switch (fmt) {
/* 8 lines at 4 bytes per pixel */
@@ -637,19 +718,77 @@ static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_X0L0:
- case DRM_FORMAT_X0L2:
- bytes_per_col = 32;
+ bytes_per_column = 32;
break;
/* 16 lines at 1.5 bytes per pixel */
case DRM_FORMAT_NV12:
case DRM_FORMAT_YUV420:
- bytes_per_col = 24;
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_VUY888:
+ /* 16 lines at 12 bits per pixel */
+ case DRM_FORMAT_YUV420_8BIT:
+ /* 8 lines at 3 bytes per pixel */
+ case DRM_FORMAT_P010:
+ bytes_per_column = 24;
+ break;
+ /* 8 lines at 30 bits per pixel */
+ case DRM_FORMAT_VUY101010:
+ /* 16 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 30;
break;
default:
return -EINVAL;
}
- return w * bytes_per_col;
+ return bytes_per_column;
+}
+
+static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 8 lines at 15 bits per pixel */
+ case DRM_FORMAT_YUV420_10BIT:
+ bytes_per_column = 15;
+ break;
+ /* Uncompressed YUV 420 10 bit single plane cannot be rotated */
+ case DRM_FORMAT_X0L2:
+ if (has_modifier)
+ bytes_per_column = 8;
+ else
+ return -EINVAL;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
+}
+
+static int malidp650_rotmem_required(struct malidp_hw_device *hwdev, u16 w,
+ u16 h, u32 fmt, bool has_modifier)
+{
+ int bytes_per_column = 0;
+
+ switch (fmt) {
+ /* 16 lines at 2 bytes per pixel */
+ case DRM_FORMAT_X0L2:
+ bytes_per_column = 32;
+ break;
+ default:
+ bytes_per_column = malidpx50_get_bytes_per_column(fmt);
+ }
+
+ if (bytes_per_column == -EINVAL)
+ return bytes_per_column;
+
+ return w * bytes_per_column;
}
static int malidp550_se_set_scaling_coeffs(struct malidp_hw_device *hwdev,
@@ -838,7 +977,10 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp550_layers),
.layers = malidp550_layers,
.de_irq_map = {
@@ -884,7 +1026,9 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.se_base = MALIDP550_SE_BASE,
.dc_base = MALIDP550_DC_BASE,
.out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
- .features = MALIDP_REGMAP_HAS_CLEARIRQ,
+ .features = MALIDP_REGMAP_HAS_CLEARIRQ |
+ MALIDP_DEVICE_AFBC_SUPPORT_SPLIT |
+ MALIDP_DEVICE_AFBC_YUYV_USE_422_P2,
.n_layers = ARRAY_SIZE(malidp650_layers),
.layers = malidp650_layers,
.de_irq_map = {
@@ -923,7 +1067,7 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.in_config_mode = malidp550_in_config_mode,
.set_config_valid = malidp550_set_config_valid,
.modeset = malidp550_modeset,
- .rotmem_required = malidp550_rotmem_required,
+ .rotmem_required = malidp650_rotmem_required,
.se_set_scaling_coeffs = malidp550_se_set_scaling_coeffs,
.se_calc_mclk = malidp550_se_calc_mclk,
.enable_memwrite = malidp550_enable_memwrite,
@@ -933,19 +1077,72 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
};
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format)
+ u8 layer_id, u32 format, bool has_modifier)
{
unsigned int i;
for (i = 0; i < map->n_pixel_formats; i++) {
if (((map->pixel_formats[i].layer & layer_id) == layer_id) &&
- (map->pixel_formats[i].format == format))
- return map->pixel_formats[i].id;
+ (map->pixel_formats[i].format == format)) {
+ /*
+ * On some DP550 and DP650 variants, DRM_FORMAT_YUYV with an
+ * AFBC modifier uses a different h/w format id than plain
+ * (linear) DRM_FORMAT_YUYV.
+ */
+ if (format == DRM_FORMAT_YUYV &&
+ (has_modifier) &&
+ (map->features & MALIDP_DEVICE_AFBC_YUYV_USE_422_P2))
+ return AFBC_YUV_422_FORMAT_ID;
+ else
+ return map->pixel_formats[i].id;
+ }
}
return MALIDP_INVALID_FORMAT_ID;
}
+bool malidp_hw_format_is_linear_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_XYUV8888:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_X0L2:
+ case DRM_FORMAT_X0L0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool malidp_hw_format_is_afbc_only(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_VUY888:
+ case DRM_FORMAT_VUY101010:
+ case DRM_FORMAT_YUV420_8BIT:
+ case DRM_FORMAT_YUV420_10BIT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
{
u32 base = malidp_get_block_base(hwdev, block);
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index 40155e2ea9d9..207c3ce52f1a 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -70,6 +70,8 @@ struct malidp_layer {
s16 yuv2rgb_offset; /* offset to the YUV->RGB matrix entries */
u16 mmu_ctrl_offset; /* offset to the MMU control register */
enum rotation_features rot; /* type of rotation supported */
+ /* address offset for the AFBC decoder registers */
+ u16 afbc_decoder_offset;
};
enum malidp_scaling_coeff_set {
@@ -93,7 +95,10 @@ struct malidp_se_config {
};
/* regmap features */
-#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
+#define MALIDP_REGMAP_HAS_CLEARIRQ BIT(0)
+#define MALIDP_DEVICE_AFBC_SUPPORT_SPLIT BIT(1)
+#define MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT BIT(2)
+#define MALIDP_DEVICE_AFBC_YUYV_USE_422_P2 BIT(3)
struct malidp_hw_regmap {
/* address offset of the DE register bank */
@@ -179,7 +184,8 @@ struct malidp_hw {
* Calculate the required rotation memory given the active area
* and the buffer format.
*/
- int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
+ int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h,
+ u32 fmt, bool has_modifier);
int (*se_set_scaling_coeffs)(struct malidp_hw_device *hwdev,
struct malidp_se_config *se_config,
@@ -319,7 +325,9 @@ int malidp_se_irq_init(struct drm_device *drm, int irq);
void malidp_se_irq_fini(struct malidp_hw_device *hwdev);
u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
- u8 layer_id, u32 format);
+ u8 layer_id, u32 format, bool has_modifier);
+
+int malidp_format_get_bpp(u32 fmt);
static inline u8 malidp_hw_get_pitch_align(struct malidp_hw_device *hwdev, bool rotated)
{
@@ -388,9 +396,18 @@ static inline void malidp_se_set_enh_coeffs(struct malidp_hw_device *hwdev)
#define MALIDP_GAMMA_LUT_SIZE 4096
-#define AFBC_MOD_VALID_BITS (AFBC_FORMAT_MOD_BLOCK_SIZE_MASK | \
- AFBC_FORMAT_MOD_YTR | AFBC_FORMAT_MOD_SPLIT | \
- AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_CBR | \
- AFBC_FORMAT_MOD_TILED | AFBC_FORMAT_MOD_SC)
+#define AFBC_SIZE_MASK AFBC_FORMAT_MOD_BLOCK_SIZE_MASK
+#define AFBC_SIZE_16X16 AFBC_FORMAT_MOD_BLOCK_SIZE_16x16
+#define AFBC_YTR AFBC_FORMAT_MOD_YTR
+#define AFBC_SPARSE AFBC_FORMAT_MOD_SPARSE
+#define AFBC_CBR AFBC_FORMAT_MOD_CBR
+#define AFBC_SPLIT AFBC_FORMAT_MOD_SPLIT
+#define AFBC_TILED AFBC_FORMAT_MOD_TILED
+#define AFBC_SC AFBC_FORMAT_MOD_SC
+
+#define AFBC_MOD_VALID_BITS (AFBC_SIZE_MASK | AFBC_YTR | AFBC_SPLIT | \
+ AFBC_SPARSE | AFBC_CBR | AFBC_TILED | AFBC_SC)
+
+extern const u64 malidp_format_modifiers[];
#endif /* __MALIDP_HW_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index 041a64dc7167..5f102bdaf841 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -141,9 +141,14 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
return -EINVAL;
}
+ if (fb->modifier) {
+ DRM_DEBUG_KMS("Writeback framebuffer does not support modifiers\n");
+ return -EINVAL;
+ }
+
mw_state->format =
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
- fb->format->format);
+ fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
struct drm_format_name_buf format_name;
@@ -252,8 +257,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
&mw_state->addrs[0],
mw_state->format);
- drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
- conn_state->writeback_job = NULL;
+ drm_writeback_queue_job(mw_conn, conn_state);
hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
mw_state->pitches, mw_state->n_planes,
fb->width, fb->height, mw_state->format,
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index c9a6d3e0cada..d42e0ea9a303 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -52,6 +52,8 @@
#define MALIDP550_LS_ENABLE 0x01c
#define MALIDP550_LS_R1_IN_SIZE 0x020
+#define MODIFIERS_COUNT_MAX 15
+
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
* for formats with 1- or 2-bit alpha channels.
@@ -145,6 +147,119 @@ static void malidp_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tmmu_prefetch_pgsize=%d\n", ms->mmu_prefetch_pgsize);
}
+bool malidp_format_mod_supported(struct drm_device *drm,
+ u32 format, u64 modifier)
+{
+ const struct drm_format_info *info;
+ const u64 *modifiers;
+ struct malidp_drm *malidp = drm->dev_private;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
+
+ if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID))
+ return false;
+
+ /* Some pixel formats are supported without any modifier */
+ if (modifier == DRM_FORMAT_MOD_LINEAR) {
+ /*
+ * However, AFBC-only pixel formats cannot use the linear
+ * modifier.
+ */
+ return !malidp_hw_format_is_afbc_only(format);
+ }
+
+ if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ DRM_ERROR("Unknown modifier (not Arm)\n");
+ return false;
+ }
+
+ if (modifier &
+ ~DRM_FORMAT_MOD_ARM_AFBC(AFBC_MOD_VALID_BITS)) {
+ DRM_DEBUG_KMS("Unsupported modifiers\n");
+ return false;
+ }
+
+ modifiers = malidp_format_modifiers;
+
+ /* SPLIT buffers must use SPARSE layout */
+ if (WARN_ON_ONCE((modifier & AFBC_SPLIT) && !(modifier & AFBC_SPARSE)))
+ return false;
+
+ /* CBR only applies to YUV formats, where YTR should be always 0 */
+ if (WARN_ON_ONCE((modifier & AFBC_CBR) && (modifier & AFBC_YTR)))
+ return false;
+
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (*modifiers == modifier)
+ break;
+
+ modifiers++;
+ }
+
+ /* return false, if the modifier was not found */
+ if (*modifiers == DRM_FORMAT_MOD_INVALID) {
+ DRM_DEBUG_KMS("Unsupported modifier\n");
+ return false;
+ }
+
+ info = drm_format_info(format);
+
+ if (info->num_planes != 1) {
+ DRM_DEBUG_KMS("AFBC buffers expect one plane\n");
+ return false;
+ }
+
+ if (malidp_hw_format_is_linear_only(format) == true) {
+ DRM_DEBUG_KMS("Given format (0x%x) is supported is linear mode only\n",
+ format);
+ return false;
+ }
+
+ /*
+ * RGB formats need to provide YTR modifier and YUV formats should not
+ * provide YTR modifier.
+ */
+ if (!(info->is_yuv) != !!(modifier & AFBC_FORMAT_MOD_YTR)) {
+ DRM_DEBUG_KMS("AFBC_FORMAT_MOD_YTR is %s for %s formats\n",
+ info->is_yuv ? "disallowed" : "mandatory",
+ info->is_yuv ? "YUV" : "RGB");
+ return false;
+ }
+
+ if (modifier & AFBC_SPLIT) {
+ if (!info->is_yuv) {
+ if (drm_format_plane_cpp(format, 0) <= 2) {
+ DRM_DEBUG_KMS("RGB formats <= 16bpp are not supported with SPLIT\n");
+ return false;
+ }
+ }
+
+ if ((drm_format_horz_chroma_subsampling(format) != 1) ||
+ (drm_format_vert_chroma_subsampling(format) != 1)) {
+ if (!(format == DRM_FORMAT_YUV420_10BIT &&
+ (map->features & MALIDP_DEVICE_AFBC_YUV_420_10_SUPPORT_SPLIT))) {
+ DRM_DEBUG_KMS("Formats which are sub-sampled should never be split\n");
+ return false;
+ }
+ }
+ }
+
+ if (modifier & AFBC_CBR) {
+ if ((drm_format_horz_chroma_subsampling(format) == 1) ||
+ (drm_format_vert_chroma_subsampling(format) == 1)) {
+ DRM_DEBUG_KMS("Formats which are not sub-sampled should not have CBR set\n");
+ return false;
+ }
+ }
+
+ return true;
+}
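Read together, the checks above give a recipe for a valid modifier: 16x16 superblocks, YTR set for RGB and clear for YUV, SPARSE whenever SPLIT is requested, and CBR only on sub-sampled YUV. A hypothetical userspace-side helper following that recipe, assuming only the AFBC macros from the uapi drm_fourcc.h:

#include <stdbool.h>
#include <stdint.h>
#include <drm/drm_fourcc.h>	/* assumed uapi header providing the AFBC macros */

/* hypothetical helper: build an AFBC modifier that passes the checks above */
uint64_t pick_afbc_modifier(bool is_yuv, bool want_split)
{
	uint64_t features = AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 | AFBC_FORMAT_MOD_SPARSE;

	if (!is_yuv)
		features |= AFBC_FORMAT_MOD_YTR;	/* mandatory for RGB, disallowed for YUV */
	if (want_split)
		features |= AFBC_FORMAT_MOD_SPLIT;	/* SPLIT buffers must also be SPARSE */

	return DRM_FORMAT_MOD_ARM_AFBC(features);
}

The driver still rejects SPLIT for RGB formats of 16 bpp or less and for sub-sampled YUV (except DRM_FORMAT_YUV420_10BIT on hardware with the dedicated feature bit), so a real allocator would also have to honor the plane's advertised format/modifier list.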
+
+static bool malidp_format_mod_supported_per_plane(struct drm_plane *plane,
+ u32 format, u64 modifier)
+{
+ return malidp_format_mod_supported(plane->dev, format, modifier);
+}
+
static const struct drm_plane_funcs malidp_de_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
@@ -153,6 +268,7 @@ static const struct drm_plane_funcs malidp_de_plane_funcs = {
.atomic_duplicate_state = malidp_duplicate_plane_state,
.atomic_destroy_state = malidp_destroy_plane_state,
.atomic_print_state = malidp_plane_atomic_print_state,
+ .format_mod_supported = malidp_format_mod_supported_per_plane,
};
static int malidp_se_check_scaling(struct malidp_plane *mp,
@@ -406,8 +522,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
fb = state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
- mp->layer->id,
- fb->format->format);
+ mp->layer->id, fb->format->format,
+ !!fb->modifier);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
@@ -415,8 +531,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
for (i = 0; i < ms->n_planes; i++) {
u8 alignment = malidp_hw_get_pitch_align(mp->hwdev, rotated);
- if ((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
- & (alignment - 1)) {
+ if (((fb->pitches[i] * drm_format_info_block_height(fb->format, i))
+ & (alignment - 1)) && !(fb->modifier)) {
DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n",
fb->pitches[i], i);
return -EINVAL;
@@ -469,13 +585,20 @@ static int malidp_de_plane_check(struct drm_plane *plane,
return -EINVAL;
}
+ /* SMART layer does not support AFBC */
+ if (mp->layer->id == DE_SMART && fb->modifier) {
+ DRM_ERROR("AFBC framebuffer not supported in SMART layer");
+ return -EINVAL;
+ }
+
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
state->crtc_h,
- fb->format->format);
+ fb->format->format,
+ !!(fb->modifier));
if (val < 0)
return val;
@@ -592,6 +715,83 @@ static void malidp_de_set_mmu_control(struct malidp_plane *mp,
mp->layer->base + mp->layer->mmu_ctrl_offset);
}
+static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
+ struct malidp_plane *mp,
+ int plane_index)
+{
+ dma_addr_t paddr;
+ u16 ptr;
+ struct drm_plane *plane = &mp->base;
+ bool afbc = fb->modifier ? true : false;
+
+ ptr = mp->layer->ptr + (plane_index << 4);
+
+ /*
+ * drm_fb_cma_get_gem_addr() alters the physical base address of the
+ * framebuffer as per the plane's src_x, src_y co-ordinates (ie to
+ * take care of source cropping).
+ * For AFBC, this is not needed as the cropping is handled by _AD_CROP_H
+ * and _AD_CROP_V registers.
+ */
+ if (!afbc) {
+ paddr = drm_fb_cma_get_gem_addr(fb, plane->state,
+ plane_index);
+ } else {
+ struct drm_gem_cma_object *obj;
+
+ obj = drm_fb_cma_get_gem_obj(fb, plane_index);
+
+ if (WARN_ON(!obj))
+ return;
+ paddr = obj->paddr;
+ }
+
+ malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+}
+
+static void malidp_de_set_plane_afbc(struct drm_plane *plane)
+{
+ struct malidp_plane *mp;
+ u32 src_w, src_h, val = 0, src_x, src_y;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ mp = to_malidp_plane(plane);
+
+ /* no afbc_decoder_offset means AFBC is not supported on this plane */
+ if (!mp->layer->afbc_decoder_offset)
+ return;
+
+ if (!fb->modifier) {
+ malidp_hw_write(mp->hwdev, 0, mp->layer->afbc_decoder_offset);
+ return;
+ }
+
+ /* convert src values from Q16 fixed point to integer */
+ src_w = plane->state->src_w >> 16;
+ src_h = plane->state->src_h >> 16;
+ src_x = plane->state->src_x >> 16;
+ src_y = plane->state->src_y >> 16;
+
+ val = ((fb->width - (src_x + src_w)) << MALIDP_AD_CROP_RIGHT_OFFSET) |
+ src_x;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_H);
+
+ val = ((fb->height - (src_y + src_h)) << MALIDP_AD_CROP_BOTTOM_OFFSET) |
+ src_y;
+ malidp_hw_write(mp->hwdev, val,
+ mp->layer->afbc_decoder_offset + MALIDP_AD_CROP_V);
+
+ val = MALIDP_AD_EN;
+ if (fb->modifier & AFBC_FORMAT_MOD_SPLIT)
+ val |= MALIDP_AD_BS;
+ if (fb->modifier & AFBC_FORMAT_MOD_YTR)
+ val |= MALIDP_AD_YTR;
+
+ malidp_hw_write(mp->hwdev, val, mp->layer->afbc_decoder_offset);
+}
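The two crop registers pack the left/top offset in the low half-word and the amount trimmed from the right/bottom of the full framebuffer in the high half-word, the shift of 16 matching MALIDP_AD_CROP_RIGHT_OFFSET / MALIDP_AD_CROP_BOTTOM_OFFSET. A sketch of the packing with assumed numbers, a 1920x1088 AFBC framebuffer showing a 1280x720 window at (320, 184):

#include <stdio.h>

int main(void)
{
	/* assumed framebuffer and source rectangle (src_* in Q16, as in the plane state) */
	unsigned int fb_w = 1920, fb_h = 1088;
	unsigned int src_x = 320 << 16, src_y = 184 << 16;
	unsigned int src_w = 1280 << 16, src_h = 720 << 16;

	unsigned int x = src_x >> 16, y = src_y >> 16;		/* Q16 -> integer */
	unsigned int w = src_w >> 16, h = src_h >> 16;
	unsigned int crop_h = ((fb_w - (x + w)) << 16) | x;	/* right crop | left crop */
	unsigned int crop_v = ((fb_h - (y + h)) << 16) | y;	/* bottom crop | top crop */

	printf("AD_CROP_H=0x%08x AD_CROP_V=0x%08x\n", crop_h, crop_v);	/* 0x01400140, 0x00b800b8 */
	return 0;
}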
+
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -602,12 +802,23 @@ static void malidp_de_plane_update(struct drm_plane *plane,
u8 plane_alpha = state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
+ struct drm_framebuffer *fb = plane->state->fb;
mp = to_malidp_plane(plane);
- /* convert src values from Q16 fixed point to integer */
- src_w = state->src_w >> 16;
- src_h = state->src_h >> 16;
+ /*
+ * For AFBC framebuffer, use the framebuffer width and height for
+ * configuring layer input size register.
+ */
+ if (fb->modifier) {
+ src_w = fb->width;
+ src_h = fb->height;
+ } else {
+ /* convert src values from Q16 fixed point to integer */
+ src_w = state->src_w >> 16;
+ src_h = state->src_h >> 16;
+ }
+
dest_w = state->crtc_w;
dest_h = state->crtc_h;
@@ -615,15 +826,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
malidp_hw_write(mp->hwdev, val, mp->layer->base);
- for (i = 0; i < ms->n_planes; i++) {
- /* calculate the offset for the layer's plane registers */
- u16 ptr = mp->layer->ptr + (i << 4);
- dma_addr_t fb_addr = drm_fb_cma_get_gem_addr(state->fb,
- state, i);
-
- malidp_hw_write(mp->hwdev, lower_32_bits(fb_addr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(fb_addr), ptr + 4);
- }
+ for (i = 0; i < ms->n_planes; i++)
+ malidp_set_plane_base_addr(fb, mp, i);
malidp_de_set_mmu_control(mp, ms);
@@ -657,6 +861,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
}
+ malidp_de_set_plane_afbc(plane);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -733,7 +939,26 @@ int malidp_de_planes_init(struct drm_device *drm)
BIT(DRM_MODE_BLEND_PREMULTI) |
BIT(DRM_MODE_BLEND_COVERAGE);
u32 *formats;
- int ret, i, j, n;
+ int ret, i = 0, j = 0, n;
+ u64 supported_modifiers[MODIFIERS_COUNT_MAX];
+ const u64 *modifiers;
+
+ modifiers = malidp_format_modifiers;
+
+ if (!(map->features & MALIDP_DEVICE_AFBC_SUPPORT_SPLIT)) {
+ /*
+ * The hardware does not support SPLIT, so build the list
+ * of supported modifiers excluding the SPLIT ones.
+ */
+ while (*modifiers != DRM_FORMAT_MOD_INVALID) {
+ if (!(*modifiers & AFBC_SPLIT))
+ supported_modifiers[j++] = *modifiers;
+
+ modifiers++;
+ }
+ supported_modifiers[j++] = DRM_FORMAT_MOD_INVALID;
+ modifiers = supported_modifiers;
+ }
formats = kcalloc(map->n_pixel_formats, sizeof(*formats), GFP_KERNEL);
if (!formats) {
@@ -758,9 +983,15 @@ int malidp_de_planes_init(struct drm_device *drm)
plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
DRM_PLANE_TYPE_OVERLAY;
+
+ /*
+ * All layers except the smart layer support AFBC modifiers.
+ */
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
- &malidp_de_plane_funcs, formats,
- n, NULL, plane_type, NULL);
+ &malidp_de_plane_funcs, formats, n,
+ (id == DE_SMART) ? NULL : modifiers, plane_type,
+ NULL);
+
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 7ce3e141464d..a0dd6e1676a8 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -198,10 +198,13 @@
#define MALIDP500_LV_YUV2RGB ((s16)(-0xB8))
#define MALIDP500_DE_LV_BASE 0x00100
#define MALIDP500_DE_LV_PTR_BASE 0x00124
+#define MALIDP500_DE_LV_AD_CTRL 0x00400
#define MALIDP500_DE_LG1_BASE 0x00200
#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
+#define MALIDP500_DE_LG1_AD_CTRL 0x0040c
#define MALIDP500_DE_LG2_BASE 0x00300
#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
+#define MALIDP500_DE_LG2_AD_CTRL 0x00418
#define MALIDP500_SE_BASE 0x00c00
#define MALIDP500_SE_CONTROL 0x00c0c
#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
@@ -228,10 +231,13 @@
#define MALIDP550_LV_YUV2RGB 0x00084
#define MALIDP550_DE_LV1_BASE 0x00100
#define MALIDP550_DE_LV1_PTR_BASE 0x00124
+#define MALIDP550_DE_LV1_AD_CTRL 0x001B8
#define MALIDP550_DE_LV2_BASE 0x00200
#define MALIDP550_DE_LV2_PTR_BASE 0x00224
+#define MALIDP550_DE_LV2_AD_CTRL 0x002B8
#define MALIDP550_DE_LG_BASE 0x00300
#define MALIDP550_DE_LG_PTR_BASE 0x0031c
+#define MALIDP550_DE_LG_AD_CTRL 0x00330
#define MALIDP550_DE_LS_BASE 0x00400
#define MALIDP550_DE_LS_PTR_BASE 0x0042c
#define MALIDP550_DE_PERF_BASE 0x00500
@@ -258,6 +264,20 @@
#define MALIDP_MMU_CTRL_PX_PS(x) (1 << (8 + (x)))
#define MALIDP_MMU_CTRL_PP_NUM_REQ(x) (((x) & 0x7f) << 12)
+/* AFBC register offsets relative to MALIDPXXX_DE_LX_AD_CTRL */
+/* The following register offsets are common for DP500, DP550 and DP650 */
+#define MALIDP_AD_CROP_H 0x4
+#define MALIDP_AD_CROP_V 0x8
+#define MALIDP_AD_END_PTR_LOW 0xc
+#define MALIDP_AD_END_PTR_HIGH 0x10
+
+/* AFBC decoder Registers */
+#define MALIDP_AD_EN BIT(0)
+#define MALIDP_AD_YTR BIT(4)
+#define MALIDP_AD_BS BIT(8)
+#define MALIDP_AD_CROP_RIGHT_OFFSET 16
+#define MALIDP_AD_CROP_BOTTOM_OFFSET 16
+
/*
* Starting with DP550 the register map blocks has been standardised to the
* following layout:
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 8d23700848df..1e7140f005a5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -78,8 +78,6 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
goto err_fballoc;
}
- strlcpy(info->fix.id, "armada-drmfb", sizeof(info->fix.id));
- info->par = fbh;
info->fbops = &armada_fb_ops;
info->fix.smem_start = obj->phys_addr;
info->fix.smem_len = obj->obj.size;
@@ -87,9 +85,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
info->screen_base = ptr;
fbh->fb = &dfb->fb;
- drm_fb_helper_fill_fix(info, dfb->fb.pitches[0],
- dfb->fb.format->depth);
- drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, fbh, sizes);
DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8,
diff --git a/drivers/gpu/drm/aspeed/Kconfig b/drivers/gpu/drm/aspeed/Kconfig
new file mode 100644
index 000000000000..cccab520e02f
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Kconfig
@@ -0,0 +1,14 @@
+config DRM_ASPEED_GFX
+ tristate "ASPEED BMC Display Controller"
+ depends on DRM && OF
+ depends on (COMPILE_TEST || ARCH_ASPEED)
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DMA_CMA if HAVE_DMA_CONTIGUOUS
+ select CMA if HAVE_DMA_CONTIGUOUS
+ select MFD_SYSCON
+ help
+ Choose this option if you have an ASPEED AST2500 SoC Display
+ Controller (aka GFX).
+
+ If M is selected this module will be called aspeed_gfx.
diff --git a/drivers/gpu/drm/aspeed/Makefile b/drivers/gpu/drm/aspeed/Makefile
new file mode 100644
index 000000000000..6e194cd790d8
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/Makefile
@@ -0,0 +1,3 @@
+aspeed_gfx-y := aspeed_gfx_drv.o aspeed_gfx_crtc.o aspeed_gfx_out.o
+
+obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed_gfx.o
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx.h b/drivers/gpu/drm/aspeed/aspeed_gfx.h
new file mode 100644
index 000000000000..a10358bb61ec
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright 2018 IBM Corporation */
+
+#include <drm/drm_device.h>
+#include <drm/drm_simple_kms_helper.h>
+
+struct aspeed_gfx {
+ void __iomem *base;
+ struct clk *clk;
+ struct reset_control *rst;
+ struct regmap *scu;
+
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector connector;
+ struct drm_fbdev_cma *fbdev;
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm);
+int aspeed_gfx_create_output(struct drm_device *drm);
+
+#define CRT_CTRL1 0x60 /* CRT Control I */
+#define CRT_CTRL2 0x64 /* CRT Control II */
+#define CRT_STATUS 0x68 /* CRT Status */
+#define CRT_MISC 0x6c /* CRT Misc Setting */
+#define CRT_HORIZ0 0x70 /* CRT Horizontal Total & Display Enable End */
+#define CRT_HORIZ1 0x74 /* CRT Horizontal Retrace Start & End */
+#define CRT_VERT0 0x78 /* CRT Vertical Total & Display Enable End */
+#define CRT_VERT1 0x7C /* CRT Vertical Retrace Start & End */
+#define CRT_ADDR 0x80 /* CRT Display Starting Address */
+#define CRT_OFFSET 0x84 /* CRT Display Offset & Terminal Count */
+#define CRT_THROD 0x88 /* CRT Threshold */
+#define CRT_XSCALE 0x8C /* CRT Scaling-Up Factor */
+#define CRT_CURSOR0 0x90 /* CRT Hardware Cursor X & Y Offset */
+#define CRT_CURSOR1 0x94 /* CRT Hardware Cursor X & Y Position */
+#define CRT_CURSOR2 0x98 /* CRT Hardware Cursor Pattern Address */
+#define CRT_9C 0x9C
+#define CRT_OSD_H 0xA0 /* CRT OSD Horizontal Start/End */
+#define CRT_OSD_V 0xA4 /* CRT OSD Vertical Start/End */
+#define CRT_OSD_ADDR 0xA8 /* CRT OSD Pattern Address */
+#define CRT_OSD_DISP 0xAC /* CRT OSD Offset */
+#define CRT_OSD_THRESH 0xB0 /* CRT OSD Threshold & Alpha */
+#define CRT_B4 0xB4
+#define CRT_STS_V 0xB8 /* CRT Status V */
+#define CRT_SCRATCH 0xBC /* Scratchpad */
+#define CRT_BB0_ADDR 0xD0 /* CRT Display BB0 Starting Address */
+#define CRT_BB1_ADDR 0xD4 /* CRT Display BB1 Starting Address */
+#define CRT_BB_COUNT 0xD8 /* CRT Display BB Terminal Count */
+#define OSD_COLOR1 0xE0 /* OSD Color Palette Index 1 & 0 */
+#define OSD_COLOR2 0xE4 /* OSD Color Palette Index 3 & 2 */
+#define OSD_COLOR3 0xE8 /* OSD Color Palette Index 5 & 4 */
+#define OSD_COLOR4 0xEC /* OSD Color Palette Index 7 & 6 */
+#define OSD_COLOR5 0xF0 /* OSD Color Palette Index 9 & 8 */
+#define OSD_COLOR6 0xF4 /* OSD Color Palette Index 11 & 10 */
+#define OSD_COLOR7 0xF8 /* OSD Color Palette Index 13 & 12 */
+#define OSD_COLOR8 0xFC /* OSD Color Palette Index 15 & 14 */
+
+/* CTRL1 */
+#define CRT_CTRL_EN BIT(0)
+#define CRT_CTRL_HW_CURSOR_EN BIT(1)
+#define CRT_CTRL_OSD_EN BIT(2)
+#define CRT_CTRL_INTERLACED BIT(3)
+#define CRT_CTRL_COLOR_RGB565 (0 << 7)
+#define CRT_CTRL_COLOR_YUV444 (1 << 7)
+#define CRT_CTRL_COLOR_XRGB8888 (2 << 7)
+#define CRT_CTRL_COLOR_RGB888 (3 << 7)
+#define CRT_CTRL_COLOR_YUV444_2RGB (5 << 7)
+#define CRT_CTRL_COLOR_YUV422 (7 << 7)
+#define CRT_CTRL_COLOR_MASK GENMASK(9, 7)
+#define CRT_CTRL_HSYNC_NEGATIVE BIT(16)
+#define CRT_CTRL_VSYNC_NEGATIVE BIT(17)
+#define CRT_CTRL_VERTICAL_INTR_EN BIT(30)
+#define CRT_CTRL_VERTICAL_INTR_STS BIT(31)
+
+/* CTRL2 */
+#define CRT_CTRL_DAC_EN BIT(0)
+#define CRT_CTRL_VBLANK_LINE(x) (((x) << 20) & CRT_CTRL_VBLANK_LINE_MASK)
+#define CRT_CTRL_VBLANK_LINE_MASK GENMASK(31, 20)
+
+/* CRT_HORIZ0 */
+#define CRT_H_TOTAL(x) (x)
+#define CRT_H_DE(x) ((x) << 16)
+
+/* CRT_HORIZ1 */
+#define CRT_H_RS_START(x) (x)
+#define CRT_H_RS_END(x) ((x) << 16)
+
+/* CRT_VIRT0 */
+#define CRT_V_TOTAL(x) (x)
+#define CRT_V_DE(x) ((x) << 16)
+
+/* CRT_VIRT1 */
+#define CRT_V_RS_START(x) (x)
+#define CRT_V_RS_END(x) ((x) << 16)
+
+/* CRT_OFFSET */
+#define CRT_DISP_OFFSET(x) (x)
+#define CRT_TERM_COUNT(x) ((x) << 16)
+
+/* CRT_THROD */
+#define CRT_THROD_LOW(x) (x)
+#define CRT_THROD_HIGH(x) ((x) << 8)
+
+/* Default Threshold Setting */
+#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
new file mode 100644
index 000000000000..15db9e426ec4
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c
@@ -0,0 +1,241 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "aspeed_gfx.h"
+
+static struct aspeed_gfx *
+drm_pipe_to_aspeed_gfx(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct aspeed_gfx, pipe);
+}
+
+static int aspeed_gfx_set_pixel_fmt(struct aspeed_gfx *priv, u32 *bpp)
+{
+ struct drm_crtc *crtc = &priv->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ const u32 format = crtc->primary->state->fb->format->format;
+ u32 ctrl1;
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~CRT_CTRL_COLOR_MASK;
+
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ dev_dbg(drm->dev, "Setting up RGB565 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_RGB565;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
+ ctrl1 |= CRT_CTRL_COLOR_XRGB8888;
+ *bpp = 32;
+ break;
+ default:
+ dev_err(drm->dev, "Unhandled pixel format %08x\n", format);
+ return -EINVAL;
+ }
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ /* SCU2C: set DAC source for display output to Graphics CRT (GFX) */
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16));
+
+ writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+}
+
+static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
+{
+ u32 ctrl1 = readl(priv->base + CRT_CTRL1);
+ u32 ctrl2 = readl(priv->base + CRT_CTRL2);
+
+ writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
+ writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
+
+ regmap_update_bits(priv->scu, 0x2c, BIT(16), 0);
+}
+
+static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
+{
+ struct drm_display_mode *m = &priv->pipe.crtc.state->adjusted_mode;
+ u32 ctrl1, d_offset, t_count, bpp;
+ int err;
+
+ err = aspeed_gfx_set_pixel_fmt(priv, &bpp);
+ if (err)
+ return;
+
+#if 0
+ /* TODO: we have only been able to test with the 40MHz USB clock. The
+ * clock is fixed, so we cannot adjust it here. */
+ clk_set_rate(priv->pixel_clk, m->crtc_clock * 1000);
+#endif
+
+ ctrl1 = readl(priv->base + CRT_CTRL1);
+ ctrl1 &= ~(CRT_CTRL_INTERLACED |
+ CRT_CTRL_HSYNC_NEGATIVE |
+ CRT_CTRL_VSYNC_NEGATIVE);
+
+ if (m->flags & DRM_MODE_FLAG_INTERLACE)
+ ctrl1 |= CRT_CTRL_INTERLACED;
+
+ if (!(m->flags & DRM_MODE_FLAG_PHSYNC))
+ ctrl1 |= CRT_CTRL_HSYNC_NEGATIVE;
+
+ if (!(m->flags & DRM_MODE_FLAG_PVSYNC))
+ ctrl1 |= CRT_CTRL_VSYNC_NEGATIVE;
+
+ writel(ctrl1, priv->base + CRT_CTRL1);
+
+ /* Horizontal timing */
+ writel(CRT_H_TOTAL(m->htotal - 1) | CRT_H_DE(m->hdisplay - 1),
+ priv->base + CRT_HORIZ0);
+ writel(CRT_H_RS_START(m->hsync_start - 1) | CRT_H_RS_END(m->hsync_end),
+ priv->base + CRT_HORIZ1);
+
+
+ /* Vertical timing */
+ writel(CRT_V_TOTAL(m->vtotal - 1) | CRT_V_DE(m->vdisplay - 1),
+ priv->base + CRT_VERT0);
+ writel(CRT_V_RS_START(m->vsync_start) | CRT_V_RS_END(m->vsync_end),
+ priv->base + CRT_VERT1);
+
+ /*
+ * Display Offset: address difference between consecutive scan lines
+ * Terminal Count: memory size of one scan line
+ */
+ d_offset = m->hdisplay * bpp / 8;
+ t_count = (m->hdisplay * bpp + 127) / 128;
+ writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
+ priv->base + CRT_OFFSET);
+
+ /*
+ * Threshold: FIFO thresholds of refill and stop (16 byte chunks
+ * per line, rounded up)
+ */
+ writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD);
+}
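As a quick sanity check of the offset/terminal-count arithmetic, assume the largest mode this driver exposes, 800x600 XRGB8888: each scan line is 3200 bytes, i.e. 200 chunks of 128 bits.

#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 800, bpp = 32;			/* assumed XRGB8888 mode */
	unsigned int d_offset = hdisplay * bpp / 8;		/* bytes per scan line: 3200 */
	unsigned int t_count = (hdisplay * bpp + 127) / 128;	/* 128-bit chunks: 200 */

	printf("CRT_OFFSET = DISP_OFFSET(%u) | TERM_COUNT(%u)\n", d_offset, t_count);
	return 0;
}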
+
+static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ aspeed_gfx_crtc_mode_set_nofb(priv);
+ aspeed_gfx_enable_controller(priv);
+ drm_crtc_vblank_on(crtc);
+}
+
+static void aspeed_gfx_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+
+ drm_crtc_vblank_off(crtc);
+ aspeed_gfx_disable_controller(priv);
+}
+
+static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = pipe->plane.state->fb;
+ struct drm_pending_vblank_event *event;
+ struct drm_gem_cma_object *gem;
+
+ spin_lock_irq(&crtc->dev->event_lock);
+ event = crtc->state->event;
+ if (event) {
+ crtc->state->event = NULL;
+
+ if (drm_crtc_vblank_get(crtc) == 0)
+ drm_crtc_arm_vblank_event(crtc, event);
+ else
+ drm_crtc_send_vblank_event(crtc, event);
+ }
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ if (!fb)
+ return;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 0);
+ if (!gem)
+ return;
+ writel(gem->paddr, priv->base + CRT_ADDR);
+}
+
+static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+
+ reg |= CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ return 0;
+}
+
+static void aspeed_gfx_disable_vblank(struct drm_simple_display_pipe *pipe)
+{
+ struct aspeed_gfx *priv = drm_pipe_to_aspeed_gfx(pipe);
+ u32 reg = readl(priv->base + CRT_CTRL1);
+
+ reg &= ~CRT_CTRL_VERTICAL_INTR_EN;
+ writel(reg, priv->base + CRT_CTRL1);
+
+ /* Clear pending VBLANK IRQ */
+ writel(reg | CRT_CTRL_VERTICAL_INTR_STS, priv->base + CRT_CTRL1);
+}
+
+static struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
+ .enable = aspeed_gfx_pipe_enable,
+ .disable = aspeed_gfx_pipe_disable,
+ .update = aspeed_gfx_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .enable_vblank = aspeed_gfx_enable_vblank,
+ .disable_vblank = aspeed_gfx_disable_vblank,
+};
+
+static const uint32_t aspeed_gfx_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGB565,
+};
+
+int aspeed_gfx_create_pipe(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+
+ return drm_simple_display_pipe_init(drm, &priv->pipe, &aspeed_gfx_funcs,
+ aspeed_gfx_formats,
+ ARRAY_SIZE(aspeed_gfx_formats),
+ NULL,
+ &priv->connector);
+}
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
new file mode 100644
index 000000000000..eeb22eccd1fc
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/irq.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+#include <drm/drm_drv.h>
+
+#include "aspeed_gfx.h"
+
+/**
+ * DOC: ASPEED GFX Driver
+ *
+ * This driver is for the ASPEED BMC SoC's 'GFX' display hardware, also called
+ * the 'SOC Display Controller' in the datasheet. This driver runs on the ARM
+ * based BMC systems, unlike the ast driver which runs on a host CPU and is for
+ * a PCIe graphics device.
+ *
+ * The AST2500 supports a total of 3 output paths:
+ *
+ * 1. VGA output, the output target can choose either or both to the DAC
+ * or DVO interface.
+ *
+ * 2. Graphics CRT output, the output target can choose either or both to
+ * the DAC or DVO interface.
+ *
+ * 3. Video input from DVO, the video input can be used for video engine
+ * capture or DAC display output.
+ *
+ * Output options are selected in SCU2C.
+ *
+ * The "VGA mode" device is the PCI attached controller. The "Graphics CRT"
+ * is the ARM's internal display controller.
+ *
+ * The driver only supports a simple configuration consisting of a 40MHz
+ * pixel clock, fixed by hardware limitations, and the VGA output path.
+ *
+ * The driver was written with the 'AST2500 Software Programming Guide' v17,
+ * which is available under NDA from ASPEED.
+ */
+
+static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void aspeed_gfx_setup_mode_config(struct drm_device *drm)
+{
+ drm_mode_config_init(drm);
+
+ drm->mode_config.min_width = 0;
+ drm->mode_config.min_height = 0;
+ drm->mode_config.max_width = 800;
+ drm->mode_config.max_height = 600;
+ drm->mode_config.funcs = &aspeed_gfx_mode_config_funcs;
+}
+
+static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
+{
+ struct drm_device *drm = data;
+ struct aspeed_gfx *priv = drm->dev_private;
+ u32 reg;
+
+ reg = readl(priv->base + CRT_CTRL1);
+
+ if (reg & CRT_CTRL_VERTICAL_INTR_STS) {
+ drm_crtc_handle_vblank(&priv->pipe.crtc);
+ writel(reg, priv->base + CRT_CTRL1);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+
+
+static int aspeed_gfx_load(struct drm_device *drm)
+{
+ struct platform_device *pdev = to_platform_device(drm->dev);
+ struct aspeed_gfx *priv;
+ struct resource *res;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ drm->dev_private = priv;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(drm->dev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
+ if (IS_ERR(priv->scu)) {
+ dev_err(&pdev->dev, "failed to find SCU regmap\n");
+ return PTR_ERR(priv->scu);
+ }
+
+ ret = of_reserved_mem_device_init(drm->dev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to initialize reserved mem: %d\n", ret);
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", ret);
+ return ret;
+ }
+
+ priv->rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ dev_err(&pdev->dev,
+ "missing or invalid reset controller device tree entry");
+ return PTR_ERR(priv->rst);
+ }
+ reset_control_deassert(priv->rst);
+
+ priv->clk = devm_clk_get(drm->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(&pdev->dev,
+ "missing or invalid clk device tree entry");
+ return PTR_ERR(priv->clk);
+ }
+ clk_prepare_enable(priv->clk);
+
+ /* Sanitize control registers */
+ writel(0, priv->base + CRT_CTRL1);
+ writel(0, priv->base + CRT_CTRL2);
+
+ aspeed_gfx_setup_mode_config(drm);
+
+ ret = drm_vblank_init(drm, 1);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to initialise vblank\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_output(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to create outputs\n");
+ return ret;
+ }
+
+ ret = aspeed_gfx_create_pipe(drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Cannot setup simple display pipe\n");
+ return ret;
+ }
+
+ ret = devm_request_irq(drm->dev, platform_get_irq(pdev, 0),
+ aspeed_gfx_irq_handler, 0, "aspeed gfx", drm);
+ if (ret < 0) {
+ dev_err(drm->dev, "Failed to install IRQ handler\n");
+ return ret;
+ }
+
+ drm_mode_config_reset(drm);
+
+ drm_fbdev_generic_setup(drm, 32);
+
+ return 0;
+}
+
+static void aspeed_gfx_unload(struct drm_device *drm)
+{
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
+
+ drm->dev_private = NULL;
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(fops);
+
+static struct drm_driver aspeed_gfx_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET |
+ DRIVER_PRIME | DRIVER_ATOMIC,
+ .gem_create_object = drm_cma_gem_create_object_default_funcs,
+ .dumb_create = drm_gem_cma_dumb_create,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+ .fops = &fops,
+ .name = "aspeed-gfx-drm",
+ .desc = "ASPEED GFX DRM",
+ .date = "20180319",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id aspeed_gfx_match[] = {
+ { .compatible = "aspeed,ast2500-gfx" },
+ { }
+};
+
+static int aspeed_gfx_probe(struct platform_device *pdev)
+{
+ struct drm_device *drm;
+ int ret;
+
+ drm = drm_dev_alloc(&aspeed_gfx_driver, &pdev->dev);
+ if (IS_ERR(drm))
+ return PTR_ERR(drm);
+
+ ret = aspeed_gfx_load(drm);
+ if (ret)
+ goto err_free;
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ goto err_unload;
+
+ return 0;
+
+err_unload:
+ aspeed_gfx_unload(drm);
+err_free:
+ drm_dev_put(drm);
+
+ return ret;
+}
+
+static int aspeed_gfx_remove(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ drm_dev_unregister(drm);
+ aspeed_gfx_unload(drm);
+ drm_dev_put(drm);
+
+ return 0;
+}
+
+static struct platform_driver aspeed_gfx_platform_driver = {
+ .probe = aspeed_gfx_probe,
+ .remove = aspeed_gfx_remove,
+ .driver = {
+ .name = "aspeed_gfx",
+ .of_match_table = aspeed_gfx_match,
+ },
+};
+
+module_platform_driver(aspeed_gfx_platform_driver);
+
+MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_out.c b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
new file mode 100644
index 000000000000..67ee5fa10055
--- /dev/null
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_out.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright 2018 IBM Corporation
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_probe_helper.h>
+
+#include "aspeed_gfx.h"
+
+static int aspeed_gfx_get_modes(struct drm_connector *connector)
+{
+ return drm_add_modes_noedid(connector, 800, 600);
+}
+
+static const struct
+drm_connector_helper_funcs aspeed_gfx_connector_helper_funcs = {
+ .get_modes = aspeed_gfx_get_modes,
+};
+
+static const struct drm_connector_funcs aspeed_gfx_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+int aspeed_gfx_create_output(struct drm_device *drm)
+{
+ struct aspeed_gfx *priv = drm->dev_private;
+ int ret;
+
+ priv->connector.dpms = DRM_MODE_DPMS_OFF;
+ priv->connector.polled = 0;
+ drm_connector_helper_add(&priv->connector,
+ &aspeed_gfx_connector_helper_funcs);
+ ret = drm_connector_init(drm, &priv->connector,
+ &aspeed_gfx_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ return ret;
+}
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index bfc65040dfcb..1cf0c75e411d 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -259,7 +259,7 @@ struct ast_framebuffer {
};
struct ast_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct ast_framebuffer afb;
void *sysram;
int size;
@@ -353,8 +353,6 @@ extern int ast_dumb_mmap_offset(struct drm_file *file,
uint32_t handle,
uint64_t *offset);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
int ast_mm_init(struct ast_private *ast);
void ast_mm_fini(struct ast_private *ast);
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 2c9f8dd9733a..e718d0f60d6b 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -217,8 +217,6 @@ static int astfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out;
}
- info->par = afbdev;
-
ret = ast_framebuffer_init(dev, &afbdev->afb, &mode_cmd, gobj);
if (ret)
goto out;
@@ -229,15 +227,12 @@ static int astfb_create(struct drm_fb_helper *helper,
fb = &afbdev->afb.base;
afbdev->helper.fb = fb;
- strcpy(info->fix.id, "astdrmfb");
-
info->fbops = &astfb_ops;
info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &afbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index c168d62fe8f9..75d477b37854 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -178,7 +178,6 @@ int ast_mm_init(struct ast_private *ast)
ret = ttm_bo_device_init(&ast->ttm.bdev,
&ast_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -344,13 +343,8 @@ int ast_bo_push_sysram(struct ast_bo *bo)
int ast_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct ast_private *ast;
+ struct drm_file *file_priv = filp->private_data;
+ struct ast_private *ast = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- ast = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &ast->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index 03711394f1ed..341cc9d1bab4 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -7,6 +7,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem.h>
@@ -69,11 +70,9 @@ struct bochs_device {
struct edid *edid;
/* drm */
- struct drm_device *dev;
- struct drm_crtc crtc;
- struct drm_encoder encoder;
+ struct drm_device *dev;
+ struct drm_simple_display_pipe pipe;
struct drm_connector connector;
- bool mode_config_initialized;
/* ttm */
struct {
@@ -101,8 +100,6 @@ static inline struct bochs_bo *gem_to_bochs_bo(struct drm_gem_object *gem)
return container_of(gem, struct bochs_bo, gem);
}
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline u64 bochs_bo_mmap_offset(struct bochs_bo *bo)
{
return drm_vma_node_offset_addr(&bo->bo.vma_node);
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 9cd82e3631fb..5e905f50449d 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -22,76 +22,55 @@ MODULE_PARM_DESC(defy, "default y resolution");
/* ---------------------------------------------------------------------- */
-static void bochs_crtc_mode_set_nofb(struct drm_crtc *crtc)
+static const uint32_t bochs_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_BGRX8888,
+};
+
+static void bochs_plane_update(struct bochs_device *bochs,
+ struct drm_plane_state *state)
{
- struct bochs_device *bochs =
- container_of(crtc, struct bochs_device, crtc);
+ struct bochs_bo *bo;
- bochs_hw_setmode(bochs, &crtc->mode);
+ if (!state->fb || !bochs->stride)
+ return;
+
+ bo = gem_to_bochs_bo(state->fb->obj[0]);
+ bochs_hw_setbase(bochs,
+ state->crtc_x,
+ state->crtc_y,
+ bo->bo.offset);
+ bochs_hw_setformat(bochs, state->fb->format);
}
-static void bochs_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+
+ bochs_hw_setmode(bochs, &crtc_state->mode);
+ bochs_plane_update(bochs, plane_state);
}
-static void bochs_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+ struct drm_crtc *crtc = &pipe->crtc;
- if (crtc->state && crtc->state->event) {
- unsigned long irqflags;
+ bochs_plane_update(bochs, pipe->plane.state);
- spin_lock_irqsave(&dev->event_lock, irqflags);
- event = crtc->state->event;
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock_irq(&crtc->dev->event_lock);
}
}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs bochs_crtc_funcs = {
- .set_config = drm_atomic_helper_set_config,
- .destroy = drm_crtc_cleanup,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
-};
-
-static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
- .mode_set_nofb = bochs_crtc_mode_set_nofb,
- .atomic_enable = bochs_crtc_atomic_enable,
- .atomic_flush = bochs_crtc_atomic_flush,
-};
-
-static const uint32_t bochs_formats[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_BGRX8888,
-};
-
-static void bochs_plane_atomic_update(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct bochs_device *bochs = plane->dev->dev_private;
- struct bochs_bo *bo;
-
- if (!plane->state->fb)
- return;
- bo = gem_to_bochs_bo(plane->state->fb->obj[0]);
- bochs_hw_setbase(bochs,
- plane->state->crtc_x,
- plane->state->crtc_y,
- bo->bo.offset);
- bochs_hw_setformat(bochs, plane->state->fb->format);
-}
-
-static int bochs_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state)
+static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state)
{
struct bochs_bo *bo;
@@ -101,8 +80,8 @@ static int bochs_plane_prepare_fb(struct drm_plane *plane,
return bochs_bo_pin(bo, TTM_PL_FLAG_VRAM);
}
-static void bochs_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
{
struct bochs_bo *bo;
@@ -112,73 +91,13 @@ static void bochs_plane_cleanup_fb(struct drm_plane *plane,
bochs_bo_unpin(bo);
}
-static const struct drm_plane_helper_funcs bochs_plane_helper_funcs = {
- .atomic_update = bochs_plane_atomic_update,
- .prepare_fb = bochs_plane_prepare_fb,
- .cleanup_fb = bochs_plane_cleanup_fb,
-};
-
-static const struct drm_plane_funcs bochs_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_primary_helper_destroy,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-static struct drm_plane *bochs_primary_plane(struct drm_device *dev)
-{
- struct drm_plane *primary;
- int ret;
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &bochs_plane_funcs,
- bochs_formats,
- ARRAY_SIZE(bochs_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- return NULL;
- }
-
- drm_plane_helper_add(primary, &bochs_plane_helper_funcs);
- return primary;
-}
-
-static void bochs_crtc_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_crtc *crtc = &bochs->crtc;
- struct drm_plane *primary = bochs_primary_plane(dev);
-
- drm_crtc_init_with_planes(dev, crtc, primary, NULL,
- &bochs_crtc_funcs, NULL);
- drm_crtc_helper_add(crtc, &bochs_helper_funcs);
-}
-
-static const struct drm_encoder_funcs bochs_encoder_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
+static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
+ .enable = bochs_pipe_enable,
+ .update = bochs_pipe_update,
+ .prepare_fb = bochs_pipe_prepare_fb,
+ .cleanup_fb = bochs_pipe_cleanup_fb,
};
-static void bochs_encoder_init(struct drm_device *dev)
-{
- struct bochs_device *bochs = dev->dev_private;
- struct drm_encoder *encoder = &bochs->encoder;
-
- encoder->possible_crtcs = 0x1;
- drm_encoder_init(dev, encoder, &bochs_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
-}
-
-
static int bochs_connector_get_modes(struct drm_connector *connector)
{
struct bochs_device *bochs =
@@ -214,20 +133,9 @@ static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-bochs_connector_best_encoder(struct drm_connector *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
.mode_valid = bochs_connector_mode_valid,
- .best_encoder = bochs_connector_best_encoder,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
@@ -278,7 +186,6 @@ const struct drm_mode_config_funcs bochs_mode_funcs = {
int bochs_kms_init(struct bochs_device *bochs)
{
drm_mode_config_init(bochs->dev);
- bochs->mode_config_initialized = true;
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
@@ -290,11 +197,14 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.funcs = &bochs_mode_funcs;
- bochs_crtc_init(bochs->dev);
- bochs_encoder_init(bochs->dev);
bochs_connector_init(bochs->dev);
- drm_connector_attach_encoder(&bochs->connector,
- &bochs->encoder);
+ drm_simple_display_pipe_init(bochs->dev,
+ &bochs->pipe,
+ &bochs_pipe_funcs,
+ bochs_formats,
+ ARRAY_SIZE(bochs_formats),
+ NULL,
+ &bochs->connector);
drm_mode_config_reset(bochs->dev);
@@ -303,8 +213,6 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
- if (bochs->mode_config_initialized) {
- drm_mode_config_cleanup(bochs->dev);
- bochs->mode_config_initialized = false;
- }
+ drm_atomic_helper_shutdown(bochs->dev);
+ drm_mode_config_cleanup(bochs->dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 49463348a07a..4a40308169c4 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -156,7 +156,6 @@ int bochs_mm_init(struct bochs_device *bochs)
ret = ttm_bo_device_init(&bochs->ttm.bdev,
&bochs_bo_driver,
bochs->dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -264,14 +263,9 @@ int bochs_bo_unpin(struct bochs_bo *bo)
int bochs_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct bochs_device *bochs;
+ struct drm_file *file_priv = filp->private_data;
+ struct bochs_device *bochs = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- bochs = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &bochs->ttm.bdev);
}
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
index 0805801f4e94..e64736c39a9f 100644
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c
@@ -234,7 +234,7 @@ static int dumb_vga_remove(struct platform_device *pdev)
*/
static const struct drm_bridge_timings default_dac_timings = {
/* Timing specifications, datasheet page 7 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
.setup_time_ps = 500,
.hold_time_ps = 1500,
};
@@ -245,7 +245,7 @@ static const struct drm_bridge_timings default_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8134_dac_timings = {
/* From timing diagram, datasheet page 9 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 12 */
.setup_time_ps = 3000,
/* I guess this means latched input */
@@ -258,7 +258,7 @@ static const struct drm_bridge_timings ti_ths8134_dac_timings = {
*/
static const struct drm_bridge_timings ti_ths8135_dac_timings = {
/* From timing diagram, datasheet page 14 */
- .sampling_edge = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
/* From datasheet, page 16 */
.setup_time_ps = 2000,
.hold_time_ps = 500,
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 888980d4bc74..e570c9dee180 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1222,8 +1222,8 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
&bus_format, 1);
tc->connector.display_info.bus_flags =
DRM_BUS_FLAG_DE_HIGH |
- DRM_BUS_FLAG_PIXDATA_NEGEDGE |
- DRM_BUS_FLAG_SYNC_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 7bfb4f338813..8b0e71bd3ca7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -27,10 +27,16 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
+ unsigned int connector_type;
+ u32 bus_format;
struct i2c_adapter *ddc;
struct gpio_desc *hpd;
+ int hpd_irq;
struct delayed_work hpd_work;
+ struct gpio_desc *powerdown;
+
+ struct drm_bridge_timings timings;
struct device *dev;
};
@@ -120,26 +126,47 @@ static int tfp410_attach(struct drm_bridge *bridge)
return -ENODEV;
}
- if (dvi->hpd)
+ if (dvi->hpd_irq >= 0)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ else
+ dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init(bridge->dev, &dvi->connector,
- &tfp410_con_funcs, DRM_MODE_CONNECTOR_HDMIA);
+ &tfp410_con_funcs, dvi->connector_type);
if (ret) {
dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
return ret;
}
+ drm_display_info_set_bus_formats(&dvi->connector.display_info,
+ &dvi->bus_format, 1);
+
drm_connector_attach_encoder(&dvi->connector,
bridge->encoder);
return 0;
}
+static void tfp410_enable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 0);
+}
+
+static void tfp410_disable(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ gpiod_set_value_cansleep(dvi->powerdown, 1);
+}
+
static const struct drm_bridge_funcs tfp410_bridge_funcs = {
.attach = tfp410_attach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
};
static void tfp410_hpd_work_func(struct work_struct *work)
@@ -162,6 +189,83 @@ static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
return IRQ_HANDLED;
}
+static const struct drm_bridge_timings tfp410_default_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+ .setup_time_ps = 1200,
+ .hold_time_ps = 1300,
+};
+
+static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
+{
+ struct drm_bridge_timings *timings = &dvi->timings;
+ struct device_node *ep;
+ u32 pclk_sample = 0;
+ u32 bus_width = 24;
+ s32 deskew = 0;
+
+ /* Start with defaults. */
+ *timings = tfp410_default_timings;
+
+ if (i2c)
+ /*
+ * In I2C mode timings are configured through the I2C interface.
+ * As the driver doesn't support I2C configuration yet, we just
+ * go with the defaults (BSEL=1, DSEL=1, DKEN=0, EDGE=1).
+ */
+ return 0;
+
+ /*
+ * In non-I2C mode, timings are configured through the BSEL, DSEL, DKEN
+ * and EDGE pins. They are specified in DT through endpoint properties
+ * and vendor-specific properties.
+ */
+ ep = of_graph_get_endpoint_by_regs(dvi->dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ /* Get the sampling edge from the endpoint. */
+ of_property_read_u32(ep, "pclk-sample", &pclk_sample);
+ of_property_read_u32(ep, "bus-width", &bus_width);
+ of_node_put(ep);
+
+ timings->input_bus_flags = DRM_BUS_FLAG_DE_HIGH;
+
+ switch (pclk_sample) {
+ case 0:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE;
+ break;
+ case 1:
+ timings->input_bus_flags |= DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (bus_width) {
+ case 12:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_2X12_LE;
+ break;
+ case 24:
+ dvi->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Get the setup and hold time from vendor-specific properties. */
+ of_property_read_u32(dvi->dev->of_node, "ti,deskew", (u32 *)&deskew);
+ if (deskew < -4 || deskew > 3)
+ return -EINVAL;
+
+ timings->setup_time_ps = max(0, 1200 - 350 * deskew);
+ timings->hold_time_ps = max(0, 1300 + 350 * deskew);
+
+ return 0;
+}
+
static int tfp410_get_connector_properties(struct tfp410 *dvi)
{
struct device_node *connector_node, *ddc_phandle;
@@ -172,6 +276,11 @@ static int tfp410_get_connector_properties(struct tfp410 *dvi)
if (!connector_node)
return -ENODEV;
+ if (of_device_is_compatible(connector_node, "hdmi-connector"))
+ dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ else
+ dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
+
dvi->hpd = fwnode_get_named_gpiod(&connector_node->fwnode,
"hpd-gpios", 0, GPIOD_IN, "hpd");
if (IS_ERR(dvi->hpd)) {
@@ -200,7 +309,7 @@ fail:
return ret;
}
-static int tfp410_init(struct device *dev)
+static int tfp410_init(struct device *dev, bool i2c)
{
struct tfp410 *dvi;
int ret;
@@ -217,16 +326,33 @@ static int tfp410_init(struct device *dev)
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
+ dvi->bridge.timings = &dvi->timings;
dvi->dev = dev;
+ ret = tfp410_parse_timings(dvi, i2c);
+ if (ret)
+ goto fail;
+
ret = tfp410_get_connector_properties(dvi);
if (ret)
goto fail;
- if (dvi->hpd) {
+ dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(dvi->powerdown)) {
+ dev_err(dev, "failed to parse powerdown gpio\n");
+ return PTR_ERR(dvi->powerdown);
+ }
+
+ if (dvi->hpd)
+ dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
+ else
+ dvi->hpd_irq = -ENXIO;
+
+ if (dvi->hpd_irq >= 0) {
INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
- ret = devm_request_threaded_irq(dev, gpiod_to_irq(dvi->hpd),
+ ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi-hpd", dvi);
@@ -264,7 +390,7 @@ static int tfp410_fini(struct device *dev)
static int tfp410_probe(struct platform_device *pdev)
{
- return tfp410_init(&pdev->dev);
+ return tfp410_init(&pdev->dev, false);
}
static int tfp410_remove(struct platform_device *pdev)
@@ -301,7 +427,7 @@ static int tfp410_i2c_probe(struct i2c_client *client,
return -ENXIO;
}
- return tfp410_init(&client->dev);
+ return tfp410_init(&client->dev, true);
}
static int tfp410_i2c_remove(struct i2c_client *client)
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index fc78c90ee931..dd4f52a0bc1c 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -2,7 +2,7 @@ config DRM_CIRRUS_QEMU
tristate "Cirrus driver for QEMU emulated device"
depends on DRM && PCI && MMU
select DRM_KMS_HELPER
- select DRM_TTM
+ select DRM_GEM_SHMEM_HELPER
help
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
diff --git a/drivers/gpu/drm/cirrus/Makefile b/drivers/gpu/drm/cirrus/Makefile
index 919c0a336c97..acf8971d37a1 100644
--- a/drivers/gpu/drm/cirrus/Makefile
+++ b/drivers/gpu/drm/cirrus/Makefile
@@ -1,4 +1 @@
-cirrus-y := cirrus_main.o cirrus_mode.o \
- cirrus_drv.o cirrus_fbdev.o cirrus_ttm.o
-
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
new file mode 100644
index 000000000000..be4ea370ba31
--- /dev/null
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -0,0 +1,657 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2012-2019 Red Hat
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License version 2. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Authors: Matthew Garrett
+ * Dave Airlie
+ * Gerd Hoffmann
+ *
+ * Portions of this code derived from cirrusfb.c:
+ * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
+ *
+ * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
+ */
+
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include <video/cirrus.h>
+#include <video/vga.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_vblank.h>
+
+#define DRIVER_NAME "cirrus"
+#define DRIVER_DESC "qemu cirrus vga"
+#define DRIVER_DATE "2019"
+#define DRIVER_MAJOR 2
+#define DRIVER_MINOR 0
+
+#define CIRRUS_MAX_PITCH (0x1FF << 3) /* (4096 - 1) & ~111b bytes */
+#define CIRRUS_VRAM_SIZE (4 * 1024 * 1024) /* 4 MB */
+
+struct cirrus_device {
+ struct drm_device dev;
+ struct drm_simple_display_pipe pipe;
+ struct drm_connector conn;
+ unsigned int cpp;
+ unsigned int pitch;
+ void __iomem *vram;
+ void __iomem *mmio;
+};
+
+/* ------------------------------------------------------------------ */
+/*
+ * The meat of this driver. The core passes us a mode and we have to program
+ * it. The modesetting here is the bare minimum required to satisfy the qemu
+ * emulation of this hardware, and running this against a real device is
+ * likely to result in an inadequately programmed mode. We've already had
+ * the opportunity to modify the mode, so whatever we receive here should
+ * be something that can be correctly programmed and displayed
+ */
+
+#define SEQ_INDEX 4
+#define SEQ_DATA 5
+
+static u8 rreg_seq(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ return ioread8(cirrus->mmio + SEQ_DATA);
+}
+
+static void wreg_seq(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + SEQ_INDEX);
+ iowrite8(val, cirrus->mmio + SEQ_DATA);
+}
+
+#define CRT_INDEX 0x14
+#define CRT_DATA 0x15
+
+static u8 rreg_crt(struct cirrus_device *cirrus, u8 reg)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ return ioread8(cirrus->mmio + CRT_DATA);
+}
+
+static void wreg_crt(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + CRT_INDEX);
+ iowrite8(val, cirrus->mmio + CRT_DATA);
+}
+
+#define GFX_INDEX 0xe
+#define GFX_DATA 0xf
+
+static void wreg_gfx(struct cirrus_device *cirrus, u8 reg, u8 val)
+{
+ iowrite8(reg, cirrus->mmio + GFX_INDEX);
+ iowrite8(val, cirrus->mmio + GFX_DATA);
+}
+
+#define VGA_DAC_MASK 0x06
+
+static void wreg_hdr(struct cirrus_device *cirrus, u8 val)
+{
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ ioread8(cirrus->mmio + VGA_DAC_MASK);
+ iowrite8(val, cirrus->mmio + VGA_DAC_MASK);
+}
+
+static int cirrus_convert_to(struct drm_framebuffer *fb)
+{
+ if (fb->format->cpp[0] == 4 && fb->pitches[0] > CIRRUS_MAX_PITCH) {
+ if (fb->width * 3 <= CIRRUS_MAX_PITCH)
+ /* convert from XR24 to RG24 */
+ return 3;
+ else
+ /* convert from XR24 to RG16 */
+ return 2;
+ }
+ return 0;
+}
+
+static int cirrus_cpp(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp;
+ return fb->format->cpp[0];
+}
+
+static int cirrus_pitch(struct drm_framebuffer *fb)
+{
+ int convert_cpp = cirrus_convert_to(fb);
+
+ if (convert_cpp)
+ return convert_cpp * fb->width;
+ return fb->pitches[0];
+}
+
+static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
+{
+ u32 addr;
+ u8 tmp;
+
+ addr = offset >> 2;
+ wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
+ wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
+
+ tmp = rreg_crt(cirrus, 0x1b);
+ tmp &= 0xf2;
+ tmp |= (addr >> 16) & 0x01;
+ tmp |= (addr >> 15) & 0x0c;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ tmp = rreg_crt(cirrus, 0x1d);
+ tmp &= 0x7f;
+ tmp |= (addr >> 12) & 0x80;
+ wreg_crt(cirrus, 0x1d, tmp);
+}
+
+static int cirrus_mode_set(struct cirrus_device *cirrus,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb)
+{
+ int hsyncstart, hsyncend, htotal, hdispend;
+ int vtotal, vdispend;
+ int tmp;
+ int sr07 = 0, hdr = 0;
+
+ htotal = mode->htotal / 8;
+ hsyncend = mode->hsync_end / 8;
+ hsyncstart = mode->hsync_start / 8;
+ hdispend = mode->hdisplay / 8;
+
+ vtotal = mode->vtotal;
+ vdispend = mode->vdisplay;
+
+ vdispend -= 1;
+ vtotal -= 2;
+
+ htotal -= 5;
+ hdispend -= 1;
+ hsyncstart += 1;
+ hsyncend += 1;
+
+ wreg_crt(cirrus, VGA_CRTC_V_SYNC_END, 0x20);
+ wreg_crt(cirrus, VGA_CRTC_H_TOTAL, htotal);
+ wreg_crt(cirrus, VGA_CRTC_H_DISP, hdispend);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_START, hsyncstart);
+ wreg_crt(cirrus, VGA_CRTC_H_SYNC_END, hsyncend);
+ wreg_crt(cirrus, VGA_CRTC_V_TOTAL, vtotal & 0xff);
+ wreg_crt(cirrus, VGA_CRTC_V_DISP_END, vdispend & 0xff);
+
+ tmp = 0x40;
+ if ((vdispend + 1) & 512)
+ tmp |= 0x20;
+ wreg_crt(cirrus, VGA_CRTC_MAX_SCAN, tmp);
+
+ /*
+ * Overflow bits for values that don't fit in the standard registers
+ */
+ tmp = 0x10;
+ if (vtotal & 0x100)
+ tmp |= 0x01;
+ if (vdispend & 0x100)
+ tmp |= 0x02;
+ if ((vdispend + 1) & 0x100)
+ tmp |= 0x08;
+ if (vtotal & 0x200)
+ tmp |= 0x20;
+ if (vdispend & 0x200)
+ tmp |= 0x40;
+ wreg_crt(cirrus, VGA_CRTC_OVERFLOW, tmp);
+
+ tmp = 0;
+
+ /* More overflow bits */
+
+ if ((htotal + 5) & 0x40)
+ tmp |= 0x10;
+ if ((htotal + 5) & 0x80)
+ tmp |= 0x20;
+ if (vtotal & 0x100)
+ tmp |= 0x40;
+ if (vtotal & 0x200)
+ tmp |= 0x80;
+
+ wreg_crt(cirrus, CL_CRT1A, tmp);
+
+ /* Disable Hercules/CGA compatibility */
+ wreg_crt(cirrus, VGA_CRTC_MODE, 0x03);
+
+ sr07 = rreg_seq(cirrus, 0x07);
+ sr07 &= 0xe0;
+ hdr = 0;
+
+ cirrus->cpp = cirrus_cpp(fb);
+ switch (cirrus->cpp * 8) {
+ case 8:
+ sr07 |= 0x11;
+ break;
+ case 16:
+ sr07 |= 0x17;
+ hdr = 0xc1;
+ break;
+ case 24:
+ sr07 |= 0x15;
+ hdr = 0xc5;
+ break;
+ case 32:
+ sr07 |= 0x19;
+ hdr = 0xc5;
+ break;
+ default:
+ return -1;
+ }
+
+ wreg_seq(cirrus, 0x7, sr07);
+
+ /* Program the pitch */
+ cirrus->pitch = cirrus_pitch(fb);
+ tmp = cirrus->pitch / 8;
+ wreg_crt(cirrus, VGA_CRTC_OFFSET, tmp);
+
+ /* Enable extended blanking and pitch bits, and enable full memory */
+ tmp = 0x22;
+ tmp |= (cirrus->pitch >> 7) & 0x10;
+ tmp |= (cirrus->pitch >> 6) & 0x40;
+ wreg_crt(cirrus, 0x1b, tmp);
+
+ /* Enable high-colour modes */
+ wreg_gfx(cirrus, VGA_GFX_MODE, 0x40);
+
+ /* And set graphics mode */
+ wreg_gfx(cirrus, VGA_GFX_MISC, 0x01);
+
+ wreg_hdr(cirrus, hdr);
+
+ cirrus_set_start_address(cirrus, 0);
+
+ /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
+ outb(0x20, 0x3c0);
+ return 0;
+}
+
+static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
+ struct drm_rect *rect)
+{
+ struct cirrus_device *cirrus = fb->dev->dev_private;
+ void *vmap;
+
+ vmap = drm_gem_shmem_vmap(fb->obj[0]);
+ if (!vmap)
+ return -ENOMEM;
+
+ if (cirrus->cpp == fb->format->cpp[0])
+ drm_fb_memcpy_dstclip(cirrus->vram,
+ vmap, fb, rect);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
+ drm_fb_xrgb8888_to_rgb565_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect, false);
+
+ else if (fb->format->cpp[0] == 4 && cirrus->cpp == 3)
+ drm_fb_xrgb8888_to_rgb888_dstclip(cirrus->vram,
+ cirrus->pitch,
+ vmap, fb, rect);
+
+ else
+ WARN_ON_ONCE("cpp mismatch");
+
+ drm_gem_shmem_vunmap(fb->obj[0], vmap);
+ return 0;
+}
+
+static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return cirrus_fb_blit_rect(fb, &fullscreen);
+}
+
+static int cirrus_check_size(int width, int height,
+ struct drm_framebuffer *fb)
+{
+ int pitch = width * 2;
+
+ if (fb)
+ pitch = cirrus_pitch(fb);
+
+ if (pitch > CIRRUS_MAX_PITCH)
+ return -EINVAL;
+ if (pitch * height > CIRRUS_VRAM_SIZE)
+ return -EINVAL;
+ return 0;
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus connector */
+
+static int cirrus_conn_get_modes(struct drm_connector *conn)
+{
+ int count;
+
+ count = drm_add_modes_noedid(conn,
+ conn->dev->mode_config.max_width,
+ conn->dev->mode_config.max_height);
+ drm_set_preferred_mode(conn, 1024, 768);
+ return count;
+}
+
+static const struct drm_connector_helper_funcs cirrus_conn_helper_funcs = {
+ .get_modes = cirrus_conn_get_modes,
+};
+
+static const struct drm_connector_funcs cirrus_conn_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int cirrus_conn_init(struct cirrus_device *cirrus)
+{
+ drm_connector_helper_add(&cirrus->conn, &cirrus_conn_helper_funcs);
+ return drm_connector_init(&cirrus->dev, &cirrus->conn,
+ &cirrus_conn_funcs, DRM_MODE_CONNECTOR_VGA);
+
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus (simple) display pipe */
+
+static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0)
+ return MODE_BAD;
+ return MODE_OK;
+}
+
+static int cirrus_pipe_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ struct drm_framebuffer *fb = plane_state->fb;
+
+ if (!fb)
+ return 0;
+ return cirrus_check_size(fb->width, fb->height, fb);
+}
+
+static void cirrus_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+
+ cirrus_mode_set(cirrus, &crtc_state->mode, plane_state->fb);
+ cirrus_fb_blit_fullscreen(plane_state->fb);
+}
+
+static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state)
+{
+ struct cirrus_device *cirrus = pipe->crtc.dev->dev_private;
+ struct drm_plane_state *state = pipe->plane.state;
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_rect rect;
+
+ if (pipe->plane.state->fb &&
+ cirrus->cpp != cirrus_cpp(pipe->plane.state->fb))
+ cirrus_mode_set(cirrus, &crtc->mode,
+ pipe->plane.state->fb);
+
+ if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
+
+ if (crtc->state->event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+}
+
+static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
+ .mode_valid = cirrus_pipe_mode_valid,
+ .check = cirrus_pipe_check,
+ .enable = cirrus_pipe_enable,
+ .update = cirrus_pipe_update,
+};
+
+static const uint32_t cirrus_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+};
+
+static const uint64_t cirrus_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int cirrus_pipe_init(struct cirrus_device *cirrus)
+{
+ return drm_simple_display_pipe_init(&cirrus->dev,
+ &cirrus->pipe,
+ &cirrus_pipe_funcs,
+ cirrus_formats,
+ ARRAY_SIZE(cirrus_formats),
+ cirrus_modifiers,
+ &cirrus->conn);
+}
+
+/* ------------------------------------------------------------------ */
+/* cirrus framebuffers & mode config */
+
+static struct drm_framebuffer*
+cirrus_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ if (mode_cmd->pixel_format != DRM_FORMAT_RGB565 &&
+ mode_cmd->pixel_format != DRM_FORMAT_RGB888 &&
+ mode_cmd->pixel_format != DRM_FORMAT_XRGB8888)
+ return ERR_PTR(-EINVAL);
+ if (cirrus_check_size(mode_cmd->width, mode_cmd->height, NULL) < 0)
+ return ERR_PTR(-EINVAL);
+ return drm_gem_fb_create_with_dirty(dev, file_priv, mode_cmd);
+}
+
+static const struct drm_mode_config_funcs cirrus_mode_config_funcs = {
+ .fb_create = cirrus_fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void cirrus_mode_config_init(struct cirrus_device *cirrus)
+{
+ struct drm_device *dev = &cirrus->dev;
+
+ drm_mode_config_init(dev);
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = CIRRUS_MAX_PITCH / 2;
+ dev->mode_config.max_height = 1024;
+ dev->mode_config.preferred_depth = 16;
+ dev->mode_config.prefer_shadow = 0;
+ dev->mode_config.funcs = &cirrus_mode_config_funcs;
+}
+
+/* ------------------------------------------------------------------ */
+
+DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops);
+
+static struct drm_driver cirrus_driver = {
+ .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_PRIME,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+
+ .fops = &cirrus_fops,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+};
+
+static int cirrus_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct drm_device *dev;
+ struct cirrus_device *cirrus;
+ int ret;
+
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
+ if (ret)
+ return ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ ret = pci_request_regions(pdev, DRIVER_NAME);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
+ cirrus = kzalloc(sizeof(*cirrus), GFP_KERNEL);
+ if (cirrus == NULL)
+ goto err_pci_release;
+
+ dev = &cirrus->dev;
+ ret = drm_dev_init(dev, &cirrus_driver, &pdev->dev);
+ if (ret)
+ goto err_free_cirrus;
+ dev->dev_private = cirrus;
+
+ ret = -ENOMEM;
+ cirrus->vram = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (cirrus->vram == NULL)
+ goto err_dev_put;
+
+ cirrus->mmio = ioremap(pci_resource_start(pdev, 1),
+ pci_resource_len(pdev, 1));
+ if (cirrus->mmio == NULL)
+ goto err_unmap_vram;
+
+ cirrus_mode_config_init(cirrus);
+
+ ret = cirrus_conn_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cirrus_pipe_init(cirrus);
+ if (ret < 0)
+ goto err_cleanup;
+
+ drm_mode_config_reset(dev);
+
+ dev->pdev = pdev;
+ pci_set_drvdata(pdev, dev);
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ goto err_cleanup;
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+ return 0;
+
+err_cleanup:
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+err_unmap_vram:
+ iounmap(cirrus->vram);
+err_dev_put:
+ drm_dev_put(dev);
+err_free_cirrus:
+ kfree(cirrus);
+err_pci_release:
+ pci_release_regions(pdev);
+ return ret;
+}
+
+static void cirrus_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_dev_unregister(dev);
+ drm_mode_config_cleanup(dev);
+ iounmap(cirrus->mmio);
+ iounmap(cirrus->vram);
+ drm_dev_put(dev);
+ kfree(cirrus);
+ pci_release_regions(pdev);
+}
+
+static const struct pci_device_id pciidlist[] = {
+ {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ /* only bind to the cirrus chip in qemu */
+ .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ .subdevice = PCI_SUBDEVICE_ID_QEMU,
+ }, {
+ .vendor = PCI_VENDOR_ID_CIRRUS,
+ .device = PCI_DEVICE_ID_CIRRUS_5446,
+ .subvendor = PCI_VENDOR_ID_XEN,
+ .subdevice = 0x0001,
+ },
+ { /* end of list */ }
+};
+
+static struct pci_driver cirrus_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = cirrus_pci_probe,
+ .remove = cirrus_pci_remove,
+};
+
+static int __init cirrus_init(void)
+{
+ if (vgacon_text_force())
+ return -EINVAL;
+ return pci_register_driver(&cirrus_pci_driver);
+}
+
+static void __exit cirrus_exit(void)
+{
+ pci_unregister_driver(&cirrus_pci_driver);
+}
+
+module_init(cirrus_init);
+module_exit(cirrus_exit);
+
+MODULE_DEVICE_TABLE(pci, pciidlist);
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
deleted file mode 100644
index 8ec880f3a322..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2012 Red Hat <mjg@redhat.com>
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <linux/console.h>
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include "cirrus_drv.h"
-
-int cirrus_modeset = -1;
-int cirrus_bpp = 16;
-
-MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
-module_param_named(modeset, cirrus_modeset, int, 0400);
-MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:16)");
-module_param_named(bpp, cirrus_bpp, int, 0400);
-
-/*
- * This is the generic driver code. This binds the driver to the drm core,
- * which then performs further device association and calls our graphics init
- * functions
- */
-
-static struct drm_driver driver;
-
-/* only bind to the cirrus chip in qemu */
-static const struct pci_device_id pciidlist[] = {
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446,
- PCI_SUBVENDOR_ID_REDHAT_QUMRANET, PCI_SUBDEVICE_ID_QEMU,
- 0, 0, 0 },
- { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
- 0x0001, 0, 0, 0 },
- {0,}
-};
-
-
-static int cirrus_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int ret;
-
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
- if (ret)
- return ret;
-
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static void cirrus_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- drm_put_dev(dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int cirrus_pm_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_kms_helper_poll_disable(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 1);
- console_unlock();
- }
-
- return 0;
-}
-
-static int cirrus_pm_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct cirrus_device *cdev = drm_dev->dev_private;
-
- drm_helper_resume_force_mode(drm_dev);
-
- if (cdev->mode_info.gfbdev) {
- console_lock();
- drm_fb_helper_set_suspend(&cdev->mode_info.gfbdev->helper, 0);
- console_unlock();
- }
-
- drm_kms_helper_poll_enable(drm_dev);
- return 0;
-}
-#endif
-
-static const struct file_operations cirrus_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = cirrus_mmap,
- .poll = drm_poll,
- .compat_ioctl = drm_compat_ioctl,
-};
-static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM,
- .load = cirrus_driver_load,
- .unload = cirrus_driver_unload,
- .fops = &cirrus_driver_fops,
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = DRIVER_DATE,
- .major = DRIVER_MAJOR,
- .minor = DRIVER_MINOR,
- .patchlevel = DRIVER_PATCHLEVEL,
- .gem_free_object_unlocked = cirrus_gem_free_object,
- .dumb_create = cirrus_dumb_create,
- .dumb_map_offset = cirrus_dumb_mmap_offset,
-};
-
-static const struct dev_pm_ops cirrus_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(cirrus_pm_suspend,
- cirrus_pm_resume)
-};
-
-static struct pci_driver cirrus_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = cirrus_pci_probe,
- .remove = cirrus_pci_remove,
- .driver.pm = &cirrus_pm_ops,
-};
-
-static int __init cirrus_init(void)
-{
- if (vgacon_text_force() && cirrus_modeset == -1)
- return -EINVAL;
-
- if (cirrus_modeset == 0)
- return -EINVAL;
- return pci_register_driver(&cirrus_pci_driver);
-}
-
-static void __exit cirrus_exit(void)
-{
- pci_unregister_driver(&cirrus_pci_driver);
-}
-
-module_init(cirrus_init);
-module_exit(cirrus_exit);
-
-MODULE_DEVICE_TABLE(pci, pciidlist);
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index f2b2e0d169fa..1bd816be3aae 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -101,7 +101,6 @@ struct cirrus_crtc {
struct cirrus_fbdev;
struct cirrus_mode_info {
- bool mode_config_initialized;
struct cirrus_crtc *crtc;
/* pointer to fbdev info structure */
struct cirrus_fbdev *gfbdev;
@@ -143,7 +142,7 @@ struct cirrus_device {
struct cirrus_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer *gfb;
void *sysram;
int size;
@@ -169,7 +168,6 @@ cirrus_bo(struct ttm_buffer_object *bo)
#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base)
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
/* cirrus_main.c */
int cirrus_device_init(struct cirrus_device *cdev,
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
deleted file mode 100644
index 39df62acac69..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ /dev/null
@@ -1,315 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <linux/module.h>
-#include <drm/drmP.h>
-#include <drm/drm_util.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_crtc_helper.h>
-
-#include "cirrus_drv.h"
-
-static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
- int x, int y, int width, int height)
-{
- int i;
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
- int src_offset, dst_offset;
- int bpp = afbdev->gfb->format->cpp[0];
- int ret = -EBUSY;
- bool unmap = false;
- bool store_for_later = false;
- int x2, y2;
- unsigned long flags;
-
- obj = afbdev->gfb->obj[0];
- bo = gem_to_cirrus_bo(obj);
-
- /*
- * try and reserve the BO, if we fail with busy
- * then the BO is being moved and we should
- * store up the damage until later.
- */
- if (drm_can_sleep())
- ret = cirrus_bo_reserve(bo, true);
- if (ret) {
- if (ret != -EBUSY)
- return;
- store_for_later = true;
- }
-
- x2 = x + width - 1;
- y2 = y + height - 1;
- spin_lock_irqsave(&afbdev->dirty_lock, flags);
-
- if (afbdev->y1 < y)
- y = afbdev->y1;
- if (afbdev->y2 > y2)
- y2 = afbdev->y2;
- if (afbdev->x1 < x)
- x = afbdev->x1;
- if (afbdev->x2 > x2)
- x2 = afbdev->x2;
-
- if (store_for_later) {
- afbdev->x1 = x;
- afbdev->x2 = x2;
- afbdev->y1 = y;
- afbdev->y2 = y2;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
- return;
- }
-
- afbdev->x1 = afbdev->y1 = INT_MAX;
- afbdev->x2 = afbdev->y2 = 0;
- spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
-
- if (!bo->kmap.virtual) {
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret) {
- DRM_ERROR("failed to kmap fb updates\n");
- cirrus_bo_unreserve(bo);
- return;
- }
- unmap = true;
- }
- for (i = y; i < y + height; i++) {
- /* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
-
- }
- if (unmap)
- ttm_bo_kunmap(&bo->kmap);
-
- cirrus_bo_unreserve(bo);
-}
-
-static void cirrus_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_fillrect(info, rect);
- cirrus_dirty_update(afbdev, rect->dx, rect->dy, rect->width,
- rect->height);
-}
-
-static void cirrus_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_copyarea(info, area);
- cirrus_dirty_update(afbdev, area->dx, area->dy, area->width,
- area->height);
-}
-
-static void cirrus_imageblit(struct fb_info *info,
- const struct fb_image *image)
-{
- struct cirrus_fbdev *afbdev = info->par;
- drm_fb_helper_sys_imageblit(info, image);
- cirrus_dirty_update(afbdev, image->dx, image->dy, image->width,
- image->height);
-}
-
-
-static struct fb_ops cirrusfb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = drm_fb_helper_set_par,
- .fb_fillrect = cirrus_fillrect,
- .fb_copyarea = cirrus_copyarea,
- .fb_imageblit = cirrus_imageblit,
- .fb_pan_display = drm_fb_helper_pan_display,
- .fb_blank = drm_fb_helper_blank,
- .fb_setcmap = drm_fb_helper_setcmap,
-};
-
-static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object **gobj_p)
-{
- struct drm_device *dev = afbdev->helper.dev;
- struct cirrus_device *cdev = dev->dev_private;
- u32 bpp;
- u32 size;
- struct drm_gem_object *gobj;
- int ret = 0;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return -EINVAL;
-
- size = mode_cmd->pitches[0] * mode_cmd->height;
- ret = cirrus_gem_create(dev, size, true, &gobj);
- if (ret)
- return ret;
-
- *gobj_p = gobj;
- return ret;
-}
-
-static int cirrusfb_create(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct cirrus_fbdev *gfbdev =
- container_of(helper, struct cirrus_fbdev, helper);
- struct cirrus_device *cdev = gfbdev->helper.dev->dev_private;
- struct fb_info *info;
- struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd2 mode_cmd;
- void *sysram;
- struct drm_gem_object *gobj = NULL;
- int size, ret;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.pitches[0] = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
- size = mode_cmd.pitches[0] * mode_cmd.height;
-
- ret = cirrusfb_create_object(gfbdev, &mode_cmd, &gobj);
- if (ret) {
- DRM_ERROR("failed to create fbcon backing object %d\n", ret);
- return ret;
- }
-
- sysram = vmalloc(size);
- if (!sysram)
- return -ENOMEM;
-
- info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info)) {
- ret = PTR_ERR(info);
- goto err_vfree;
- }
-
- info->par = gfbdev;
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- ret = -ENOMEM;
- goto err_drm_gem_object_put_unlocked;
- }
-
- ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
- if (ret)
- goto err_kfree;
-
- gfbdev->sysram = sysram;
- gfbdev->size = size;
- gfbdev->gfb = fb;
-
- /* setup helper */
- gfbdev->helper.fb = fb;
-
- strcpy(info->fix.id, "cirrusdrmfb");
-
- info->fbops = &cirrusfb_ops;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width,
- sizes->fb_height);
-
- /* setup aperture base/size for vesafb takeover */
- info->apertures->ranges[0].base = cdev->dev->mode_config.fb_base;
- info->apertures->ranges[0].size = cdev->mc.vram_size;
-
- info->fix.smem_start = cdev->dev->mode_config.fb_base;
- info->fix.smem_len = cdev->mc.vram_size;
-
- info->screen_base = sysram;
- info->screen_size = size;
-
- info->fix.mmio_start = 0;
- info->fix.mmio_len = 0;
-
- DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
- DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start);
- DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len);
- DRM_INFO("fb depth is %d\n", fb->format->depth);
- DRM_INFO(" pitch is %d\n", fb->pitches[0]);
-
- return 0;
-
-err_kfree:
- kfree(fb);
-err_drm_gem_object_put_unlocked:
- drm_gem_object_put_unlocked(gobj);
-err_vfree:
- vfree(sysram);
- return ret;
-}
-
-static int cirrus_fbdev_destroy(struct drm_device *dev,
- struct cirrus_fbdev *gfbdev)
-{
- struct drm_framebuffer *gfb = gfbdev->gfb;
-
- drm_helper_force_disable_all(dev);
-
- drm_fb_helper_unregister_fbi(&gfbdev->helper);
-
- vfree(gfbdev->sysram);
- drm_fb_helper_fini(&gfbdev->helper);
- if (gfb)
- drm_framebuffer_put(gfb);
-
- return 0;
-}
-
-static const struct drm_fb_helper_funcs cirrus_fb_helper_funcs = {
- .fb_probe = cirrusfb_create,
-};
-
-int cirrus_fbdev_init(struct cirrus_device *cdev)
-{
- struct cirrus_fbdev *gfbdev;
- int ret;
-
- /*bpp_sel = 8;*/
- gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
- if (!gfbdev)
- return -ENOMEM;
-
- cdev->mode_info.gfbdev = gfbdev;
- spin_lock_init(&gfbdev->dirty_lock);
-
- drm_fb_helper_prepare(cdev->dev, &gfbdev->helper,
- &cirrus_fb_helper_funcs);
-
- ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
- CIRRUSFB_CONN_LIMIT);
- if (ret)
- return ret;
-
- ret = drm_fb_helper_single_add_all_connectors(&gfbdev->helper);
- if (ret)
- return ret;
-
- /* disable all the possible outputs/crtcs before entering KMS mode */
- drm_helper_disable_unused_functions(cdev->dev);
-
- return drm_fb_helper_initial_config(&gfbdev->helper, cirrus_bpp);
-}
-
-void cirrus_fbdev_fini(struct cirrus_device *cdev)
-{
- if (!cdev->mode_info.gfbdev)
- return;
-
- cirrus_fbdev_destroy(cdev->dev, cdev->mode_info.gfbdev);
- kfree(cdev->mode_info.gfbdev);
- cdev->mode_info.gfbdev = NULL;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
deleted file mode 100644
index 57f8fe6d020b..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ /dev/null
@@ -1,328 +0,0 @@
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-
-#include "cirrus_drv.h"
-
-static const struct drm_framebuffer_funcs cirrus_fb_funcs = {
- .create_handle = drm_gem_fb_create_handle,
- .destroy = drm_gem_fb_destroy,
-};
-
-int cirrus_framebuffer_init(struct drm_device *dev,
- struct drm_framebuffer *gfb,
- const struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_gem_object *obj)
-{
- int ret;
-
- drm_helper_mode_fill_fb_struct(dev, gfb, mode_cmd);
- gfb->obj[0] = obj;
- ret = drm_framebuffer_init(dev, gfb, &cirrus_fb_funcs);
- if (ret) {
- DRM_ERROR("drm_framebuffer_init failed: %d\n", ret);
- return ret;
- }
- return 0;
-}
-
-static struct drm_framebuffer *
-cirrus_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_framebuffer *fb;
- u32 bpp;
- int ret;
-
- bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
-
- if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
- bpp, mode_cmd->pitches[0]))
- return ERR_PTR(-EINVAL);
-
- obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (obj == NULL)
- return ERR_PTR(-ENOENT);
-
- fb = kzalloc(sizeof(*fb), GFP_KERNEL);
- if (!fb) {
- drm_gem_object_put_unlocked(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = cirrus_framebuffer_init(dev, fb, mode_cmd, obj);
- if (ret) {
- drm_gem_object_put_unlocked(obj);
- kfree(fb);
- return ERR_PTR(ret);
- }
- return fb;
-}
-
-static const struct drm_mode_config_funcs cirrus_mode_funcs = {
- .fb_create = cirrus_user_framebuffer_create,
-};
-
-/* Unmap the framebuffer from the core and release the memory */
-static void cirrus_vram_fini(struct cirrus_device *cdev)
-{
- iounmap(cdev->rmmio);
- cdev->rmmio = NULL;
- if (cdev->mc.vram_base)
- release_mem_region(cdev->mc.vram_base, cdev->mc.vram_size);
-}
-
-/* Map the framebuffer from the card and configure the core */
-static int cirrus_vram_init(struct cirrus_device *cdev)
-{
- /* BAR 0 is VRAM */
- cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
- cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
-
- if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
- "cirrusdrmfb_vram")) {
- DRM_ERROR("can't reserve VRAM\n");
- return -ENXIO;
- }
-
- return 0;
-}
-
-/*
- * Our emulated hardware has two sets of memory. One is video RAM and can
- * simply be used as a linear framebuffer - the other provides mmio access
- * to the display registers. The latter can also be accessed via IO port
- * access, but we map the range and use mmio to program them instead
- */
-
-int cirrus_device_init(struct cirrus_device *cdev,
- struct drm_device *ddev,
- struct pci_dev *pdev, uint32_t flags)
-{
- int ret;
-
- cdev->dev = ddev;
- cdev->flags = flags;
-
- /* Hardcode the number of CRTCs to 1 */
- cdev->num_crtc = 1;
-
- /* BAR 0 is the framebuffer, BAR 1 contains registers */
- cdev->rmmio_base = pci_resource_start(cdev->dev->pdev, 1);
- cdev->rmmio_size = pci_resource_len(cdev->dev->pdev, 1);
-
- if (!request_mem_region(cdev->rmmio_base, cdev->rmmio_size,
- "cirrusdrmfb_mmio")) {
- DRM_ERROR("can't reserve mmio registers\n");
- return -ENOMEM;
- }
-
- cdev->rmmio = ioremap(cdev->rmmio_base, cdev->rmmio_size);
-
- if (cdev->rmmio == NULL)
- return -ENOMEM;
-
- ret = cirrus_vram_init(cdev);
- if (ret) {
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_device_fini(struct cirrus_device *cdev)
-{
- release_mem_region(cdev->rmmio_base, cdev->rmmio_size);
- cirrus_vram_fini(cdev);
-}
-
-/*
- * Functions here will be called by the core once it's bound the driver to
- * a PCI device
- */
-
-int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
-{
- struct cirrus_device *cdev;
- int r;
-
- cdev = kzalloc(sizeof(struct cirrus_device), GFP_KERNEL);
- if (cdev == NULL)
- return -ENOMEM;
- dev->dev_private = (void *)cdev;
-
- r = cirrus_device_init(cdev, dev, dev->pdev, flags);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during GPU init: %d\n", r);
- goto out;
- }
-
- r = cirrus_mm_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "fatal err on mm init\n");
- goto out;
- }
-
- /*
- * cirrus_modeset_init() is initializing/registering the emulated fbdev
- * and DRM internals can access/test some of the fields in
- * mode_config->funcs as part of the fbdev registration process.
- * Make sure dev->mode_config.funcs is properly set to avoid
- * dereferencing a NULL pointer.
- * FIXME: mode_config.funcs assignment should probably be done in
- * cirrus_modeset_init() (that's a common pattern seen in other DRM
- * drivers).
- */
- dev->mode_config.funcs = &cirrus_mode_funcs;
- r = cirrus_modeset_init(cdev);
- if (r) {
- dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto out;
- }
-
- return 0;
-out:
- cirrus_driver_unload(dev);
- return r;
-}
-
-void cirrus_driver_unload(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
-
- if (cdev == NULL)
- return;
- cirrus_modeset_fini(cdev);
- cirrus_mm_fini(cdev);
- cirrus_device_fini(cdev);
- kfree(cdev);
- dev->dev_private = NULL;
-}
-
-int cirrus_gem_create(struct drm_device *dev,
- u32 size, bool iskernel,
- struct drm_gem_object **obj)
-{
- struct cirrus_bo *cirrusbo;
- int ret;
-
- *obj = NULL;
-
- size = roundup(size, PAGE_SIZE);
- if (size == 0)
- return -EINVAL;
-
- ret = cirrus_bo_create(dev, size, 0, 0, &cirrusbo);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("failed to allocate GEM object\n");
- return ret;
- }
- *obj = &cirrusbo->gem;
- return 0;
-}
-
-int cirrus_dumb_create(struct drm_file *file,
- struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- int ret;
- struct drm_gem_object *gobj;
- u32 handle;
-
- args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = args->pitch * args->height;
-
- ret = cirrus_gem_create(dev, args->size, false,
- &gobj);
- if (ret)
- return ret;
-
- ret = drm_gem_handle_create(file, gobj, &handle);
- drm_gem_object_put_unlocked(gobj);
- if (ret)
- return ret;
-
- args->handle = handle;
- return 0;
-}
-
-static void cirrus_bo_unref(struct cirrus_bo **bo)
-{
- struct ttm_buffer_object *tbo;
-
- if ((*bo) == NULL)
- return;
-
- tbo = &((*bo)->bo);
- ttm_bo_put(tbo);
- *bo = NULL;
-}
-
-void cirrus_gem_free_object(struct drm_gem_object *obj)
-{
- struct cirrus_bo *cirrus_bo = gem_to_cirrus_bo(obj);
-
- cirrus_bo_unref(&cirrus_bo);
-}
-
-
-static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->bo.vma_node);
-}
-
-int
-cirrus_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- struct drm_gem_object *obj;
- struct cirrus_bo *bo;
-
- obj = drm_gem_object_lookup(file, handle);
- if (obj == NULL)
- return -ENOENT;
-
- bo = gem_to_cirrus_bo(obj);
- *offset = cirrus_bo_mmap_offset(bo);
-
- drm_gem_object_put_unlocked(obj);
-
- return 0;
-}
-
-bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
- int bpp, int pitch)
-{
- const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
- const int max_size = cdev->mc.vram_size;
-
- if (bpp > cirrus_bpp)
- return false;
- if (bpp > 32)
- return false;
-
- if (pitch > max_pitch)
- return false;
-
- if (pitch * height > max_size)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
deleted file mode 100644
index 7f9bc32af685..000000000000
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ /dev/null
@@ -1,621 +0,0 @@
-
-/*
- * Copyright 2012 Red Hat
- *
- * This file is subject to the terms and conditions of the GNU General
- * Public License version 2. See the file COPYING in the main
- * directory of this archive for more details.
- *
- * Authors: Matthew Garrett
- * Dave Airlie
- *
- * Portions of this code derived from cirrusfb.c:
- * drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
- *
- * Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
- */
-#include <drm/drmP.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-
-#include <video/cirrus.h>
-
-#include "cirrus_drv.h"
-
-#define CIRRUS_LUT_SIZE 256
-
-#define PALETTE_INDEX 0x8
-#define PALETTE_DATA 0x9
-
-/*
- * This file contains setup code for the CRTC.
- */
-
-/*
- * The DRM core requires DPMS functions, but they make little sense in our
- * case and so are just stubs
- */
-
-static void cirrus_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u8 sr01, gr0e;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- sr01 = 0x00;
- gr0e = 0x00;
- break;
- case DRM_MODE_DPMS_STANDBY:
- sr01 = 0x20;
- gr0e = 0x02;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- sr01 = 0x20;
- gr0e = 0x04;
- break;
- case DRM_MODE_DPMS_OFF:
- sr01 = 0x20;
- gr0e = 0x06;
- break;
- default:
- return;
- }
-
- WREG8(SEQ_INDEX, 0x1);
- sr01 |= RREG8(SEQ_DATA) & ~0x20;
- WREG_SEQ(0x1, sr01);
-
- WREG8(GFX_INDEX, 0xe);
- gr0e |= RREG8(GFX_DATA) & ~0x06;
- WREG_GFX(0xe, gr0e);
-}
-
-static void cirrus_set_start_address(struct drm_crtc *crtc, unsigned offset)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- u32 addr;
- u8 tmp;
-
- addr = offset >> 2;
- WREG_CRT(0x0c, (u8)((addr >> 8) & 0xff));
- WREG_CRT(0x0d, (u8)(addr & 0xff));
-
- WREG8(CRT_INDEX, 0x1b);
- tmp = RREG8(CRT_DATA);
- tmp &= 0xf2;
- tmp |= (addr >> 16) & 0x01;
- tmp |= (addr >> 15) & 0x0c;
- WREG_CRT(0x1b, tmp);
- WREG8(CRT_INDEX, 0x1d);
- tmp = RREG8(CRT_DATA);
- tmp &= 0x7f;
- tmp |= (addr >> 12) & 0x80;
- WREG_CRT(0x1d, tmp);
-}
-
-/* cirrus is different - we will force move buffers out of VRAM */
-static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y, int atomic)
-{
- struct cirrus_device *cdev = crtc->dev->dev_private;
- struct cirrus_bo *bo;
- int ret;
- u64 gpu_addr;
-
- /* push the previous fb to system ram */
- if (!atomic && fb) {
- bo = gem_to_cirrus_bo(fb->obj[0]);
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
- cirrus_bo_push_sysram(bo);
- cirrus_bo_unreserve(bo);
- }
-
- bo = gem_to_cirrus_bo(crtc->primary->fb->obj[0]);
-
- ret = cirrus_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- ret = cirrus_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
- if (ret) {
- cirrus_bo_unreserve(bo);
- return ret;
- }
-
- if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
- /* if pushing console in kmap it */
- ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
- if (ret)
- DRM_ERROR("failed to kmap fbcon\n");
- }
- cirrus_bo_unreserve(bo);
-
- cirrus_set_start_address(crtc, (u32)gpu_addr);
- return 0;
-}
-
-static int cirrus_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
-{
- return cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-}
-
-/*
- * The meat of this driver. The core passes us a mode and we have to program
- * it. The modesetting here is the bare minimum required to satisfy the qemu
- * emulation of this hardware, and running this against a real device is
- * likely to result in an inadequately programmed mode. We've already had
- * the opportunity to modify the mode, so whatever we receive here should
- * be something that can be correctly programmed and displayed
- */
-static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- const struct drm_framebuffer *fb = crtc->primary->fb;
- int hsyncstart, hsyncend, htotal, hdispend;
- int vtotal, vdispend;
- int tmp;
- int sr07 = 0, hdr = 0;
-
- htotal = mode->htotal / 8;
- hsyncend = mode->hsync_end / 8;
- hsyncstart = mode->hsync_start / 8;
- hdispend = mode->hdisplay / 8;
-
- vtotal = mode->vtotal;
- vdispend = mode->vdisplay;
-
- vdispend -= 1;
- vtotal -= 2;
-
- htotal -= 5;
- hdispend -= 1;
- hsyncstart += 1;
- hsyncend += 1;
-
- WREG_CRT(VGA_CRTC_V_SYNC_END, 0x20);
- WREG_CRT(VGA_CRTC_H_TOTAL, htotal);
- WREG_CRT(VGA_CRTC_H_DISP, hdispend);
- WREG_CRT(VGA_CRTC_H_SYNC_START, hsyncstart);
- WREG_CRT(VGA_CRTC_H_SYNC_END, hsyncend);
- WREG_CRT(VGA_CRTC_V_TOTAL, vtotal & 0xff);
- WREG_CRT(VGA_CRTC_V_DISP_END, vdispend & 0xff);
-
- tmp = 0x40;
- if ((vdispend + 1) & 512)
- tmp |= 0x20;
- WREG_CRT(VGA_CRTC_MAX_SCAN, tmp);
-
- /*
- * Overflow bits for values that don't fit in the standard registers
- */
- tmp = 16;
- if (vtotal & 256)
- tmp |= 1;
- if (vdispend & 256)
- tmp |= 2;
- if ((vdispend + 1) & 256)
- tmp |= 8;
- if (vtotal & 512)
- tmp |= 32;
- if (vdispend & 512)
- tmp |= 64;
- WREG_CRT(VGA_CRTC_OVERFLOW, tmp);
-
- tmp = 0;
-
- /* More overflow bits */
-
- if ((htotal + 5) & 64)
- tmp |= 16;
- if ((htotal + 5) & 128)
- tmp |= 32;
- if (vtotal & 256)
- tmp |= 64;
- if (vtotal & 512)
- tmp |= 128;
-
- WREG_CRT(CL_CRT1A, tmp);
-
- /* Disable Hercules/CGA compatibility */
- WREG_CRT(VGA_CRTC_MODE, 0x03);
-
- WREG8(SEQ_INDEX, 0x7);
- sr07 = RREG8(SEQ_DATA);
- sr07 &= 0xe0;
- hdr = 0;
- switch (fb->format->cpp[0] * 8) {
- case 8:
- sr07 |= 0x11;
- break;
- case 16:
- sr07 |= 0x17;
- hdr = 0xc1;
- break;
- case 24:
- sr07 |= 0x15;
- hdr = 0xc5;
- break;
- case 32:
- sr07 |= 0x19;
- hdr = 0xc5;
- break;
- default:
- return -1;
- }
-
- WREG_SEQ(0x7, sr07);
-
- /* Program the pitch */
- tmp = fb->pitches[0] / 8;
- WREG_CRT(VGA_CRTC_OFFSET, tmp);
-
- /* Enable extended blanking and pitch bits, and enable full memory */
- tmp = 0x22;
- tmp |= (fb->pitches[0] >> 7) & 0x10;
- tmp |= (fb->pitches[0] >> 6) & 0x40;
- WREG_CRT(0x1b, tmp);
-
- /* Enable high-colour modes */
- WREG_GFX(VGA_GFX_MODE, 0x40);
-
- /* And set graphics mode */
- WREG_GFX(VGA_GFX_MISC, 0x01);
-
- WREG_HDR(hdr);
- cirrus_crtc_do_set_base(crtc, old_fb, x, y, 0);
-
- /* Unblank (needed on S3 resume, vgabios doesn't do it then) */
- outb(0x20, 0x3c0);
- return 0;
-}
-
-/*
- * This is called before a mode is programmed. A typical use might be to
- * enable DPMS during the programming to avoid seeing intermediate stages,
- * but that's not relevant to us
- */
-static void cirrus_crtc_prepare(struct drm_crtc *crtc)
-{
-}
-
-static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct cirrus_device *cdev = dev->dev_private;
- u16 *r, *g, *b;
- int i;
-
- if (!crtc->enabled)
- return;
-
- r = crtc->gamma_store;
- g = r + crtc->gamma_size;
- b = g + crtc->gamma_size;
-
- for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
- /* VGA registers */
- WREG8(PALETTE_INDEX, i);
- WREG8(PALETTE_DATA, *r++ >> 8);
- WREG8(PALETTE_DATA, *g++ >> 8);
- WREG8(PALETTE_DATA, *b++ >> 8);
- }
-}
-
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
- cirrus_crtc_load_lut(crtc);
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
-{
- cirrus_crtc_load_lut(crtc);
-
- return 0;
-}
-
-/* Simple cleanup function */
-static void cirrus_crtc_destroy(struct drm_crtc *crtc)
-{
- struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
-
- drm_crtc_cleanup(crtc);
- kfree(cirrus_crtc);
-}
-
-/* These provide the minimum set of functions required to handle a CRTC */
-static const struct drm_crtc_funcs cirrus_crtc_funcs = {
- .gamma_set = cirrus_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = cirrus_crtc_destroy,
-};
-
-static const struct drm_crtc_helper_funcs cirrus_helper_funcs = {
- .dpms = cirrus_crtc_dpms,
- .mode_set = cirrus_crtc_mode_set,
- .mode_set_base = cirrus_crtc_mode_set_base,
- .prepare = cirrus_crtc_prepare,
- .commit = cirrus_crtc_commit,
-};
-
-/* CRTC setup */
-static const uint32_t cirrus_formats_16[] = {
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_24[] = {
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static const uint32_t cirrus_formats_32[] = {
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
-};
-
-static struct drm_plane *cirrus_primary_plane(struct drm_device *dev)
-{
- const uint32_t *formats;
- uint32_t nformats;
- struct drm_plane *primary;
- int ret;
-
- switch (cirrus_bpp) {
- case 16:
- formats = cirrus_formats_16;
- nformats = ARRAY_SIZE(cirrus_formats_16);
- break;
- case 24:
- formats = cirrus_formats_24;
- nformats = ARRAY_SIZE(cirrus_formats_24);
- break;
- case 32:
- formats = cirrus_formats_32;
- nformats = ARRAY_SIZE(cirrus_formats_32);
- break;
- default:
- return NULL;
- }
-
- primary = kzalloc(sizeof(*primary), GFP_KERNEL);
- if (primary == NULL) {
- DRM_DEBUG_KMS("Failed to allocate primary plane\n");
- return NULL;
- }
-
- ret = drm_universal_plane_init(dev, primary, 0,
- &drm_primary_helper_funcs,
- formats, nformats,
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
- kfree(primary);
- primary = NULL;
- }
-
- return primary;
-}
-
-static void cirrus_crtc_init(struct drm_device *dev)
-{
- struct cirrus_device *cdev = dev->dev_private;
- struct cirrus_crtc *cirrus_crtc;
- struct drm_plane *primary;
-
- cirrus_crtc = kzalloc(sizeof(struct cirrus_crtc) +
- (CIRRUSFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
-
- if (cirrus_crtc == NULL)
- return;
-
- primary = cirrus_primary_plane(dev);
- if (primary == NULL) {
- kfree(cirrus_crtc);
- return;
- }
-
- drm_crtc_init_with_planes(dev, &cirrus_crtc->base,
- primary, NULL,
- &cirrus_crtc_funcs, NULL);
-
- drm_mode_crtc_set_gamma_size(&cirrus_crtc->base, CIRRUS_LUT_SIZE);
- cdev->mode_info.crtc = cirrus_crtc;
-
- drm_crtc_helper_add(&cirrus_crtc->base, &cirrus_helper_funcs);
-}
-
-static void cirrus_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-}
-
-static void cirrus_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void cirrus_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void cirrus_encoder_destroy(struct drm_encoder *encoder)
-{
- struct cirrus_encoder *cirrus_encoder = to_cirrus_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(cirrus_encoder);
-}
-
-static const struct drm_encoder_helper_funcs cirrus_encoder_helper_funcs = {
- .dpms = cirrus_encoder_dpms,
- .mode_set = cirrus_encoder_mode_set,
- .prepare = cirrus_encoder_prepare,
- .commit = cirrus_encoder_commit,
-};
-
-static const struct drm_encoder_funcs cirrus_encoder_encoder_funcs = {
- .destroy = cirrus_encoder_destroy,
-};
-
-static struct drm_encoder *cirrus_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct cirrus_encoder *cirrus_encoder;
-
- cirrus_encoder = kzalloc(sizeof(struct cirrus_encoder), GFP_KERNEL);
- if (!cirrus_encoder)
- return NULL;
-
- encoder = &cirrus_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &cirrus_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &cirrus_encoder_helper_funcs);
-
- return encoder;
-}
-
-
-static int cirrus_vga_get_modes(struct drm_connector *connector)
-{
- int count;
-
- /* Just add a static list of modes */
- if (cirrus_bpp <= 24) {
- count = drm_add_modes_noedid(connector, 1280, 1024);
- drm_set_preferred_mode(connector, 1024, 768);
- } else {
- count = drm_add_modes_noedid(connector, 800, 600);
- drm_set_preferred_mode(connector, 800, 600);
- }
- return count;
-}
-
-static struct drm_encoder *cirrus_connector_best_encoder(struct drm_connector
- *connector)
-{
- int enc_id = connector->encoder_ids[0];
- /* pick the encoder ids */
- if (enc_id)
- return drm_encoder_find(connector->dev, NULL, enc_id);
- return NULL;
-}
-
-static void cirrus_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_cleanup(connector);
- kfree(connector);
-}
-
-static const struct drm_connector_helper_funcs cirrus_vga_connector_helper_funcs = {
- .get_modes = cirrus_vga_get_modes,
- .best_encoder = cirrus_connector_best_encoder,
-};
-
-static const struct drm_connector_funcs cirrus_vga_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = cirrus_connector_destroy,
-};
-
-static struct drm_connector *cirrus_vga_init(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct cirrus_connector *cirrus_connector;
-
- cirrus_connector = kzalloc(sizeof(struct cirrus_connector), GFP_KERNEL);
- if (!cirrus_connector)
- return NULL;
-
- connector = &cirrus_connector->base;
-
- drm_connector_init(dev, connector,
- &cirrus_vga_connector_funcs, DRM_MODE_CONNECTOR_VGA);
-
- drm_connector_helper_add(connector, &cirrus_vga_connector_helper_funcs);
-
- drm_connector_register(connector);
- return connector;
-}
-
-
-int cirrus_modeset_init(struct cirrus_device *cdev)
-{
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int ret;
-
- drm_mode_config_init(cdev->dev);
- cdev->mode_info.mode_config_initialized = true;
-
- cdev->dev->mode_config.max_width = CIRRUS_MAX_FB_WIDTH;
- cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
-
- cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
- cdev->dev->mode_config.preferred_depth = cirrus_bpp;
- /* don't prefer a shadow on virt GPU */
- cdev->dev->mode_config.prefer_shadow = 0;
-
- cirrus_crtc_init(cdev->dev);
-
- encoder = cirrus_encoder_init(cdev->dev);
- if (!encoder) {
- DRM_ERROR("cirrus_encoder_init failed\n");
- return -1;
- }
-
- connector = cirrus_vga_init(cdev->dev);
- if (!connector) {
- DRM_ERROR("cirrus_vga_init failed\n");
- return -1;
- }
-
- drm_connector_attach_encoder(connector, encoder);
-
- ret = cirrus_fbdev_init(cdev);
- if (ret) {
- DRM_ERROR("cirrus_fbdev_init failed\n");
- return ret;
- }
-
- return 0;
-}
-
-void cirrus_modeset_fini(struct cirrus_device *cdev)
-{
- cirrus_fbdev_fini(cdev);
-
- if (cdev->mode_info.mode_config_initialized) {
- drm_mode_config_cleanup(cdev->dev);
- cdev->mode_info.mode_config_initialized = false;
- }
-}
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index e075810b4bd4..e6b98467a428 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -178,7 +178,6 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
ret = ttm_bo_device_init(&cirrus->ttm.bdev,
&cirrus_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -331,13 +330,8 @@ int cirrus_bo_push_sysram(struct cirrus_bo *bo)
int cirrus_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct cirrus_device *cirrus;
+ struct drm_file *file_priv = filp->private_data;
+ struct cirrus_device *cirrus = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- cirrus = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &cirrus->ttm.bdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 5eb40130fafb..f4924cb7f495 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -798,6 +798,50 @@ drm_atomic_get_private_obj_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_private_obj_state);
/**
+ * drm_atomic_get_old_private_obj_state - get the old private object state
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the old private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_old_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].old_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_old_private_obj_state);
+
+/**
+ * drm_atomic_get_new_private_obj_state - get the new private object state
+ * @state: global atomic state object
+ * @obj: private_obj to grab
+ *
+ * This function returns the new private object state for the given private_obj,
+ * or NULL if the private_obj is not part of the global atomic state.
+ */
+struct drm_private_state *
+drm_atomic_get_new_private_obj_state(struct drm_atomic_state *state,
+ struct drm_private_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_private_objs; i++)
+ if (obj == state->private_objs[i].ptr)
+ return state->private_objs[i].new_state;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state);
+
+/**
* drm_atomic_get_connector_state - get connector state
* @state: global atomic state object
* @connector: connector to get state object for
@@ -1236,4 +1280,3 @@ int drm_atomic_debugfs_init(struct drm_minor *minor)
minor->debugfs_root, minor);
}
#endif
-
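As a rough illustration of the two accessors added above: a driver-side atomic check can fetch both ends of a private object transition without walking state->private_objs itself. The wrapper struct, field and function name below are hypothetical and not part of this patch.

	struct my_priv_state {				/* hypothetical driver wrapper */
		struct drm_private_state base;
		unsigned int active_pipes;
	};

	static int my_check_private_obj(struct drm_atomic_state *state,
					struct drm_private_obj *obj)
	{
		struct drm_private_state *old_base, *new_base;
		struct my_priv_state *old_s, *new_s;

		old_base = drm_atomic_get_old_private_obj_state(state, obj);
		new_base = drm_atomic_get_new_private_obj_state(state, obj);
		if (!old_base || !new_base)
			return 0;	/* object is not part of this atomic state */

		old_s = container_of(old_base, struct my_priv_state, base);
		new_s = container_of(new_base, struct my_priv_state, base);

		if (old_s->active_pipes != new_s->active_pipes)
			DRM_DEBUG_ATOMIC("active pipes %u -> %u\n",
					 old_s->active_pipes, new_s->active_pipes);

		return 0;
	}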
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index fbb76332cc9f..2e0cb4246cbd 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -495,7 +495,7 @@ mode_fixup(struct drm_atomic_state *state)
static enum drm_mode_status mode_valid_path(struct drm_connector *connector,
struct drm_encoder *encoder,
struct drm_crtc *crtc,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
enum drm_mode_status ret;
@@ -534,7 +534,7 @@ mode_valid(struct drm_atomic_state *state)
struct drm_crtc *crtc = conn_state->crtc;
struct drm_crtc_state *crtc_state;
enum drm_mode_status mode_status;
- struct drm_display_mode *mode;
+ const struct drm_display_mode *mode;
if (!crtc || !encoder)
continue;
@@ -1751,7 +1751,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
*
* NOTE: Commit work has multiple phases, first hardware commit, then
* cleanup. We want them to overlap, hence need system_unbound_wq to
- * make sure work items don't artifically stall on each another.
+ * make sure work items don't artificially stall on each other.
*/
drm_atomic_state_get(state);
@@ -1785,7 +1785,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
*
* Asynchronous workers need to have sufficient parallelism to be able to run
* different atomic commits on different CRTCs in parallel. The simplest way to
- * achive this is by running them on the &system_unbound_wq work queue. Note
+ * achieve this is by running them on the &system_unbound_wq work queue. Note
* that drivers are not required to split up atomic commits and run an
* individual commit in parallel - userspace is supposed to do that if it cares.
* But it might be beneficial to do that for modesets, since those necessarily
@@ -2260,10 +2260,21 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
int drm_atomic_helper_prepare_planes(struct drm_device *dev,
struct drm_atomic_state *state)
{
+ struct drm_connector *connector;
+ struct drm_connector_state *new_conn_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
int ret, i, j;
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
+ if (!new_conn_state->writeback_job)
+ continue;
+
+ ret = drm_writeback_prepare_job(new_conn_state->writeback_job);
+ if (ret < 0)
+ return ret;
+ }
+
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 4985384e51f6..59ffb6b9c745 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -30,6 +30,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_atomic.h>
#include <drm/drm_device.h>
+#include <drm/drm_writeback.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>
@@ -412,6 +413,9 @@ __drm_atomic_helper_connector_destroy_state(struct drm_connector_state *state)
if (state->commit)
drm_crtc_commit_put(state->commit);
+
+ if (state->writeback_job)
+ drm_writeback_cleanup_job(state->writeback_job);
}
EXPORT_SYMBOL(__drm_atomic_helper_connector_destroy_state);
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 0aabd401d3ca..428d82662dc4 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -512,8 +512,8 @@ drm_atomic_crtc_get_property(struct drm_crtc *crtc,
}
static int drm_atomic_plane_set_property(struct drm_plane *plane,
- struct drm_plane_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_plane_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = plane->dev;
struct drm_mode_config *config = &dev->mode_config;
@@ -521,7 +521,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
int ret;
if (property == config->prop_fb_id) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
+ struct drm_framebuffer *fb;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
drm_atomic_set_fb_for_plane(state, fb);
if (fb)
drm_framebuffer_put(fb);
@@ -537,7 +538,9 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
return -EINVAL;
} else if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_plane(state, crtc);
} else if (property == config->prop_crtc_x) {
state->crtc_x = U642I64(val);
@@ -647,28 +650,15 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
return 0;
}
-static struct drm_writeback_job *
-drm_atomic_get_writeback_job(struct drm_connector_state *conn_state)
-{
- WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
-
- if (!conn_state->writeback_job)
- conn_state->writeback_job =
- kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
-
- return conn_state->writeback_job;
-}
-
static int drm_atomic_set_writeback_fb_for_connector(
struct drm_connector_state *conn_state,
struct drm_framebuffer *fb)
{
- struct drm_writeback_job *job =
- drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
+ int ret;
- drm_framebuffer_assign(&job->fb, fb);
+ ret = drm_writeback_set_fb(conn_state, fb);
+ if (ret < 0)
+ return ret;
if (fb)
DRM_DEBUG_ATOMIC("Set [FB:%d] for connector state %p\n",
@@ -681,14 +671,16 @@ static int drm_atomic_set_writeback_fb_for_connector(
}
static int drm_atomic_connector_set_property(struct drm_connector *connector,
- struct drm_connector_state *state, struct drm_property *property,
- uint64_t val)
+ struct drm_connector_state *state, struct drm_file *file_priv,
+ struct drm_property *property, uint64_t val)
{
struct drm_device *dev = connector->dev;
struct drm_mode_config *config = &dev->mode_config;
if (property == config->prop_crtc_id) {
- struct drm_crtc *crtc = drm_crtc_find(dev, NULL, val);
+ struct drm_crtc *crtc = drm_crtc_find(dev, file_priv, val);
+ if (val && !crtc)
+ return -EACCES;
return drm_atomic_set_crtc_for_connector(state, crtc);
} else if (property == config->dpms_property) {
/* setting DPMS property requires special handling, which
@@ -746,9 +738,13 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
return -EINVAL;
}
state->content_protection = val;
+ } else if (property == connector->colorspace_property) {
+ state->colorspace = val;
} else if (property == config->writeback_fb_id_property) {
- struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, NULL, val);
- int ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
+ struct drm_framebuffer *fb;
+ int ret;
+ fb = drm_framebuffer_lookup(dev, file_priv, val);
+ ret = drm_atomic_set_writeback_fb_for_connector(state, fb);
if (fb)
drm_framebuffer_put(fb);
return ret;
@@ -814,6 +810,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
*val = state->picture_aspect_ratio;
} else if (property == config->content_type_property) {
*val = state->content_type;
+ } else if (property == connector->colorspace_property) {
+ *val = state->colorspace;
} else if (property == connector->scaling_mode_property) {
*val = state->scaling_mode;
} else if (property == connector->content_protection_property) {
@@ -943,6 +941,7 @@ out:
}
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value)
@@ -965,7 +964,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_connector_set_property(connector,
- connector_state, prop, prop_value);
+ connector_state, file_priv,
+ prop, prop_value);
break;
}
case DRM_MODE_OBJECT_CRTC: {
@@ -993,7 +993,8 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
ret = drm_atomic_plane_set_property(plane,
- plane_state, prop, prop_value);
+ plane_state, file_priv,
+ prop, prop_value);
break;
}
default:
@@ -1158,19 +1159,17 @@ static int prepare_signaling(struct drm_device *dev,
for_each_new_connector_in_state(state, conn, conn_state, i) {
struct drm_writeback_connector *wb_conn;
- struct drm_writeback_job *job;
struct drm_out_fence_state *f;
struct dma_fence *fence;
s32 __user *fence_ptr;
+ if (!conn_state->writeback_job)
+ continue;
+
fence_ptr = get_out_fence_for_connector(state, conn);
if (!fence_ptr)
continue;
- job = drm_atomic_get_writeback_job(conn_state);
- if (!job)
- return -ENOMEM;
-
f = krealloc(*fence_state, sizeof(**fence_state) *
(*num_fences + 1), GFP_KERNEL);
if (!f)
@@ -1192,7 +1191,7 @@ static int prepare_signaling(struct drm_device *dev,
return ret;
}
- job->out_fence = fence;
+ conn_state->writeback_job->out_fence = fence;
}
/*
@@ -1365,8 +1364,8 @@ retry:
goto out;
}
- ret = drm_atomic_set_property(state, obj, prop,
- prop_value);
+ ret = drm_atomic_set_property(state, file_priv,
+ obj, prop, prop_value);
if (ret) {
drm_mode_object_put(obj);
goto out;
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 1669c42c40ed..22c7a104b802 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -103,14 +103,11 @@ struct drm_master *drm_master_create(struct drm_device *dev)
return NULL;
kref_init(&master->refcount);
- spin_lock_init(&master->lock.spinlock);
- init_waitqueue_head(&master->lock.lock_queue);
+ drm_master_legacy_init(master);
idr_init(&master->magic_map);
master->dev = dev;
/* initialize the tree of output resource lessees */
- master->lessor = NULL;
- master->lessee_id = 0;
INIT_LIST_HEAD(&master->lessees);
INIT_LIST_HEAD(&master->lessee_list);
idr_init(&master->leases);
@@ -274,21 +271,7 @@ void drm_master_release(struct drm_file *file_priv)
if (!drm_is_current_master(file_priv))
goto out;
- if (drm_core_check_feature(dev, DRIVER_LEGACY)) {
- /*
- * Since the master is disappearing, so is the
- * possibility to lock.
- */
- mutex_lock(&dev->struct_mutex);
- if (master->lock.hw_lock) {
- if (dev->sigdata.lock == master->lock.hw_lock)
- dev->sigdata.lock = NULL;
- master->lock.hw_lock = NULL;
- master->lock.file_priv = NULL;
- wake_up_interruptible_all(&master->lock.lock_queue);
- }
- mutex_unlock(&dev->struct_mutex);
- }
+ drm_legacy_lock_master_cleanup(dev, master);
if (dev->master == file_priv->master)
drm_drop_master(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index e407adb033e7..bfc419ed9d6c 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -584,6 +584,14 @@ void drm_legacy_master_rmmaps(struct drm_device *dev, struct drm_master *master)
mutex_unlock(&dev->struct_mutex);
}
+void drm_legacy_rmmaps(struct drm_device *dev)
+{
+ struct drm_map_list *r_list, *list_temp;
+
+ list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
+ drm_legacy_rmmap(dev, r_list->map);
+}
+
/* The rmmap ioctl appears to be unnecessary. All mappings are torn down on
* the last close of the device, and this is necessary for cleanup when things
* exit uncleanly. Therefore, having userland manually remove mappings seems
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index 9b2bd28dde0a..f20d1dda3961 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -69,7 +69,8 @@ EXPORT_SYMBOL(drm_client_close);
* @name: Client name
* @funcs: DRM client functions (optional)
*
- * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
+ * This initialises the client and opens a &drm_file.
+ * Use drm_client_register() to complete the process.
* The caller needs to hold a reference on @dev before calling this function.
* The client is freed when the &drm_device is unregistered. See drm_client_release().
*
@@ -108,16 +109,16 @@ err_put_module:
EXPORT_SYMBOL(drm_client_init);
/**
- * drm_client_add - Add client to the device list
+ * drm_client_register - Register client
* @client: DRM client
*
* Add the client to the &drm_device client list to activate its callbacks.
* @client must be initialized by a call to drm_client_init(). After
- * drm_client_add() it is no longer permissible to call drm_client_release()
+ * drm_client_register() it is no longer permissible to call drm_client_release()
* directly (outside the unregister callback), instead cleanup will happen
* automatically on driver unload.
*/
-void drm_client_add(struct drm_client_dev *client)
+void drm_client_register(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
@@ -125,7 +126,7 @@ void drm_client_add(struct drm_client_dev *client)
list_add(&client->list, &dev->clientlist);
mutex_unlock(&dev->clientlist_mutex);
}
-EXPORT_SYMBOL(drm_client_add);
+EXPORT_SYMBOL(drm_client_register);
/**
* drm_client_release - Release DRM client resources
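With the rename above, an in-kernel client pairs drm_client_init() with drm_client_register(). A minimal sketch of that sequence; the client name, funcs table and setup function below are placeholders, not taken from this patch.

	static const struct drm_client_funcs my_client_funcs = {
		/* .unregister / .restore / .hotplug hooks go here when needed */
	};

	static int my_client_setup(struct drm_device *dev,
				   struct drm_client_dev *client)
	{
		int ret;

		ret = drm_client_init(dev, client, "my-client", &my_client_funcs);
		if (ret)
			return ret;

		/* activate the callbacks; cleanup now happens automatically on unload */
		drm_client_register(client);

		return 0;
	}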
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index dd40eff0911c..b34c3d38bf15 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -245,6 +245,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
+ connector->tile_blob_ptr = NULL;
connector->status = connector_status_unknown;
connector->display_info.panel_orientation =
DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
@@ -272,6 +273,9 @@ int drm_connector_init(struct drm_device *dev,
drm_object_attach_property(&connector->base,
config->non_desktop_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->tile_property,
+ 0);
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
@@ -826,6 +830,33 @@ static struct drm_prop_enum_list drm_cp_enum_list[] = {
};
DRM_ENUM_NAME_FN(drm_get_content_protection_name, drm_cp_enum_list)
+static const struct drm_prop_enum_list hdmi_colorspaces[] = {
+ /* For Default case, driver will set the colorspace */
+ { DRM_MODE_COLORIMETRY_DEFAULT, "Default" },
+ /* Standard Definition Colorimetry based on CEA 861 */
+ { DRM_MODE_COLORIMETRY_SMPTE_170M_YCC, "SMPTE_170M_YCC" },
+ { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" },
+ /* Standard Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" },
+ /* High Definition Colorimetry based on IEC 61966-2-4 */
+ { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" },
+ /* Colorimetry based on IEC 61966-2-1/Amendment 1 */
+ { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 [33] */
+ { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" },
+ /* Colorimetry based on IEC 61966-2-5 */
+ { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" },
+ /* Colorimetry based on ITU-R BT.2020 */
+ { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" },
+ /* Added as part of Additional Colorimetry Extension in 861.G */
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" },
+ { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" },
+};
+
/**
* DOC: standard connector properties
*
@@ -1385,12 +1416,6 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
*
* The driver may place further restrictions within these minimum
* and maximum bounds.
- *
- * The semantics for the vertical blank timestamp differ when
- * variable refresh rate is active. The vertical blank timestamp
- * is defined to be an estimate using the current mode's fixed
- * refresh rate timings. The semantics for the page-flip event
- * timestamp remain the same.
*/
/**
@@ -1548,6 +1573,57 @@ int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property);
/**
+ * DOC: standard connector properties
+ *
+ * Colorspace:
+ * drm_mode_create_colorspace_property - create colorspace property
+ * This property helps select a suitable colorspace based on the sink
+ * capability. Modern sink devices support wider gamut like BT2020.
+ * This helps switch to BT2020 mode if a BT2020-encoded video stream
+ * is being played by the user, and likewise for any other colorspace,
+ * thereby giving a good visual experience to users.
+ *
+ * The expectation from userspace is that it should parse the EDID
+ * and get supported colorspaces. Use this property and switch to the
+ * one supported. Sink supported colorspaces should be retrieved by
+ * userspace from EDID and driver will not explicitly expose them.
+ *
+ * Basically the expectation from userspace is:
+ * - Set up CRTC DEGAMMA/CTM/GAMMA to convert to some sink
+ * colorspace
+ * - Set this new property to let the sink know what it
+ * converted the CRTC output to.
+ * - This property is just to inform the sink what colorspace
+ * the source is trying to drive.
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
+ "Colorspace",
+ hdmi_colorspaces,
+ ARRAY_SIZE(hdmi_colorspaces));
+ if (!prop)
+ return -ENOMEM;
+ } else {
+ DRM_DEBUG_KMS("Colorspace property not supported\n");
+ return 0;
+ }
+
+ connector->colorspace_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_colorspace_property);
+
+/**
* drm_mode_create_content_type_property - create content type property
* @dev: DRM device
*
@@ -1634,6 +1710,8 @@ EXPORT_SYMBOL(drm_connector_set_path_property);
* This looks up the tile information for a connector, and creates a
* property for userspace to parse if it exists. The property is of
* the form of 8 integers using ':' as a separator.
+ * This is used for dual port tiled displays with DisplayPort SST
+ * or DisplayPort MST connectors.
*
* Returns:
* Zero on success, errno on failure.
@@ -1677,6 +1755,9 @@ EXPORT_SYMBOL(drm_connector_set_tile_property);
*
* This function creates a new blob modeset object and assigns its id to the
* connector's edid property.
+ * Since we also parse tile information from EDID's displayID block, we also
+ * set the connector's tile property here. See drm_connector_set_tile_property()
+ * for more details.
*
* Returns:
* Zero on success, negative errno on failure.
@@ -1718,7 +1799,9 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
edid,
&connector->base,
dev->mode_config.edid_property);
- return ret;
+ if (ret)
+ return ret;
+ return drm_connector_set_tile_property(connector);
}
EXPORT_SYMBOL(drm_connector_update_edid_property);
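A sketch of how a driver could expose the new Colorspace property from its HDMI connector init path, attaching it with DRM_MODE_COLORIMETRY_DEFAULT as the initial value; the helper name below is illustrative, not part of this patch.

	static int my_hdmi_attach_colorspace(struct drm_connector *connector)
	{
		int ret;

		ret = drm_mode_create_colorspace_property(connector);
		if (ret)
			return ret;

		/* non-HDMI connectors leave the property NULL; nothing to attach */
		if (!connector->colorspace_property)
			return 0;

		drm_object_attach_property(&connector->base,
					   connector->colorspace_property,
					   DRM_MODE_COLORIMETRY_DEFAULT);

		return 0;
	}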
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 7dabbaf033a1..790ba5941954 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -559,6 +559,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
plane = crtc->primary;
+ /* allow disabling with the primary plane leased */
+ if (crtc_req->mode_valid && !drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
mutex_lock(&crtc->dev->mode_config.mutex);
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx,
DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index 216f2a9ee3d4..0719a235d6cc 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -214,6 +214,7 @@ int drm_atomic_connector_commit_dpms(struct drm_atomic_state *state,
struct drm_connector *connector,
int mode);
int drm_atomic_set_property(struct drm_atomic_state *state,
+ struct drm_file *file_priv,
struct drm_mode_object *obj,
struct drm_property *prop,
uint64_t prop_value);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index dc7ac0c60547..c630ed157994 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3022,7 +3022,6 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
edid = drm_edid_duplicate(port->cached_edid);
else {
edid = drm_get_edid(connector, &port->aux.ddc);
- drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_mst_topology_put_port(port);
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 05bbc2b622fc..862621494a93 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -286,6 +286,138 @@ void drm_minor_release(struct drm_minor *minor)
* Note that the lifetime rules for &drm_device instance has still a lot of
* historical baggage. Hence use the reference counting provided by
* drm_dev_get() and drm_dev_put() only carefully.
+ *
+ * Display driver example
+ * ~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The following example shows a typical structure of a DRM display driver.
+ * The example focuses on the probe() function and the other functions that are
+ * almost always present, and serves as a demonstration of devm_drm_dev_init()
+ * usage with its accompanying drm_driver->release callback.
+ *
+ * .. code-block:: c
+ *
+ * struct driver_device {
+ * struct drm_device drm;
+ * void *userspace_facing;
+ * struct clk *pclk;
+ * };
+ *
+ * static void driver_drm_release(struct drm_device *drm)
+ * {
+ * struct driver_device *priv = container_of(...);
+ *
+ * drm_mode_config_cleanup(drm);
+ * drm_dev_fini(drm);
+ * kfree(priv->userspace_facing);
+ * kfree(priv);
+ * }
+ *
+ * static struct drm_driver driver_drm_driver = {
+ * [...]
+ * .release = driver_drm_release,
+ * };
+ *
+ * static int driver_probe(struct platform_device *pdev)
+ * {
+ * struct driver_device *priv;
+ * struct drm_device *drm;
+ * int ret;
+ *
+ * [
+ * devm_kzalloc() can't be used here because the drm_device
+ * lifetime can exceed the device lifetime if driver unbind
+ * happens when userspace still has open file descriptors.
+ * ]
+ * priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ * if (!priv)
+ * return -ENOMEM;
+ *
+ * drm = &priv->drm;
+ *
+ * ret = devm_drm_dev_init(&pdev->dev, drm, &driver_drm_driver);
+ * if (ret) {
+ * kfree(drm);
+ * return ret;
+ * }
+ *
+ * drm_mode_config_init(drm);
+ *
+ * priv->userspace_facing = kzalloc(..., GFP_KERNEL);
+ * if (!priv->userspace_facing)
+ * return -ENOMEM;
+ *
+ * priv->pclk = devm_clk_get(dev, "PCLK");
+ * if (IS_ERR(priv->pclk))
+ * return PTR_ERR(priv->pclk);
+ *
+ * [ Further setup, display pipeline etc ]
+ *
+ * platform_set_drvdata(pdev, drm);
+ *
+ * drm_mode_config_reset(drm);
+ *
+ * ret = drm_dev_register(drm);
+ * if (ret)
+ * return ret;
+ *
+ * drm_fbdev_generic_setup(drm, 32);
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called before the devm_ resources are released ]
+ * static int driver_remove(struct platform_device *pdev)
+ * {
+ * struct drm_device *drm = platform_get_drvdata(pdev);
+ *
+ * drm_dev_unregister(drm);
+ * drm_atomic_helper_shutdown(drm);
+ *
+ * return 0;
+ * }
+ *
+ * [ This function is called on kernel restart and shutdown ]
+ * static void driver_shutdown(struct platform_device *pdev)
+ * {
+ * drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_suspend(struct device *dev)
+ * {
+ * return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
+ * }
+ *
+ * static int __maybe_unused driver_pm_resume(struct device *dev)
+ * {
+ * drm_mode_config_helper_resume(dev_get_drvdata(dev));
+ *
+ * return 0;
+ * }
+ *
+ * static const struct dev_pm_ops driver_pm_ops = {
+ * SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
+ * };
+ *
+ * static struct platform_driver driver_driver = {
+ * .driver = {
+ * [...]
+ * .pm = &driver_pm_ops,
+ * },
+ * .probe = driver_probe,
+ * .remove = driver_remove,
+ * .shutdown = driver_shutdown,
+ * };
+ * module_platform_driver(driver_driver);
+ *
+ * Drivers that want to support device unplugging (USB, DT overlay unload) should
+ * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
+ * regions that access device resources to prevent use after they're
+ * released. This is done using drm_dev_enter() and drm_dev_exit(). There is one
+ * shortcoming, however: drm_dev_unplug() marks the drm_device as unplugged before
+ * drm_atomic_helper_shutdown() is called. This means that if the disable code
+ * paths are protected, they will not run on regular driver module unload,
+ * possibly leaving the hardware enabled.
*/
/**
@@ -376,7 +508,6 @@ void drm_dev_unplug(struct drm_device *dev)
synchronize_srcu(&drm_unplug_srcu);
drm_dev_unregister(dev);
- drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_dev_unplug);
@@ -453,6 +584,31 @@ static void drm_fs_inode_free(struct inode *inode)
}
/**
+ * DOC: component helper usage recommendations
+ *
+ * DRM drivers that drive hardware where a logical device consists of a pile of
+ * independent hardware blocks are recommended to use the :ref:`component helper
+ * library<component>`. For consistency and better options for code reuse the
+ * following guidelines apply:
+ *
+ * - The entire device initialization procedure should be run from the
+ * &component_master_ops.master_bind callback, starting with drm_dev_init(),
+ * then binding all components with component_bind_all() and finishing with
+ * drm_dev_register().
+ *
+ * - The opaque pointer passed to all components through component_bind_all()
+ * should point at &struct drm_device of the device instance, not some driver
+ * specific private structure.
+ *
+ * - The component helper fills the niche where further standardization of
+ * interfaces is not practical. When there already is, or will be, a
+ * standardized interface like &drm_bridge or &drm_panel, providing its own
+ * functions to find such components at driver load time, like
+ * drm_of_find_panel_or_bridge(), then the component helper should not be
+ * used.
+ */
+
+/**
* drm_dev_init - Initialise new DRM device
* @dev: DRM device
* @driver: DRM driver
@@ -497,26 +653,22 @@ int drm_dev_init(struct drm_device *dev,
BUG_ON(!parent);
kref_init(&dev->ref);
- dev->dev = parent;
+ dev->dev = get_device(parent);
dev->driver = driver;
/* no per-device feature limits by default */
dev->driver_features = ~0u;
+ drm_legacy_init_members(dev);
INIT_LIST_HEAD(&dev->filelist);
INIT_LIST_HEAD(&dev->filelist_internal);
INIT_LIST_HEAD(&dev->clientlist);
- INIT_LIST_HEAD(&dev->ctxlist);
- INIT_LIST_HEAD(&dev->vmalist);
- INIT_LIST_HEAD(&dev->maplist);
INIT_LIST_HEAD(&dev->vblank_event_list);
- spin_lock_init(&dev->buf_lock);
spin_lock_init(&dev->event_lock);
mutex_init(&dev->struct_mutex);
mutex_init(&dev->filelist_mutex);
mutex_init(&dev->clientlist_mutex);
- mutex_init(&dev->ctxlist_mutex);
mutex_init(&dev->master_mutex);
dev->anon_inode = drm_fs_inode_new();
@@ -536,7 +688,7 @@ int drm_dev_init(struct drm_device *dev,
if (ret)
goto err_minors;
- ret = drm_ht_create(&dev->map_hash, 12);
+ ret = drm_legacy_create_map_hash(dev);
if (ret)
goto err_minors;
@@ -561,21 +713,61 @@ err_setunique:
drm_gem_destroy(dev);
err_ctxbitmap:
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
err_minors:
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
drm_fs_inode_free(dev->anon_inode);
err_free:
+ put_device(dev->dev);
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
return ret;
}
EXPORT_SYMBOL(drm_dev_init);
+static void devm_drm_dev_init_release(void *data)
+{
+ drm_dev_put(data);
+}
+
+/**
+ * devm_drm_dev_init - Resource managed drm_dev_init()
+ * @parent: Parent device object
+ * @dev: DRM device
+ * @driver: DRM driver
+ *
+ * Managed drm_dev_init(). The DRM device initialized with this function is
+ * automatically put on driver detach using drm_dev_put(). You must supply a
+ * &drm_driver.release callback to control the finalization explicitly.
+ *
+ * RETURNS:
+ * 0 on success, or error code on failure.
+ */
+int devm_drm_dev_init(struct device *parent,
+ struct drm_device *dev,
+ struct drm_driver *driver)
+{
+ int ret;
+
+ if (WARN_ON(!parent || !driver->release))
+ return -EINVAL;
+
+ ret = drm_dev_init(dev, driver, parent);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action(parent, devm_drm_dev_init_release, dev);
+ if (ret)
+ devm_drm_dev_init_release(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL(devm_drm_dev_init);
+
/**
* drm_dev_fini - Finalize a dead DRM device
* @dev: DRM device
@@ -596,17 +788,19 @@ void drm_dev_fini(struct drm_device *dev)
drm_gem_destroy(dev);
drm_legacy_ctxbitmap_cleanup(dev);
- drm_ht_remove(&dev->map_hash);
+ drm_legacy_remove_map_hash(dev);
drm_fs_inode_free(dev->anon_inode);
drm_minor_free(dev, DRM_MINOR_PRIMARY);
drm_minor_free(dev, DRM_MINOR_RENDER);
+ put_device(dev->dev);
+
mutex_destroy(&dev->master_mutex);
- mutex_destroy(&dev->ctxlist_mutex);
mutex_destroy(&dev->clientlist_mutex);
mutex_destroy(&dev->filelist_mutex);
mutex_destroy(&dev->struct_mutex);
+ drm_legacy_destroy_members(dev);
kfree(dev->unique);
}
EXPORT_SYMBOL(drm_dev_fini);
@@ -840,8 +1034,6 @@ EXPORT_SYMBOL(drm_dev_register);
*/
void drm_dev_unregister(struct drm_device *dev)
{
- struct drm_map_list *r_list, *list_temp;
-
if (drm_core_check_feature(dev, DRIVER_LEGACY))
drm_lastclose(dev);
@@ -858,8 +1050,7 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->agp)
drm_pci_agp_destroy(dev);
- list_for_each_entry_safe(r_list, list_temp, &dev->maplist, head)
- drm_legacy_rmmap(dev, r_list->map);
+ drm_legacy_rmmaps(dev);
remove_compat_control_link(dev);
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
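The unplug documentation added above relies on drm_dev_enter()/drm_dev_exit() to guard hardware access. A minimal sketch of such a guarded path; the CRTC hook name is illustrative and not taken from this patch.

	static void my_crtc_atomic_disable(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
	{
		int idx;

		/* bail out if the device was unplugged; its resources may be gone */
		if (!drm_dev_enter(crtc->dev, &idx))
			return;

		/* program the hardware to shut the pipe down here */

		drm_dev_exit(idx);
	}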
diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c
index bce99f95c1a3..77f4e5ae4197 100644
--- a/drivers/gpu/drm/drm_dsc.c
+++ b/drivers/gpu/drm/drm_dsc.c
@@ -11,6 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/byteorder/generic.h>
+#include <drm/drm_print.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_dsc.h>
@@ -31,75 +32,74 @@
/**
* drm_dsc_dp_pps_header_init() - Initializes the PPS Header
* for DisplayPort as per the DP 1.4 spec.
- * @pps_sdp: Secondary data packet for DSC Picture Parameter Set
- * as defined in &struct drm_dsc_pps_infoframe
+ * @pps_header: Secondary data packet header for DSC Picture
+ * Parameter Set as defined in &struct dp_sdp_header
*
* DP 1.4 spec defines the secondary data packet for sending the
* picture parameter infoframes from the source to the sink.
- * This function populates the pps header defined in
- * &struct drm_dsc_pps_infoframe as per the header bytes defined
- * in &struct dp_sdp_header.
+ * This function populates the SDP header defined in
+ * &struct dp_sdp_header.
*/
-void drm_dsc_dp_pps_header_init(struct drm_dsc_pps_infoframe *pps_sdp)
+void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header)
{
- memset(&pps_sdp->pps_header, 0, sizeof(pps_sdp->pps_header));
+ memset(pps_header, 0, sizeof(*pps_header));
- pps_sdp->pps_header.HB1 = DP_SDP_PPS;
- pps_sdp->pps_header.HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
+ pps_header->HB1 = DP_SDP_PPS;
+ pps_header->HB2 = DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1;
}
EXPORT_SYMBOL(drm_dsc_dp_pps_header_init);
/**
- * drm_dsc_pps_infoframe_pack() - Populates the DSC PPS infoframe
+ * drm_dsc_pps_payload_pack() - Populates the DSC PPS
*
- * @pps_sdp:
- * Secondary data packet for DSC Picture Parameter Set. This is defined
- * by &struct drm_dsc_pps_infoframe
+ * @pps_payload:
+ * Bitwise struct for DSC Picture Parameter Set. This is defined
+ * by &struct drm_dsc_picture_parameter_set
* @dsc_cfg:
* DSC Configuration data filled by driver as defined by
* &struct drm_dsc_config
*
- * DSC source device sends a secondary data packet filled with all the
- * picture parameter set (PPS) information required by the sink to decode
- * the compressed frame. Driver populates the dsC PPS infoframe using the DSC
- * configuration parameters in the order expected by the DSC Display Sink
- * device. For the DSC, the sink device expects the PPS payload in the big
- * endian format for the fields that span more than 1 byte.
+ * DSC source device sends a picture parameter set (PPS) containing the
+ * information required by the sink to decode the compressed frame. Driver
+ * populates the DSC PPS struct using the DSC configuration parameters in
+ * the order expected by the DSC Display Sink device. For the DSC, the sink
+ * device expects the PPS payload in big endian format for fields
+ * that span more than 1 byte.
*/
-void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
+void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload,
const struct drm_dsc_config *dsc_cfg)
{
int i;
/* Protect against someone accidently changing struct size */
- BUILD_BUG_ON(sizeof(pps_sdp->pps_payload) !=
+ BUILD_BUG_ON(sizeof(*pps_payload) !=
DP_SDP_PPS_HEADER_PAYLOAD_BYTES_MINUS_1 + 1);
- memset(&pps_sdp->pps_payload, 0, sizeof(pps_sdp->pps_payload));
+ memset(pps_payload, 0, sizeof(*pps_payload));
/* PPS 0 */
- pps_sdp->pps_payload.dsc_version =
+ pps_payload->dsc_version =
dsc_cfg->dsc_version_minor |
dsc_cfg->dsc_version_major << DSC_PPS_VERSION_MAJOR_SHIFT;
/* PPS 1, 2 is 0 */
/* PPS 3 */
- pps_sdp->pps_payload.pps_3 =
+ pps_payload->pps_3 =
dsc_cfg->line_buf_depth |
dsc_cfg->bits_per_component << DSC_PPS_BPC_SHIFT;
/* PPS 4 */
- pps_sdp->pps_payload.pps_4 =
+ pps_payload->pps_4 =
((dsc_cfg->bits_per_pixel & DSC_PPS_BPP_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT) |
dsc_cfg->vbr_enable << DSC_PPS_VBR_EN_SHIFT |
- dsc_cfg->enable422 << DSC_PPS_SIMPLE422_SHIFT |
+ dsc_cfg->simple_422 << DSC_PPS_SIMPLE422_SHIFT |
dsc_cfg->convert_rgb << DSC_PPS_CONVERT_RGB_SHIFT |
dsc_cfg->block_pred_enable << DSC_PPS_BLOCK_PRED_EN_SHIFT;
/* PPS 5 */
- pps_sdp->pps_payload.bits_per_pixel_low =
+ pps_payload->bits_per_pixel_low =
(dsc_cfg->bits_per_pixel & DSC_PPS_LSB_MASK);
/*
@@ -110,103 +110,103 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
*/
/* PPS 6, 7 */
- pps_sdp->pps_payload.pic_height = cpu_to_be16(dsc_cfg->pic_height);
+ pps_payload->pic_height = cpu_to_be16(dsc_cfg->pic_height);
/* PPS 8, 9 */
- pps_sdp->pps_payload.pic_width = cpu_to_be16(dsc_cfg->pic_width);
+ pps_payload->pic_width = cpu_to_be16(dsc_cfg->pic_width);
/* PPS 10, 11 */
- pps_sdp->pps_payload.slice_height = cpu_to_be16(dsc_cfg->slice_height);
+ pps_payload->slice_height = cpu_to_be16(dsc_cfg->slice_height);
/* PPS 12, 13 */
- pps_sdp->pps_payload.slice_width = cpu_to_be16(dsc_cfg->slice_width);
+ pps_payload->slice_width = cpu_to_be16(dsc_cfg->slice_width);
/* PPS 14, 15 */
- pps_sdp->pps_payload.chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
+ pps_payload->chunk_size = cpu_to_be16(dsc_cfg->slice_chunk_size);
/* PPS 16 */
- pps_sdp->pps_payload.initial_xmit_delay_high =
+ pps_payload->initial_xmit_delay_high =
((dsc_cfg->initial_xmit_delay &
DSC_PPS_INIT_XMIT_DELAY_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 17 */
- pps_sdp->pps_payload.initial_xmit_delay_low =
+ pps_payload->initial_xmit_delay_low =
(dsc_cfg->initial_xmit_delay & DSC_PPS_LSB_MASK);
/* PPS 18, 19 */
- pps_sdp->pps_payload.initial_dec_delay =
+ pps_payload->initial_dec_delay =
cpu_to_be16(dsc_cfg->initial_dec_delay);
/* PPS 20 is 0 */
/* PPS 21 */
- pps_sdp->pps_payload.initial_scale_value =
+ pps_payload->initial_scale_value =
dsc_cfg->initial_scale_value;
/* PPS 22, 23 */
- pps_sdp->pps_payload.scale_increment_interval =
+ pps_payload->scale_increment_interval =
cpu_to_be16(dsc_cfg->scale_increment_interval);
/* PPS 24 */
- pps_sdp->pps_payload.scale_decrement_interval_high =
+ pps_payload->scale_decrement_interval_high =
((dsc_cfg->scale_decrement_interval &
DSC_PPS_SCALE_DEC_INT_HIGH_MASK) >>
DSC_PPS_MSB_SHIFT);
/* PPS 25 */
- pps_sdp->pps_payload.scale_decrement_interval_low =
+ pps_payload->scale_decrement_interval_low =
(dsc_cfg->scale_decrement_interval & DSC_PPS_LSB_MASK);
/* PPS 26[7:0], PPS 27[7:5] RESERVED */
/* PPS 27 */
- pps_sdp->pps_payload.first_line_bpg_offset =
+ pps_payload->first_line_bpg_offset =
dsc_cfg->first_line_bpg_offset;
/* PPS 28, 29 */
- pps_sdp->pps_payload.nfl_bpg_offset =
+ pps_payload->nfl_bpg_offset =
cpu_to_be16(dsc_cfg->nfl_bpg_offset);
/* PPS 30, 31 */
- pps_sdp->pps_payload.slice_bpg_offset =
+ pps_payload->slice_bpg_offset =
cpu_to_be16(dsc_cfg->slice_bpg_offset);
/* PPS 32, 33 */
- pps_sdp->pps_payload.initial_offset =
+ pps_payload->initial_offset =
cpu_to_be16(dsc_cfg->initial_offset);
/* PPS 34, 35 */
- pps_sdp->pps_payload.final_offset = cpu_to_be16(dsc_cfg->final_offset);
+ pps_payload->final_offset = cpu_to_be16(dsc_cfg->final_offset);
/* PPS 36 */
- pps_sdp->pps_payload.flatness_min_qp = dsc_cfg->flatness_min_qp;
+ pps_payload->flatness_min_qp = dsc_cfg->flatness_min_qp;
/* PPS 37 */
- pps_sdp->pps_payload.flatness_max_qp = dsc_cfg->flatness_max_qp;
+ pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp;
/* PPS 38, 39 */
- pps_sdp->pps_payload.rc_model_size =
+ pps_payload->rc_model_size =
cpu_to_be16(DSC_RC_MODEL_SIZE_CONST);
/* PPS 40 */
- pps_sdp->pps_payload.rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
+ pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST;
/* PPS 41 */
- pps_sdp->pps_payload.rc_quant_incr_limit0 =
+ pps_payload->rc_quant_incr_limit0 =
dsc_cfg->rc_quant_incr_limit0;
/* PPS 42 */
- pps_sdp->pps_payload.rc_quant_incr_limit1 =
+ pps_payload->rc_quant_incr_limit1 =
dsc_cfg->rc_quant_incr_limit1;
/* PPS 43 */
- pps_sdp->pps_payload.rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
+ pps_payload->rc_tgt_offset = DSC_RC_TGT_OFFSET_LO_CONST |
DSC_RC_TGT_OFFSET_HI_CONST << DSC_PPS_RC_TGT_OFFSET_HI_SHIFT;
/* PPS 44 - 57 */
for (i = 0; i < DSC_NUM_BUF_RANGES - 1; i++)
- pps_sdp->pps_payload.rc_buf_thresh[i] =
+ pps_payload->rc_buf_thresh[i] =
dsc_cfg->rc_buf_thresh[i];
/* PPS 58 - 87 */
@@ -215,32 +215,181 @@ void drm_dsc_pps_infoframe_pack(struct drm_dsc_pps_infoframe *pps_sdp,
* are as follows: Min_qp[15:11], max_qp[10:6], offset[5:0]
*/
for (i = 0; i < DSC_NUM_BUF_RANGES; i++) {
- pps_sdp->pps_payload.rc_range_parameters[i] =
+ pps_payload->rc_range_parameters[i] =
((dsc_cfg->rc_range_params[i].range_min_qp <<
DSC_PPS_RC_RANGE_MINQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_max_qp <<
DSC_PPS_RC_RANGE_MAXQP_SHIFT) |
(dsc_cfg->rc_range_params[i].range_bpg_offset));
- pps_sdp->pps_payload.rc_range_parameters[i] =
- cpu_to_be16(pps_sdp->pps_payload.rc_range_parameters[i]);
+ pps_payload->rc_range_parameters[i] =
+ cpu_to_be16(pps_payload->rc_range_parameters[i]);
}
/* PPS 88 */
- pps_sdp->pps_payload.native_422_420 = dsc_cfg->native_422 |
+ pps_payload->native_422_420 = dsc_cfg->native_422 |
dsc_cfg->native_420 << DSC_PPS_NATIVE_420_SHIFT;
/* PPS 89 */
- pps_sdp->pps_payload.second_line_bpg_offset =
+ pps_payload->second_line_bpg_offset =
dsc_cfg->second_line_bpg_offset;
/* PPS 90, 91 */
- pps_sdp->pps_payload.nsl_bpg_offset =
+ pps_payload->nsl_bpg_offset =
cpu_to_be16(dsc_cfg->nsl_bpg_offset);
/* PPS 92, 93 */
- pps_sdp->pps_payload.second_line_offset_adj =
+ pps_payload->second_line_offset_adj =
cpu_to_be16(dsc_cfg->second_line_offset_adj);
/* PPS 94 - 127 are 0 */
}
-EXPORT_SYMBOL(drm_dsc_pps_infoframe_pack);
+EXPORT_SYMBOL(drm_dsc_pps_payload_pack);
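A minimal usage sketch, assuming a driver that has already filled a struct drm_dsc_config; the example_send_pps() wrapper and the final hand-off are illustrative only and not part of this patch:

#include <drm/drm_dsc.h>

static void example_send_pps(const struct drm_dsc_config *dsc_cfg)
{
        struct drm_dsc_picture_parameter_set pps;

        /* Multi-byte fields are converted to big endian inside the helper. */
        drm_dsc_pps_payload_pack(&pps, dsc_cfg);

        /* ... hand &pps to the hardware-specific SDP/DIP write path ... */
}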
+
+/**
+ * drm_dsc_compute_rc_parameters() - Write rate control
+ * parameters to the dsc configuration defined in
+ * &struct drm_dsc_config in accordance with the DSC 1.2
+ * specification. Some configuration fields must be present
+ * beforehand.
+ *
+ * @vdsc_cfg:
+ * DSC Configuration data partially filled by driver
+ */
+int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
+{
+ unsigned long groups_per_line = 0;
+ unsigned long groups_total = 0;
+ unsigned long num_extra_mux_bits = 0;
+ unsigned long slice_bits = 0;
+ unsigned long hrd_delay = 0;
+ unsigned long final_scale = 0;
+ unsigned long rbs_min = 0;
+
+ if (vdsc_cfg->native_420 || vdsc_cfg->native_422) {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width / 2,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width / 2 *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ } else {
+ /* Number of groups used to code each line of a slice */
+ groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
+ DSC_RC_PIXELS_PER_GROUP);
+
+ /* chunksize in Bytes */
+ vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
+ vdsc_cfg->bits_per_pixel,
+ (8 * 16));
+ }
+
+ if (vdsc_cfg->convert_rgb)
+ num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4)
+ - 2);
+ else if (vdsc_cfg->native_422)
+ num_extra_mux_bits = 4 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 3 * (4 * vdsc_cfg->bits_per_component) - 2;
+ else
+ num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
+ (4 * vdsc_cfg->bits_per_component + 4) +
+ 2 * (4 * vdsc_cfg->bits_per_component) - 2;
+ /* Number of bits in one Slice */
+ slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
+
+ while ((num_extra_mux_bits > 0) &&
+ ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
+ num_extra_mux_bits--;
+
+ if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
+ vdsc_cfg->initial_scale_value = groups_per_line + 8;
+
+ /* scale_decrement_interval calculation according to DSC spec 1.11 */
+ if (vdsc_cfg->initial_scale_value > 8)
+ vdsc_cfg->scale_decrement_interval = groups_per_line /
+ (vdsc_cfg->initial_scale_value - 8);
+ else
+ vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
+
+ vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
+ (vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
+
+ if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
+ DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
+ return -ERANGE;
+ }
+
+ final_scale = (vdsc_cfg->rc_model_size * 8) /
+ (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
+ if (vdsc_cfg->slice_height > 1)
+ /*
+ * NflBpgOffset is 16 bit value with 11 fractional bits
+ * hence we multiply by 2^11 for preserving the
+ * fractional part
+ */
+ vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
+ (vdsc_cfg->slice_height - 1));
+ else
+ vdsc_cfg->nfl_bpg_offset = 0;
+
+ /* 2^16 - 1 */
+ if (vdsc_cfg->nfl_bpg_offset > 65535) {
+ DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
+ return -ERANGE;
+ }
+
+ /* Number of groups used to code the entire slice */
+ groups_total = groups_per_line * vdsc_cfg->slice_height;
+
+ /* slice_bpg_offset is 16 bit value with 11 fractional bits */
+ vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
+ vdsc_cfg->initial_offset +
+ num_extra_mux_bits) << 11),
+ groups_total);
+
+ if (final_scale > 9) {
+ /*
+ * ScaleIncrementInterval =
+ * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
+ * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
+ * we need divide by 2^11 from pstDscCfg values
+ */
+ vdsc_cfg->scale_increment_interval =
+ (vdsc_cfg->final_offset * (1 << 11)) /
+ ((vdsc_cfg->nfl_bpg_offset +
+ vdsc_cfg->slice_bpg_offset) *
+ (final_scale - 9));
+ } else {
+ /*
+ * If finalScaleValue is less than or equal to 9, a value of 0 should
+ * be used to disable the scale increment at the end of the slice
+ */
+ vdsc_cfg->scale_increment_interval = 0;
+ }
+
+ if (vdsc_cfg->scale_increment_interval > 65535) {
+ DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
+ return -ERANGE;
+ }
+
+ /*
+ * DSC spec mentions that bits_per_pixel specifies the target
+ * bits/pixel (bpp) rate that is used by the encoder,
+ * in steps of 1/16 of a bit per pixel
+ */
+ rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
+ DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
+ vdsc_cfg->bits_per_pixel, 16) +
+ groups_per_line * vdsc_cfg->first_line_bpg_offset;
+
+ hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
+ vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
+ vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dsc_compute_rc_parameters);
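The helper only derives the dependent rate-control fields; the inputs it reads (slice geometry, bits_per_pixel in 1/16 bpp units, initial_xmit_delay, rc_model_size, initial_offset, first_line_bpg_offset, mux_word_size, ...) must be filled by the driver first. A hedged sketch of that calling order, with purely illustrative numbers:

#include <drm/drm_dsc.h>

static int example_fill_rc(struct drm_dsc_config *cfg)
{
        cfg->slice_width = 960;
        cfg->slice_height = 540;
        cfg->bits_per_pixel = 8 << 4;   /* 8 bpp, in 1/16 bpp units */
        cfg->bits_per_component = 8;
        cfg->convert_rgb = true;
        cfg->initial_xmit_delay = 512;
        cfg->first_line_bpg_offset = 12;
        cfg->rc_model_size = 8192;
        cfg->initial_offset = 6144;
        cfg->mux_word_size = 48;

        /* Fails with -ERANGE if a derived field overflows its PPS width. */
        return drm_dsc_compute_rc_parameters(cfg);
}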
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 990b1909f9d7..649cfd8b4200 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,8 +68,6 @@
* maximum size and use that.
*/
#define EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE (1 << 4)
-/* Monitor forgot to set the first detailed is preferred bit. */
-#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
@@ -107,8 +105,6 @@ static const struct edid_quirk {
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
{ "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
- /* Unknown Acer */
- { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
@@ -145,12 +141,6 @@ static const struct edid_quirk {
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- /* Philips 107p5 CRT */
- { "PHL", 57364, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
- /* Proview AY765C */
- { "PTS", 765, EDID_QUIRK_FIRST_DETAILED_PREFERRED },
-
/* Samsung SyncMaster 205BW. Note: irony */
{ "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
/* Samsung SyncMaster 22[5-6]BW */
@@ -172,6 +162,25 @@ static const struct edid_quirk {
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ /* Valve Index Headset */
+ { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
+ { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+
/* HTC Vive and Vive Pro VR Headsets */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
{ "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
@@ -193,6 +202,12 @@ static const struct edid_quirk {
/* Sony PlayStation VR Headset */
{ "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sensics VR Headsets */
+ { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+
+ /* OSVR HDK and HDK2 VR Headsets */
+ { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
};
/*
@@ -4924,6 +4939,76 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+/* HDMI Colorspace Spec Definitions */
+#define FULL_COLORIMETRY_MASK 0x1FF
+#define NORMAL_COLORIMETRY_MASK 0x3
+#define EXTENDED_COLORIMETRY_MASK 0x7
+#define EXTENDED_ACE_COLORIMETRY_MASK 0xF
+
+#define C(x) ((x) << 0)
+#define EC(x) ((x) << 2)
+#define ACE(x) ((x) << 5)
+
+#define HDMI_COLORIMETRY_NO_DATA 0x0
+#define HDMI_COLORIMETRY_SMPTE_170M_YCC (C(1) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_BT709_YCC (C(2) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_601 (C(3) | EC(0) | ACE(0))
+#define HDMI_COLORIMETRY_XVYCC_709 (C(3) | EC(1) | ACE(0))
+#define HDMI_COLORIMETRY_SYCC_601 (C(3) | EC(2) | ACE(0))
+#define HDMI_COLORIMETRY_OPYCC_601 (C(3) | EC(3) | ACE(0))
+#define HDMI_COLORIMETRY_OPRGB (C(3) | EC(4) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_CYCC (C(3) | EC(5) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_RGB (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_BT2020_YCC (C(3) | EC(6) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_D65 (C(3) | EC(7) | ACE(0))
+#define HDMI_COLORIMETRY_DCI_P3_RGB_THEATER (C(3) | EC(7) | ACE(1))
+
+static const u32 hdmi_colorimetry_val[] = {
+ [DRM_MODE_COLORIMETRY_NO_DATA] = HDMI_COLORIMETRY_NO_DATA,
+ [DRM_MODE_COLORIMETRY_SMPTE_170M_YCC] = HDMI_COLORIMETRY_SMPTE_170M_YCC,
+ [DRM_MODE_COLORIMETRY_BT709_YCC] = HDMI_COLORIMETRY_BT709_YCC,
+ [DRM_MODE_COLORIMETRY_XVYCC_601] = HDMI_COLORIMETRY_XVYCC_601,
+ [DRM_MODE_COLORIMETRY_XVYCC_709] = HDMI_COLORIMETRY_XVYCC_709,
+ [DRM_MODE_COLORIMETRY_SYCC_601] = HDMI_COLORIMETRY_SYCC_601,
+ [DRM_MODE_COLORIMETRY_OPYCC_601] = HDMI_COLORIMETRY_OPYCC_601,
+ [DRM_MODE_COLORIMETRY_OPRGB] = HDMI_COLORIMETRY_OPRGB,
+ [DRM_MODE_COLORIMETRY_BT2020_CYCC] = HDMI_COLORIMETRY_BT2020_CYCC,
+ [DRM_MODE_COLORIMETRY_BT2020_RGB] = HDMI_COLORIMETRY_BT2020_RGB,
+ [DRM_MODE_COLORIMETRY_BT2020_YCC] = HDMI_COLORIMETRY_BT2020_YCC,
+};
+
+#undef C
+#undef EC
+#undef ACE
+
+/**
+ * drm_hdmi_avi_infoframe_colorspace() - fill the HDMI AVI infoframe
+ * colorspace information
+ * @frame: HDMI AVI infoframe
+ * @conn_state: connector state
+ */
+void
+drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state)
+{
+ u32 colorimetry_val;
+ u32 colorimetry_index = conn_state->colorspace & FULL_COLORIMETRY_MASK;
+
+ if (colorimetry_index >= ARRAY_SIZE(hdmi_colorimetry_val))
+ colorimetry_val = HDMI_COLORIMETRY_NO_DATA;
+ else
+ colorimetry_val = hdmi_colorimetry_val[colorimetry_index];
+
+ frame->colorimetry = colorimetry_val & NORMAL_COLORIMETRY_MASK;
+ /*
+ * ToDo: Extend it for ACE formats as well. Modify the infoframe
+ * structure and extend it in drivers/video/hdmi
+ */
+ frame->extended_colorimetry = (colorimetry_val >> 2) &
+ EXTENDED_COLORIMETRY_MASK;
+}
+EXPORT_SYMBOL(drm_hdmi_avi_infoframe_colorspace);
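As a worked example of the split performed above (values taken from the table in this patch): DRM_MODE_COLORIMETRY_BT2020_RGB maps to C(3) | EC(6) = 0x3 | (6 << 2) = 0x1b, so

        frame->colorimetry          = 0x1b & NORMAL_COLORIMETRY_MASK;          /* = 3, "Extended" */
        frame->extended_colorimetry = (0x1b >> 2) & EXTENDED_COLORIMETRY_MASK; /* = 6, BT.2020 RGB */

i.e. the low two bits select the "extended colorimetry" code in the AVI C field and the next three bits carry the BT.2020 RGB code in EC.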
+
/**
* drm_hdmi_avi_infoframe_quant_range() - fill the HDMI AVI infoframe
* quantization range information
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index af2ab640cadb..498f95c3e81d 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -639,20 +639,19 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
static void dpms_legacy(struct drm_fb_helper *fb_helper, int dpms_mode)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_crtc *crtc;
struct drm_connector *connector;
+ struct drm_mode_set *modeset;
int i, j;
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
- crtc = fb_helper->crtc_info[i].mode_set.crtc;
+ modeset = &fb_helper->crtc_info[i].mode_set;
- if (!crtc->enabled)
+ if (!modeset->crtc->enabled)
continue;
- /* Walk the connectors & encoders on this fb turning them on/off */
- drm_fb_helper_for_each_connector(fb_helper, j) {
- connector = fb_helper->connector_info[j]->connector;
+ for (j = 0; j < modeset->num_connectors; j++) {
+ connector = modeset->connectors[j];
connector->funcs->dpms(connector, dpms_mode);
drm_object_property_set_value(&connector->base,
dev->mode_config.dpms_property, dpms_mode);
@@ -934,6 +933,7 @@ struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
}
fb_helper->fbdev = info;
+ info->skip_vt_switch = true;
return info;
@@ -1873,7 +1873,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int crtc_count = 0;
int i;
struct drm_fb_helper_surface_size sizes;
- int gamma_size = 0;
int best_depth = 0;
memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
@@ -1889,7 +1888,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
if (preferred_bpp != sizes.surface_bpp)
sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
- /* first up get a count of crtcs now in use and new min/maxes width/heights */
drm_fb_helper_for_each_connector(fb_helper, i) {
struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
struct drm_cmdline_mode *cmdline_mode;
@@ -1969,6 +1967,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
sizes.surface_depth = best_depth;
}
+ /* first up get a count of crtcs now in use and new min/maxes width/heights */
crtc_count = 0;
for (i = 0; i < fb_helper->crtc_count; i++) {
struct drm_display_mode *desired_mode;
@@ -1991,9 +1990,6 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
x = fb_helper->crtc_info[i].x;
y = fb_helper->crtc_info[i].y;
- if (gamma_size == 0)
- gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
-
sizes.surface_width = max_t(u32, desired_mode->hdisplay + x, sizes.surface_width);
sizes.surface_height = max_t(u32, desired_mode->vdisplay + y, sizes.surface_height);
@@ -2036,21 +2032,8 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
return 0;
}
-/**
- * drm_fb_helper_fill_fix - initializes fixed fbdev information
- * @info: fbdev registered by the helper
- * @pitch: desired pitch
- * @depth: desired depth
- *
- * Helper to fill in the fixed fbdev information useful for a non-accelerated
- * fbdev emulations. Drivers which support acceleration methods which impose
- * additional constraints need to set up their own limits.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback.
- */
-void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
- uint32_t depth)
+static void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
+ uint32_t depth)
{
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
@@ -2065,24 +2048,10 @@ void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
info->fix.line_length = pitch;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_fix);
-/**
- * drm_fb_helper_fill_var - initalizes variable fbdev information
- * @info: fbdev instance to set up
- * @fb_helper: fb helper instance to use as template
- * @fb_width: desired fb width
- * @fb_height: desired fb height
- *
- * Sets up the variable fbdev metainformation from the given fb helper instance
- * and the drm framebuffer allocated in &drm_fb_helper.fb.
- *
- * Drivers should call this (or their equivalent setup code) from their
- * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
- * backing storage framebuffer.
- */
-void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
- uint32_t fb_width, uint32_t fb_height)
+static void drm_fb_helper_fill_var(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ uint32_t fb_width, uint32_t fb_height)
{
struct drm_framebuffer *fb = fb_helper->fb;
@@ -2102,7 +2071,36 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe
info->var.xres = fb_width;
info->var.yres = fb_height;
}
-EXPORT_SYMBOL(drm_fb_helper_fill_var);
+
+/**
+ * drm_fb_helper_fill_info - initializes fbdev information
+ * @info: fbdev instance to set up
+ * @fb_helper: fb helper instance to use as template
+ * @sizes: describes fbdev size and scanout surface size
+ *
+ * Sets up the variable and fixed fbdev metainformation from the given fb helper
+ * instance and the drm framebuffer allocated in &drm_fb_helper.fb.
+ *
+ * Drivers should call this (or their equivalent setup code) from their
+ * &drm_fb_helper_funcs.fb_probe callback after having allocated the fbdev
+ * backing storage framebuffer.
+ */
+void drm_fb_helper_fill_info(struct fb_info *info,
+ struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
+ drm_fb_helper_fill_var(info, fb_helper,
+ sizes->fb_width, sizes->fb_height);
+
+ info->par = fb_helper;
+ snprintf(info->fix.id, sizeof(info->fix.id), "%sdrmfb",
+ fb_helper->dev->driver->name);
+
+}
+EXPORT_SYMBOL(drm_fb_helper_fill_info);
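A heavily abbreviated &drm_fb_helper_funcs.fb_probe sketch using the new helper; only the two fb-helper calls are real, the rest of a normal fb_probe (creating helper->fb, setting info->fbops, etc.) is omitted and assumed to have happened already:

static int example_fb_probe(struct drm_fb_helper *helper,
                            struct drm_fb_helper_surface_size *sizes)
{
        struct fb_info *info = drm_fb_helper_alloc_fbi(helper);

        if (IS_ERR(info))
                return PTR_ERR(info);

        /* Replaces the old fill_fix()/fill_var() pair and also sets info->par. */
        drm_fb_helper_fill_info(info, helper, sizes);
        return 0;
}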
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
uint32_t maxX,
@@ -2561,6 +2559,195 @@ static void drm_setup_crtc_rotation(struct drm_fb_helper *fb_helper,
fb_helper->sw_rotations |= DRM_MODE_ROTATE_0;
}
+static struct drm_fb_helper_crtc *
+drm_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
+{
+ int i;
+
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
+ return &fb_helper->crtc_info[i];
+
+ return NULL;
+}
+
+/* Try to read the BIOS display configuration and use it for the initial config */
+static bool drm_fb_helper_firmware_config(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_crtc **crtcs,
+ struct drm_display_mode **modes,
+ struct drm_fb_offset *offsets,
+ bool *enabled, int width, int height)
+{
+ struct drm_device *dev = fb_helper->dev;
+ unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq, mask;
+ int i, j;
+ bool *save_enabled;
+ bool fallback = true, ret = true;
+ int num_connectors_enabled = 0;
+ int num_connectors_detected = 0;
+ struct drm_modeset_acquire_ctx ctx;
+
+ if (!drm_drv_uses_atomic_modeset(dev))
+ return false;
+
+ save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
+ if (!save_enabled)
+ return false;
+
+ drm_modeset_acquire_init(&ctx, 0);
+
+ while (drm_modeset_lock_all_ctx(dev, &ctx) != 0)
+ drm_modeset_backoff(&ctx);
+
+ memcpy(save_enabled, enabled, count);
+ mask = GENMASK(count - 1, 0);
+ conn_configured = 0;
+retry:
+ conn_seq = conn_configured;
+ for (i = 0; i < count; i++) {
+ struct drm_fb_helper_connector *fb_conn;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+ struct drm_fb_helper_crtc *new_crtc;
+
+ fb_conn = fb_helper->connector_info[i];
+ connector = fb_conn->connector;
+
+ if (conn_configured & BIT(i))
+ continue;
+
+ if (conn_seq == 0 && !connector->has_tile)
+ continue;
+
+ if (connector->status == connector_status_connected)
+ num_connectors_detected++;
+
+ if (!enabled[i]) {
+ DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
+ connector->name);
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ if (connector->force == DRM_FORCE_OFF) {
+ DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ continue;
+ }
+
+ encoder = connector->state->best_encoder;
+ if (!encoder || WARN_ON(!connector->state->crtc)) {
+ if (connector->force > DRM_FORCE_OFF)
+ goto bail;
+
+ DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
+ connector->name);
+ enabled[i] = false;
+ conn_configured |= BIT(i);
+ continue;
+ }
+
+ num_connectors_enabled++;
+
+ new_crtc = drm_fb_helper_crtc(fb_helper, connector->state->crtc);
+
+ /*
+ * Make sure we're not trying to drive multiple connectors
+ * with a single CRTC, since our cloning support may not
+ * match the BIOS.
+ */
+ for (j = 0; j < count; j++) {
+ if (crtcs[j] == new_crtc) {
+ DRM_DEBUG_KMS("fallback: cloned configuration\n");
+ goto bail;
+ }
+ }
+
+ DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
+ connector->name);
+
+ /* go for command line mode first */
+ modes[i] = drm_pick_cmdline_mode(fb_conn);
+
+ /* try for preferred next */
+ if (!modes[i]) {
+ DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
+ connector->name, connector->has_tile);
+ modes[i] = drm_has_preferred_mode(fb_conn, width,
+ height);
+ }
+
+ /* No preferred mode marked by the EDID? Are there any modes? */
+ if (!modes[i] && !list_empty(&connector->modes)) {
+ DRM_DEBUG_KMS("using first mode listed on connector %s\n",
+ connector->name);
+ modes[i] = list_first_entry(&connector->modes,
+ struct drm_display_mode,
+ head);
+ }
+
+ /* last resort: use current mode */
+ if (!modes[i]) {
+ /*
+ * IMPORTANT: We want to use the adjusted mode (i.e.
+ * after the panel fitter upscaling) as the initial
+ * config, not the input mode, which is what crtc->mode
+ * usually contains. But since our current
+ * code puts a mode derived from the post-pfit timings
+ * into crtc->mode this works out correctly.
+ *
+ * This is crtc->mode and not crtc->state->mode for the
+ * fastboot check to work correctly.
+ */
+ DRM_DEBUG_KMS("looking for current mode on connector %s\n",
+ connector->name);
+ modes[i] = &connector->state->crtc->mode;
+ }
+ crtcs[i] = new_crtc;
+
+ DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
+ connector->name,
+ connector->state->crtc->base.id,
+ connector->state->crtc->name,
+ modes[i]->hdisplay, modes[i]->vdisplay,
+ modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : "");
+
+ fallback = false;
+ conn_configured |= BIT(i);
+ }
+
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ goto retry;
+
+ /*
+ * If the BIOS didn't enable everything it could, fall back to the
+ * fbdev helper library's behaviour of lighting up as many outputs
+ * as possible.
+ */
+ if (num_connectors_enabled != num_connectors_detected &&
+ num_connectors_enabled < dev->mode_config.num_crtc) {
+ DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
+ DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
+ num_connectors_detected);
+ fallback = true;
+ }
+
+ if (fallback) {
+bail:
+ DRM_DEBUG_KMS("Not using firmware configuration\n");
+ memcpy(enabled, save_enabled, count);
+ ret = false;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ kfree(save_enabled);
+ return ret;
+}
+
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
u32 width, u32 height)
{
@@ -2593,10 +2780,8 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper,
DRM_DEBUG_KMS("No connectors reported connected with modes\n");
drm_enable_connectors(fb_helper, enabled);
- if (!(fb_helper->funcs->initial_config &&
- fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
- offsets,
- enabled, width, height))) {
+ if (!drm_fb_helper_firmware_config(fb_helper, crtcs, modes, offsets,
+ enabled, width, height)) {
memset(modes, 0, fb_helper->connector_count*sizeof(modes[0]));
memset(crtcs, 0, fb_helper->connector_count*sizeof(crtcs[0]));
memset(offsets, 0, fb_helper->connector_count*sizeof(offsets[0]));
@@ -2780,9 +2965,8 @@ __drm_fb_helper_initial_config_and_unlock(struct drm_fb_helper *fb_helper,
*
* This function will call down into the &drm_fb_helper_funcs.fb_probe callback
* to let the driver allocate and initialize the fbdev info structure and the
- * drm framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
- * drm_fb_helper_fill_fix() are provided as helpers to setup simple default
- * values for the fbdev info structure.
+ * drm framebuffer used to back the fbdev. drm_fb_helper_fill_info() is provided
+ * as a helper to setup simple default values for the fbdev info structure.
*
* HANG DEBUGGING:
*
@@ -3024,7 +3208,8 @@ static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- if (!try_module_get(fb_helper->dev->driver->fops->owner))
+ /* No need to take a ref for fbcon because it unbinds on unregister */
+ if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
return -ENODEV;
return 0;
@@ -3034,7 +3219,8 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
struct drm_fb_helper *fb_helper = info->par;
- module_put(fb_helper->dev->driver->fops->owner);
+ if (user)
+ module_put(fb_helper->dev->driver->fops->owner);
return 0;
}
@@ -3149,7 +3335,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
if (IS_ERR(fbi))
return PTR_ERR(fbi);
- fbi->par = fb_helper;
fbi->fbops = &drm_fbdev_fb_ops;
fbi->screen_size = fb->height * fb->pitches[0];
fbi->fix.smem_len = fbi->screen_size;
@@ -3160,10 +3345,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
fbi->fix.smem_start =
page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
- strcpy(fbi->fix.id, "DRM emulated");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, fb_helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, fb_helper, sizes);
if (fb->funcs->dirty) {
struct fb_ops *fbops;
@@ -3317,8 +3499,6 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
return ret;
}
- drm_client_add(&fb_helper->client);
-
if (!preferred_bpp)
preferred_bpp = dev->mode_config.preferred_depth;
if (!preferred_bpp)
@@ -3329,6 +3509,8 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
if (ret)
DRM_DEV_DEBUG(dev->dev, "client hotplug ret=%d\n", ret);
+ drm_client_register(&fb_helper->client);
+
return 0;
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 7caa3c7ed978..233f114d2186 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -128,7 +128,6 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
/* for compatibility root is always authenticated */
file->authenticated = capable(CAP_SYS_ADMIN);
- file->lock_count = 0;
INIT_LIST_HEAD(&file->lhead);
INIT_LIST_HEAD(&file->fbs);
@@ -425,30 +424,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
return 0;
}
-static void drm_legacy_dev_reinit(struct drm_device *dev)
-{
- if (dev->irq_enabled)
- drm_irq_uninstall(dev);
-
- mutex_lock(&dev->struct_mutex);
-
- drm_legacy_agp_clear(dev);
-
- drm_legacy_sg_cleanup(dev);
- drm_legacy_vma_flush(dev);
- drm_legacy_dma_takedown(dev);
-
- mutex_unlock(&dev->struct_mutex);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->last_context = 0;
- dev->if_version = 0;
-
- DRM_DEBUG("lastclose completed\n");
-}
-
void drm_lastclose(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -577,6 +552,7 @@ put_back_event:
file_priv->event_space -= length;
list_add(&e->link, &file_priv->event_list);
spin_unlock_irq(&dev->event_lock);
+ wake_up_interruptible(&file_priv->event_wait);
break;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
new file mode 100644
index 000000000000..a18da35145b7
--- /dev/null
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -0,0 +1,324 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016 Noralf Trønnes
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <drm/drm_format_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_rect.h>
+
+static unsigned int clip_offset(struct drm_rect *clip,
+ unsigned int pitch, unsigned int cpp)
+{
+ return clip->y1 * pitch + clip->x1 * cpp;
+}
+
+/**
+ * drm_fb_memcpy - Copy clip buffer
+ * @dst: Destination buffer
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += clip_offset(clip, fb->pitches[0], cpp);
+ for (y = 0; y < lines; y++) {
+ memcpy(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += len;
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy);
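To make the clip arithmetic above concrete, a worked example with illustrative numbers: for an XRGB8888 framebuffer (cpp = 4) with pitches[0] = 4096 and a clip of (x1, y1)-(x2, y2) = (8, 2)-(72, 10):

        offset into vaddr = y1 * pitch + x1 * cpp = 2 * 4096 + 8 * 4 = 8224 bytes
        bytes per line    = (x2 - x1) * cpp       = 64 * 4          = 256 bytes
        lines copied      = y2 - y1               = 8

vaddr advances by the full pitch per line while dst is packed line after line, which is why the destination only needs to be clip-sized.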
+
+/**
+ * drm_fb_memcpy_dstclip - Copy clip buffer
+ * @dst: Destination buffer (iomem)
+ * @vaddr: Source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
+ unsigned int offset = clip_offset(clip, fb->pitches[0], cpp);
+ size_t len = (clip->x2 - clip->x1) * cpp;
+ unsigned int y, lines = clip->y2 - clip->y1;
+
+ vaddr += offset;
+ dst += offset;
+ for (y = 0; y < lines; y++) {
+ memcpy_toio(dst, vaddr, len);
+ vaddr += fb->pitches[0];
+ dst += fb->pitches[0];
+ }
+}
+EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
+
+/**
+ * drm_fb_swab16 - Swap bytes into clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: RGB565 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ */
+void drm_fb_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t len = (clip->x2 - clip->x1) * sizeof(u16);
+ unsigned int x, y;
+ u16 *src, *buf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++)
+ *dst++ = swab16(*src++);
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_swab16);
+
+static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
+ unsigned int pixels,
+ bool swab)
+{
+ unsigned int x;
+ u16 val16;
+
+ for (x = 0; x < pixels; x++) {
+ val16 = ((sbuf[x] & 0x00F80000) >> 8) |
+ ((sbuf[x] & 0x0000FC00) >> 5) |
+ ((sbuf[x] & 0x000000F8) >> 3);
+ if (swab)
+ dbuf[x] = swab16(val16);
+ else
+ dbuf[x] = val16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb565(void *dst, void *vaddr,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t src_len = linepixels * sizeof(u32);
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *sbuf;
+
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < lines; y++) {
+ memcpy(sbuf, vaddr, src_len);
+ drm_fb_xrgb8888_to_rgb565_line(dst, sbuf, linepixels, swab);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565);
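A worked single-pixel example of the XRGB8888 -> RGB565 line conversion used above (illustrative value): for sbuf[x] = 0x00ff8040,

        R: (0x00ff8040 & 0x00F80000) >> 8 = 0xf800
        G: (0x00ff8040 & 0x0000FC00) >> 5 = 0x0400
        B: (0x00ff8040 & 0x000000F8) >> 3 = 0x0008

so dbuf[x] = 0xfc08, or swab16(0xfc08) = 0x08fc when @swab is set.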
+
+/**
+ * drm_fb_xrgb8888_to_rgb565_dstclip - Convert XRGB8888 to RGB565 clip buffer
+ * @dst: RGB565 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ * @swab: Swap bytes
+ *
+ * Drivers can use this function for RGB565 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb565_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip, bool swab)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * sizeof(u16);
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, sizeof(u16));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb565_line(dbuf, vaddr, linepixels, swab);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565_dstclip);
+
+static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
+ unsigned int pixels)
+{
+ unsigned int x;
+
+ for (x = 0; x < pixels; x++) {
+ *dbuf++ = (sbuf[x] & 0x000000FF) >> 0;
+ *dbuf++ = (sbuf[x] & 0x0000FF00) >> 8;
+ *dbuf++ = (sbuf[x] & 0x00FF0000) >> 16;
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer (iomem)
+ * @dst_pitch: destination buffer pitch
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function applies clipping on dst, i.e. the destination is a
+ * full (iomem) framebuffer but only the clip rect content is copied over.
+ */
+void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t linepixels = clip->x2 - clip->x1;
+ size_t dst_len = linepixels * 3;
+ unsigned y, lines = clip->y2 - clip->y1;
+ void *dbuf;
+
+ dbuf = kmalloc(dst_len, GFP_KERNEL);
+ if (!dbuf)
+ return;
+
+ vaddr += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ dst += clip_offset(clip, dst_pitch, sizeof(u16));
+ for (y = 0; y < lines; y++) {
+ drm_fb_xrgb8888_to_rgb888_line(dbuf, vaddr, linepixels);
+ memcpy_toio(dst, dbuf, dst_len);
+ vaddr += fb->pitches[0];
+ dst += dst_len;
+ }
+
+ kfree(dbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888_dstclip);
+
+/**
+ * drm_fb_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
+ * @dst: 8-bit grayscale destination buffer
+ * @vaddr: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * DRM doesn't have native monochrome or grayscale support.
+ * Drivers for such displays can announce the commonly supported XR24 format
+ * to userspace and use this function to convert to the native format.
+ *
+ * Monochrome drivers will use the most significant bit,
+ * where 1 means foreground color and 0 background color.
+ *
+ * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
+ */
+void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
+ unsigned int x, y;
+ void *buf;
+ u32 *src;
+
+ if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
+ return;
+ /*
+ * The cma memory is write-combined so reads are uncached.
+ * Speed up by fetching one line at a time.
+ */
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ src = vaddr + (y * fb->pitches[0]);
+ src += clip->x1;
+ memcpy(buf, src, len);
+ src = buf;
+ for (x = clip->x1; x < clip->x2; x++) {
+ u8 r = (*src & 0x00ff0000) >> 16;
+ u8 g = (*src & 0x0000ff00) >> 8;
+ u8 b = *src & 0x000000ff;
+
+ /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
+ *dst++ = (3 * r + 6 * g + b) / 10;
+ src++;
+ }
+ }
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
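A worked example of the integer BT.601 approximation above (illustrative pixel): for the XRGB8888 value 0x00ff8040, r = 255, g = 128, b = 64, so

        *dst = (3 * 255 + 6 * 128 + 64) / 10 = 1597 / 10 = 159

which matches the exact BT.601 weighting (0.299 * 255 + 0.587 * 128 + 0.114 * 64 ≈ 158.7) to within a rounding step.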
+
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index ba7e19d4336c..6ea55fb4526d 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -198,6 +198,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_XRGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_ARGB16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
+ { .format = DRM_FORMAT_ABGR16161616F, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
@@ -225,7 +229,17 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_XYUV8888, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY888, .depth = 0, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y210, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y212, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y216, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 2, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_Y410, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y412, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_Y416, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU2101010, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU12_16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_XVYU16161616, .depth = 0, .num_planes = 1, .cpp = { 8, 0, 0 }, .hsub = 1, .vsub = 1, .is_yuv = true },
{ .format = DRM_FORMAT_Y0L0, .depth = 0, .num_planes = 1,
.char_per_block = { 8, 0, 0 }, .block_w = { 2, 0, 0 }, .block_h = { 2, 0, 0 },
.hsub = 2, .vsub = 2, .has_alpha = true, .is_yuv = true },
@@ -247,6 +261,19 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_P016, .depth = 0, .num_planes = 2,
.char_per_block = { 2, 4, 0 }, .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 },
.hsub = 2, .vsub = 2, .is_yuv = true},
+ { .format = DRM_FORMAT_P210, .depth = 0,
+ .num_planes = 2, .char_per_block = { 2, 4, 0 },
+ .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, .hsub = 2,
+ .vsub = 1, .is_yuv = true },
+ { .format = DRM_FORMAT_VUY101010, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 1, .vsub = 1,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_8BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
+ { .format = DRM_FORMAT_YUV420_10BIT, .depth = 0,
+ .num_planes = 1, .cpp = { 0, 0, 0 }, .hsub = 2, .vsub = 2,
+ .is_yuv = true },
};
unsigned int i;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index d0b9f6a9953f..50de138c89e0 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -72,23 +72,6 @@
* up at a later date, and as our interface with shmfs for memory allocation.
*/
-/*
- * We make up offsets for buffer objects so we can recognize them at
- * mmap time.
- */
-
-/* pgoff in mmap is an unsigned long, so we need to make sure that
- * the faked up offset will fit
- */
-
-#if BITS_PER_LONG == 64
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
-#else
-#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
-#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
-#endif
-
/**
* drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
@@ -171,6 +154,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
+ reservation_object_init(&obj->_resv);
+ if (!obj->resv)
+ obj->resv = &obj->_resv;
+
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -659,6 +646,85 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
}
EXPORT_SYMBOL(drm_gem_put_pages);
+static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
+ struct drm_gem_object **objs)
+{
+ int i, ret = 0;
+ struct drm_gem_object *obj;
+
+ spin_lock(&filp->table_lock);
+
+ for (i = 0; i < count; i++) {
+ /* Check if we currently have a reference on the object */
+ obj = idr_find(&filp->object_idr, handle[i]);
+ if (!obj) {
+ ret = -ENOENT;
+ break;
+ }
+ drm_gem_object_get(obj);
+ objs[i] = obj;
+ }
+ spin_unlock(&filp->table_lock);
+
+ return ret;
+}
+
+/**
+ * drm_gem_objects_lookup - look up GEM objects from an array of handles
+ * @filp: DRM file private data
+ * @bo_handles: user pointer to an array of userspace handles
+ * @count: size of handle array
+ * @objs_out: returned pointer to array of drm_gem_object pointers
+ *
+ * Takes an array of userspace handles and returns a newly allocated array of
+ * GEM objects.
+ *
+ * For a single handle lookup, use drm_gem_object_lookup().
+ *
+ * Returns:
+ *
+ * @objs_out filled in with GEM object pointers. Returned GEM objects need to be
+ * released with drm_gem_object_put(). -ENOENT is returned on a lookup
+ * failure. 0 is returned on success.
+ *
+ */
+int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
+ int count, struct drm_gem_object ***objs_out)
+{
+ int ret;
+ u32 *handles;
+ struct drm_gem_object **objs;
+
+ if (!count)
+ return 0;
+
+ objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!objs)
+ return -ENOMEM;
+
+ handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in GEM handles\n");
+ goto out;
+ }
+
+ ret = objects_lookup(filp, handles, count, objs);
+ *objs_out = objs;
+
+out:
+ kvfree(handles);
+ return ret;
+
+}
+EXPORT_SYMBOL(drm_gem_objects_lookup);
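A hedged sketch of an ioctl-style caller (everything apart from the helpers is illustrative, and error handling is simplified for brevity):

#include <linux/mm.h>
#include <drm/drm_gem.h>

static int example_use_handles(struct drm_file *file,
                               void __user *bo_handles, int count)
{
        struct drm_gem_object **objs = NULL;
        int i, ret;

        ret = drm_gem_objects_lookup(file, bo_handles, count, &objs);
        if (ret)
                return ret;

        /* ... use objs[0..count-1], e.g. lock their reservations ... */

        for (i = 0; i < count; i++)
                drm_gem_object_put_unlocked(objs[i]);
        kvfree(objs);
        return 0;
}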
+
/**
* drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
@@ -668,24 +734,56 @@ EXPORT_SYMBOL(drm_gem_put_pages);
*
* A reference to the object named by the handle if such exists on @filp, NULL
* otherwise.
+ *
+ * If looking up an array of handles, use drm_gem_objects_lookup().
*/
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
+ struct drm_gem_object *obj = NULL;
+
+ objects_lookup(filp, &handle, 1, &obj);
+ return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * drm_gem_reservation_object_wait - Wait on the shared and/or exclusive
+ * fences of a GEM object's reservation object.
+ * @filep: DRM file private data
+ * @handle: userspace handle
+ * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Returns:
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than 0 on success.
+ */
+long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
+ bool wait_all, unsigned long timeout)
+{
+ long ret;
struct drm_gem_object *obj;
- spin_lock(&filp->table_lock);
+ obj = drm_gem_object_lookup(filep, handle);
+ if (!obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
+ return -EINVAL;
+ }
- /* Check if we currently have a reference on the object */
- obj = idr_find(&filp->object_idr, handle);
- if (obj)
- drm_gem_object_get(obj);
+ ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
+ true, timeout);
+ if (ret == 0)
+ ret = -ETIME;
+ else if (ret > 0)
+ ret = 0;
- spin_unlock(&filp->table_lock);
+ drm_gem_object_put_unlocked(obj);
- return obj;
+ return ret;
}
-EXPORT_SYMBOL(drm_gem_object_lookup);
+EXPORT_SYMBOL(drm_gem_reservation_object_wait);
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
@@ -851,6 +949,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
+ reservation_object_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
@@ -1190,3 +1289,174 @@ void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr)
obj->dev->driver->gem_prime_vunmap(obj, vaddr);
}
EXPORT_SYMBOL(drm_gem_vunmap);
+
+/**
+ * drm_gem_lock_reservations - Sets up the ww context and acquires
+ * the lock on an array of GEM objects.
+ *
+ * Once you've locked your reservations, you'll want to set up space
+ * for your shared fences (if applicable), submit your job, then
+ * drm_gem_unlock_reservations().
+ *
+ * @objs: drm_gem_objects to lock
+ * @count: Number of objects in @objs
+ * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
+ * part of tracking this set of locked reservations.
+ */
+int
+drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int contended = -1;
+ int i, ret;
+
+ ww_acquire_init(acquire_ctx, &reservation_ww_class);
+
+retry:
+ if (contended != -1) {
+ struct drm_gem_object *obj = objs[contended];
+
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ if (i == contended)
+ continue;
+
+ ret = ww_mutex_lock_interruptible(&objs[i]->resv->lock,
+ acquire_ctx);
+ if (ret) {
+ int j;
+
+ for (j = 0; j < i; j++)
+ ww_mutex_unlock(&objs[j]->resv->lock);
+
+ if (contended != -1 && contended >= i)
+ ww_mutex_unlock(&objs[contended]->resv->lock);
+
+ if (ret == -EDEADLK) {
+ contended = i;
+ goto retry;
+ }
+
+ ww_acquire_done(acquire_ctx);
+ return ret;
+ }
+ }
+
+ ww_acquire_done(acquire_ctx);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_lock_reservations);
+
+void
+drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
+ struct ww_acquire_ctx *acquire_ctx)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ ww_mutex_unlock(&objs[i]->resv->lock);
+
+ ww_acquire_fini(acquire_ctx);
+}
+EXPORT_SYMBOL(drm_gem_unlock_reservations);
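The comment on drm_gem_lock_reservations() above describes the intended pattern; as a sketch (illustrative names, job submission elided):

#include <drm/drm_gem.h>

static int example_submit(struct drm_gem_object **objs, int count)
{
        struct ww_acquire_ctx ctx;
        int ret;

        ret = drm_gem_lock_reservations(objs, count, &ctx);
        if (ret)
                return ret;

        /* ... reserve fence slots, push the job, install its fence ... */

        drm_gem_unlock_reservations(objs, count, &ctx);
        return 0;
}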
+
+/**
+ * drm_gem_fence_array_add - Adds the fence to an array of fences to be
+ * waited on, deduplicating fences from the same context.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_gem_fence_array_add(struct xarray *fence_array,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(fence_array, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(fence_array, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add);
+
+/**
+ * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
+ * in the GEM object's reservation object to an array of dma_fences for use in
+ * scheduling a rendering job.
+ *
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * @fence_array: array of dma_fence * for the job to block on.
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
+ */
+int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence =
+ reservation_object_get_excl_rcu(obj->resv);
+
+ return drm_gem_fence_array_add(fence_array, fence);
+ }
+
+ ret = reservation_object_get_fences_rcu(obj->resv, NULL,
+ &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_gem_fence_array_add(fence_array, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
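A sketch of collecting a job's implicit dependencies with the two helpers above (illustrative wrapper; note the xarray has to be an allocating one because drm_gem_fence_array_add() uses xa_alloc()):

#include <linux/xarray.h>
#include <drm/drm_gem.h>

static int example_collect_deps(struct xarray *deps,
                                struct drm_gem_object **objs,
                                int count, bool write)
{
        int i, ret;

        /* Allocating xarray, so xa_alloc() can assign indices. */
        xa_init_flags(deps, XA_FLAGS_ALLOC);

        for (i = 0; i < count; i++) {
                ret = drm_gem_fence_array_add_implicit(deps, objs[i], write);
                if (ret)
                        return ret;
        }
        return 0;
}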
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cc26625b4b33..e01ceed09e67 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -186,13 +186,13 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
cma_obj = to_drm_gem_cma_obj(gem_obj);
- if (cma_obj->vaddr) {
- dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
- cma_obj->vaddr, cma_obj->paddr);
- } else if (gem_obj->import_attach) {
+ if (gem_obj->import_attach) {
if (cma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, cma_obj->vaddr);
drm_prime_gem_destroy(gem_obj, cma_obj->sgt);
+ } else if (cma_obj->vaddr) {
+ dma_free_wc(gem_obj->dev->dev, cma_obj->base.size,
+ cma_obj->vaddr, cma_obj->paddr);
}
drm_gem_object_release(gem_obj);
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
new file mode 100644
index 000000000000..1ee208c2c85e
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -0,0 +1,625 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Noralf Trønnes
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * This library provides helpers for GEM objects backed by shmem buffers
+ * allocated using anonymous pageable memory.
+ */
+
+static const struct drm_gem_object_funcs drm_gem_shmem_funcs = {
+ .free = drm_gem_shmem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * drm_gem_shmem_create - Allocate an object with the given size
+ * @dev: DRM device
+ * @size: Size of the object to allocate
+ *
+ * This function creates a shmem GEM object.
+ *
+ * Returns:
+ * A struct drm_gem_shmem_object * on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size)
+{
+ struct drm_gem_shmem_object *shmem;
+ struct drm_gem_object *obj;
+ int ret;
+
+ size = PAGE_ALIGN(size);
+
+ if (dev->driver->gem_create_object)
+ obj = dev->driver->gem_create_object(dev, size);
+ else
+ obj = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ if (!obj->funcs)
+ obj->funcs = &drm_gem_shmem_funcs;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto err_free;
+
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto err_release;
+
+ shmem = to_drm_gem_shmem_obj(obj);
+ mutex_init(&shmem->pages_lock);
+ mutex_init(&shmem->vmap_lock);
+
+ /*
+ * Our buffers are kept pinned, so allocating them
+ * from the MOVABLE zone is a really bad idea, and
+ * conflicts with CMA. See the comments above new_inode()
+ * for why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
+ return shmem;
+
+err_release:
+ drm_gem_object_release(obj);
+err_free:
+ kfree(obj);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+
+/**
+ * drm_gem_shmem_free_object - Free resources associated with a shmem GEM object
+ * @obj: GEM object to free
+ *
+ * This function cleans up the GEM object state and frees the memory used to
+ * store the object itself.
+ */
+void drm_gem_shmem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ WARN_ON(shmem->vmap_use_count);
+
+ if (obj->import_attach) {
+ shmem->pages_use_count--;
+ drm_prime_gem_destroy(obj, shmem->sgt);
+ kvfree(shmem->pages);
+ } else {
+ if (shmem->sgt) {
+ dma_unmap_sg(obj->dev->dev, shmem->sgt->sgl,
+ shmem->sgt->nents, DMA_BIDIRECTIONAL);
+
+ drm_gem_shmem_put_pages(shmem);
+ sg_free_table(shmem->sgt);
+ kfree(shmem->sgt);
+ }
+ }
+
+ WARN_ON(shmem->pages_use_count);
+
+ drm_gem_object_release(obj);
+ mutex_destroy(&shmem->pages_lock);
+ mutex_destroy(&shmem->vmap_lock);
+ kfree(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_free_object);
+
+static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ struct page **pages;
+
+ if (shmem->pages_use_count++ > 0)
+ return 0;
+
+ pages = drm_gem_get_pages(obj);
+ if (IS_ERR(pages)) {
+ DRM_DEBUG_KMS("Failed to get pages (%ld)\n", PTR_ERR(pages));
+ shmem->pages_use_count = 0;
+ return PTR_ERR(pages);
+ }
+
+ shmem->pages = pages;
+
+ return 0;
+}
+
+/*
+ * drm_gem_shmem_get_pages - Allocate backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that backing pages exist for the shmem GEM object
+ * and increases the use count.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->pages_lock);
+ if (ret)
+ return ret;
+ ret = drm_gem_shmem_get_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_gem_shmem_get_pages);
+
+static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->pages_use_count))
+ return;
+
+ if (--shmem->pages_use_count > 0)
+ return;
+
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+}
+
+/*
+ * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function decreases the use count and puts the backing pages when the use count drops to zero.
+ */
+void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
+{
+ mutex_lock(&shmem->pages_lock);
+ drm_gem_shmem_put_pages_locked(shmem);
+ mutex_unlock(&shmem->pages_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_put_pages);
+
+/**
+ * drm_gem_shmem_pin - Pin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function makes sure the backing pages are pinned in memory while the
+ * buffer is exported.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_pin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_gem_shmem_get_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_pin);
+
+/**
+ * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function removes the requirement that the backing pages are pinned in
+ * memory.
+ */
+void drm_gem_shmem_unpin(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+}
+EXPORT_SYMBOL(drm_gem_shmem_unpin);
+
+static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+ int ret;
+
+ if (shmem->vmap_use_count++ > 0)
+ return shmem->vaddr;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ goto err_zero_use;
+
+ if (obj->import_attach)
+ shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ else
+ shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+
+ if (!shmem->vaddr) {
+ DRM_DEBUG_KMS("Failed to vmap pages\n");
+ ret = -ENOMEM;
+ goto err_put_pages;
+ }
+
+ return shmem->vaddr;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+err_zero_use:
+ shmem->vmap_use_count = 0;
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object
+ * @shmem: shmem GEM object
+ *
+ * This function makes sure that a virtual address exists for the buffer backing
+ * the shmem GEM object.
+ *
+ * Returns:
+ * A pointer to the virtual mapping on success or an ERR_PTR()-encoded negative
+ * error code on failure.
+ */
+void *drm_gem_shmem_vmap(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ void *vaddr;
+ int ret;
+
+ ret = mutex_lock_interruptible(&shmem->vmap_lock);
+ if (ret)
+ return ERR_PTR(ret);
+ vaddr = drm_gem_shmem_vmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+
+ return vaddr;
+}
+EXPORT_SYMBOL(drm_gem_shmem_vmap);
+
+static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem)
+{
+ struct drm_gem_object *obj = &shmem->base;
+
+ if (WARN_ON_ONCE(!shmem->vmap_use_count))
+ return;
+
+ if (--shmem->vmap_use_count > 0)
+ return;
+
+ if (obj->import_attach)
+ dma_buf_vunmap(obj->import_attach->dmabuf, shmem->vaddr);
+ else
+ vunmap(shmem->vaddr);
+
+ shmem->vaddr = NULL;
+ drm_gem_shmem_put_pages(shmem);
+}
+
+/*
+ * drm_gem_shmem_vunmap - Unmap a virtual mapping of a shmem GEM object
+ * @obj: GEM object
+ * @vaddr: Virtual address of the mapping to remove
+ *
+ * This function removes the virtual address when the use count drops to zero.
+ */
+void drm_gem_shmem_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ mutex_lock(&shmem->vmap_lock);
+ drm_gem_shmem_vunmap_locked(shmem);
+ mutex_unlock(&shmem->vmap_lock);
+}
+EXPORT_SYMBOL(drm_gem_shmem_vunmap);
+
+struct drm_gem_shmem_object *
+drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
+ struct drm_device *dev, size_t size,
+ uint32_t *handle)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return shmem;
+
+ /*
+ * Allocate an id in the idr table where the object is registered;
+ * the handle holds the id that userspace can see.
+ */
+ ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_put_unlocked(&shmem->base);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return shmem;
+}
+EXPORT_SYMBOL(drm_gem_shmem_create_with_handle);
+
+/**
+ * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
+ * @file: DRM file structure to create the dumb buffer for
+ * @dev: DRM device
+ * @args: IOCTL data
+ *
+ * This function computes the pitch of the dumb buffer and rounds it up to an
+ * integer number of bytes per pixel. Drivers for hardware that doesn't have
+ * any additional restrictions on the pitch can directly use this function as
+ * their &drm_driver.dumb_create callback.
+ *
+ * For hardware with additional restrictions, drivers can adjust the fields
+ * set up by userspace before calling into this function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ u32 min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ struct drm_gem_shmem_object *shmem;
+
+ if (!args->pitch || !args->size) {
+ args->pitch = min_pitch;
+ args->size = args->pitch * args->height;
+ } else {
+ /* ensure sane minimum values */
+ if (args->pitch < min_pitch)
+ args->pitch = min_pitch;
+ if (args->size < args->pitch * args->height)
+ args->size = args->pitch * args->height;
+ }
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, &args->handle);
+
+ return PTR_ERR_OR_ZERO(shmem);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
+
+static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ struct page *page;
+
+ if (vmf->pgoff >= num_pages || WARN_ON_ONCE(!shmem->pages))
+ return VM_FAULT_SIGBUS;
+
+ page = shmem->pages[vmf->pgoff];
+
+ return vmf_insert_page(vma, vmf->address, page);
+}
+
+static void drm_gem_shmem_vm_open(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ int ret;
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ WARN_ON_ONCE(ret != 0);
+
+ drm_gem_vm_open(vma);
+}
+
+static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_gem_shmem_put_pages(shmem);
+ drm_gem_vm_close(vma);
+}
+
+const struct vm_operations_struct drm_gem_shmem_vm_ops = {
+ .fault = drm_gem_shmem_fault,
+ .open = drm_gem_shmem_vm_open,
+ .close = drm_gem_shmem_vm_close,
+};
+EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
+
+/**
+ * drm_gem_shmem_mmap - Memory-map a shmem GEM object
+ * @filp: File object
+ * @vma: VMA for the area to be mapped
+ *
+ * This function implements an augmented version of the GEM DRM file mmap
+ * operation for shmem objects. Drivers which employ the shmem helpers should
+ * use this function as their &file_operations.mmap handler.
+ *
+ * Instead of directly referencing this function, drivers should use the
+ * DEFINE_DRM_GEM_SHMEM_FOPS() macro.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ shmem = to_drm_gem_shmem_obj(vma->vm_private_data);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret) {
+ drm_gem_vm_close(vma);
+ return ret;
+ }
+
+ /* VM_PFNMAP was set by drm_gem_mmap() */
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ /* Remove the fake offset */
+ vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_mmap);
+
+/**
+ * drm_gem_shmem_print_info() - Print &drm_gem_shmem_object info for debugfs
+ * @p: DRM printer
+ * @indent: Tab indentation level
+ * @obj: GEM object
+ */
+void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count);
+ drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count);
+ drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr);
+}
+EXPORT_SYMBOL(drm_gem_shmem_print_info);
+
+/**
+ * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned
+ * pages for a shmem GEM object
+ * @obj: GEM object
+ *
+ * This function exports a scatter/gather table suitable for PRIME usage by
+ * calling the standard DMA mapping API.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
+ * negative error code on failure.
+ */
+struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_object *obj)
+{
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+
+ return drm_prime_pages_to_sg(shmem->pages, obj->size >> PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sg_table);
+
+/**
+ * drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
+ * scatter/gather table for a shmem GEM object.
+ * @obj: GEM object
+ *
+ * This function returns a scatter/gather table suitable for driver usage. If
+ * the sg table doesn't exist, the pages are pinned, dma-mapped, and an sg
+ * table is created.
+ *
+ * Returns:
+ * A pointer to the scatter/gather table of pinned pages or an ERR_PTR()-encoded
+ * negative error code on failure.
+ */
+struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_object *obj)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ struct sg_table *sgt;
+
+ if (shmem->sgt)
+ return shmem->sgt;
+
+ WARN_ON(obj->import_attach);
+
+ ret = drm_gem_shmem_get_pages(shmem);
+ if (ret)
+ return ERR_PTR(ret);
+
+ sgt = drm_gem_shmem_get_sg_table(&shmem->base);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_put_pages;
+ }
+ /* Map the pages for use by the h/w. */
+ dma_map_sg(obj->dev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);
+
+ shmem->sgt = sgt;
+
+ return sgt;
+
+err_put_pages:
+ drm_gem_shmem_put_pages(shmem);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
+
+/**
+ * drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
+ * another driver's scatter/gather table of pinned pages
+ * @dev: Device to import into
+ * @attach: DMA-BUF attachment
+ * @sgt: Scatter/gather table of pinned pages
+ *
+ * This function imports a scatter/gather table exported via DMA-BUF by
+ * another driver. Drivers that use the shmem helpers should set this as their
+ * &drm_driver.gem_prime_import_sg_table callback.
+ *
+ * Returns:
+ * A pointer to a newly created GEM object or an ERR_PTR-encoded negative
+ * error code on failure.
+ */
+struct drm_gem_object *
+drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ size_t size = PAGE_ALIGN(attach->dmabuf->size);
+ size_t npages = size >> PAGE_SHIFT;
+ struct drm_gem_shmem_object *shmem;
+ int ret;
+
+ shmem = drm_gem_shmem_create(dev, size);
+ if (IS_ERR(shmem))
+ return ERR_CAST(shmem);
+
+ shmem->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!shmem->pages) {
+ ret = -ENOMEM;
+ goto err_free_gem;
+ }
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, shmem->pages, NULL, npages);
+ if (ret < 0)
+ goto err_free_array;
+
+ shmem->sgt = sgt;
+ shmem->pages_use_count = 1; /* Permanently pinned from our point of view */
+
+ DRM_DEBUG_PRIME("size = %zu\n", size);
+
+ return &shmem->base;
+
+err_free_array:
+ kvfree(shmem->pages);
+err_free_gem:
+ drm_gem_object_put_unlocked(&shmem->base);
+
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_prime_import_sg_table);
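For context, a minimal sketch of how a driver built on these helpers might be wired up; the driver name and the feature flags below are assumptions, not something this series adds. DEFINE_DRM_GEM_SHMEM_FOPS() comes from the new drm_gem_shmem_helper.h header.

	/* Hypothetical driver glue; only the shmem-related hooks are shown. */
	DEFINE_DRM_GEM_SHMEM_FOPS(foo_fops);

	static struct drm_driver foo_drm_driver = {
		.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
		.fops			   = &foo_fops,
		.dumb_create		   = drm_gem_shmem_dumb_create,
		.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,
	};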
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 251d67e04c2d..e19ac7ca602d 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -71,8 +71,10 @@ int drm_legacy_modeset_ctl_ioctl(struct drm_device *dev, void *data,
/* drm_irq.c */
/* IOCTLS */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -180,12 +182,20 @@ int drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
int drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
int drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private);
+int drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private);
/* drm_framebuffer.c */
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 0e3043e08c69..374b372da58a 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -156,6 +156,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
return -EINVAL;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_map32 {
u32 offset; /* Requested physical address (0 for SAREA) */
u32 size; /* Requested physical size (bytes) */
@@ -239,6 +240,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
map.handle = compat_ptr(handle);
return drm_ioctl_kernel(file, drm_legacy_rmmap_ioctl, &map, DRM_AUTH);
}
+#endif
typedef struct drm_client32 {
int idx; /* Which client desired? */
@@ -301,6 +303,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_buf_desc32 {
int count; /* Number of buffers of this size */
int size; /* Size in bytes */
@@ -604,6 +607,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
return 0;
}
+#endif
#if IS_ENABLED(CONFIG_AGP)
typedef struct drm_agp_mode32 {
@@ -748,6 +752,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
}
#endif /* CONFIG_AGP */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
@@ -788,7 +793,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
return drm_ioctl_kernel(file, drm_legacy_sg_free, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
-
+#endif
#if defined(CONFIG_X86)
typedef struct drm_update_draw32 {
drm_drawable_t handle;
@@ -903,10 +908,13 @@ static struct {
#define DRM_IOCTL32_DEF(n, f) [DRM_IOCTL_NR(n##32)] = {.fn = f, .name = #n}
DRM_IOCTL32_DEF(DRM_IOCTL_VERSION, compat_drm_version),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_UNIQUE, compat_drm_getunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_GET_MAP, compat_drm_getmap),
+#endif
DRM_IOCTL32_DEF(DRM_IOCTL_GET_CLIENT, compat_drm_getclient),
DRM_IOCTL32_DEF(DRM_IOCTL_GET_STATS, compat_drm_getstats),
DRM_IOCTL32_DEF(DRM_IOCTL_SET_UNIQUE, compat_drm_setunique),
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_MAP, compat_drm_addmap),
DRM_IOCTL32_DEF(DRM_IOCTL_ADD_BUFS, compat_drm_addbufs),
DRM_IOCTL32_DEF(DRM_IOCTL_MARK_BUFS, compat_drm_markbufs),
@@ -918,6 +926,7 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
+#endif
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
@@ -926,8 +935,10 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
+#endif
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
DRM_IOCTL32_DEF(DRM_IOCTL_UPDATE_DRAW, compat_drm_update_draw),
#endif
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 687943df58e1..2263e3ddd822 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -245,6 +245,9 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_
case DRM_CAP_SYNCOBJ:
req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ);
return 0;
+ case DRM_CAP_SYNCOBJ_TIMELINE:
+ req->value = drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE);
+ return 0;
}
/* Other caps only work with KMS drivers */
@@ -508,13 +511,6 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-static inline bool
-drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
-{
- return drm_core_check_feature(dev, DRIVER_RENDER) &&
- (flags & DRM_RENDER_ALLOW);
-}
-
/**
* drm_ioctl_permit - Check ioctl permissions against caller
*
@@ -529,19 +525,14 @@ drm_render_driver_and_ioctl(const struct drm_device *dev, u32 flags)
*/
int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
- const struct drm_device *dev = file_priv->minor->dev;
-
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
return -EACCES;
- /* AUTH is only for master ... */
- if (unlikely((flags & DRM_AUTH) && drm_is_primary_client(file_priv))) {
- /* authenticated ones, or render capable on DRM_RENDER_ALLOW. */
- if (!file_priv->authenticated &&
- !drm_render_driver_and_ioctl(dev, flags))
- return -EACCES;
- }
+ /* AUTH is only for authenticated or render client */
+ if (unlikely((flags & DRM_AUTH) && !drm_is_render_client(file_priv) &&
+ !file_priv->authenticated))
+ return -EACCES;
/* MASTER is only for master or control clients */
if (unlikely((flags & DRM_MASTER) &&
@@ -565,6 +556,12 @@ EXPORT_SYMBOL(drm_ioctl_permit);
.name = #ioctl \
}
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, _func, _flags)
+#else
+#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
+#endif
+
/* Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
@@ -572,7 +569,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
+
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
@@ -584,39 +583,38 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_UNLOCKED|DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_legacy_getctx, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_legacy_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_legacy_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_legacy_resctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_LOCK, drm_legacy_lock, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_legacy_unlock, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_legacy_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_legacy_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_legacy_infobufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_legacy_mapbufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -629,8 +627,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_legacy_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank_ioctl, DRM_UNLOCKED),
@@ -686,12 +684,20 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE, drm_syncobj_fd_to_handle_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TRANSFER, drm_syncobj_transfer_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_WAIT, drm_syncobj_wait_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, drm_syncobj_timeline_wait_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_RESET, drm_syncobj_reset_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_SIGNAL, drm_syncobj_signal_ioctl,
DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL, drm_syncobj_timeline_signal_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_SYNCOBJ_QUERY, drm_syncobj_query_ioctl,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_GET_SEQUENCE, drm_crtc_get_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_CRTC_QUEUE_SEQUENCE, drm_crtc_queue_sequence_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_LEASE, drm_mode_create_lease_ioctl, DRM_MASTER|DRM_UNLOCKED),
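For reference, userspace would typically probe the new DRM_CAP_SYNCOBJ_TIMELINE capability before issuing the timeline syncobj ioctls. A hedged sketch using libdrm's drmGetCap(); the helper name is made up and a libdrm new enough to define the capability constant is assumed.

	#include <stdbool.h>
	#include <stdint.h>
	#include <xf86drm.h>

	static bool foo_has_timeline_syncobj(int fd)
	{
		uint64_t cap = 0;

		/* Non-zero means the DRM_IOCTL_SYNCOBJ_TIMELINE_* ioctls are usable. */
		if (drmGetCap(fd, DRM_CAP_SYNCOBJ_TIMELINE, &cap))
			return false;

		return cap != 0;
	}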
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 9bd8908d5fd8..02f38cc9f468 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -213,6 +213,7 @@ int drm_irq_uninstall(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_irq_uninstall);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_irq_control(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -253,3 +254,4 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data,
return -EINVAL;
}
}
+#endif
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index 93e2b30fe1a5..9c5ae825c507 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -39,7 +39,7 @@ MODULE_LICENSE("GPL and additional rights");
/* Backward compatibility for drm_kms_helper.edid_firmware */
static int edid_firmware_set(const char *val, const struct kernel_param *kp)
{
- DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware intead.\n");
+ DRM_NOTE("drm_kms_firmware.edid_firmware is deprecated, please use drm.edid_firmware instead.\n");
return __drm_set_edid_firmware_path(val);
}
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 603b0bd9c5ce..694ff363a90b 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -111,7 +111,7 @@ static bool _drm_has_leased(struct drm_master *master, int id)
*/
bool _drm_lease_held(struct drm_file *file_priv, int id)
{
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master)
return true;
return _drm_lease_held_master(file_priv->master, id);
@@ -133,7 +133,7 @@ bool drm_lease_held(struct drm_file *file_priv, int id)
struct drm_master *master;
bool ret;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return true;
master = file_priv->master;
@@ -159,7 +159,7 @@ uint32_t drm_lease_filter_crtcs(struct drm_file *file_priv, uint32_t crtcs_in)
int count_in, count_out;
uint32_t crtcs_out = 0;
- if (file_priv == NULL || file_priv->master == NULL)
+ if (!file_priv || !file_priv->master || !file_priv->master->lessor)
return crtcs_in;
master = file_priv->master;
@@ -220,8 +220,6 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr
error = 0;
if (!idr_find(&dev->mode_config.object_idr, object))
error = -ENOENT;
- else if (!_drm_lease_held_master(lessor, object))
- error = -EACCES;
else if (_drm_has_leased(lessor, object))
error = -EBUSY;
@@ -403,11 +401,6 @@ static int fill_object_idr(struct drm_device *dev,
/* step one - get references to all the mode objects
and check for validity. */
for (o = 0; o < object_count; o++) {
- if ((int) object_ids[o] < 0) {
- ret = -EINVAL;
- goto out_free_objects;
- }
-
objects[o] = drm_mode_object_find(dev, lessor_priv,
object_ids[o],
DRM_MODE_OBJECT_ANY);
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index 280fbeb846ff..51f1fabfa145 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -42,11 +42,19 @@ struct drm_file;
#define DRM_KERNEL_CONTEXT 0
#define DRM_RESERVED_CONTEXTS 1
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_ctxbitmap_init(struct drm_device *dev);
void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
-void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file);
+#else
+static inline void drm_legacy_ctxbitmap_init(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev) {}
+static inline void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file) {}
+#endif
+void drm_legacy_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_resctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_addctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getctx(struct drm_device *d, void *v, struct drm_file *f);
@@ -56,6 +64,7 @@ int drm_legacy_rmctx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_setsareactx(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
+#endif
/*
* Generic Buffer Management
@@ -63,16 +72,39 @@ int drm_legacy_getsareactx(struct drm_device *d, void *v, struct drm_file *f);
#define DRM_MAP_HASH_OFFSET 0x10000000
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return drm_ht_create(&dev->map_hash, 12);
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev)
+{
+ drm_ht_remove(&dev->map_hash);
+}
+#else
+static inline int drm_legacy_create_map_hash(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void drm_legacy_remove_map_hash(struct drm_device *dev) {}
+#endif
+
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_addmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_rmmap_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+
int drm_legacy_addbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_infobufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_markbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_freebufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_mapbufs(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_dma_ioctl(struct drm_device *d, void *v, struct drm_file *f);
+#endif
int __drm_legacy_infobufs(struct drm_device *, void *, int *,
int (*)(void *, int, struct drm_buf_entry *));
@@ -81,7 +113,17 @@ int __drm_legacy_mapbufs(struct drm_device *, void *, int *,
int (*)(void *, int, unsigned long, struct drm_buf *),
struct drm_file *);
-#ifdef CONFIG_DRM_VM
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master);
+void drm_legacy_rmmaps(struct drm_device *dev);
+#else
+static inline void drm_legacy_master_rmmaps(struct drm_device *dev,
+ struct drm_master *master) {}
+static inline void drm_legacy_rmmaps(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *d);
#else
static inline void drm_legacy_vma_flush(struct drm_device *d)
@@ -103,23 +145,64 @@ struct drm_agp_mem {
};
/* drm_lock.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
+#else
+static inline void drm_legacy_lock_release(struct drm_device *dev, struct file *filp) {}
+#endif
/* DMA support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_dma_setup(struct drm_device *dev);
void drm_legacy_dma_takedown(struct drm_device *dev);
+#else
+static inline int drm_legacy_dma_setup(struct drm_device *dev)
+{
+ return 0;
+}
+#endif
+
void drm_legacy_free_buffer(struct drm_device *dev,
struct drm_buf * buf);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
+#else
+static inline void drm_legacy_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *filp) {}
+#endif
/* Scatter Gather Support */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_sg_cleanup(struct drm_device *dev);
int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int drm_legacy_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_init_members(struct drm_device *dev);
+void drm_legacy_destroy_members(struct drm_device *dev);
+void drm_legacy_dev_reinit(struct drm_device *dev);
+#else
+static inline void drm_legacy_init_members(struct drm_device *dev) {}
+static inline void drm_legacy_destroy_members(struct drm_device *dev) {}
+static inline void drm_legacy_dev_reinit(struct drm_device *dev) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master);
+#else
+static inline void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master) {}
+#endif
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
+void drm_master_legacy_init(struct drm_master *master);
+#else
+static inline void drm_master_legacy_init(struct drm_master *master) {}
+#endif
#endif /* __DRM_LEGACY_H__ */
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
new file mode 100644
index 000000000000..2fe786839ca8
--- /dev/null
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -0,0 +1,82 @@
+/**
+ * \file drm_legacy_misc.c
+ * Misc legacy support functions.
+ *
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Gareth Hughes <gareth@valinux.com>
+ */
+
+/*
+ * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include "drm_internal.h"
+#include "drm_legacy.h"
+
+void drm_legacy_init_members(struct drm_device *dev)
+{
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+ spin_lock_init(&dev->buf_lock);
+ mutex_init(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_destroy_members(struct drm_device *dev)
+{
+ mutex_destroy(&dev->ctxlist_mutex);
+}
+
+void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ if (dev->irq_enabled)
+ drm_irq_uninstall(dev);
+
+ mutex_lock(&dev->struct_mutex);
+
+ drm_legacy_agp_clear(dev);
+
+ drm_legacy_sg_cleanup(dev);
+ drm_legacy_vma_flush(dev);
+ drm_legacy_dma_takedown(dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+
+ DRM_DEBUG("lastclose completed\n");
+}
+
+void drm_master_legacy_init(struct drm_master *master)
+{
+ spin_lock_init(&master->lock.spinlock);
+ init_waitqueue_head(&master->lock.lock_queue);
+}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 67a1a2ca7174..b70058e77a28 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -347,3 +347,22 @@ void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
_DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
}
}
+
+void drm_legacy_lock_master_cleanup(struct drm_device *dev, struct drm_master *master)
+{
+ if (!drm_core_check_feature(dev, DRIVER_LEGACY))
+ return;
+
+ /*
+ * Since the master is disappearing, so is the
+ * possibility to lock.
+ */
+ mutex_lock(&dev->struct_mutex);
+ if (master->lock.hw_lock) {
+ if (dev->sigdata.lock == master->lock.hw_lock)
+ dev->sigdata.lock = NULL;
+ master->lock.hw_lock = NULL;
+ master->lock.file_priv = NULL;
+ wake_up_interruptible_all(&master->lock.lock_queue);
+ }
+ mutex_unlock(&dev->struct_mutex);
+}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 40c4349cb939..132fef8ff1b6 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -35,6 +35,7 @@
#include <linux/highmem.h>
#include <linux/export.h>
+#include <xen/xen.h>
#include <drm/drmP.h>
#include "drm_legacy.h"
@@ -150,15 +151,34 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev)
}
EXPORT_SYMBOL(drm_legacy_ioremapfree);
-u64 drm_get_max_iomem(void)
+bool drm_need_swiotlb(int dma_bits)
{
struct resource *tmp;
resource_size_t max_iomem = 0;
+ /*
+ * Xen paravirtual hosts require swiotlb regardless of requested dma
+ * transfer size.
+ *
+ * NOTE: Really, what it requires is use of the dma_alloc_coherent
+ * allocator used in ttm_dma_populate() instead of
+ * ttm_populate_and_map_pages(), which bounce-buffers so much under
+ * Xen that it leads to swiotlb buffer exhaustion.
+ */
+ if (xen_pv_domain())
+ return true;
+
+ /*
+ * Enforce dma_alloc_coherent when memory encryption is active as well
+ * for the same reasons as for Xen paravirtual hosts.
+ */
+ if (mem_encrypt_active())
+ return true;
+
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) {
max_iomem = max(max_iomem, tmp->end);
}
- return max_iomem;
+ return max_iomem > ((u64)1 << dma_bits);
}
-EXPORT_SYMBOL(drm_get_max_iomem);
+EXPORT_SYMBOL(drm_need_swiotlb);
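A driver that previously compared drm_get_max_iomem() against its DMA mask now simply asks the helper whether bounce buffering is needed. A minimal sketch; the "priv" structure and the 44-bit figure are placeholders.

	/* Hypothetical call site in a driver's TTM init path. */
	int dma_bits = 44;	/* address bits the device can actually reach */

	priv->need_swiotlb = drm_need_swiotlb(dma_bits);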
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 4a1c2023ccf0..1a346ae1599d 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -297,8 +297,9 @@ static int drm_mode_create_standard_properties(struct drm_device *dev)
return -ENOMEM;
dev->mode_config.prop_crtc_id = prop;
- prop = drm_property_create(dev, DRM_MODE_PROP_BLOB, "FB_DAMAGE_CLIPS",
- 0);
+ prop = drm_property_create(dev,
+ DRM_MODE_PROP_ATOMIC | DRM_MODE_PROP_BLOB,
+ "FB_DAMAGE_CLIPS", 0);
if (!prop)
return -ENOMEM;
dev->mode_config.prop_fb_damage_clips = prop;
diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
index a9005c1c2384..f32507e65b79 100644
--- a/drivers/gpu/drm/drm_mode_object.c
+++ b/drivers/gpu/drm/drm_mode_object.c
@@ -451,6 +451,7 @@ static int set_property_legacy(struct drm_mode_object *obj,
}
static int set_property_atomic(struct drm_mode_object *obj,
+ struct drm_file *file_priv,
struct drm_property *prop,
uint64_t prop_value)
{
@@ -477,7 +478,7 @@ retry:
obj_to_connector(obj),
prop_value);
} else {
- ret = drm_atomic_set_property(state, obj, prop, prop_value);
+ ret = drm_atomic_set_property(state, file_priv, obj, prop, prop_value);
if (ret)
goto out;
ret = drm_atomic_commit(state);
@@ -520,7 +521,7 @@ int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
goto out_unref;
if (drm_drv_uses_atomic_modeset(property->dev))
- ret = set_property_atomic(arg_obj, property, arg->value);
+ ret = set_property_atomic(arg_obj, file_priv, property, arg->value);
else
ret = set_property_legacy(arg_obj, property, arg->value);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 869ac6f4671e..56f92a0bba62 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -655,22 +655,22 @@ EXPORT_SYMBOL_GPL(drm_display_mode_to_videomode);
* @bus_flags: information about pixelclk, sync and DE polarity will be stored
* here
*
- * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_(POS|NEG)EDGE and
- * DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
+ * Sets DRM_BUS_FLAG_DE_(LOW|HIGH), DRM_BUS_FLAG_PIXDATA_DRIVE_(POS|NEG)EDGE
+ * and DISPLAY_FLAGS_SYNC_(POS|NEG)EDGE in @bus_flags according to DISPLAY_FLAGS
* found in @vm
*/
void drm_bus_flags_from_videomode(const struct videomode *vm, u32 *bus_flags)
{
*bus_flags = 0;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_POSEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_POSEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
if (vm->flags & DISPLAY_FLAGS_SYNC_NEGEDGE)
- *bus_flags |= DRM_BUS_FLAG_SYNC_NEGEDGE;
+ *bus_flags |= DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
if (vm->flags & DISPLAY_FLAGS_DE_LOW)
*bus_flags |= DRM_BUS_FLAG_DE_LOW;
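A panel or bridge driver typically derives its bus flags from a parsed videomode; a sketch under the assumption that the mode comes from devicetree via of_get_videomode() and that "np" is the driver's device node.

	#include <video/of_videomode.h>
	#include <video/videomode.h>

	/* Hypothetical probe-time snippet; error handling trimmed. */
	struct videomode vm;
	u32 bus_flags = 0;

	if (!of_get_videomode(np, &vm, OF_USE_NATIVE_MODE))
		drm_bus_flags_from_videomode(&vm, &bus_flags);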
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 52e445bb1aa5..521aff99b08a 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -80,6 +80,12 @@ static const struct drm_dmi_panel_orientation_data lcd800x1280_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
+ .width = 1200,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -148,6 +154,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 4cfb56893b7f..d6ad60ab0d38 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -960,6 +960,11 @@ retry:
if (ret)
goto out;
+ if (!drm_lease_held(file_priv, crtc->cursor->base.id)) {
+ ret = -EACCES;
+ goto out;
+ }
+
ret = drm_mode_cursor_universal(crtc, req, file_priv, &ctx);
goto out;
}
@@ -1062,6 +1067,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
plane = crtc->primary;
+ if (!drm_lease_held(file_priv, plane->base.id))
+ return -EACCES;
+
if (crtc->funcs->page_flip_target) {
u32 current_vblank;
int r;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 231e3f6d5f41..dc079efb3b0f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
.size = obj->size,
.flags = flags,
.priv = obj,
+ .resv = obj->resv,
};
if (dev->driver->gem_prime_res_obj)
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index 0e7fc3e7dfb4..f5cb0aabfe35 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -253,3 +253,31 @@ void drm_err(const char *format, ...)
va_end(args);
}
EXPORT_SYMBOL(drm_err);
+
+/**
+ * drm_print_regset32 - print the contents of registers to a
+ * &drm_printer stream.
+ *
+ * @p: the &drm_printer
+ * @regset: the list of registers to print.
+ *
+ * Often in driver debug, it's useful to be able to capture the contents
+ * of registers either in the steady state via debugfs or at specific
+ * points during operation. This lets the driver have a single register
+ * list for both.
+ */
+void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset)
+{
+ int namelen = 0;
+ int i;
+
+ for (i = 0; i < regset->nregs; i++)
+ namelen = max(namelen, (int)strlen(regset->regs[i].name));
+
+ for (i = 0; i < regset->nregs; i++) {
+ drm_printf(p, "%*s = 0x%08x\n",
+ namelen, regset->regs[i].name,
+ readl(regset->base + regset->regs[i].offset));
+ }
+}
+EXPORT_SYMBOL(drm_print_regset32);
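A hedged sketch of how a driver's debugfs show function might use the new helper; the register names, offsets and the "foo_priv" structure are assumptions.

	/* Hypothetical debugfs seq_file show callback. */
	static const struct debugfs_reg32 foo_regs[] = {
		{ "CTRL",   0x00 },
		{ "STATUS", 0x04 },
	};

	static int foo_regs_show(struct seq_file *m, void *unused)
	{
		struct foo_priv *priv = m->private;
		struct debugfs_regset32 regset = {
			.regs  = foo_regs,
			.nregs = ARRAY_SIZE(foo_regs),
			.base  = priv->mmio,
		};
		struct drm_printer p = drm_seq_file_printer(m);

		drm_print_regset32(&p, &regset);

		return 0;
	}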
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 8bdb4a3bd7bf..3d400905100b 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -61,6 +61,7 @@ struct syncobj_wait_entry {
struct task_struct *task;
struct dma_fence *fence;
struct dma_fence_cb fence_cb;
+ u64 point;
};
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@ -95,6 +96,8 @@ EXPORT_SYMBOL(drm_syncobj_find);
static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
if (wait->fence)
return;
@@ -103,11 +106,15 @@ static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
* have the lock, try one more time just to be sure we don't add a
* callback when a fence has already been set.
*/
- if (syncobj->fence)
- wait->fence = dma_fence_get(
- rcu_dereference_protected(syncobj->fence, 1));
- else
+ fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
list_add_tail(&wait->node, &syncobj->cb_list);
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
spin_unlock(&syncobj->lock);
}
@@ -123,6 +130,44 @@ static void drm_syncobj_remove_wait(struct drm_syncobj *syncobj,
}
/**
+ * drm_syncobj_add_point - add new timeline point to the syncobj
+ * @syncobj: sync object to add the timeline point to
+ * @chain: chain node to use to add the point
+ * @fence: fence to encapsulate in the chain node
+ * @point: sequence number to use for the point
+ *
+ * Add the chain node as a new timeline point to the syncobj.
+ */
+void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+ struct dma_fence_chain *chain,
+ struct dma_fence *fence,
+ uint64_t point)
+{
+ struct syncobj_wait_entry *cur, *tmp;
+ struct dma_fence *prev;
+
+ dma_fence_get(fence);
+
+ spin_lock(&syncobj->lock);
+
+ prev = drm_syncobj_fence_get(syncobj);
+ /* Adding an out-of-order point to the timeline can cause the payload returned by query_ioctl to be 0! */
+ if (prev && prev->seqno >= point)
+ DRM_ERROR("You are adding an unordered point to the timeline!\n");
+ dma_fence_chain_init(chain, prev, fence, point);
+ rcu_assign_pointer(syncobj->fence, &chain->base);
+
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
+ syncobj_wait_syncobj_func(syncobj, cur);
+ spin_unlock(&syncobj->lock);
+
+ /* Walk the chain once to trigger garbage collection */
+ dma_fence_chain_for_each(fence, prev);
+ dma_fence_put(prev);
+}
+EXPORT_SYMBOL(drm_syncobj_add_point);
+
+/**
* drm_syncobj_replace_fence - replace fence in a sync object.
* @syncobj: Sync object to replace fence in
* @fence: fence to install in sync file.
@@ -145,10 +190,8 @@ void drm_syncobj_replace_fence(struct drm_syncobj *syncobj,
rcu_assign_pointer(syncobj->fence, fence);
if (fence != old_fence) {
- list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
- list_del_init(&cur->node);
+ list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
syncobj_wait_syncobj_func(syncobj, cur);
- }
}
spin_unlock(&syncobj->lock);
@@ -171,6 +214,8 @@ static void drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
dma_fence_put(fence);
}
+/* 5s default for wait submission */
+#define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
/**
* drm_syncobj_find_fence - lookup and reference the fence in a sync object
* @file_private: drm file private pointer
@@ -191,16 +236,58 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
struct dma_fence **fence)
{
struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
- int ret = 0;
+ struct syncobj_wait_entry wait;
+ u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
+ int ret;
if (!syncobj)
return -ENOENT;
*fence = drm_syncobj_fence_get(syncobj);
- if (!*fence) {
+ drm_syncobj_put(syncobj);
+
+ if (*fence) {
+ ret = dma_fence_chain_find_seqno(fence, point);
+ if (!ret)
+ return 0;
+ dma_fence_put(*fence);
+ } else {
ret = -EINVAL;
}
- drm_syncobj_put(syncobj);
+
+ if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+ return ret;
+
+ memset(&wait, 0, sizeof(wait));
+ wait.task = current;
+ wait.point = point;
+ drm_syncobj_fence_add_wait(syncobj, &wait);
+
+ do {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (wait.fence) {
+ ret = 0;
+ break;
+ }
+ if (timeout == 0) {
+ ret = -ETIME;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ timeout = schedule_timeout(timeout);
+ } while (1);
+
+ __set_current_state(TASK_RUNNING);
+ *fence = wait.fence;
+
+ if (wait.node.next)
+ drm_syncobj_remove_wait(syncobj, &wait);
+
return ret;
}
EXPORT_SYMBOL(drm_syncobj_find_fence);
@@ -592,6 +679,80 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
&args->handle);
}
+static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *timeline_syncobj = NULL;
+ struct dma_fence *fence;
+ struct dma_fence_chain *chain;
+ int ret;
+
+ timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!timeline_syncobj) {
+ return -ENOENT;
+ }
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags,
+ &fence);
+ if (ret)
+ goto err;
+ chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chain) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+err1:
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(timeline_syncobj);
+
+ return ret;
+}
+
+static int
+drm_syncobj_transfer_to_binary(struct drm_file *file_private,
+ struct drm_syncobj_transfer *args)
+{
+ struct drm_syncobj *binary_syncobj = NULL;
+ struct dma_fence *fence;
+ int ret;
+
+ binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+ if (!binary_syncobj)
+ return -ENOENT;
+ ret = drm_syncobj_find_fence(file_private, args->src_handle,
+ args->src_point, args->flags, &fence);
+ if (ret)
+ goto err;
+ drm_syncobj_replace_fence(binary_syncobj, fence);
+ dma_fence_put(fence);
+err:
+ drm_syncobj_put(binary_syncobj);
+
+ return ret;
+}
+int
+drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_transfer *args = data;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->dst_point)
+ ret = drm_syncobj_transfer_to_timeline(file_private, args);
+ else
+ ret = drm_syncobj_transfer_to_binary(file_private, args);
+
+ return ret;
+}
+
static void syncobj_wait_fence_func(struct dma_fence *fence,
struct dma_fence_cb *cb)
{
@@ -604,13 +765,27 @@ static void syncobj_wait_fence_func(struct dma_fence *fence,
static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
struct syncobj_wait_entry *wait)
{
+ struct dma_fence *fence;
+
/* This happens inside the syncobj lock */
- wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
- lockdep_is_held(&syncobj->lock)));
+ fence = rcu_dereference_protected(syncobj->fence,
+ lockdep_is_held(&syncobj->lock));
+ dma_fence_get(fence);
+ if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+ dma_fence_put(fence);
+ return;
+ } else if (!fence) {
+ wait->fence = dma_fence_get_stub();
+ } else {
+ wait->fence = fence;
+ }
+
wake_up_process(wait->task);
+ list_del_init(&wait->node);
}
static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+ void __user *user_points,
uint32_t count,
uint32_t flags,
signed long timeout,
@@ -618,12 +793,27 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
{
struct syncobj_wait_entry *entries;
struct dma_fence *fence;
+ uint64_t *points;
uint32_t signaled_count, i;
- entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
- if (!entries)
+ points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+ if (points == NULL)
return -ENOMEM;
+ if (!user_points) {
+ memset(points, 0, count * sizeof(uint64_t));
+
+ } else if (copy_from_user(points, user_points,
+ sizeof(uint64_t) * count)) {
+ timeout = -EFAULT;
+ goto err_free_points;
+ }
+
+ entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+ if (!entries) {
+ timeout = -ENOMEM;
+ goto err_free_points;
+ }
/* Walk the list of sync objects and initialize entries. We do
* this up-front so that we can properly return -EINVAL if there is
* a syncobj with a missing fence and then never have the chance of
@@ -631,9 +821,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
*/
signaled_count = 0;
for (i = 0; i < count; ++i) {
+ struct dma_fence *fence;
+
entries[i].task = current;
- entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
- if (!entries[i].fence) {
+ entries[i].point = points[i];
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+ dma_fence_put(fence);
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
continue;
} else {
@@ -642,7 +836,13 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
}
}
- if (dma_fence_is_signaled(entries[i].fence)) {
+ if (fence)
+ entries[i].fence = fence;
+ else
+ entries[i].fence = dma_fence_get_stub();
+
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(entries[i].fence)) {
if (signaled_count == 0 && idx)
*idx = i;
signaled_count++;
@@ -675,7 +875,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
if (!fence)
continue;
- if (dma_fence_is_signaled(fence) ||
+ if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+ dma_fence_is_signaled(fence) ||
(!entries[i].fence_cb.func &&
dma_fence_add_callback(fence,
&entries[i].fence_cb,
@@ -720,6 +921,9 @@ cleanup_entries:
}
kfree(entries);
+err_free_points:
+ kfree(points);
+
return timeout;
}
@@ -730,7 +934,7 @@ cleanup_entries:
*
* Calculate the timeout in jiffies from an absolute time in sec/nsec.
*/
-static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
ktime_t abs_timeout, now;
u64 timeout_ns, timeout_jiffies64;
@@ -754,23 +958,38 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
return timeout_jiffies64 + 1;
}
+EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
struct drm_syncobj_wait *wait,
- struct drm_syncobj **syncobjs)
+ struct drm_syncobj_timeline_wait *timeline_wait,
+ struct drm_syncobj **syncobjs, bool timeline)
{
- signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ signed long timeout = 0;
uint32_t first = ~0;
- timeout = drm_syncobj_array_wait_timeout(syncobjs,
- wait->count_handles,
- wait->flags,
- timeout, &first);
- if (timeout < 0)
- return timeout;
-
- wait->first_signaled = first;
+ if (!timeline) {
+ timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ NULL,
+ wait->count_handles,
+ wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ wait->first_signaled = first;
+ } else {
+ timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+ timeout = drm_syncobj_array_wait_timeout(syncobjs,
+ u64_to_user_ptr(timeline_wait->points),
+ timeline_wait->count_handles,
+ timeline_wait->flags,
+ timeout, &first);
+ if (timeout < 0)
+ return timeout;
+ timeline_wait->first_signaled = first;
+ }
return 0;
}
@@ -856,13 +1075,48 @@ drm_syncobj_wait_ioctl(struct drm_device *dev, void *data,
return ret;
ret = drm_syncobj_array_wait(dev, file_private,
- args, syncobjs);
+ args, NULL, syncobjs, false);
+
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int
+drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_wait *args = data;
+ struct drm_syncobj **syncobjs;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_syncobj_array_wait(dev, file_private,
+ NULL, args, syncobjs, true);
drm_syncobj_array_free(syncobjs, args->count_handles);
return ret;
}
+
int
drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_private)
@@ -928,3 +1182,138 @@ drm_syncobj_signal_ioctl(struct drm_device *dev, void *data,
return ret;
}
+
+int
+drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ struct dma_fence_chain **chains;
+ uint64_t *points;
+ uint32_t i, j;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ points = kmalloc_array(args->count_handles, sizeof(*points),
+ GFP_KERNEL);
+ if (!points) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ if (!u64_to_user_ptr(args->points)) {
+ memset(points, 0, args->count_handles * sizeof(uint64_t));
+ } else if (copy_from_user(points, u64_to_user_ptr(args->points),
+ sizeof(uint64_t) * args->count_handles)) {
+ ret = -EFAULT;
+ goto err_points;
+ }
+
+ chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
+ if (!chains) {
+ ret = -ENOMEM;
+ goto err_points;
+ }
+ for (i = 0; i < args->count_handles; i++) {
+ chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+ if (!chains[i]) {
+ for (j = 0; j < i; j++)
+ kfree(chains[j]);
+ ret = -ENOMEM;
+ goto err_chains;
+ }
+ }
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence *fence = dma_fence_get_stub();
+
+ drm_syncobj_add_point(syncobjs[i], chains[i],
+ fence, points[i]);
+ dma_fence_put(fence);
+ }
+err_chains:
+ kfree(chains);
+err_points:
+ kfree(points);
+out:
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
+
+int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_private)
+{
+ struct drm_syncobj_timeline_array *args = data;
+ struct drm_syncobj **syncobjs;
+ uint64_t __user *points = u64_to_user_ptr(args->points);
+ uint32_t i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+ return -EOPNOTSUPP;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (args->count_handles == 0)
+ return -EINVAL;
+
+ ret = drm_syncobj_array_find(file_private,
+ u64_to_user_ptr(args->handles),
+ args->count_handles,
+ &syncobjs);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < args->count_handles; i++) {
+ struct dma_fence_chain *chain;
+ struct dma_fence *fence;
+ uint64_t point;
+
+ fence = drm_syncobj_fence_get(syncobjs[i]);
+ chain = to_dma_fence_chain(fence);
+ if (chain) {
+ struct dma_fence *iter, *last_signaled = NULL;
+
+ dma_fence_chain_for_each(iter, fence) {
+ if (!iter)
+ break;
+ dma_fence_put(last_signaled);
+ last_signaled = dma_fence_get(iter);
+ if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+ /* It is most likely that the timeline has
+ * unordered points. */
+ break;
+ }
+ point = dma_fence_is_signaled(last_signaled) ?
+ last_signaled->seqno :
+ to_dma_fence_chain(last_signaled)->prev_seqno;
+ dma_fence_put(last_signaled);
+ } else {
+ point = 0;
+ }
+ ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+ ret = ret ? -EFAULT : 0;
+ if (ret)
+ break;
+ }
+ drm_syncobj_array_free(syncobjs, args->count_handles);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3301046dfaa..10cf83d569e1 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -584,8 +584,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_ops = &drm_vm_ops;
break;
}
- /* fall through to _DRM_FRAME_BUFFER... */
#endif
+ /* fall through - to _DRM_FRAME_BUFFER... */
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = drm_core_get_reg_ofs(dev);
@@ -610,7 +610,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_end - vma->vm_start, vma->vm_page_prot))
return -EAGAIN;
vma->vm_page_prot = drm_dma_prot(map->type, vma);
- /* fall through to _DRM_SHM */
+ /* fall through - to _DRM_SHM */
case _DRM_SHM:
vma->vm_ops = &drm_vm_shm_ops;
vma->vm_private_data = (void *)map;
@@ -646,6 +646,7 @@ int drm_legacy_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL(drm_legacy_mmap);
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
void drm_legacy_vma_flush(struct drm_device *dev)
{
struct drm_vma_entry *vma, *vma_temp;
@@ -656,3 +657,4 @@ void drm_legacy_vma_flush(struct drm_device *dev)
kfree(vma);
}
}
+#endif
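
The first two drm_vm.c hunks only reword the fall-through comments and, in the first case, move the marker below the #endif so that it sits directly in front of the next case label, which is where comment-based -Wimplicit-fallthrough checking expects to find it; the last hunk compiles drm_legacy_vma_flush() only when CONFIG_DRM_LEGACY is enabled. A small illustration of the expected comment placement, with hypothetical names:

	switch (type) {
	case EXAMPLE_MAP_A:
		setup_a(vma);
		/* fall through - A is finished exactly like B */
	case EXAMPLE_MAP_B:
		setup_b(vma);
		break;
	}
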
diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c
index c20e6fe00cb3..79ac014701c8 100644
--- a/drivers/gpu/drm/drm_writeback.c
+++ b/drivers/gpu/drm/drm_writeback.c
@@ -239,14 +239,52 @@ fail:
}
EXPORT_SYMBOL(drm_writeback_connector_init);
+int drm_writeback_set_fb(struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb)
+{
+ WARN_ON(conn_state->connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
+
+ if (!conn_state->writeback_job) {
+ conn_state->writeback_job =
+ kzalloc(sizeof(*conn_state->writeback_job), GFP_KERNEL);
+ if (!conn_state->writeback_job)
+ return -ENOMEM;
+
+ conn_state->writeback_job->connector =
+ drm_connector_to_writeback(conn_state->connector);
+ }
+
+ drm_framebuffer_assign(&conn_state->writeback_job->fb, fb);
+ return 0;
+}
+
+int drm_writeback_prepare_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+ int ret;
+
+ if (funcs->prepare_writeback_job) {
+ ret = funcs->prepare_writeback_job(connector, job);
+ if (ret < 0)
+ return ret;
+ }
+
+ job->prepared = true;
+ return 0;
+}
+EXPORT_SYMBOL(drm_writeback_prepare_job);
+
/**
* drm_writeback_queue_job - Queue a writeback job for later signalling
* @wb_connector: The writeback connector to queue a job on
- * @job: The job to queue
+ * @conn_state: The connector state containing the job to queue
*
- * This function adds a job to the job_queue for a writeback connector. It
- * should be considered to take ownership of the writeback job, and so any other
- * references to the job must be cleared after calling this function.
+ * This function adds the job contained in @conn_state to the job_queue for a
+ * writeback connector. It takes ownership of the writeback job and sets the
+ * @conn_state->writeback_job to NULL, and so no access to the job may be
+ * performed by the caller after this function returns.
*
* Drivers must ensure that for a given writeback connector, jobs are queued in
* exactly the same order as they will be completed by the hardware (and
@@ -258,16 +296,36 @@ EXPORT_SYMBOL(drm_writeback_connector_init);
* See also: drm_writeback_signal_completion()
*/
void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
- struct drm_writeback_job *job)
+ struct drm_connector_state *conn_state)
{
+ struct drm_writeback_job *job;
unsigned long flags;
+ job = conn_state->writeback_job;
+ conn_state->writeback_job = NULL;
+
spin_lock_irqsave(&wb_connector->job_lock, flags);
list_add_tail(&job->list_entry, &wb_connector->job_queue);
spin_unlock_irqrestore(&wb_connector->job_lock, flags);
}
EXPORT_SYMBOL(drm_writeback_queue_job);
+void drm_writeback_cleanup_job(struct drm_writeback_job *job)
+{
+ struct drm_writeback_connector *connector = job->connector;
+ const struct drm_connector_helper_funcs *funcs =
+ connector->base.helper_private;
+
+ if (job->prepared && funcs->cleanup_writeback_job)
+ funcs->cleanup_writeback_job(connector, job);
+
+ if (job->fb)
+ drm_framebuffer_put(job->fb);
+
+ kfree(job);
+}
+EXPORT_SYMBOL(drm_writeback_cleanup_job);
+
/*
* @cleanup_work: deferred cleanup of a writeback job
*
@@ -280,10 +338,9 @@ static void cleanup_work(struct work_struct *work)
struct drm_writeback_job *job = container_of(work,
struct drm_writeback_job,
cleanup_work);
- drm_framebuffer_put(job->fb);
- kfree(job);
-}
+ drm_writeback_cleanup_job(job);
+}
/**
* drm_writeback_signal_completion - Signal the completion of a writeback job
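
Taken together, the writeback changes give each job a fixed life cycle: drm_writeback_set_fb() allocates conn_state->writeback_job when a framebuffer is attached, drm_writeback_prepare_job() runs the connector's optional prepare_writeback_job hook, drm_writeback_queue_job() now takes the connector state and steals the job from it, and drm_writeback_cleanup_job() drops the framebuffer and, if the job was prepared, calls the cleanup hook. A rough commit-side sketch with illustrative names and error handling trimmed:

	static void example_commit_writeback(struct drm_writeback_connector *wb_conn,
					     struct drm_connector_state *conn_state)
	{
		struct drm_writeback_job *job = conn_state->writeback_job;

		if (!job || !job->fb)
			return;

		if (drm_writeback_prepare_job(job))
			return;

		/* Ownership moves to the queue; conn_state->writeback_job is
		 * NULL once this returns. */
		drm_writeback_queue_job(wb_conn, conn_state);

		/* Program the hardware, then call
		 * drm_writeback_signal_completion() when the write-out is done;
		 * the deferred cleanup_work ends in drm_writeback_cleanup_job(). */
	}
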
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 18c27f795cf6..7eb7cf9c3fa8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -473,7 +473,6 @@ static struct drm_driver etnaviv_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = etnaviv_gem_prime_res_obj,
.gem_prime_pin = etnaviv_gem_prime_pin,
.gem_prime_unpin = etnaviv_gem_prime_unpin,
.gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
@@ -515,6 +514,9 @@ static int etnaviv_bind(struct device *dev)
}
drm->dev_private = priv;
+ dev->dma_parms = &priv->dma_parms;
+ dma_set_max_seg_size(dev, SZ_2G);
+
mutex_init(&priv->gem_lock);
INIT_LIST_HEAD(&priv->gem_list);
priv->num_gpus = 0;
@@ -552,6 +554,8 @@ static void etnaviv_unbind(struct device *dev)
component_unbind_all(dev, drm);
+ dev->dma_parms = NULL;
+
drm->dev_private = NULL;
kfree(priv);
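
The etnaviv_bind()/etnaviv_unbind() hunk above matters because dma_set_max_seg_size() only stores the limit when the device already has a dma_parms structure; without one it returns -EIO and scatter-gather segments stay at the 64 KiB default. Condensed, the pattern is (priv is the driver's private data, as in the hunk; the warning is illustrative):

	/* bind */
	dev->dma_parms = &priv->dma_parms;
	if (dma_set_max_seg_size(dev, SZ_2G))
		dev_warn(dev, "could not raise the DMA segment size limit\n");

	/* unbind */
	dev->dma_parms = NULL;
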
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index a6a7ded37ef1..8798423705e1 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -42,6 +42,7 @@ struct etnaviv_file_private {
struct etnaviv_drm_private {
int num_gpus;
+ struct device_dma_parameters dma_parms;
struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
/* list of GEM objects: */
@@ -60,7 +61,6 @@ void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
void etnaviv_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int etnaviv_gem_prime_mmap(struct drm_gem_object *obj,
struct vm_area_struct *vma);
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int etnaviv_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5c48915f492d..e8778ebb72e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -397,13 +397,13 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
}
if (op & ETNA_PREP_NOSYNC) {
- if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
+ if (!reservation_object_test_signaled_rcu(obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
- ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
+ ret = reservation_object_wait_timeout_rcu(obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
@@ -459,7 +459,7 @@ static void etnaviv_gem_describe_fence(struct dma_fence *fence,
static void etnaviv_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
- struct reservation_object *robj = etnaviv_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
unsigned long off = drm_vma_node_start(&obj->vma_node);
@@ -549,8 +549,6 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
drm_gem_free_mmap_offset(obj);
etnaviv_obj->ops->release(etnaviv_obj);
- if (etnaviv_obj->resv == &etnaviv_obj->_resv)
- reservation_object_fini(&etnaviv_obj->_resv);
drm_gem_object_release(obj);
kfree(etnaviv_obj);
@@ -596,12 +594,8 @@ static int etnaviv_gem_new_impl(struct drm_device *dev, u32 size, u32 flags,
etnaviv_obj->flags = flags;
etnaviv_obj->ops = ops;
- if (robj) {
- etnaviv_obj->resv = robj;
- } else {
- etnaviv_obj->resv = &etnaviv_obj->_resv;
- reservation_object_init(&etnaviv_obj->_resv);
- }
+ if (robj)
+ etnaviv_obj->base.resv = robj;
mutex_init(&etnaviv_obj->lock);
INIT_LIST_HEAD(&etnaviv_obj->vram_list);
@@ -628,24 +622,18 @@ int etnaviv_gem_new_handle(struct drm_device *dev, struct drm_file *file,
lockdep_set_class(&to_etnaviv_bo(obj)->lock, &etnaviv_shm_lock_class);
ret = drm_gem_object_init(dev, obj, size);
- if (ret == 0) {
- struct address_space *mapping;
-
- /*
- * Our buffers are kept pinned, so allocating them
- * from the MOVABLE zone is a really bad idea, and
- * conflicts with CMA. See comments above new_inode()
- * why this is required _and_ expected if you're
- * going to pin these pages.
- */
- mapping = obj->filp->f_mapping;
- mapping_set_gfp_mask(mapping, GFP_HIGHUSER |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
- }
-
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the MOVABLE
+ * zone is a really bad idea, and conflicts with CMA. See comments
+ * above new_inode() why this is required _and_ expected if you're
+ * going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER |
+ __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+
etnaviv_gem_obj_add(dev, obj);
ret = drm_gem_handle_create(file, obj, handle);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index f0abb744ef95..753c458497d0 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -47,10 +47,6 @@ struct etnaviv_gem_object {
struct sg_table *sgt;
void *vaddr;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
struct list_head vram_list;
/* cache maintenance */
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index f21529e635e3..00e8b6a817e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -139,10 +139,3 @@ fail:
return ERR_PTR(ret);
}
-
-struct reservation_object *etnaviv_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-
- return etnaviv_obj->resv;
-}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b2fe3446bfbc..e054f09ac828 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -108,9 +108,9 @@ out_unlock:
static void submit_unlock_object(struct etnaviv_gem_submit *submit, int i)
{
if (submit->bos[i].flags & BO_LOCKED) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
- ww_mutex_unlock(&etnaviv_obj->resv->lock);
+ ww_mutex_unlock(&obj->resv->lock);
submit->bos[i].flags &= ~BO_LOCKED;
}
}
@@ -122,7 +122,7 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
retry:
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (slow_locked == i)
slow_locked = -1;
@@ -130,7 +130,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&obj->resv->lock,
ticket);
if (ret == -EALREADY)
DRM_ERROR("BO at index %u already on submit list\n",
@@ -153,12 +153,12 @@ fail:
submit_unlock_object(submit, slow_locked);
if (ret == -EDEADLK) {
- struct etnaviv_gem_object *etnaviv_obj;
+ struct drm_gem_object *obj;
- etnaviv_obj = submit->bos[contended].obj;
+ obj = &submit->bos[contended].obj->base;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&etnaviv_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&obj->resv->lock,
ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -176,7 +176,7 @@ static int submit_fence_sync(struct etnaviv_gem_submit *submit)
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
- struct reservation_object *robj = bo->obj->resv;
+ struct reservation_object *robj = bo->obj->base.resv;
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
ret = reservation_object_reserve_shared(robj, 1);
@@ -207,13 +207,13 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)
int i;
for (i = 0; i < submit->nr_bos; i++) {
- struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
+ struct drm_gem_object *obj = &submit->bos[i].obj->base;
if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
- reservation_object_add_excl_fence(etnaviv_obj->resv,
+ reservation_object_add_excl_fence(obj->resv,
submit->out_fence);
else
- reservation_object_add_shared_fence(etnaviv_obj->resv,
+ reservation_object_add_shared_fence(obj->resv,
submit->out_fence);
submit_unlock_object(submit, i);
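
The etnaviv object hunks drop the driver-private reservation_object: the GEM core object already carries one, so locking, fence bookkeeping and the now-deleted etnaviv_gem_prime_res_obj() all collapse onto obj->resv, and only imported buffers still point base.resv at the exporter's object. For example, waiting for outstanding work before CPU access, as in etnaviv_gem_cpu_prep() above, now goes through the core object:

	struct drm_gem_object *obj = &etnaviv_obj->base;

	ret = reservation_object_wait_timeout_rcu(obj->resv, write,
						  true, remain);
	if (ret <= 0)
		return ret == 0 ? -ETIMEDOUT : ret;
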
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 6904535475de..72d01e873160 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -365,6 +365,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
dev_info(gpu->dev, "model: GC%x, revision: %x\n",
gpu->identity.model, gpu->identity.revision);
+ gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
/*
* If there is a match in the HWDB, we aren't interested in the
* remaining register values, as they might be wrong.
@@ -412,7 +413,7 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
}
/* GC600 idle register reports zero bits where modules aren't present */
- if (gpu->identity.model == chipModel_GC600) {
+ if (gpu->identity.model == chipModel_GC600)
gpu->idle_mask = VIVS_HI_IDLE_STATE_TX |
VIVS_HI_IDLE_STATE_RA |
VIVS_HI_IDLE_STATE_SE |
@@ -421,9 +422,6 @@ static void etnaviv_hw_identify(struct etnaviv_gpu *gpu)
VIVS_HI_IDLE_STATE_PE |
VIVS_HI_IDLE_STATE_DE |
VIVS_HI_IDLE_STATE_FE;
- } else {
- gpu->idle_mask = ~VIVS_HI_IDLE_STATE_AXI_LP;
- }
etnaviv_hw_specs(gpu);
}
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 5b4e0e8b23bc..73b318a7ef49 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -188,7 +188,7 @@ static void decon_setup_trigger(struct decon_context *ctx)
if (regmap_update_bits(ctx->sysreg, DSD_CFG_MUX,
DSD_CFG_MUX_TE_UNMASK_GLOBAL, ~0))
- DRM_ERROR("Cannot update sysreg.\n");
+ DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg.\n");
}
static void decon_commit(struct exynos_drm_crtc *crtc)
@@ -356,7 +356,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %u\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %u\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -561,8 +561,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
int win, i, ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++) {
ret = clk_prepare_enable(ctx->clks[i]);
if (ret < 0)
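
This and the following exynos hunks are a largely mechanical conversion from the global DRM_ERROR()/DRM_DEBUG_KMS() macros to the device-aware DRM_DEV_ERROR()/DRM_DEV_DEBUG_KMS() variants, which take a struct device * so each message is attributed to the component that printed it; a few bare DRM_DEBUG_KMS("%s\n", __FILE__) prints are dropped outright, and a couple of messages change severity along the way (fimd's mode checks move from DRM_INFO to DRM_DEV_ERROR). The conversion pattern, using the message from the first hunk:

	/* before */
	DRM_ERROR("Cannot update sysreg.\n");
	/* after: dev_printk-style output, prefixed with the owning device */
	DRM_DEV_ERROR(ctx->dev, "Cannot update sysreg.\n");
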
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 381aa3d60e37..0217ee9a118d 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -99,7 +99,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void decon_clear_channels(struct exynos_drm_crtc *crtc)
@@ -107,8 +107,6 @@ static void decon_clear_channels(struct exynos_drm_crtc *crtc)
struct decon_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Check if any channel is enabled. */
for (win = 0; win < WINDOWS_NR; win++) {
u32 val = readl(ctx->regs + WINCON(win));
@@ -315,7 +313,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
break;
}
- DRM_DEBUG_KMS("cpp = %d\n", fb->format->cpp[0]);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cpp = %d\n", fb->format->cpp[0]);
/*
* In case of exynos, setting dma-burst to 16Word causes permanent
@@ -422,9 +420,9 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(state->src.x, ctx->regs + VIDW_OFFSET_X(win));
writel(state->src.y, ctx->regs + VIDW_OFFSET_Y(win));
- DRM_DEBUG_KMS("start addr = 0x%lx\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "start addr = 0x%lx\n",
(unsigned long)val);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
state->crtc.w, state->crtc.h);
val = VIDOSDxA_TOPLEFT_X(state->crtc.x) |
@@ -442,7 +440,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD alpha */
@@ -622,7 +620,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
ret = decon_ctx_initialize(ctx, drm_dev);
if (ret) {
- DRM_ERROR("decon_ctx_initialize failed.\n");
+ DRM_DEV_ERROR(dev, "decon_ctx_initialize failed.\n");
return ret;
}
@@ -802,25 +800,29 @@ static int exynos7_decon_resume(struct device *dev)
ret = clk_prepare_enable(ctx->pclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the pclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the pclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->aclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the aclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the aclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->eclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the eclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the eclk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->vclk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vclk [%d]\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to prepare_enable the vclk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 471242a5e580..b0288cf85701 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -83,7 +83,8 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode.\n");
+ DRM_DEV_ERROR(dp->dev,
+ "failed to create a new display mode.\n");
return num_modes;
}
@@ -111,7 +112,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
if (dp->ptn_bridge) {
ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
if (ret) {
- DRM_ERROR("Failed to attach bridge to drm\n");
+ DRM_DEV_ERROR(dp->dev,
+ "Failed to attach bridge to drm\n");
bridge->next = NULL;
return ret;
}
@@ -147,7 +149,8 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
ret = of_get_videomode(dp->dev->of_node, &dp->vm, OF_USE_NATIVE_MODE);
if (ret) {
- DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ DRM_DEV_ERROR(dp->dev,
+ "failed: of_get_videomode() : %d\n", ret);
return ret;
}
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 3432c5ee9f0c..bef8bc3c8e00 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -62,7 +62,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
int ret;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
- DRM_ERROR("Device %s lacks support for IOMMU\n",
+ DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
dev_name(subdrv_dev));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index ae425c9a3f7b..6ea92173db9f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -77,7 +77,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
mode = drm_mode_create(connector->dev);
if (!mode) {
- DRM_ERROR("failed to create a new display mode\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to create a new display mode\n");
return 0;
}
drm_display_mode_from_videomode(ctx->vm, mode);
@@ -108,7 +109,8 @@ static int exynos_dpi_create_connector(struct drm_encoder *encoder)
&exynos_dpi_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
if (ret) {
- DRM_ERROR("failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "failed to initialize connector with drm\n");
return ret;
}
@@ -213,7 +215,8 @@ int exynos_dpi_bind(struct drm_device *dev, struct drm_encoder *encoder)
ret = exynos_dpi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(encoder_to_dpi(encoder)->dev,
+ "failed to create connector ret = %d\n", ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a4253dd55f86..63a4b5074a99 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1483,7 +1483,8 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(dsi->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -1527,7 +1528,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
int ret = exynos_dsi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dsi->dev,
+ "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 1f11ab0f8e9d..832d22f57b4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -45,7 +45,8 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev,
* supported without IOMMU.
*/
if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_ERROR("Non-contiguous GEM memory is not supported.\n");
+ DRM_DEV_ERROR(drm_dev->dev,
+ "Non-contiguous GEM memory is not supported.\n");
return -EINVAL;
}
@@ -83,7 +84,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
if (ret < 0) {
- DRM_ERROR("failed to initialize framebuffer\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize framebuffer\n");
goto err;
}
@@ -113,7 +115,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
exynos_gem[i] = exynos_drm_gem_get(file_priv,
mode_cmd->handles[i]);
if (!exynos_gem[i]) {
- DRM_ERROR("failed to lookup gem object\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c30dd88cdb25..724cb52a374a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -55,7 +55,7 @@ static int exynos_drm_fb_mmap(struct fb_info *info,
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to mmap.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev), "failed to mmap.\n");
return ret;
}
@@ -83,22 +83,22 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
- DRM_ERROR("failed to allocate fb info.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to allocate fb info.\n");
return PTR_ERR(fbi);
}
- fbi->par = helper;
fbi->fbops = &exynos_drm_fb_ops;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
nr_pages = exynos_gem->size >> PAGE_SHIFT;
exynos_gem->kvaddr = (void __iomem *) vmap(exynos_gem->pages, nr_pages,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
if (!exynos_gem->kvaddr) {
- DRM_ERROR("failed to map pages to kernel space.\n");
+ DRM_DEV_ERROR(to_dma_dev(helper->dev),
+ "failed to map pages to kernel space.\n");
return -EIO;
}
@@ -122,9 +122,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
unsigned long size;
int ret;
- DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d\n",
- sizes->surface_width, sizes->surface_height,
- sizes->surface_bpp);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "surface width(%d), height(%d) and bpp(%d\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
@@ -154,7 +155,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
helper->fb =
exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1);
if (IS_ERR(helper->fb)) {
- DRM_ERROR("failed to create drm framebuffer.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n");
ret = PTR_ERR(helper->fb);
goto err_destroy_gem;
}
@@ -203,20 +204,23 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
if (ret < 0) {
- DRM_ERROR("failed to initialize drm fb helper.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to initialize drm fb helper.\n");
goto err_init;
}
ret = drm_fb_helper_single_add_all_connectors(helper);
if (ret < 0) {
- DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to register drm_fb_helper_connector.\n");
goto err_setup;
}
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
- DRM_ERROR("failed to set up hw configuration.\n");
+ DRM_DEV_ERROR(dev->dev,
+ "failed to set up hw configuration.\n");
goto err_setup;
}
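
The fbdev hunk above replaces the drm_fb_helper_fill_fix()/drm_fb_helper_fill_var() pair with the newer drm_fb_helper_fill_info() helper; because the hunk also drops the explicit fbi->par assignment, the single call is expected to take care of that too, deriving everything it needs from the helper and the surface sizes. In short:

	/* before */
	fbi->par = helper;
	drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
	drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);

	/* after */
	drm_fb_helper_fill_info(fbi, helper, sizes);
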
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 90dfea0aec4d..c50b0f9270a4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -186,7 +186,7 @@ static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable)
@@ -201,7 +201,7 @@ static void fimc_mask_irq(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
if (enable) {
@@ -225,15 +225,16 @@ static bool fimc_check_ovf(struct fimc_context *ctx)
flag = EXYNOS_CISTATUS_OVFIY | EXYNOS_CISTATUS_OVFICB |
EXYNOS_CISTATUS_OVFICR;
- DRM_DEBUG_KMS("flag[0x%x]\n", flag);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "flag[0x%x]\n", flag);
if (status & flag) {
fimc_set_bits(ctx, EXYNOS_CIWDOFST,
EXYNOS_CIWDOFST_CLROVFIY | EXYNOS_CIWDOFST_CLROVFICB |
EXYNOS_CIWDOFST_CLROVFICR);
- dev_err(ctx->dev, "occurred overflow at %d, status 0x%x.\n",
- ctx->id, status);
+ DRM_DEV_ERROR(ctx->dev,
+ "occurred overflow at %d, status 0x%x.\n",
+ ctx->id, status);
return true;
}
@@ -246,7 +247,7 @@ static bool fimc_check_frame_end(struct fimc_context *ctx)
cfg = fimc_read(ctx, EXYNOS_CISTATUS);
- DRM_DEBUG_KMS("cfg[0x%x]\n", cfg);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]\n", cfg);
if (!(cfg & EXYNOS_CISTATUS_FRAMEEND))
return false;
@@ -268,17 +269,17 @@ static int fimc_get_buf_id(struct fimc_context *ctx)
if (frame_cnt == 0)
frame_cnt = EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg);
- DRM_DEBUG_KMS("present[%d]before[%d]\n",
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
- EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "present[%d]before[%d]\n",
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_PRESENT(cfg),
+ EXYNOS_CISTATUS2_GET_FRAMECOUNT_BEFORE(cfg));
if (frame_cnt == 0) {
- DRM_ERROR("failed to get frame count.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get frame count.\n");
return -EIO;
}
buf_id = frame_cnt - 1;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
return buf_id;
}
@@ -287,7 +288,7 @@ static void fimc_handle_lastend(struct fimc_context *ctx, bool enable)
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]\n", enable);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]\n", enable);
cfg = fimc_read(ctx, EXYNOS_CIOCTRL);
if (enable)
@@ -302,7 +303,7 @@ static void fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -367,7 +368,7 @@ static void fimc_src_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_MSCTRL);
cfg &= ~EXYNOS_MSCTRL_INFORMAT_RGB;
@@ -420,7 +421,7 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg1, cfg2;
- DRM_DEBUG_KMS("rotation[%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[%x]\n", rotation);
cfg1 = fimc_read(ctx, EXYNOS_MSCTRL);
cfg1 &= ~(EXYNOS_MSCTRL_FLIP_X_MIRROR |
@@ -478,10 +479,11 @@ static void fimc_set_window(struct fimc_context *ctx,
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
- buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- real_width, buf->buf.height);
- DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
+ buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
+ real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1,
+ v2);
/*
* set window offset 1, 2 size
@@ -506,7 +508,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
@@ -514,8 +517,8 @@ static void fimc_src_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y, buf->rect.w, buf->rect.h);
/* set input DMA image size */
cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE);
@@ -560,7 +563,7 @@ static void fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
/* RGB */
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
@@ -631,7 +634,7 @@ static void fimc_dst_set_fmt(struct fimc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = fimc_read(ctx, EXYNOS_CIEXTEN);
@@ -691,7 +694,7 @@ static void fimc_dst_set_transf(struct fimc_context *ctx, unsigned int rotation)
unsigned int degree = rotation & DRM_MODE_ROTATE_MASK;
u32 cfg;
- DRM_DEBUG_KMS("rotation[0x%x]\n", rotation);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "rotation[0x%x]\n", rotation);
cfg = fimc_read(ctx, EXYNOS_CITRGFMT);
cfg &= ~EXYNOS_CITRGFMT_FLIP_MASK;
@@ -775,19 +778,20 @@ static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc,
pre_dst_width = src_w >> hfactor;
pre_dst_height = src_h >> vfactor;
- DRM_DEBUG_KMS("pre_dst_width[%d]pre_dst_height[%d]\n",
- pre_dst_width, pre_dst_height);
- DRM_DEBUG_KMS("hfactor[%d]vfactor[%d]\n", hfactor, vfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_dst_width[%d]pre_dst_height[%d]\n",
+ pre_dst_width, pre_dst_height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hfactor[%d]vfactor[%d]\n", hfactor,
+ vfactor);
sc->hratio = (src_w << 14) / (dst_w << hfactor);
sc->vratio = (src_h << 14) / (dst_h << vfactor);
sc->up_h = (dst_w >= src_w) ? true : false;
sc->up_v = (dst_h >= src_h) ? true : false;
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
- sc->hratio, sc->vratio, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]up_h[%d]up_v[%d]\n",
+ sc->hratio, sc->vratio, sc->up_h, sc->up_v);
shfactor = FIMC_SHFACTOR - (hfactor + vfactor);
- DRM_DEBUG_KMS("shfactor[%d]\n", shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "shfactor[%d]\n", shfactor);
cfg = (EXYNOS_CISCPRERATIO_SHFACTOR(shfactor) |
EXYNOS_CISCPRERATIO_PREHORRATIO(1 << hfactor) |
@@ -805,10 +809,10 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
{
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
- sc->range, sc->bypass, sc->up_h, sc->up_v);
- DRM_DEBUG_KMS("hratio[%d]vratio[%d]\n",
- sc->hratio, sc->vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "range[%d]bypass[%d]up_h[%d]up_v[%d]\n",
+ sc->range, sc->bypass, sc->up_h, sc->up_v);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hratio[%d]vratio[%d]\n",
+ sc->hratio, sc->vratio);
cfg = fimc_read(ctx, EXYNOS_CISCCTRL);
cfg &= ~(EXYNOS_CISCCTRL_SCALERBYPASS |
@@ -846,7 +850,8 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "hsize[%d]vsize[%d]\n", real_width,
+ buf->buf.height);
/* original size */
cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
@@ -854,8 +859,9 @@ static void fimc_dst_set_size(struct fimc_context *ctx,
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
- DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x, buf->rect.y,
- buf->rect.w, buf->rect.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "x[%d]y[%d]w[%d]h[%d]\n", buf->rect.x,
+ buf->rect.y,
+ buf->rect.w, buf->rect.h);
/* CSC ITU */
cfg = fimc_read(ctx, EXYNOS_CIGCTRL);
@@ -905,7 +911,7 @@ static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id,
u32 buf_num;
u32 cfg;
- DRM_DEBUG_KMS("buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]enqueu[%d]\n", buf_id, enqueue);
spin_lock_irqsave(&ctx->lock, flags);
@@ -945,7 +951,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
struct fimc_context *ctx = dev_id;
int buf_id;
- DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fimc id[%d]\n", ctx->id);
fimc_clear_irq(ctx);
if (fimc_check_ovf(ctx))
@@ -958,7 +964,7 @@ static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
if (buf_id < 0)
return IRQ_HANDLED;
- DRM_DEBUG_KMS("buf_id[%d]\n", buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id[%d]\n", buf_id);
if (ctx->task) {
struct exynos_drm_ipp_task *task = ctx->task;
@@ -1128,9 +1134,10 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "fimc");
@@ -1147,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1380,7 +1387,7 @@ static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
return 0;
}
@@ -1389,7 +1396,7 @@ static int fimc_runtime_resume(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
return clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
}
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 786a8ee6f10f..8039e1a3671d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -315,7 +315,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
if (!wait_event_timeout(ctx->wait_vsync_queue,
!atomic_read(&ctx->wait_vsync_event),
HZ/20))
- DRM_DEBUG_KMS("vblank wait timed out.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "vblank wait timed out.\n");
}
static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
@@ -350,8 +350,6 @@ static void fimd_clear_channels(struct exynos_drm_crtc *crtc)
struct fimd_context *ctx = crtc->ctx;
unsigned int win, ch_enabled = 0;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
/* Hardware is in unknown state, so ensure it gets enabled properly */
pm_runtime_get_sync(ctx->dev);
@@ -400,7 +398,7 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
u32 clkdiv;
if (mode->clock == 0) {
- DRM_INFO("Mode has zero clock value.\n");
+ DRM_DEV_ERROR(ctx->dev, "Mode has zero clock value.\n");
return -EINVAL;
}
@@ -416,15 +414,17 @@ static int fimd_atomic_check(struct exynos_drm_crtc *crtc,
lcd_rate = clk_get_rate(ctx->lcd_clk);
if (2 * lcd_rate < ideal_clk) {
- DRM_INFO("sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
- lcd_rate, ideal_clk);
+ DRM_DEV_ERROR(ctx->dev,
+ "sclk_fimd clock too low(%lu) for requested pixel clock(%lu)\n",
+ lcd_rate, ideal_clk);
return -EINVAL;
}
/* Find the clock divider value that gets us closest to ideal_clk */
clkdiv = DIV_ROUND_CLOSEST(lcd_rate, ideal_clk);
if (clkdiv >= 0x200) {
- DRM_INFO("requested pixel clock(%lu) too low\n", ideal_clk);
+ DRM_DEV_ERROR(ctx->dev, "requested pixel clock(%lu) too low\n",
+ ideal_clk);
return -EINVAL;
}
@@ -481,7 +481,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x3 << driver_data->lcdblk_vt_shift,
0x1 << driver_data->lcdblk_vt_shift)) {
- DRM_ERROR("Failed to update sysreg for I80 i/f.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for I80 i/f.\n");
return;
}
} else {
@@ -525,7 +526,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_bypass_shift,
0x1 << driver_data->lcdblk_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass setting.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass setting.\n");
return;
}
@@ -537,7 +539,8 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
driver_data->lcdblk_offset,
0x1 << driver_data->lcdblk_mic_bypass_shift,
0x1 << driver_data->lcdblk_mic_bypass_shift)) {
- DRM_ERROR("Failed to update sysreg for bypass mic.\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to update sysreg for bypass mic.\n");
return;
}
@@ -814,10 +817,11 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = (unsigned long)(dma_addr + size);
writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
- DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
- (unsigned long)dma_addr, val, size);
- DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
- state->crtc.w, state->crtc.h);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+ (unsigned long)dma_addr, val, size);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "ovl_width = %d, ovl_height = %d\n",
+ state->crtc.w, state->crtc.h);
/* buffer size */
buf_offsize = pitch - (state->crtc.w * cpp);
@@ -847,8 +851,9 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->regs + VIDOSD_B(win));
- DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
- state->crtc.x, state->crtc.y, last_x, last_y);
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+ state->crtc.x, state->crtc.y, last_x, last_y);
/* OSD size */
if (win != 3 && win != 4) {
@@ -858,7 +863,8 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc,
val = state->crtc.w * state->crtc.h;
writel(val, ctx->regs + offset);
- DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "osd size = 0x%x\n",
+ (unsigned int)val);
}
fimd_win_set_pixfmt(ctx, win, fb, state->src.w);
@@ -1252,13 +1258,17 @@ static int exynos_fimd_resume(struct device *dev)
ret = clk_prepare_enable(ctx->bus_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the bus clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the bus clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->lcd_clk);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the lcd clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the lcd clk [%d]\n",
+ ret);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 24c536d6d9cf..c20b3a759370 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -429,7 +429,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
int ret;
if (!size) {
- DRM_ERROR("invalid userptr size.\n");
+ DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
return ERR_PTR(-EINVAL);
}
@@ -482,7 +482,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
g2d_userptr->vec);
if (ret != npages) {
- DRM_ERROR("failed to get user pages from userptr.\n");
+ DRM_DEV_ERROR(g2d->dev,
+ "failed to get user pages from userptr.\n");
if (ret < 0)
goto err_destroy_framevec;
ret = -EFAULT;
@@ -503,7 +504,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
frame_vector_pages(g2d_userptr->vec),
npages, offset, size, GFP_KERNEL);
if (ret < 0) {
- DRM_ERROR("failed to get sgt from pages.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
goto err_free_sgt;
}
@@ -511,7 +512,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
- DRM_ERROR("failed to map sgt with dma region.\n");
+ DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
ret = -ENOMEM;
goto err_sg_free_table;
}
@@ -560,7 +561,7 @@ static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
g2d->current_pool = 0;
}
-static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
+static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
{
enum g2d_reg_type reg_type;
@@ -593,7 +594,8 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
break;
default:
reg_type = REG_TYPE_NONE;
- DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
+ DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
+ reg_offset);
break;
}
@@ -627,9 +629,10 @@ static unsigned long g2d_get_buf_bpp(unsigned int format)
return bpp;
}
-static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
- enum g2d_reg_type reg_type,
- unsigned long size)
+static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
+ struct g2d_buf_desc *buf_desc,
+ enum g2d_reg_type reg_type,
+ unsigned long size)
{
int width, height;
unsigned long bpp, last_pos;
@@ -644,14 +647,15 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
/* This check also makes sure that right_x > left_x. */
width = (int)buf_desc->right_x - (int)buf_desc->left_x;
if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
- DRM_ERROR("width[%d] is out of range!\n", width);
+ DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
return false;
}
/* This check also makes sure that bottom_y > top_y. */
height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
- DRM_ERROR("height[%d] is out of range!\n", height);
+ DRM_DEV_ERROR(g2d->dev,
+ "height[%d] is out of range!\n", height);
return false;
}
@@ -670,8 +674,8 @@ static bool g2d_check_buf_desc_is_valid(struct g2d_buf_desc *buf_desc,
*/
if (last_pos >= size) {
- DRM_ERROR("last engine access position [%lu] "
- "is out of range [%lu]!\n", last_pos, size);
+ DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
+ "is out of range [%lu]!\n", last_pos, size);
return false;
}
@@ -701,7 +705,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
offset = cmdlist->data[reg_pos];
handle = cmdlist->data[reg_pos + 1];
- reg_type = g2d_get_reg_type(offset);
+ reg_type = g2d_get_reg_type(g2d, offset);
if (reg_type == REG_TYPE_NONE) {
ret = -EFAULT;
goto err;
@@ -718,7 +722,7 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc,
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
reg_type, exynos_gem->size)) {
exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
@@ -736,8 +740,9 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- g2d_userptr.size)) {
+ if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
+ reg_type,
+ g2d_userptr.size)) {
ret = -EFAULT;
goto err;
}
@@ -845,7 +850,7 @@ static void g2d_free_runqueue_node(struct g2d_data *g2d,
*
* Has to be called under runqueue lock.
*/
-static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file* file)
+static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
{
struct g2d_runqueue_node *node, *n;
@@ -1044,7 +1049,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (!for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
/* check userptr buffer type. */
if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
@@ -1058,7 +1063,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
buf_desc->stride = cmdlist->data[index + 1];
@@ -1068,7 +1073,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1080,7 +1085,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
@@ -1093,7 +1098,7 @@ static int g2d_check_reg_offset(struct g2d_data *g2d,
if (for_addr)
goto err;
- reg_type = g2d_get_reg_type(reg_offset);
+ reg_type = g2d_get_reg_type(g2d, reg_offset);
buf_desc = &buf_info->descs[reg_type];
value = cmdlist->data[index + 1];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index df66c383a877..a55f5ac41bf3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -29,7 +29,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
int ret = -ENOMEM;
if (exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("already allocated.\n");
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "already allocated.\n");
return 0;
}
@@ -61,7 +61,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->pages = kvmalloc_array(nr_pages, sizeof(struct page *),
GFP_KERNEL | __GFP_ZERO);
if (!exynos_gem->pages) {
- DRM_ERROR("failed to allocate pages.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate pages.\n");
return -ENOMEM;
}
@@ -69,7 +69,7 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
&exynos_gem->dma_addr, GFP_KERNEL,
exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to allocate buffer.\n");
goto err_free;
}
@@ -77,20 +77,20 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
exynos_gem->dma_addr, exynos_gem->size,
exynos_gem->dma_attrs);
if (ret < 0) {
- DRM_ERROR("failed to get sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "failed to get sgtable.\n");
goto err_dma_free;
}
if (drm_prime_sg_to_page_addr_arrays(&sgt, exynos_gem->pages, NULL,
nr_pages)) {
- DRM_ERROR("invalid sgtable.\n");
+ DRM_DEV_ERROR(to_dma_dev(dev), "invalid sgtable.\n");
ret = -EINVAL;
goto err_sgt_free;
}
sg_free_table(&sgt);
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(to_dma_dev(dev), "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
return 0;
@@ -111,11 +111,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
struct drm_device *dev = exynos_gem->base.dev;
if (!exynos_gem->dma_addr) {
- DRM_DEBUG_KMS("dma_addr is invalid.\n");
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr is invalid.\n");
return;
}
- DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
+ DRM_DEV_DEBUG_KMS(dev->dev, "dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
@@ -139,7 +139,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
if (ret)
return ret;
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
drm_gem_object_put_unlocked(obj);
@@ -151,7 +151,8 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
{
struct drm_gem_object *obj = &exynos_gem->base;
- DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "handle count = %d\n",
+ obj->handle_count);
/*
* do not release memory region from exporter.
@@ -186,7 +187,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initialize gem object\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize gem object\n");
kfree(exynos_gem);
return ERR_PTR(ret);
}
@@ -198,7 +199,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
- DRM_DEBUG_KMS("created file object = %pK\n", obj->filp);
+ DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp);
return exynos_gem;
}
@@ -211,12 +212,13 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
int ret;
if (flags & ~(EXYNOS_BO_MASK)) {
- DRM_ERROR("invalid GEM buffer flags: %u\n", flags);
+ DRM_DEV_ERROR(dev->dev,
+ "invalid GEM buffer flags: %u\n", flags);
return ERR_PTR(-EINVAL);
}
if (!size) {
- DRM_ERROR("invalid GEM buffer size: %lu\n", size);
+ DRM_DEV_ERROR(dev->dev, "invalid GEM buffer size: %lu\n", size);
return ERR_PTR(-EINVAL);
}
@@ -325,7 +327,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(file_priv, args->handle);
if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
+ DRM_DEV_ERROR(dev->dev, "failed to lookup gem object.\n");
return -EINVAL;
}
@@ -408,7 +410,8 @@ static int exynos_drm_gem_mmap_obj(struct drm_gem_object *obj,
struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
int ret;
- DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
+ DRM_DEV_DEBUG_KMS(to_dma_dev(obj->dev), "flags = 0x%x\n",
+ exynos_gem->flags);
/* non-cachable as default. */
if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index f048d97fe9e2..0bfb5e9f6e91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -395,7 +395,7 @@ static int gsc_sw_reset(struct gsc_context *ctx)
}
if (cfg) {
- DRM_ERROR("failed to reset gsc h/w.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to reset gsc h/w.\n");
return -EBUSY;
}
@@ -422,8 +422,8 @@ static void gsc_handle_irq(struct gsc_context *ctx, bool enable,
{
u32 cfg;
- DRM_DEBUG_KMS("enable[%d]overflow[%d]level[%d]\n",
- enable, overflow, done);
+	DRM_DEV_DEBUG_KMS(ctx->dev, "enable[%d]overflow[%d]done[%d]\n",
+			  enable, overflow, done);
cfg = gsc_read(GSC_IRQ);
cfg |= (GSC_IRQ_OR_MASK | GSC_IRQ_FRMDONE_MASK);
@@ -451,7 +451,7 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_IN_CON);
cfg &= ~(GSC_IN_RGB_TYPE_MASK | GSC_IN_YUV422_1P_ORDER_MASK |
@@ -638,7 +638,7 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
{
u32 cfg;
- DRM_DEBUG_KMS("fmt[0x%x]\n", fmt);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "fmt[0x%x]\n", fmt);
cfg = gsc_read(GSC_OUT_CON);
cfg &= ~(GSC_OUT_RGB_TYPE_MASK | GSC_OUT_YUV422_1P_ORDER_MASK |
@@ -706,12 +706,13 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt, bool tiled)
gsc_write(cfg, GSC_OUT_CON);
}
-static int gsc_get_ratio_shift(u32 src, u32 dst, u32 *ratio)
+static int gsc_get_ratio_shift(struct gsc_context *ctx, u32 src, u32 dst,
+ u32 *ratio)
{
- DRM_DEBUG_KMS("src[%d]dst[%d]\n", src, dst);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "src[%d]dst[%d]\n", src, dst);
if (src >= dst * 8) {
- DRM_ERROR("failed to make ratio and shift.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to make ratio and shift.\n");
return -EINVAL;
} else if (src >= dst * 4)
*ratio = 4;
@@ -759,31 +760,31 @@ static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc,
dst_h = dst->h;
}
- ret = gsc_get_ratio_shift(src_w, dst_w, &sc->pre_hratio);
+ ret = gsc_get_ratio_shift(ctx, src_w, dst_w, &sc->pre_hratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio horizontal.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio horizontal.\n");
return ret;
}
- ret = gsc_get_ratio_shift(src_h, dst_h, &sc->pre_vratio);
+ ret = gsc_get_ratio_shift(ctx, src_h, dst_h, &sc->pre_vratio);
if (ret) {
- dev_err(ctx->dev, "failed to get ratio vertical.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get ratio vertical.\n");
return ret;
}
- DRM_DEBUG_KMS("pre_hratio[%d]pre_vratio[%d]\n",
- sc->pre_hratio, sc->pre_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_hratio[%d]pre_vratio[%d]\n",
+ sc->pre_hratio, sc->pre_vratio);
sc->main_hratio = (src_w << 16) / dst_w;
sc->main_vratio = (src_h << 16) / dst_h;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_get_prescaler_shfactor(sc->pre_hratio, sc->pre_vratio,
&sc->pre_shfactor);
- DRM_DEBUG_KMS("pre_shfactor[%d]\n", sc->pre_shfactor);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "pre_shfactor[%d]\n", sc->pre_shfactor);
cfg = (GSC_PRESC_SHFACTOR(sc->pre_shfactor) |
GSC_PRESC_H_RATIO(sc->pre_hratio) |
@@ -849,8 +850,8 @@ static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc)
{
u32 cfg;
- DRM_DEBUG_KMS("main_hratio[%ld]main_vratio[%ld]\n",
- sc->main_hratio, sc->main_vratio);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "main_hratio[%ld]main_vratio[%ld]\n",
+ sc->main_hratio, sc->main_vratio);
gsc_set_h_coef(ctx, sc->main_hratio);
cfg = GSC_MAIN_H_RATIO_VALUE(sc->main_hratio);
@@ -916,7 +917,7 @@ static int gsc_dst_get_buf_seq(struct gsc_context *ctx)
if (cfg & (mask << i))
buf_num--;
- DRM_DEBUG_KMS("buf_num[%d]\n", buf_num);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_num[%d]\n", buf_num);
return buf_num;
}
@@ -963,7 +964,7 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_SRC;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_IN_BASE_ADDR_Y_MASK);
curr_index = GSC_IN_CURR_GET_INDEX(cfg);
@@ -975,11 +976,11 @@ static int gsc_get_src_buf_index(struct gsc_context *ctx)
}
}
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
if (buf_id == GSC_MAX_SRC) {
- DRM_ERROR("failed to get in buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get in buffer index.\n");
return -EINVAL;
}
@@ -993,7 +994,7 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
u32 cfg, curr_index, i;
u32 buf_id = GSC_MAX_DST;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
cfg = gsc_read(GSC_OUT_BASE_ADDR_Y_MASK);
curr_index = GSC_OUT_CURR_GET_INDEX(cfg);
@@ -1006,14 +1007,14 @@ static int gsc_get_dst_buf_index(struct gsc_context *ctx)
}
if (buf_id == GSC_MAX_DST) {
- DRM_ERROR("failed to get out buffer index.\n");
+ DRM_DEV_ERROR(ctx->dev, "failed to get out buffer index.\n");
return -EINVAL;
}
gsc_dst_set_buf_seq(ctx, buf_id, false);
- DRM_DEBUG_KMS("cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
- curr_index, buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "cfg[0x%x]curr_index[%d]buf_id[%d]\n", cfg,
+ curr_index, buf_id);
return buf_id;
}
@@ -1024,7 +1025,7 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
u32 status;
int err = 0;
- DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "gsc id[%d]\n", ctx->id);
status = gsc_read(GSC_IRQ);
if (status & GSC_IRQ_STATUS_OR_IRQ) {
@@ -1042,8 +1043,8 @@ static irqreturn_t gsc_irq_handler(int irq, void *dev_id)
src_buf_id = gsc_get_src_buf_index(ctx);
dst_buf_id = gsc_get_dst_buf_index(ctx);
- DRM_DEBUG_KMS("buf_id_src[%d]buf_id_dst[%d]\n", src_buf_id,
- dst_buf_id);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "buf_id_src[%d]buf_id_dst[%d]\n",
+ src_buf_id, dst_buf_id);
if (src_buf_id < 0 || dst_buf_id < 0)
err = -EINVAL;
@@ -1169,9 +1170,10 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
+	ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
ctx->formats, ctx->num_formats, "gsc");
@@ -1188,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &ctx->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(drm_dev, dev);
}
@@ -1324,7 +1326,7 @@ static int __maybe_unused gsc_runtime_suspend(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = ctx->num_clocks - 1; i >= 0; i--)
clk_disable_unprepare(ctx->clocks[i]);
@@ -1337,7 +1339,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
struct gsc_context *ctx = get_gsc_context(dev);
int i, ret;
- DRM_DEBUG_KMS("id[%d]\n", ctx->id);
+ DRM_DEV_DEBUG_KMS(dev, "id[%d]\n", ctx->id);
for (i = 0; i < ctx->num_clocks; i++) {
ret = clk_prepare_enable(ctx->clocks[i]);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 23226a0212e8..c862099723a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -43,7 +43,7 @@ static LIST_HEAD(ipp_list);
* Returns:
* Zero on success, error code on failure.
*/
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name)
@@ -67,7 +67,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
list_add_tail(&ipp->head, &ipp_list);
ipp->id = num_ipp++;
- DRM_DEBUG_DRIVER("Registered ipp %d\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(dev, "Registered ipp %d\n", ipp->id);
return 0;
}
@@ -77,7 +77,7 @@ int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
* @dev: DRM device
* @ipp: ipp module
*/
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp)
{
WARN_ON(ipp->task);
@@ -268,7 +268,7 @@ static inline struct exynos_drm_ipp_task *
task->src.rect.h = task->dst.rect.h = UINT_MAX;
task->transform.rotation = DRM_MODE_ROTATE_0;
- DRM_DEBUG_DRIVER("Allocated task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task);
return task;
}
@@ -335,7 +335,9 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task,
size -= map[i].size;
}
- DRM_DEBUG_DRIVER("Got task %pK configuration from userspace\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Got task %pK configuration from userspace\n",
+ task);
return 0;
}
@@ -389,12 +391,12 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
struct exynos_drm_ipp_task *task)
{
- DRM_DEBUG_DRIVER("Freeing task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task);
exynos_drm_ipp_task_release_buf(&task->src);
exynos_drm_ipp_task_release_buf(&task->dst);
if (task->event)
- drm_event_cancel_free(ipp->dev, &task->event->base);
+ drm_event_cancel_free(ipp->drm_dev, &task->event->base);
kfree(task);
}
@@ -553,8 +555,9 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
DRM_EXYNOS_IPP_FORMAT_DESTINATION);
if (!fmt) {
- DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
- buf == src ? "src" : "dst");
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: %s format not supported\n",
+ task, buf == src ? "src" : "dst");
return -EINVAL;
}
@@ -603,7 +606,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
bool rotate = (rotation != DRM_MODE_ROTATE_0);
bool scale = false;
- DRM_DEBUG_DRIVER("Checking task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task);
if (src->rect.w == UINT_MAX)
src->rect.w = src->buf.width;
@@ -618,8 +621,9 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
src->rect.y + src->rect.h > (src->buf.height) ||
dst->rect.x + dst->rect.w > (dst->buf.width) ||
dst->rect.y + dst->rect.h > (dst->buf.height)) {
- DRM_DEBUG_DRIVER("Task %pK: defined area is outside provided buffers\n",
- task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: defined area is outside provided buffers\n",
+ task);
return -EINVAL;
}
@@ -635,7 +639,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) ||
(!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) &&
src->buf.fourcc != dst->buf.fourcc)) {
- DRM_DEBUG_DRIVER("Task %pK: hw capabilities exceeded\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n",
+ task);
return -EINVAL;
}
@@ -647,7 +652,8 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
if (ret)
return ret;
- DRM_DEBUG_DRIVER("Task %pK: all checks done.\n", task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n",
+ task);
return ret;
}
@@ -658,20 +664,26 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task,
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
int ret = 0;
- DRM_DEBUG_DRIVER("Setting buffer for task %pK\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n",
+ task);
ret = exynos_drm_ipp_task_setup_buffer(src, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: src buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: src buffer setup failed\n",
+ task);
return ret;
}
ret = exynos_drm_ipp_task_setup_buffer(dst, filp);
if (ret) {
- DRM_DEBUG_DRIVER("Task %pK: dst buffer setup failed\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev,
+ "Task %pK: dst buffer setup failed\n",
+ task);
return ret;
}
- DRM_DEBUG_DRIVER("Task %pK: buffers prepared.\n", task);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n",
+ task);
return ret;
}
@@ -691,7 +703,7 @@ static int exynos_drm_ipp_event_create(struct exynos_drm_ipp_task *task,
e->event.base.length = sizeof(e->event);
e->event.user_data = user_data;
- ret = drm_event_reserve_init(task->dev, file_priv, &e->base,
+ ret = drm_event_reserve_init(task->ipp->drm_dev, file_priv, &e->base,
&e->event.base);
if (ret)
goto free;
@@ -712,7 +724,7 @@ static void exynos_drm_ipp_event_send(struct exynos_drm_ipp_task *task)
task->event->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
task->event->event.sequence = atomic_inc_return(&task->ipp->sequence);
- drm_send_event(task->dev, &task->event->base);
+ drm_send_event(task->ipp->drm_dev, &task->event->base);
}
static int exynos_drm_ipp_task_cleanup(struct exynos_drm_ipp_task *task)
@@ -749,7 +761,8 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret)
struct exynos_drm_ipp *ipp = task->ipp;
unsigned long flags;
- DRM_DEBUG_DRIVER("ipp: %d, task %pK done: %d\n", ipp->id, task, ret);
+ DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n",
+ ipp->id, task, ret);
spin_lock_irqsave(&ipp->lock, flags);
if (ipp->task == task)
@@ -773,7 +786,8 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
unsigned long flags;
int ret;
- DRM_DEBUG_DRIVER("ipp: %d, try to run new task\n", ipp->id);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, try to run new task\n",
+ ipp->id);
spin_lock_irqsave(&ipp->lock, flags);
@@ -789,7 +803,9 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp)
spin_unlock_irqrestore(&ipp->lock, flags);
- DRM_DEBUG_DRIVER("ipp: %d, selected task %pK to run\n", ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, selected task %pK to run\n", ipp->id,
+ task);
ret = ipp->funcs->commit(ipp, task);
if (ret)
@@ -897,15 +913,16 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data,
* then freed after exynos_drm_ipp_task_done()
*/
if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) {
- DRM_DEBUG_DRIVER("ipp: %d, nonblocking processing task %pK\n",
- ipp->id, task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev,
+ "ipp: %d, nonblocking processing task %pK\n",
+ ipp->id, task);
task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC;
exynos_drm_ipp_schedule_task(task->ipp, task);
ret = 0;
} else {
- DRM_DEBUG_DRIVER("ipp: %d, processing task %pK\n", ipp->id,
- task);
+ DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n",
+ ipp->id, task);
exynos_drm_ipp_schedule_task(ipp, task);
ret = wait_event_interruptible(ipp->done_wq,
task->flags & DRM_EXYNOS_IPP_TASK_DONE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.h b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
index 0b27d4a9bf94..5524c457a947 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.h
@@ -54,7 +54,8 @@ struct exynos_drm_ipp_funcs {
* struct exynos_drm_ipp - central picture processor module structure
*/
struct exynos_drm_ipp {
- struct drm_device *dev;
+ struct drm_device *drm_dev;
+ struct device *dev;
struct list_head head;
unsigned int id;
@@ -85,7 +86,7 @@ struct exynos_drm_ipp_buffer {
* has to be performed by the picture processor hardware module
*/
struct exynos_drm_ipp_task {
- struct drm_device *dev;
+ struct device *dev;
struct exynos_drm_ipp *ipp;
struct list_head head;
@@ -129,11 +130,11 @@ struct exynos_drm_ipp_formats {
#define IPP_SCALE_LIMIT(val...) \
.type = (DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE), val
-int exynos_drm_ipp_register(struct drm_device *dev, struct exynos_drm_ipp *ipp,
+int exynos_drm_ipp_register(struct device *dev, struct exynos_drm_ipp *ipp,
const struct exynos_drm_ipp_funcs *funcs, unsigned int caps,
const struct exynos_drm_ipp_formats *formats,
unsigned int num_formats, const char *name);
-void exynos_drm_ipp_unregister(struct drm_device *dev,
+void exynos_drm_ipp_unregister(struct device *dev,
struct exynos_drm_ipp *ipp);
void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index dd02e8a323ef..d1c8411ae7d4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -113,7 +113,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_read(mic->sysreg, DSD_CFG_MUX, &val);
if (ret) {
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
return;
}
@@ -129,7 +130,8 @@ static void mic_set_path(struct exynos_mic *mic, bool enable)
ret = regmap_write(mic->sysreg, DSD_CFG_MUX, val);
if (ret)
- DRM_ERROR("mic: Failed to read system register\n");
+ DRM_DEV_ERROR(mic->dev,
+ "mic: Failed to read system register\n");
}
static int mic_sw_reset(struct exynos_mic *mic)
@@ -190,7 +192,7 @@ static void mic_set_output_timing(struct exynos_mic *mic)
struct videomode vm = mic->vm;
u32 reg, bs_size_2d;
- DRM_DEBUG("w: %u, h: %u\n", vm.hactive, vm.vactive);
+ DRM_DEV_DEBUG(mic->dev, "w: %u, h: %u\n", vm.hactive, vm.vactive);
bs_size_2d = ((vm.hactive >> 2) << 1) + (vm.vactive % 4);
reg = MIC_BS_SIZE_2D(bs_size_2d);
writel(reg, mic->reg + MIC_2D_OUTPUT_TIMING_2);
@@ -274,7 +276,7 @@ static void mic_pre_enable(struct drm_bridge *bridge)
ret = mic_sw_reset(mic);
if (ret) {
- DRM_ERROR("Failed to reset\n");
+ DRM_DEV_ERROR(mic->dev, "Failed to reset\n");
goto turn_off;
}
@@ -354,8 +356,8 @@ static int exynos_mic_resume(struct device *dev)
for (i = 0; i < NUM_CLKS; i++) {
ret = clk_prepare_enable(mic->clks[i]);
if (ret < 0) {
- DRM_ERROR("Failed to enable clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "Failed to enable clock (%s)\n",
+ clk_names[i]);
while (--i > -1)
clk_disable_unprepare(mic->clks[i]);
return ret;
@@ -380,7 +382,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic = devm_kzalloc(dev, sizeof(*mic), GFP_KERNEL);
if (!mic) {
- DRM_ERROR("mic: Failed to allocate memory for MIC object\n");
+ DRM_DEV_ERROR(dev,
+ "mic: Failed to allocate memory for MIC object\n");
ret = -ENOMEM;
goto err;
}
@@ -389,12 +392,12 @@ static int exynos_mic_probe(struct platform_device *pdev)
ret = of_address_to_resource(dev->of_node, 0, &res);
if (ret) {
- DRM_ERROR("mic: Failed to get mem region for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get mem region for MIC\n");
goto err;
}
mic->reg = devm_ioremap(dev, res.start, resource_size(&res));
if (!mic->reg) {
- DRM_ERROR("mic: Failed to remap for MIC\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to remap for MIC\n");
ret = -ENOMEM;
goto err;
}
@@ -402,7 +405,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
mic->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,disp-syscon");
if (IS_ERR(mic->sysreg)) {
- DRM_ERROR("mic: Failed to get system register.\n");
+ DRM_DEV_ERROR(dev, "mic: Failed to get system register.\n");
ret = PTR_ERR(mic->sysreg);
goto err;
}
@@ -410,8 +413,8 @@ static int exynos_mic_probe(struct platform_device *pdev)
for (i = 0; i < NUM_CLKS; i++) {
mic->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(mic->clks[i])) {
- DRM_ERROR("mic: Failed to get clock (%s)\n",
- clk_names[i]);
+ DRM_DEV_ERROR(dev, "mic: Failed to get clock (%s)\n",
+ clk_names[i]);
ret = PTR_ERR(mic->clks[i]);
goto err;
}
@@ -430,7 +433,7 @@ static int exynos_mic_probe(struct platform_device *pdev)
if (ret)
goto err_pm;
- DRM_DEBUG_KMS("MIC has been probed\n");
+ DRM_DEV_DEBUG_KMS(dev, "MIC has been probed\n");
return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index df0508e0e49e..e18babb25170 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -119,9 +119,10 @@ static void exynos_plane_mode_set(struct exynos_drm_plane_state *exynos_state)
exynos_state->crtc.w = actual_w;
exynos_state->crtc.h = actual_h;
- DRM_DEBUG_KMS("plane : offset_x/y(%d,%d), width/height(%d,%d)",
- exynos_state->crtc.x, exynos_state->crtc.y,
- exynos_state->crtc.w, exynos_state->crtc.h);
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev,
+ "plane : offset_x/y(%d,%d), width/height(%d,%d)",
+ exynos_state->crtc.x, exynos_state->crtc.y,
+ exynos_state->crtc.w, exynos_state->crtc.h);
}
static void exynos_drm_plane_reset(struct drm_plane *plane)
@@ -181,6 +182,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
struct drm_framebuffer *fb = state->base.fb;
+ struct drm_device *dev = fb->dev;
switch (fb->modifier) {
case DRM_FORMAT_MOD_SAMSUNG_64_32_TILE:
@@ -192,7 +194,7 @@ exynos_drm_plane_check_format(const struct exynos_drm_plane_config *config,
break;
default:
- DRM_ERROR("unsupported pixel format modifier");
+ DRM_DEV_ERROR(dev->dev, "unsupported pixel format modifier");
return -ENOTSUPP;
}
@@ -203,6 +205,7 @@ static int
exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
struct exynos_drm_plane_state *state)
{
+ struct drm_crtc *crtc = state->base.crtc;
bool width_ok = false, height_ok = false;
if (config->capabilities & EXYNOS_DRM_PLANE_CAP_SCALE)
@@ -225,7 +228,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
if (width_ok && height_ok)
return 0;
- DRM_DEBUG_KMS("scaling mode is not supported");
+ DRM_DEV_DEBUG_KMS(crtc->dev->dev, "scaling mode is not supported");
return -ENOTSUPP;
}
@@ -310,7 +313,7 @@ int exynos_plane_init(struct drm_device *dev,
config->num_pixel_formats,
NULL, config->type, NULL);
if (err) {
- DRM_ERROR("failed to initialize plane\n");
+ DRM_DEV_ERROR(dev->dev, "failed to initialize plane\n");
return err;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 05abfed6f7f8..b6586fa95ad1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -243,9 +243,10 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &rot->ipp;
rot->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
rot->formats, rot->num_formats, "rotator");
@@ -258,10 +259,9 @@ static void rotator_unbind(struct device *dev, struct device *master,
void *data)
{
struct rot_context *rot = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &rot->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index ed1dd1aec902..f1cbdd1e6e3c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -451,9 +451,10 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &scaler->ipp;
scaler->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev);
- exynos_drm_ipp_register(drm_dev, ipp, &ipp_funcs,
+ exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
DRM_EXYNOS_IPP_CAP_SCALE | DRM_EXYNOS_IPP_CAP_CONVERT,
scaler->scaler_data->formats,
@@ -468,10 +469,9 @@ static void scaler_unbind(struct device *dev, struct device *master,
void *data)
{
struct scaler_context *scaler = dev_get_drvdata(dev);
- struct drm_device *drm_dev = data;
struct exynos_drm_ipp *ipp = &scaler->ipp;
- exynos_drm_ipp_unregister(drm_dev, ipp);
+ exynos_drm_ipp_unregister(dev, ipp);
exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 29f4c1932aed..44bcb2d60bb2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -40,8 +40,8 @@
struct vidi_context {
struct drm_encoder encoder;
- struct platform_device *pdev;
struct drm_device *drm_dev;
+ struct device *dev;
struct exynos_drm_crtc *crtc;
struct drm_connector connector;
struct exynos_drm_plane planes[WINDOWS_NR];
@@ -123,7 +123,7 @@ static void vidi_update_plane(struct exynos_drm_crtc *crtc,
return;
addr = exynos_drm_fb_dma_addr(state->fb, 0);
- DRM_DEBUG_KMS("dma_addr = %pad\n", &addr);
+ DRM_DEV_DEBUG_KMS(ctx->dev, "dma_addr = %pad\n", &addr);
}
static void vidi_enable(struct exynos_drm_crtc *crtc)
@@ -205,11 +205,11 @@ static ssize_t vidi_store_connection(struct device *dev,
/* if raw_edid isn't same as fake data then it can't be tested. */
if (ctx->raw_edid != (struct edid *)fake_edid_info) {
- DRM_DEBUG_KMS("edid data is not fake data.\n");
+ DRM_DEV_DEBUG_KMS(dev, "edid data is not fake data.\n");
return -EINVAL;
}
- DRM_DEBUG_KMS("requested connection.\n");
+ DRM_DEV_DEBUG_KMS(dev, "requested connection.\n");
drm_helper_hpd_irq_event(ctx->drm_dev);
@@ -226,17 +226,20 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
struct drm_exynos_vidi_connection *vidi = data;
if (!vidi) {
- DRM_DEBUG_KMS("user data for vidi is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "user data for vidi is null.\n");
return -EINVAL;
}
if (vidi->connection > 1) {
- DRM_DEBUG_KMS("connection should be 0 or 1.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "connection should be 0 or 1.\n");
return -EINVAL;
}
if (ctx->connected == vidi->connection) {
- DRM_DEBUG_KMS("same connection request.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "same connection request.\n");
return -EINVAL;
}
@@ -245,12 +248,14 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
raw_edid = (struct edid *)(unsigned long)vidi->edid;
if (!drm_edid_is_valid(raw_edid)) {
- DRM_DEBUG_KMS("edid data is invalid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "edid data is invalid.\n");
return -EINVAL;
}
ctx->raw_edid = drm_edid_duplicate(raw_edid);
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("failed to allocate raw_edid.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev,
+ "failed to allocate raw_edid.\n");
return -ENOMEM;
}
} else {
@@ -308,14 +313,14 @@ static int vidi_get_modes(struct drm_connector *connector)
* to ctx->raw_edid through specific ioctl.
*/
if (!ctx->raw_edid) {
- DRM_DEBUG_KMS("raw_edid is null.\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
return -EFAULT;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
- DRM_DEBUG_KMS("failed to allocate edid\n");
+ DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
return -ENOMEM;
}
@@ -339,7 +344,8 @@ static int vidi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(ctx->drm_dev, connector,
&vidi_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -402,7 +408,7 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base,
EXYNOS_DISPLAY_TYPE_VIDI, &vidi_crtc_ops, ctx);
if (IS_ERR(ctx->crtc)) {
- DRM_ERROR("failed to create crtc.\n");
+ DRM_DEV_ERROR(dev, "failed to create crtc.\n");
return PTR_ERR(ctx->crtc);
}
@@ -417,7 +423,8 @@ static int vidi_bind(struct device *dev, struct device *master, void *data)
ret = vidi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -441,13 +448,14 @@ static const struct component_ops vidi_component_ops = {
static int vidi_probe(struct platform_device *pdev)
{
struct vidi_context *ctx;
+ struct device *dev = &pdev->dev;
int ret;
- ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->pdev = pdev;
+ ctx->dev = dev;
timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
@@ -455,20 +463,21 @@ static int vidi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- ret = device_create_file(&pdev->dev, &dev_attr_connection);
+ ret = device_create_file(dev, &dev_attr_connection);
if (ret < 0) {
- DRM_ERROR("failed to create connection sysfs.\n");
+ DRM_DEV_ERROR(dev,
+ "failed to create connection sysfs.\n");
return ret;
}
- ret = component_add(&pdev->dev, &vidi_component_ops);
+ ret = component_add(dev, &vidi_component_ops);
if (ret)
goto err_remove_file;
return ret;
err_remove_file:
- device_remove_file(&pdev->dev, &dev_attr_connection);
+ device_remove_file(dev, &dev_attr_connection);
return ret;
}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 8e2c02fc66e8..19c252f659dd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -885,9 +885,9 @@ static int hdmi_get_modes(struct drm_connector *connector)
return -ENODEV;
hdata->dvi_mode = !drm_detect_hdmi_monitor(edid);
- DRM_DEBUG_KMS("%s : width[%d] x height[%d]\n",
- (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
- edid->width_cm, edid->height_cm);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
+ (hdata->dvi_mode ? "dvi monitor" : "hdmi monitor"),
+ edid->width_cm, edid->height_cm);
drm_connector_update_edid_property(connector, edid);
cec_notifier_set_phys_addr_from_edid(hdata->notifier, edid);
@@ -908,7 +908,8 @@ static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
if (confs->data[i].pixel_clock == pixel_clock)
return i;
- DRM_DEBUG_KMS("Could not find phy config for %d\n", pixel_clock);
+ DRM_DEV_DEBUG_KMS(hdata->dev, "Could not find phy config for %d\n",
+ pixel_clock);
return -EINVAL;
}
@@ -918,10 +919,11 @@ static int hdmi_mode_valid(struct drm_connector *connector,
struct hdmi_context *hdata = connector_to_hdmi(connector);
int ret;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
- mode->hdisplay, mode->vdisplay, mode->vrefresh,
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
- false, mode->clock * 1000);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
+ mode->hdisplay, mode->vdisplay, mode->vrefresh,
+ (mode->flags & DRM_MODE_FLAG_INTERLACE) ? true :
+ false, mode->clock * 1000);
ret = hdmi_find_phy_conf(hdata, mode->clock * 1000);
if (ret < 0)
@@ -947,7 +949,8 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
ret = drm_connector_init(hdata->drm_dev, connector,
&hdmi_connector_funcs, DRM_MODE_CONNECTOR_HDMIA);
if (ret) {
- DRM_ERROR("Failed to initialize connector with drm\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to initialize connector with drm\n");
return ret;
}
@@ -957,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
if (hdata->bridge) {
ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
if (ret)
- DRM_ERROR("Failed to attach bridge\n");
+ DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
return ret;
@@ -1002,8 +1005,10 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
DRM_INFO("desired mode doesn't exist so\n");
DRM_INFO("use the most suitable mode among modes.\n");
- DRM_DEBUG_KMS("Adjusted Mode: [%d]x[%d] [%d]Hz\n",
- m->hdisplay, m->vdisplay, m->vrefresh);
+ DRM_DEV_DEBUG_KMS(dev->dev,
+ "Adjusted Mode: [%d]x[%d] [%d]Hz\n",
+ m->hdisplay, m->vdisplay,
+ m->vrefresh);
drm_mode_copy(adjusted_mode, m);
break;
@@ -1169,13 +1174,15 @@ static void hdmiphy_wait_for_pll(struct hdmi_context *hdata)
u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
if (val & HDMI_PHY_STATUS_READY) {
- DRM_DEBUG_KMS("PLL stabilized after %d tries\n", tries);
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "PLL stabilized after %d tries\n",
+ tries);
return;
}
usleep_range(10, 20);
}
- DRM_ERROR("PLL could not reach steady state\n");
+ DRM_DEV_ERROR(hdata->dev, "PLL could not reach steady state\n");
}
static void hdmi_v13_mode_apply(struct hdmi_context *hdata)
@@ -1411,7 +1418,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
ret = hdmi_find_phy_conf(hdata, m->clock * 1000);
if (ret < 0) {
- DRM_ERROR("failed to find hdmiphy conf\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to find hdmiphy conf\n");
return;
}
phy_conf = hdata->drv_data->phy_confs.data[ret].conf;
@@ -1423,7 +1430,7 @@ static void hdmiphy_conf_apply(struct hdmi_context *hdata)
hdmiphy_enable_mode_set(hdata, true);
ret = hdmiphy_reg_write_buf(hdata, 0, phy_conf, 32);
if (ret) {
- DRM_ERROR("failed to configure hdmiphy\n");
+ DRM_DEV_ERROR(hdata->dev, "failed to configure hdmiphy\n");
return;
}
hdmiphy_enable_mode_set(hdata, false);
@@ -1460,7 +1467,8 @@ static void hdmiphy_enable(struct hdmi_context *hdata)
pm_runtime_get_sync(hdata->dev);
if (regulator_bulk_enable(ARRAY_SIZE(supply), hdata->regul_bulk))
- DRM_DEBUG_KMS("failed to enable regulator bulk\n");
+ DRM_DEV_DEBUG_KMS(hdata->dev,
+ "failed to enable regulator bulk\n");
regmap_update_bits(hdata->pmureg, PMU_HDMI_PHY_CONTROL,
PMU_HDMI_PHY_ENABLE_BIT, 1);
@@ -1734,7 +1742,7 @@ static int hdmi_bridge_init(struct hdmi_context *hdata)
np = of_graph_get_remote_port_parent(ep);
of_node_put(ep);
if (!np) {
- DRM_ERROR("failed to get remote port parent");
+ DRM_DEV_ERROR(dev, "failed to get remote port parent");
return -EINVAL;
}
@@ -1752,17 +1760,17 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
struct device *dev = hdata->dev;
int i, ret;
- DRM_DEBUG_KMS("HDMI resource init\n");
+ DRM_DEV_DEBUG_KMS(dev, "HDMI resource init\n");
hdata->hpd_gpio = devm_gpiod_get(dev, "hpd", GPIOD_IN);
if (IS_ERR(hdata->hpd_gpio)) {
- DRM_ERROR("cannot get hpd gpio property\n");
+ DRM_DEV_ERROR(dev, "cannot get hpd gpio property\n");
return PTR_ERR(hdata->hpd_gpio);
}
hdata->irq = gpiod_to_irq(hdata->hpd_gpio);
if (hdata->irq < 0) {
- DRM_ERROR("failed to get GPIO irq\n");
+ DRM_DEV_ERROR(dev, "failed to get GPIO irq\n");
return hdata->irq;
}
@@ -1780,7 +1788,7 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(supply), hdata->regul_bulk);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("failed to get regulators\n");
+ DRM_DEV_ERROR(dev, "failed to get regulators\n");
return ret;
}
@@ -1792,7 +1800,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
ret = regulator_enable(hdata->reg_hdmi_en);
if (ret) {
- DRM_ERROR("failed to enable hdmi-en regulator\n");
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
return ret;
}
}
@@ -1845,7 +1854,8 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
ret = hdmi_create_connector(encoder);
if (ret) {
- DRM_ERROR("failed to create connector ret = %d\n", ret);
+ DRM_DEV_ERROR(dev, "failed to create connector ret = %d\n",
+ ret);
drm_encoder_cleanup(encoder);
return ret;
}
@@ -1875,7 +1885,8 @@ static int hdmi_get_ddc_adapter(struct hdmi_context *hdata)
np = of_parse_phandle(hdata->dev->of_node, "ddc", 0);
if (!np) {
- DRM_ERROR("Failed to find ddc node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find ddc node in device tree\n");
return -ENODEV;
}
@@ -1902,7 +1913,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (!np) {
np = of_parse_phandle(hdata->dev->of_node, "phy", 0);
if (!np) {
- DRM_ERROR("Failed to find hdmiphy node in device tree\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "Failed to find hdmiphy node in device tree\n");
return -ENODEV;
}
}
@@ -1910,7 +1922,8 @@ static int hdmi_get_phy_io(struct hdmi_context *hdata)
if (hdata->drv_data->is_apb_phy) {
hdata->regs_hdmiphy = of_iomap(np, 0);
if (!hdata->regs_hdmiphy) {
- DRM_ERROR("failed to ioremap hdmi phy\n");
+ DRM_DEV_ERROR(hdata->dev,
+ "failed to ioremap hdmi phy\n");
ret = -ENOMEM;
goto out;
}
@@ -1951,7 +1964,7 @@ static int hdmi_probe(struct platform_device *pdev)
ret = hdmi_resources_init(hdata);
if (ret) {
if (ret != -EPROBE_DEFER)
- DRM_ERROR("hdmi_resources_init failed\n");
+ DRM_DEV_ERROR(dev, "hdmi_resources_init failed\n");
return ret;
}
@@ -1977,14 +1990,14 @@ static int hdmi_probe(struct platform_device *pdev)
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"hdmi", hdata);
if (ret) {
- DRM_ERROR("failed to register hdmi interrupt\n");
+ DRM_DEV_ERROR(dev, "failed to register hdmi interrupt\n");
goto err_hdmiphy;
}
hdata->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
if (IS_ERR(hdata->pmureg)) {
- DRM_ERROR("syscon regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "syscon regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
@@ -1993,7 +2006,7 @@ static int hdmi_probe(struct platform_device *pdev)
hdata->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,sysreg-phandle");
if (IS_ERR(hdata->sysreg)) {
- DRM_ERROR("sysreg regmap lookup failed.\n");
+ DRM_DEV_ERROR(dev, "sysreg regmap lookup failed.\n");
ret = -EPROBE_DEFER;
goto err_hdmiphy;
}
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index f35e4ab55b27..b8415e53964d 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -228,8 +228,8 @@ static void mixer_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32)readl(ctx->mixer_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_regs + reg_id)); \
} while (0)
DUMPREG(MXR_STATUS);
@@ -260,8 +260,8 @@ static void vp_regs_dump(struct mixer_context *ctx)
{
#define DUMPREG(reg_id) \
do { \
- DRM_DEBUG_KMS(#reg_id " = %08x\n", \
- (u32) readl(ctx->vp_regs + reg_id)); \
+ DRM_DEV_DEBUG_KMS(ctx->dev, #reg_id " = %08x\n", \
+ (u32) readl(ctx->vp_regs + reg_id)); \
} while (0)
DUMPREG(VP_ENABLE);
@@ -885,7 +885,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire resources: regs, irqs, clocks */
ret = mixer_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("mixer_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "mixer_resources_init failed ret=%d\n", ret);
return ret;
}
@@ -893,7 +894,8 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
/* acquire vp resources: regs, irqs, clocks */
ret = vp_resources_init(mixer_ctx);
if (ret) {
- DRM_ERROR("vp_resources_init failed ret=%d\n", ret);
+ DRM_DEV_ERROR(mixer_ctx->dev,
+ "vp_resources_init failed ret=%d\n", ret);
return ret;
}
}
@@ -952,7 +954,7 @@ static void mixer_update_plane(struct exynos_drm_crtc *crtc,
{
struct mixer_context *mixer_ctx = crtc->ctx;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -969,7 +971,7 @@ static void mixer_disable_plane(struct exynos_drm_crtc *crtc,
struct mixer_context *mixer_ctx = crtc->ctx;
unsigned long flags;
- DRM_DEBUG_KMS("win: %d\n", plane->index);
+ DRM_DEV_DEBUG_KMS(mixer_ctx->dev, "win: %d\n", plane->index);
if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
return;
@@ -1046,8 +1048,9 @@ static int mixer_mode_valid(struct exynos_drm_crtc *crtc,
struct mixer_context *ctx = crtc->ctx;
u32 w = mode->hdisplay, h = mode->vdisplay;
- DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%d\n", w, h,
- mode->vrefresh, !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
+ DRM_DEV_DEBUG_KMS(ctx->dev, "xres=%d, yres=%d, refresh=%d, intl=%d\n",
+ w, h, mode->vrefresh,
+ !!(mode->flags & DRM_MODE_FLAG_INTERLACE));
if (ctx->mxr_ver == MXR_VER_128_0_0_184)
return MODE_OK;
@@ -1227,7 +1230,7 @@ static int mixer_probe(struct platform_device *pdev)
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
+ DRM_DEV_ERROR(dev, "failed to alloc mixer context.\n");
return -ENOMEM;
}
@@ -1282,27 +1285,33 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
ret = clk_prepare_enable(ctx->mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the mixer clk [%d]\n", ret);
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to prepare_enable the mixer clk [%d]\n",
+ ret);
return ret;
}
ret = clk_prepare_enable(ctx->hdmi);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the hdmi clk [%d]\n", ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the hdmi clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) {
ret = clk_prepare_enable(ctx->vp);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the vp clk [%d]\n",
- ret);
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the vp clk [%d]\n",
+ ret);
return ret;
}
if (test_bit(MXR_BIT_HAS_SCLK, &ctx->flags)) {
ret = clk_prepare_enable(ctx->sclk_mixer);
if (ret < 0) {
- DRM_ERROR("Failed to prepare_enable the " \
+ DRM_DEV_ERROR(dev,
+ "Failed to prepare_enable the " \
"sclk_mixer clk [%d]\n",
- ret);
+ ret);
return ret;
}
}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index bf256971063d..83c841b50272 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -94,7 +94,7 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
drm_display_mode_to_videomode(mode, &vm);
/* INV_PXCK as default (most display sample data on rising edge) */
- if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE))
+ if (!(con->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE))
pol |= DCU_SYN_POL_INV_PXCK;
if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index c934b3df1f81..a9d3a4a30ab8 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -389,7 +389,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
ret = PTR_ERR(info);
goto out;
}
- info->par = fbdev;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
@@ -402,9 +401,6 @@ static int psbfb_create(struct psb_fbdev *fbdev,
fbdev->psb_fb_helper.fb = fb;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- strcpy(info->fix.id, "psbdrmfb");
-
if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
info->fbops = &psbfb_ops;
else if (gtt_roll) { /* GTT rolling seems best */
@@ -427,8 +423,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
- sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbdev->psb_fb_helper, sizes);
info->fix.mmio_start = pci_resource_start(dev->pdev, 0);
info->fix.mmio_len = pci_resource_len(dev->pdev, 0);
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 23dc3c5f8f0d..e8e6357f033b 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -34,7 +34,7 @@ struct psb_framebuffer {
};
struct psb_fbdev {
- struct drm_fb_helper psb_fb_helper;
+ struct drm_fb_helper psb_fb_helper; /* must be first */
struct psb_framebuffer pfb;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
index 3c168ae77b0c..0a381c22de26 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h
@@ -31,7 +31,7 @@ struct hibmc_framebuffer {
};
struct hibmc_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct hibmc_framebuffer *fb;
int size;
};
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
index de9d7cc97e44..8026859aa07d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
@@ -116,8 +116,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
goto out_release_fbi;
}
- info->par = hi_fbdev;
-
hi_fbdev->fb = hibmc_framebuffer_init(priv->dev, &mode_cmd, gobj);
if (IS_ERR(hi_fbdev->fb)) {
ret = PTR_ERR(hi_fbdev->fb);
@@ -129,14 +127,9 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
priv->fbdev->size = size;
hi_fbdev->helper.fb = &hi_fbdev->fb->fb;
- strcpy(info->fix.id, "hibmcdrmfb");
-
info->fbops = &hibmc_drm_fb_ops;
- drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0],
- hi_fbdev->fb->fb.format->depth);
- drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &priv->fbdev->helper, sizes);
info->screen_base = bo->kmap.virtual;
info->screen_size = size;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index d2cf7317930a..8c2f9b9cafb3 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -33,17 +33,10 @@ static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *con
return MODE_OK;
}
-static struct drm_encoder *
-hibmc_connector_best_encoder(struct drm_connector *connector)
-{
- return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]);
-}
-
static const struct drm_connector_helper_funcs
hibmc_connector_helper_funcs = {
.get_modes = hibmc_connector_get_modes,
.mode_valid = hibmc_connector_mode_valid,
- .best_encoder = hibmc_connector_best_encoder,
};
static const struct drm_connector_funcs hibmc_connector_funcs = {
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index dd383267884c..6093c421daff 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -21,8 +21,6 @@
#include "hibmc_drm_drv.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static inline struct hibmc_drm_private *
hibmc_bdev(struct ttm_bo_device *bd)
{
@@ -191,7 +189,6 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc)
ret = ttm_bo_device_init(&hibmc->bdev,
&hibmc_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("error initializing bo driver: %d\n", ret);
@@ -322,14 +319,9 @@ int hibmc_bo_unpin(struct hibmc_bo *bo)
int hibmc_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct hibmc_drm_private *hibmc;
-
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ struct drm_file *file_priv = filp->private_data;
+ struct hibmc_drm_private *hibmc = file_priv->minor->dev->dev_private;
- file_priv = filp->private_data;
- hibmc = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &hibmc->bdev);
}
diff --git a/drivers/gpu/drm/i915/.gitignore b/drivers/gpu/drm/i915/.gitignore
new file mode 100644
index 000000000000..cff45d81f42f
--- /dev/null
+++ b/drivers/gpu/drm/i915/.gitignore
@@ -0,0 +1 @@
+header_test_*.c
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 1787e1299b1b..fbcb0904f4a8 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -32,10 +32,13 @@ CFLAGS_intel_fbdev.o = $(call cc-disable-warning, override-init)
subdir-ccflags-y += \
$(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
+# Extra header tests
+include $(src)/Makefile.header-test
+
# Please keep these build lists sorted!
# core driver code
-i915-y := i915_drv.o \
+i915-y += i915_drv.o \
i915_irq.o \
i915_memcpy.o \
i915_mm.o \
@@ -46,6 +49,7 @@ i915-y := i915_drv.o \
i915_sw_fence.o \
i915_syncmap.o \
i915_sysfs.o \
+ i915_user_extensions.o \
intel_csr.o \
intel_device_info.o \
intel_pm.o \
@@ -77,6 +81,7 @@ i915-y += \
i915_gem_tiling.o \
i915_gem_userptr.o \
i915_gemfs.o \
+ i915_globals.o \
i915_query.o \
i915_request.o \
i915_scheduler.o \
@@ -84,6 +89,7 @@ i915-y += \
i915_trace_points.o \
i915_vma.o \
intel_breadcrumbs.o \
+ intel_context.o \
intel_engine_cs.o \
intel_hangcheck.o \
intel_lrc.o \
diff --git a/drivers/gpu/drm/i915/Makefile.header-test b/drivers/gpu/drm/i915/Makefile.header-test
new file mode 100644
index 000000000000..c1c391816fa7
--- /dev/null
+++ b/drivers/gpu/drm/i915/Makefile.header-test
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: MIT
+# Copyright © 2019 Intel Corporation
+
+# Test the headers are compilable as standalone units
+header_test := \
+ i915_active_types.h \
+ i915_gem_context_types.h \
+ i915_priolist_types.h \
+ i915_scheduler_types.h \
+ i915_timeline_types.h \
+ intel_atomic_plane.h \
+ intel_audio.h \
+ intel_cdclk.h \
+ intel_color.h \
+ intel_connector.h \
+ intel_context_types.h \
+ intel_crt.h \
+ intel_csr.h \
+ intel_ddi.h \
+ intel_dp.h \
+ intel_dvo.h \
+ intel_engine_types.h \
+ intel_fbc.h \
+ intel_fbdev.h \
+ intel_frontbuffer.h \
+ intel_hdcp.h \
+ intel_hdmi.h \
+ intel_lspcon.h \
+ intel_lvds.h \
+ intel_panel.h \
+ intel_pipe_crc.h \
+ intel_pm.h \
+ intel_psr.h \
+ intel_sdvo.h \
+ intel_sprite.h \
+ intel_tv.h \
+ intel_workarounds_types.h
+
+quiet_cmd_header_test = HDRTEST $@
+ cmd_header_test = echo "\#include \"$(<F)\"" > $@
+
+header_test_%.c: %.h
+ $(call cmd,header_test)
+
+i915-$(CONFIG_DRM_I915_WERROR) += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.o,$(h)))
+
+clean-files += $(foreach h,$(header_test),$(patsubst %.h,header_test_%.c,$(h)))
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
index 271fb46d4dd0..ea8324abc784 100644
--- a/drivers/gpu/drm/i915/gvt/Makefile
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -5,5 +5,5 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
fb_decoder.o dmabuf.o page_track.o
-ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
+ccflags-y += -I $(srctree)/$(src) -I $(srctree)/$(src)/$(GVT_DIR)/
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 3592d04c33b2..ab002cfd3cab 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -391,12 +391,12 @@ struct cmd_info {
#define F_POST_HANDLE (1<<2)
u32 flag;
-#define R_RCS (1 << RCS)
-#define R_VCS1 (1 << VCS)
-#define R_VCS2 (1 << VCS2)
+#define R_RCS BIT(RCS0)
+#define R_VCS1 BIT(VCS0)
+#define R_VCS2 BIT(VCS1)
#define R_VCS (R_VCS1 | R_VCS2)
-#define R_BCS (1 << BCS)
-#define R_VECS (1 << VECS)
+#define R_BCS BIT(BCS0)
+#define R_VECS BIT(VECS0)
#define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
/* rings that support this cmd: BLT/RCS/VCS/VECS */
u16 rings;
@@ -558,7 +558,7 @@ static const struct decode_info decode_info_vebox = {
};
static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
- [RCS] = {
+ [RCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -569,7 +569,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS] = {
+ [VCS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -580,7 +580,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [BCS] = {
+ [BCS0] = {
&decode_info_mi,
NULL,
&decode_info_2d,
@@ -591,7 +591,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VECS] = {
+ [VECS0] = {
&decode_info_mi,
NULL,
NULL,
@@ -602,7 +602,7 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
NULL,
},
- [VCS2] = {
+ [VCS1] = {
&decode_info_mi,
NULL,
NULL,
@@ -631,8 +631,7 @@ static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if ((opcode == e->info->opcode) &&
- (e->info->rings & (1 << ring_id)))
+ if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
return e->info;
}
return NULL;
@@ -943,15 +942,12 @@ static int cmd_handler_lri(struct parser_exec_state *s)
struct intel_gvt *gvt = s->vgpu->gvt;
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) &&
- (s->ring_id != RCS)) {
- if (s->ring_id == BCS &&
- cmd_reg(s, i) ==
- i915_mmio_reg_offset(DERRMR))
+ if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
+ if (s->ring_id == BCS0 &&
+ cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
- ret |= (cmd_reg_inhibit(s, i)) ?
- -EBADRQC : 0;
+ ret |= cmd_reg_inhibit(s, i) ? -EBADRQC : 0;
}
if (ret)
break;
@@ -1047,27 +1043,27 @@ struct cmd_interrupt_event {
};
static struct cmd_interrupt_event cmd_interrupt_events[] = {
- [RCS] = {
+ [RCS0] = {
.pipe_control_notify = RCS_PIPE_CONTROL,
.mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
.mi_user_interrupt = RCS_MI_USER_INTERRUPT,
},
- [BCS] = {
+ [BCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = BCS_MI_FLUSH_DW,
.mi_user_interrupt = BCS_MI_USER_INTERRUPT,
},
- [VCS] = {
+ [VCS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS_MI_FLUSH_DW,
.mi_user_interrupt = VCS_MI_USER_INTERRUPT,
},
- [VCS2] = {
+ [VCS1] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VCS2_MI_FLUSH_DW,
.mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
},
- [VECS] = {
+ [VECS0] = {
.pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
.mi_flush_dw = VECS_MI_FLUSH_DW,
.mi_user_interrupt = VECS_MI_USER_INTERRUPT,
@@ -1081,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
bool index_mode = false;
unsigned int post_sync;
int ret = 0;
+ u32 hws_pga, val;
post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1104,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
index_mode = true;
ret |= cmd_address_audit(s, gma, sizeof(u64),
index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 2), gma);
+ val = cmd_val(s, 1) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 1), val);
+ }
}
}
}
@@ -1321,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
info->tile_val << 10);
}
- vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
- intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ if (info->plane == PLANE_PRIMARY)
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+
+ if (info->async_flip)
+ intel_vgpu_trigger_virtual_event(vgpu, info->event);
+ else
+ set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);
+
return 0;
}
@@ -1567,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
unsigned long gma;
bool index_mode = false;
int ret = 0;
+ u32 hws_pga, val;
/* Check post-sync and ppgtt bit */
if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1577,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (cmd_val(s, 0) & (1 << 21))
index_mode = true;
ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+ if (ret)
+ return ret;
+ if (index_mode) {
+ hws_pga = s->vgpu->hws_pga[s->ring_id];
+ gma = hws_pga + gma;
+ patch_value(s, cmd_ptr(s, 1), gma);
+ val = cmd_val(s, 0) & (~(1 << 21));
+ patch_value(s, cmd_ptr(s, 0), val);
+ }
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
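For reference, the two hunks above add the same post-audit fixup to both the PIPE_CONTROL and MI_FLUSH_DW handlers: when the post-sync write uses HWSP index mode (bit 21 set), the guest-supplied address is an offset into the hardware status page, so the parser rebases it onto the vGPU's hws_pga and clears the index bit before the command reaches real hardware. Below is a minimal userspace sketch of that rewrite; fake_cmd, HWS_INDEX_BIT and rewrite_index_mode are illustrative stand-ins, the real code patches the shadow ring buffer through patch_value().

/* Minimal sketch of the index-mode rewrite added above; names are
 * illustrative, the real parser patches the shadow command buffer. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HWS_INDEX_BIT	(1u << 21)	/* "address is an HWSP index" flag */

struct fake_cmd { uint32_t dw0; uint64_t gma; };

/* Rebase an HWSP-relative offset onto the vGPU's status page and drop
 * the index flag so hardware sees a plain graphics memory address. */
static void rewrite_index_mode(struct fake_cmd *c, uint64_t hws_pga)
{
	if (c->dw0 & HWS_INDEX_BIT) {
		c->gma += hws_pga;
		c->dw0 &= ~HWS_INDEX_BIT;
	}
}

int main(void)
{
	struct fake_cmd c = { .dw0 = HWS_INDEX_BIT, .gma = 0x40 };

	rewrite_index_mode(&c, 0x100000);
	assert(c.gma == 0x100040 && !(c.dw0 & HWS_INDEX_BIT));
	printf("patched gma=0x%llx dw0=0x%x\n",
	       (unsigned long long)c.gma, c.dw0);
	return 0;
}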
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e3f9caa7839f..e1c313da6c00 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
if (!pipe_is_enabled(vgpu, pipe))
continue;
- vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
intel_vgpu_trigger_virtual_event(vgpu, event);
}
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 69a9a1b2ea4a..4e1e425189ba 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -153,7 +153,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_i915_gem_object *obj;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 70494e394d2c..f21b8fb5b37e 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -47,17 +47,16 @@
((a)->lrca == (b)->lrca))
static int context_switch_events[] = {
- [RCS] = RCS_AS_CONTEXT_SWITCH,
- [BCS] = BCS_AS_CONTEXT_SWITCH,
- [VCS] = VCS_AS_CONTEXT_SWITCH,
- [VCS2] = VCS2_AS_CONTEXT_SWITCH,
- [VECS] = VECS_AS_CONTEXT_SWITCH,
+ [RCS0] = RCS_AS_CONTEXT_SWITCH,
+ [BCS0] = BCS_AS_CONTEXT_SWITCH,
+ [VCS0] = VCS_AS_CONTEXT_SWITCH,
+ [VCS1] = VCS2_AS_CONTEXT_SWITCH,
+ [VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(int ring_id)
+static int ring_id_to_context_switch_event(unsigned int ring_id)
{
- if (WARN_ON(ring_id < RCS ||
- ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
return context_switch_events[ring_id];
@@ -411,7 +410,7 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id)))
+ if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
goto out;
if (!list_empty(workload_q_head(vgpu, ring_id))) {
@@ -527,12 +526,13 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw;
}
-static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
+static void clean_execlist(struct intel_vgpu *vgpu,
+ intel_engine_mask_t engine_mask)
{
- unsigned int tmp;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
kfree(s->ring_scan_buffer[engine->id]);
@@ -542,18 +542,18 @@ static void clean_execlist(struct intel_vgpu *vgpu, unsigned long engine_mask)
}
static void reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
for_each_engine_masked(engine, dev_priv, engine_mask, tmp)
init_vgpu_execlist(vgpu, engine->id);
}
static int init_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
reset_execlist(vgpu, engine_mask);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 714d709829a2..5ccc2c695848 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -180,6 +180,6 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif /*_GVT_EXECLIST_H_*/
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 9814773882ec..c2f7d20f6346 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -2504,6 +2504,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
list_del(&oos_page->list);
+ free_page((unsigned long)oos_page->mem);
kfree(oos_page);
}
}
@@ -2524,6 +2525,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
ret = -ENOMEM;
goto fail;
}
+ oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+ if (!oos_page->mem) {
+ ret = -ENOMEM;
+ kfree(oos_page);
+ goto fail;
+ }
INIT_LIST_HEAD(&oos_page->list);
INIT_LIST_HEAD(&oos_page->vm_list);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index edb610dc5d86..32c573aea494 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -222,7 +222,7 @@ struct intel_vgpu_oos_page {
struct list_head list;
struct list_head vm_list;
int id;
- unsigned char mem[I915_GTT_PAGE_SIZE];
+ void *mem;
};
#define GTT_ENTRY_NUM_IN_ONE_PAGE 512
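The gtt.c/gtt.h pair above turns oos_page->mem from an embedded I915_GTT_PAGE_SIZE array into a separately allocated page: setup_spt_oos() now grabs a page with __get_free_pages() and frees the containing struct if that fails, while clean_spt_oos() releases the page before kfree(). A userspace analogue of that allocate/unwind/teardown order, with malloc standing in for the kernel allocators and PAGE_SZ as a placeholder size:

/* Userspace analogue of the per-entry page allocation introduced above:
 * allocate the tracking struct, then its backing page; a failed page
 * allocation frees the struct before bailing, and teardown releases the
 * page before the struct. */
#include <stdlib.h>

#define PAGE_SZ 4096

struct oos_page { void *mem; };

static struct oos_page *oos_page_create(void)
{
	struct oos_page *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->mem = malloc(PAGE_SZ);
	if (!p->mem) {			/* unwind in reverse allocation order */
		free(p);
		return NULL;
	}
	return p;
}

static void oos_page_destroy(struct oos_page *p)
{
	if (!p)
		return;
	free(p->mem);			/* mirrors free_page() before kfree() */
	free(p);
}

int main(void)
{
	oos_page_destroy(oos_page_create());
	return 0;
}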
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8bce09de4b82..f5a328b5290a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
struct intel_vgpu_mmio {
void *vreg;
- void *sreg;
};
#define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)
-#define INTEL_GVT_MAX_PIPE 4
-
struct intel_vgpu_irq {
bool irq_warn_once[INTEL_GVT_EVENT_MAX];
- DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+ DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
INTEL_GVT_EVENT_MAX);
};
@@ -144,9 +141,9 @@ enum {
struct intel_vgpu_submission_ops {
const char *name;
- int (*init)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*clean)(struct intel_vgpu *vgpu, unsigned long engine_mask);
- void (*reset)(struct intel_vgpu *vgpu, unsigned long engine_mask);
+ int (*init)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*clean)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
+ void (*reset)(struct intel_vgpu *vgpu, intel_engine_mask_t engine_mask);
};
struct intel_vgpu_submission {
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
#define vgpu_vreg64(vgpu, offset) \
(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
- (*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
- (*(u32 *)(vgpu->mmio.sreg + (offset)))
#define for_each_active_vgpu(gvt, vgpu, id) \
idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -488,7 +481,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bc64b810e0d5..18f01eeb2510 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -311,7 +311,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int engine_mask = 0;
+ intel_engine_mask_t engine_mask = 0;
u32 data;
write_vreg(vgpu, offset, p_data, bytes);
@@ -323,25 +323,25 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- engine_mask |= (1 << RCS);
+ engine_mask |= BIT(RCS0);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- engine_mask |= (1 << VCS);
+ engine_mask |= BIT(VCS0);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- engine_mask |= (1 << BCS);
+ engine_mask |= BIT(BCS0);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- engine_mask |= (1 << VECS);
+ engine_mask |= BIT(VECS0);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- engine_mask |= (1 << VCS2);
+ engine_mask |= BIT(VCS1);
}
+ engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -750,18 +750,19 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- unsigned int index = DSPSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = DSPSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = PRIMARY_A_FLIP_DONE,
- [PIPE_B] = PRIMARY_B_FLIP_DONE,
- [PIPE_C] = PRIMARY_C_FLIP_DONE,
- };
+ u32 pipe = DSPSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+
+ if (vgpu_vreg_t(vgpu, DSPCNTR(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -771,18 +772,42 @@ static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int spr_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- unsigned int index = SPRSURF_TO_PIPE(offset);
- i915_reg_t surflive_reg = SPRSURFLIVE(index);
- int flip_event[] = {
- [PIPE_A] = SPRITE_A_FLIP_DONE,
- [PIPE_B] = SPRITE_B_FLIP_DONE,
- [PIPE_C] = SPRITE_C_FLIP_DONE,
- };
+ u32 pipe = SPRSURF_TO_PIPE(offset);
+ int event = SKL_FLIP_EVENT(pipe, PLANE_SPRITE0);
+
+ write_vreg(vgpu, offset, p_data, bytes);
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+
+ if (vgpu_vreg_t(vgpu, SPRCTL(pipe)) & PLANE_CTL_ASYNC_FLIP)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
+
+ return 0;
+}
+
+static int reg50080_mmio_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data,
+ unsigned int bytes)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ enum pipe pipe = REG_50080_TO_PIPE(offset);
+ enum plane_id plane = REG_50080_TO_PLANE(offset);
+ int event = SKL_FLIP_EVENT(pipe, plane);
write_vreg(vgpu, offset, p_data, bytes);
- vgpu_vreg_t(vgpu, surflive_reg) = vgpu_vreg(vgpu, offset);
+ if (plane == PLANE_PRIMARY) {
+ vgpu_vreg_t(vgpu, DSPSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+ } else {
+ vgpu_vreg_t(vgpu, SPRSURFLIVE(pipe)) = vgpu_vreg(vgpu, offset);
+ }
+
+ if ((vgpu_vreg(vgpu, offset) & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ else
+ set_bit(event, vgpu->irq.flip_done_event[pipe]);
- set_bit(flip_event[index], vgpu->irq.flip_done_event[index]);
return 0;
}
@@ -1704,7 +1729,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- ENGINE_MASK(ring_id),
+ BIT(ring_id),
INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1724,19 +1749,19 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
switch (offset) {
case 0x4260:
- id = RCS;
+ id = RCS0;
break;
case 0x4264:
- id = VCS;
+ id = VCS0;
break;
case 0x4268:
- id = VCS2;
+ id = VCS1;
break;
case 0x426c:
- id = BCS;
+ id = BCS0;
break;
case 0x4270:
- id = VECS;
+ id = VECS0;
break;
default:
return -EINVAL;
@@ -1793,7 +1818,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \
MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \
- if (HAS_BSD2(dev_priv)) \
+ if (HAS_ENGINE(dev_priv, VCS1)) \
MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \
} while (0)
@@ -1848,7 +1873,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL);
MMIO_GM_RDR(_MMIO(0x2148), D_ALL, NULL, NULL);
- MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID(RENDER_RING_BASE), D_ALL, NULL, NULL);
MMIO_GM_RDR(_MMIO(0x12198), D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
@@ -1969,6 +1994,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_A), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_A), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_B), D_ALL);
MMIO_D(DSPADDR(PIPE_B), D_ALL);
@@ -1978,6 +2005,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_B), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_B), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(DSPCNTR(PIPE_C), D_ALL);
MMIO_D(DSPADDR(PIPE_C), D_ALL);
@@ -1987,6 +2016,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DSPSURF(PIPE_C), D_ALL, NULL, pri_surf_mmio_write);
MMIO_D(DSPOFFSET(PIPE_C), D_ALL);
MMIO_D(DSPSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_PRIMARY), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_A), D_ALL);
MMIO_D(SPRLINOFF(PIPE_A), D_ALL);
@@ -2000,6 +2031,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_A), D_ALL);
MMIO_D(SPRSCALE(PIPE_A), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_A), D_ALL);
+ MMIO_DH(REG_50080(PIPE_A, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_B), D_ALL);
MMIO_D(SPRLINOFF(PIPE_B), D_ALL);
@@ -2013,6 +2046,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_B), D_ALL);
MMIO_D(SPRSCALE(PIPE_B), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_B), D_ALL);
+ MMIO_DH(REG_50080(PIPE_B, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(SPRCTL(PIPE_C), D_ALL);
MMIO_D(SPRLINOFF(PIPE_C), D_ALL);
@@ -2026,6 +2061,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(SPROFFSET(PIPE_C), D_ALL);
MMIO_D(SPRSCALE(PIPE_C), D_ALL);
MMIO_D(SPRSURFLIVE(PIPE_C), D_ALL);
+ MMIO_DH(REG_50080(PIPE_C, PLANE_SPRITE0), D_ALL, NULL,
+ reg50080_mmio_write);
MMIO_D(HTOTAL(TRANSCODER_A), D_ALL);
MMIO_D(HBLANK(TRANSCODER_A), D_ALL);
@@ -2827,26 +2864,26 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
- MMIO_D(_MMIO(0xa210), D_SKL_PLUS);
+ MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(_MMIO(0x4ddc), D_SKL_PLUS, NULL, NULL);
- MMIO_DH(_MMIO(0x42080), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x45504), D_SKL_PLUS);
- MMIO_D(_MMIO(0x45520), D_SKL_PLUS);
- MMIO_D(_MMIO(0x46000), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x46010), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_DH(_MMIO(0x46014), D_SKL_PLUS, NULL, skl_lcpll_write);
- MMIO_D(_MMIO(0x6C040), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C048), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C050), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C044), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C04C), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6C054), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c058), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c05c), D_SKL_PLUS);
- MMIO_DH(_MMIO(0x6c060), D_SKL_PLUS, dpll_status_read, NULL);
+ MMIO_DH(MMCD_MISC_CTRL, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(CHICKEN_PAR1_1, D_SKL_PLUS, NULL, NULL);
+ MMIO_D(DC_STATE_EN, D_SKL_PLUS);
+ MMIO_D(DC_STATE_DEBUG, D_SKL_PLUS);
+ MMIO_D(CDCLK_CTL, D_SKL_PLUS);
+ MMIO_DH(LCPLL1_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_DH(LCPLL2_CTL, D_SKL_PLUS, NULL, skl_lcpll_write);
+ MMIO_D(_MMIO(_DPLL1_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR1), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL1_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL2_CFGCR2), D_SKL_PLUS);
+ MMIO_D(_MMIO(_DPLL3_CFGCR2), D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL1, D_SKL_PLUS);
+ MMIO_D(DPLL_CTRL2, D_SKL_PLUS);
+ MMIO_DH(DPLL_STATUS, D_SKL_PLUS, dpll_status_read, NULL);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 0), D_SKL_PLUS, NULL, pf_write);
MMIO_DH(SKL_PS_WIN_POS(PIPE_A, 1), D_SKL_PLUS, NULL, pf_write);
@@ -2965,40 +3002,41 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 3)), D_SKL_PLUS, NULL, NULL);
MMIO_DH(_MMIO(_REG_701C4(PIPE_C, 4)), D_SKL_PLUS, NULL, NULL);
- MMIO_D(_MMIO(0x70380), D_SKL_PLUS);
- MMIO_D(_MMIO(0x71380), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_CTL_3_B), D_SKL_PLUS);
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
- MMIO_D(_MMIO(0x7039c), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f074), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f004), D_SKL_PLUS);
- MMIO_D(_MMIO(0x8f034), D_SKL_PLUS);
+ MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
+ MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
+ MMIO_D(CSR_LAST_WRITE, D_SKL_PLUS);
- MMIO_D(_MMIO(0xb11c), D_SKL_PLUS);
+ MMIO_D(BDW_SCRATCH1, D_SKL_PLUS);
- MMIO_D(_MMIO(0x51000), D_SKL_PLUS);
- MMIO_D(_MMIO(0x6c00c), D_SKL_PLUS);
+ MMIO_D(SKL_DFSM, D_SKL_PLUS);
+ MMIO_D(DISPIO_CR_TX_BMU_CR0, D_SKL_PLUS);
- MMIO_F(_MMIO(0xc800), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN9_GFX_MOCS(0), 0x7f8, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
- MMIO_F(_MMIO(0xb020), 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
+ MMIO_F(GEN7_L3CNTLREG2, 0x80, F_CMD_ACCESS, 0, 0, D_SKL_PLUS,
NULL, NULL);
MMIO_D(RPM_CONFIG0, D_SKL_PLUS);
MMIO_D(_MMIO(0xd08), D_SKL_PLUS);
MMIO_D(RC6_LOCATION, D_SKL_PLUS);
- MMIO_DFH(_MMIO(0x20e0), D_SKL_PLUS, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_MMIO(0x20ec), D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ MMIO_DFH(GEN7_FF_SLICE_CS_CHICKEN1, D_SKL_PLUS, F_MODE_MASK,
+ NULL, NULL);
+ MMIO_DFH(GEN9_CS_DEBUG_MODE1, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
/* TRTT */
- MMIO_DFH(_MMIO(0x4de0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de4), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4de8), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4dec), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(_MMIO(0x4df4), D_SKL_PLUS, F_CMD_ACCESS,
+ MMIO_DFH(TRVATTL3PTRDW(0), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(1), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS,
NULL, gen9_trtte_write);
MMIO_DH(_MMIO(0x4dfc), D_SKL_PLUS, NULL, gen9_trtt_chicken_write);
@@ -3011,7 +3049,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DH(DMA_CTRL, D_SKL_PLUS, NULL, dma_ctrl_write);
MMIO_D(_MMIO(0x65900), D_SKL_PLUS);
- MMIO_D(_MMIO(0x1082c0), D_SKL_PLUS);
+ MMIO_D(GEN6_STOLEN_RESERVED, D_SKL_PLUS);
MMIO_D(_MMIO(0x4068), D_SKL_PLUS);
MMIO_D(_MMIO(0x67054), D_SKL_PLUS);
MMIO_D(_MMIO(0x6e560), D_SKL_PLUS);
@@ -3042,8 +3080,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
- MMIO_D(_MMIO(0x2248), D_SKL_PLUS);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
return 0;
}
@@ -3265,7 +3303,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
/* Special MMIO blocks. */
static struct gvt_mmio_block mmio_blocks[] = {
{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
- {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+ {D_ALL, MCHBAR_MIRROR_REG_BASE, 0x4000, NULL, NULL},
{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
pvinfo_mmio_read, pvinfo_mmio_write},
{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
@@ -3489,12 +3527,11 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
return mmio_info->read(vgpu, offset, pdata, bytes);
else {
u64 ro_mask = mmio_info->ro_mask;
- u32 old_vreg = 0, old_sreg = 0;
+ u32 old_vreg = 0;
u64 data = 0;
if (intel_gvt_mmio_has_mode_mask(gvt, mmio_info->offset)) {
old_vreg = vgpu_vreg(vgpu, offset);
- old_sreg = vgpu_sreg(vgpu, offset);
}
if (likely(!ro_mask))
@@ -3516,8 +3553,6 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
vgpu_vreg(vgpu, offset) = (old_vreg & ~mask)
| (vgpu_vreg(vgpu, offset) & mask);
- vgpu_sreg(vgpu, offset) = (old_sreg & ~mask)
- | (vgpu_sreg(vgpu, offset) & mask);
}
}
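The display-flip handlers in this file now distinguish asynchronous from synchronous flips: pri_surf_mmio_write(), spr_surf_mmio_write() and the new reg50080_mmio_write() fire the virtual flip-done event immediately for async flips and otherwise queue it in irq.flip_done_event so the emulated vblank delivers it, with the flip counter bumped on the primary-plane surface write instead of in the vblank path. A small sketch of that split; trigger() and the pending[] array are stand-ins for the vGPU interrupt machinery, not the driver's API:

/* Sketch of the flip-completion split: async flips signal the guest
 * right away, synchronous flips are queued and signalled from the
 * emulated vblank. */
#include <stdbool.h>
#include <stdio.h>

enum { MAX_EVENTS = 8 };
static bool pending[MAX_EVENTS];	/* flip_done_event analogue */

static void trigger(int event) { printf("deliver event %d\n", event); }

static void surf_write(int event, bool async_flip)
{
	if (async_flip)
		trigger(event);		/* async: complete immediately */
	else
		pending[event] = true;	/* sync: defer to vblank emulation */
}

static void emulate_vblank(void)
{
	for (int e = 0; e < MAX_EVENTS; e++)
		if (pending[e]) {
			pending[e] = false;
			trigger(e);
		}
}

int main(void)
{
	surf_write(1, true);	/* delivered immediately */
	surf_write(2, false);	/* delivered on the next vblank below */
	emulate_vblank();
	return 0;
}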
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 67125c5eec6e..951681813230 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -536,7 +536,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_BSD2(gvt->dev_priv)) {
+ if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index ed4df2f6d60b..a55178884d67 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
if (dmlr) {
memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
* touched
*/
memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
- memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
}
}
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
- vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+ vgpu->mmio.vreg = vzalloc(info->mmio_size);
if (!vgpu->mmio.vreg)
return -ENOMEM;
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
intel_vgpu_reset_mmio(vgpu, true);
return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+ vgpu->mmio.vreg = NULL;
}
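With the sreg shadow removed in this file, a vGPU's MMIO state is a single mmio_size allocation instead of the previous doubled buffer, and the reset and cleanup paths touch only vreg. A sketch of the simplified init/fini pairing, using calloc in place of vzalloc and an arbitrary size:

/* Sketch only: vreg is now the sole per-vGPU register copy. */
#include <stdlib.h>

struct vgpu_mmio { void *vreg; };

static int vgpu_mmio_init(struct vgpu_mmio *m, size_t mmio_size)
{
	m->vreg = calloc(1, mmio_size);	/* was 2 * mmio_size with sreg */
	return m->vreg ? 0 : -1;
}

static void vgpu_mmio_fini(struct vgpu_mmio *m)
{
	free(m->vreg);
	m->vreg = NULL;
}

int main(void)
{
	struct vgpu_mmio m;

	if (!vgpu_mmio_init(&m, 2 << 20))
		vgpu_mmio_fini(&m);
	return 0;
}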
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7902fb162d09..e7e14c842be4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -41,103 +41,102 @@
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
- {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
- {RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
- {RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
- {RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
- {RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
- {RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
- {RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
- {RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
- {RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
-
- {RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
- {RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
- {RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
- {RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
- {RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
- {RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
- {RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
- {RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
- {RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
- {RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
- {RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
- {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
- {RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
- {RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
- {RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
- {RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
- {RCS, TRVADR, 0, false}, /* 0x4df0 */
- {RCS, TRTTE, 0, false}, /* 0x4df4 */
-
- {BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
- {BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
- {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
- {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
- {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
-
- {VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
-
- {VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
-
- {RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
- {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
- {RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
- {RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
-
- {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
- {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
-
- {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
- {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
- {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
- {RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
+ {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
+ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
+ {RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
+ {RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
+ {RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
+ {RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
+ {RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
+ {RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
+ {RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */
+
+ {RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
+ {RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
+ {RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
+ {RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
+ {RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
+ {RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
+ {RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
+ {RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
+ {RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
+ {RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
+ {RCS0, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
+ {RCS0, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
+ {RCS0, TRNULLDETCT, 0, false}, /* 0x4de8 */
+ {RCS0, TRINVTILEDETCT, 0, false}, /* 0x4dec */
+ {RCS0, TRVADR, 0, false}, /* 0x4df0 */
+ {RCS0, TRTTE, 0, false}, /* 0x4df4 */
+
+ {BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
+ {BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
+ {BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
+ {BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
+ {BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
+
+ {VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
+
+ {VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */
+
+ {RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
+ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
+ {RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
+ {RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
+
+ {RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
+ {RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+
+ {RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
+ {RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
+ {RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
+ {RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
@@ -150,11 +149,11 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
{
i915_reg_t offset;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int ring_id, i;
@@ -302,7 +301,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
goto out;
/* no MOCS register in context except render engine */
- if (req->engine->id != RCS)
+ if (req->engine->id != RCS0)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
@@ -328,15 +327,16 @@ out:
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw;
i915_reg_t reg;
u32 regs[] = {
- [RCS] = 0x4260,
- [VCS] = 0x4264,
- [VCS2] = 0x4268,
- [BCS] = 0x426c,
- [VECS] = 0x4270,
+ [RCS0] = 0x4260,
+ [VCS0] = 0x4264,
+ [VCS1] = 0x4268,
+ [BCS0] = 0x426c,
+ [VECS0] = 0x4270,
};
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
@@ -352,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise device can go to RC6 state and interrupt invalidation
* process
*/
- fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
+ if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER;
- intel_uncore_forcewake_get(dev_priv, fw);
+ intel_uncore_forcewake_get(uncore, fw);
- I915_WRITE_FW(reg, 0x1);
+ intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
+ if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else
vgpu_vreg_t(vgpu, reg) = 0;
- intel_uncore_forcewake_put(dev_priv, fw);
+ intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
@@ -379,11 +379,11 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
u32 old_v, new_v;
u32 regs[] = {
- [RCS] = 0xc800,
- [VCS] = 0xc900,
- [VCS2] = 0xca00,
- [BCS] = 0xcc00,
- [VECS] = 0xcb00,
+ [RCS0] = 0xc800,
+ [VCS0] = 0xc900,
+ [VCS1] = 0xca00,
+ [BCS0] = 0xcc00,
+ [VECS0] = 0xcb00,
};
int i;
@@ -391,8 +391,10 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
return;
- if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
- || IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
+ if (ring_id == RCS0 &&
+ (IS_KABYLAKE(dev_priv) ||
+ IS_BROXTON(dev_priv) ||
+ IS_COFFEELAKE(dev_priv)))
return;
if (!pre && !gen9_render_mocs.initialized)
@@ -415,7 +417,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
offset.reg += 4;
}
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -493,7 +495,8 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
+ !is_inhibit_context(intel_context_lookup(s->shadow_ctx,
+ dev_priv->engine[ring_id])))
continue;
if (mmio->mask)
@@ -550,9 +553,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* performance for batch mmio read/write, so we need
* to handle forcewake manually.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
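handle_tlb_pending_event() above keeps its existing shape while switching to the intel_uncore helpers: take forcewake for the register, write 1 to trigger the invalidation, poll until hardware clears it within a bounded atomic wait, then drop forcewake. The generic trigger-and-poll shape is sketched below in plain C; reg_read()/reg_write() and the instant-completion model are stand-ins for the uncore accessors and real hardware:

/* Trigger-and-poll sketch: set the bit, spin until HW clears it,
 * give up after a bounded number of reads. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;
static unsigned int busy_reads = 3;	/* pretend HW needs a few polls */

static uint32_t reg_read(void)
{
	if (fake_reg && busy_reads)
		busy_reads--;
	else
		fake_reg = 0;		/* HW clears the bit when done */
	return fake_reg;
}

static void reg_write(uint32_t v) { fake_reg = v; }

static bool invalidate_tlb(unsigned int max_spins)
{
	reg_write(0x1);			/* kick the invalidation */
	while (max_spins--) {
		if (reg_read() == 0)
			return true;
	}
	return false;			/* timed out; caller would log an error */
}

int main(void)
{
	printf("tlb invalidate: %s\n", invalidate_tlb(50) ? "ok" : "timeout");
	return 0;
}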
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 428d252344f1..3de5b643b266 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -60,6 +60,37 @@
#define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
#define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP (1 << 9)
+#define REG50080_FLIP_TYPE_MASK 0x3
+#define REG50080_FLIP_TYPE_ASYNC 0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+ typeof(_pipe) (p) = (_pipe); \
+ typeof(_plane) (q) = (_plane); \
+ (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+ (_MMIO(0x50090))) : \
+ (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+ (_MMIO(0x50098))) : \
+ (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+ (_MMIO(0x5009C))) : \
+ (_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+ (((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+ (((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+ (INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+ typeof(_reg) (reg) = (_reg); \
+ (((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+ (PLANE_PRIMARY) : \
+ (((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+ (PLANE_SPRITE0) : (I915_MAX_PLANES))); })
+
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
@@ -95,4 +126,7 @@
#define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
#define VF_GUARDBAND _MMIO(0x83a4)
+/* define the effective range of MCHBAR register on Sandybridge+ */
+#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
+
#endif
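The REG_50080 family above encodes one surface-address register per (pipe, plane) pair: 0x50080/0x50088/0x5008C for the primary planes of pipes A/B/C and 0x50090/0x50098/0x5009C for sprite 0, with SKL_FLIP_EVENT() mapping the same pair onto a flip-done event. A standalone round-trip check of that mapping follows; the enums are local stand-ins, and the bit-4 shortcut in reg_to_plane() is an observation about these six offsets, not how the kernel macro is written.

/* Round-trip check of the pipe/plane <-> 0x500xx mapping defined above. */
#include <assert.h>
#include <stdint.h>

enum pipe  { PIPE_A, PIPE_B, PIPE_C };
enum plane { PLANE_PRIMARY, PLANE_SPRITE0 };

static uint32_t reg_50080(enum pipe p, enum plane q)
{
	static const uint32_t map[3][2] = {
		[PIPE_A] = { 0x50080, 0x50090 },
		[PIPE_B] = { 0x50088, 0x50098 },
		[PIPE_C] = { 0x5008C, 0x5009C },
	};
	return map[p][q];
}

static enum pipe reg_to_pipe(uint32_t reg)
{
	return (reg == 0x50080 || reg == 0x50090) ? PIPE_A :
	       (reg == 0x50088 || reg == 0x50098) ? PIPE_B : PIPE_C;
}

static enum plane reg_to_plane(uint32_t reg)
{
	return (reg & 0x10) ? PLANE_SPRITE0 : PLANE_PRIMARY;
}

int main(void)
{
	for (enum pipe p = PIPE_A; p <= PIPE_C; p++)
		for (enum plane q = PLANE_PRIMARY; q <= PLANE_SPRITE0; q++) {
			uint32_t reg = reg_50080(p, q);

			assert(reg_to_pipe(reg) == p);
			assert(reg_to_plane(reg) == q);
		}
	return 0;
}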
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 05b953793316..8998fa5ab198 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -93,7 +93,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS)
+ if (workload->ring_id != RCS0)
return;
if (save) {
@@ -149,7 +149,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -177,7 +177,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
context_page_num = 19;
i = 2;
@@ -434,8 +434,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (ret)
goto err_unpin;
- if ((workload->ring_id == RCS) &&
- (workload->wa_ctx.indirect_ctx.size != 0)) {
+ if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -803,7 +802,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
+ if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -851,13 +850,13 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
}
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
for_each_engine_masked(engine, dev_priv, engine_mask, tmp) {
@@ -903,8 +902,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload->status = 0;
}
- if (!workload->status && !(vgpu->resetting_eng &
- ENGINE_MASK(ring_id))) {
+ if (!workload->status &&
+ !(vgpu->resetting_eng & BIT(ring_id))) {
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
@@ -927,7 +926,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
list_del_init(&workload->list);
- if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+ if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* if workload->status is not successful means HW GPU
* has occurred GPU hang or something wrong with i915/GVT,
* and GVT won't inject context switch interrupt to guest.
@@ -941,7 +940,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
}
workload->complete(workload);
@@ -1001,7 +1000,7 @@ static int workload_thread(void *priv)
workload->ring_id, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(gvt->dev_priv,
+ intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
ret = dispatch_workload(workload);
@@ -1023,7 +1022,7 @@ complete:
complete_current_workload(gvt, ring_id);
if (need_force_wake)
- intel_uncore_forcewake_put(gvt->dev_priv,
+ intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
FORCEWAKE_ALL);
intel_runtime_pm_put_unchecked(gvt->dev_priv);
@@ -1114,9 +1113,9 @@ i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm)) {
px_dma(&i915_ppgtt->pml4) = s->i915_context_pml4;
- else {
+ } else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
px_dma(i915_ppgtt->pdp.page_directory[i]) =
s->i915_context_pdps[i];
@@ -1150,7 +1149,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
*
*/
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1167,7 +1166,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s)
struct i915_hw_ppgtt *i915_ppgtt = s->shadow_ctx->ppgtt;
int i;
- if (i915_vm_is_48bit(&i915_ppgtt->vm))
+ if (i915_vm_is_4lvl(&i915_ppgtt->vm))
s->i915_context_pml4 = px_dma(&i915_ppgtt->pml4);
else {
for (i = 0; i < GEN8_3LVL_PDPES; i++)
@@ -1240,7 +1239,7 @@ out_shadow_ctx:
*
*/
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface)
{
struct intel_vgpu_submission *s = &vgpu->submission;
@@ -1450,7 +1449,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS) {
+ if (ring_id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 0635b2c4bed7..90c6756f5453 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -142,12 +142,12 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu);
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu);
void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
void intel_vgpu_clean_submission(struct intel_vgpu *vgpu);
int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int interface);
extern const struct intel_vgpu_submission_ops
@@ -160,6 +160,6 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
- unsigned long engine_mask);
+ intel_engine_mask_t engine_mask);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 720e2b10adaa..44ce3c2b9ac1 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -44,7 +44,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
- vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+ vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_PPGTT;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HUGE_GTT;
@@ -526,11 +526,11 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
* GPU engines. For FLR, engine_mask is ignored.
*/
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
+ intel_engine_mask_t resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
gvt_dbg_core("------------------------------------------\n");
gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index db7bb5bd5add..863ae12707ba 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_globals.h"
#define BKL(ref) (&(ref)->i915->drm.struct_mutex)
@@ -17,6 +18,7 @@
* nodes from a local slab cache to hopefully reduce the fragmentation.
*/
static struct i915_global_active {
+ struct i915_global base;
struct kmem_cache *slab_cache;
} global;
@@ -285,16 +287,27 @@ void i915_active_retire_noop(struct i915_active_request *active,
#include "selftests/i915_active.c"
#endif
+static void i915_global_active_shrink(void)
+{
+ kmem_cache_shrink(global.slab_cache);
+}
+
+static void i915_global_active_exit(void)
+{
+ kmem_cache_destroy(global.slab_cache);
+}
+
+static struct i915_global_active global = { {
+ .shrink = i915_global_active_shrink,
+ .exit = i915_global_active_exit,
+} };
+
int __init i915_global_active_init(void)
{
global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
if (!global.slab_cache)
return -ENOMEM;
+ i915_global_register(&global.base);
return 0;
}
-
-void __exit i915_global_active_exit(void)
-{
- kmem_cache_destroy(global.slab_cache);
-}
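The i915_active.c change above is an instance of the new i915_globals pattern: instead of exporting a dedicated exit function, the subsystem embeds a struct i915_global with shrink/exit callbacks and registers it once from its init path, so common code can walk every registered global (the header hunk that follows drops the now-unneeded prototypes). A userspace sketch of that registration-and-walk idea, with the list and the sample subsystem purely illustrative:

/* Registration-and-walk sketch: each subsystem embeds a base node with
 * shrink/exit hooks; one walker tears everything down in one place. */
#include <stdio.h>
#include <stdlib.h>

struct global {
	struct global *next;
	void (*shrink)(void);
	void (*exit)(void);
};

static struct global *globals;

static void global_register(struct global *g)
{
	g->next = globals;
	globals = g;
}

static void globals_exit(void)
{
	for (struct global *g = globals; g; g = g->next)
		g->exit();
}

/* One "subsystem": owns a buffer instead of a slab cache. */
static struct { struct global base; void *cache; } active;

static void active_shrink(void) { /* would trim the cache */ }
static void active_exit(void)   { free(active.cache); }

int main(void)
{
	active.base.shrink = active_shrink;
	active.base.exit = active_exit;
	active.cache = malloc(64);
	global_register(&active.base);

	globals_exit();
	puts("done");
	return 0;
}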
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index 12b5c1d287d1..7d758719ce39 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -108,19 +108,6 @@ i915_active_request_set_retire_fn(struct i915_active_request *active,
active->retire = fn ?: i915_active_retire_noop;
}
-static inline struct i915_request *
-__i915_active_request_peek(const struct i915_active_request *active)
-{
- /*
- * Inside the error capture (running with the driver in an unknown
- * state), we want to bend the rules slightly (a lot).
- *
- * Work is in progress to make it safer, in the meantime this keeps
- * the known issue from spamming the logs.
- */
- return rcu_dereference_protected(active->request, 1);
-}
-
/**
* i915_active_request_raw - return the active request
* @active - the active tracker
@@ -419,7 +406,4 @@ void i915_active_fini(struct i915_active *ref);
static inline void i915_active_fini(struct i915_active *ref) { }
#endif
-int i915_global_active_init(void);
-void i915_global_active_exit(void);
-
#endif /* _I915_ACTIVE_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 33e8eed64423..503d548a55f7 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -868,8 +868,8 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
if (!IS_GEN(engine->i915, 7))
return;
- switch (engine->id) {
- case RCS:
+ switch (engine->class) {
+ case RENDER_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_render_ring_cmds;
cmd_table_count =
@@ -889,12 +889,12 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_render_get_cmd_length_mask;
break;
- case VCS:
+ case VIDEO_DECODE_CLASS:
cmd_tables = gen7_video_cmds;
cmd_table_count = ARRAY_SIZE(gen7_video_cmds);
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
- case BCS:
+ case COPY_ENGINE_CLASS:
if (IS_HASWELL(engine->i915)) {
cmd_tables = hsw_blt_ring_cmds;
cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
@@ -913,14 +913,14 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
engine->get_cmd_length_mask = gen7_blt_get_cmd_length_mask;
break;
- case VECS:
+ case VIDEO_ENHANCEMENT_CLASS:
cmd_tables = hsw_vebox_cmds;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmds);
/* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
break;
default:
- MISSING_CASE(engine->id);
+ MISSING_CASE(engine->class);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index f6f6e5b78e97..5823ffb17821 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -26,14 +26,21 @@
*
*/
-#include <linux/sort.h>
#include <linux/sched/mm.h>
+#include <linux/sort.h>
+
#include <drm/drm_debugfs.h>
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
-#include "intel_guc_submission.h"
#include "i915_reset.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_guc_submission.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -388,12 +395,9 @@ static void print_context_stats(struct seq_file *m,
struct i915_gem_context *ctx;
list_for_each_entry(ctx, &i915->contexts.list, link) {
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, i915, id) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
if (ce->state)
per_file_stats(0, ce->state->obj, &kstats);
if (ce->ring)
@@ -412,9 +416,8 @@ static void print_context_stats(struct seq_file *m,
rcu_read_lock();
task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID);
- snprintf(name, sizeof(name), "%s/%d",
- task ? task->comm : "<unknown>",
- ctx->user_handle);
+ snprintf(name, sizeof(name), "%s",
+ task ? task->comm : "<unknown>");
rcu_read_unlock();
print_file_stats(m, name, stats);
@@ -830,11 +833,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
} else if (!HAS_PCH_SPLIT(dev_priv)) {
seq_printf(m, "Interrupt enable: %08x\n",
- I915_READ(IER));
+ I915_READ(GEN2_IER));
seq_printf(m, "Interrupt identity: %08x\n",
- I915_READ(IIR));
+ I915_READ(GEN2_IIR));
seq_printf(m, "Interrupt mask: %08x\n",
- I915_READ(IMR));
+ I915_READ(GEN2_IMR));
for_each_pipe(dev_priv, pipe)
seq_printf(m, "Pipe %c stat: %08x\n",
pipe_name(pipe),
@@ -884,7 +887,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
for_each_engine(engine, dev_priv, id) {
seq_printf(m,
"Graphics Interrupt mask (%s): %08x\n",
- engine->name, I915_READ_IMR(engine));
+ engine->name, ENGINE_READ(engine, RING_IMR));
}
}
@@ -1097,7 +1100,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
if (INTEL_GEN(dev_priv) >= 9)
@@ -1125,7 +1128,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv,
intel_get_cagf(dev_priv, rpstat));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
if (INTEL_GEN(dev_priv) >= 11) {
pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE);
@@ -1281,14 +1284,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_wakeref_t wakeref;
enum intel_engine_id id;
+ seq_printf(m, "Reset flags: %lx\n", dev_priv->gpu_error.flags);
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- seq_puts(m, "Wedged\n");
+ seq_puts(m, "\tWedged\n");
if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
- seq_puts(m, "Reset in progress: struct_mutex backoff\n");
- if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
- seq_puts(m, "Waiter holding struct mutex\n");
- if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
- seq_puts(m, "struct_mutex blocked for reset\n");
+ seq_puts(m, "\tDevice (global) reset in progress\n");
if (!i915_modparams.enable_hangcheck) {
seq_puts(m, "Hangcheck disabled\n");
@@ -1298,10 +1298,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
with_intel_runtime_pm(dev_priv, wakeref) {
for_each_engine(engine, dev_priv, id) {
acthd[id] = intel_engine_get_active_head(engine);
- seqno[id] = intel_engine_get_seqno(engine);
+ seqno[id] = intel_engine_get_hangcheck_seqno(engine);
}
- intel_engine_get_instdone(dev_priv->engine[RCS], &instdone);
+ intel_engine_get_instdone(dev_priv->engine[RCS0], &instdone);
}
if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
@@ -1318,8 +1318,9 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
for_each_engine(engine, dev_priv, id) {
seq_printf(m, "%s:\n", engine->name);
seq_printf(m, "\tseqno = %x [current %x, last %x], %dms ago\n",
- engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine),
+ engine->hangcheck.last_seqno,
+ seqno[id],
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies -
engine->hangcheck.action_timestamp));
@@ -1327,7 +1328,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
(long long)engine->hangcheck.acthd,
(long long)acthd[id]);
- if (engine->id == RCS) {
+ if (engine->id == RCS0) {
seq_puts(m, "\tinstdone read =\n");
i915_instdone_info(dev_priv, m, &instdone);
@@ -1419,13 +1420,14 @@ static int ironlake_drpc_info(struct seq_file *m)
static int i915_forcewake_domains(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_uncore *uncore = &i915->uncore;
struct intel_uncore_forcewake_domain *fw_domain;
unsigned int tmp;
seq_printf(m, "user.bypass_count = %u\n",
- i915->uncore.user_forcewake.count);
+ uncore->user_forcewake.count);
- for_each_fw_domain(fw_domain, i915, tmp)
+ for_each_fw_domain(fw_domain, uncore, tmp)
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(fw_domain->id),
READ_ONCE(fw_domain->wake_count));
@@ -1882,9 +1884,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
- struct intel_engine_cs *engine;
struct i915_gem_context *ctx;
- enum intel_engine_id id;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1892,6 +1892,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+ struct intel_context *ce;
+
seq_puts(m, "HW context ");
if (!list_empty(&ctx->hw_id_link))
seq_printf(m, "%x [pin %u]", ctx->hw_id,
@@ -1914,11 +1916,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
seq_putc(m, ctx->remap_slice ? 'R' : 'r');
seq_putc(m, '\n');
- for_each_engine(engine, dev_priv, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- seq_printf(m, "%s: ", engine->name);
+ list_for_each_entry(ce, &ctx->active_engines, active_link) {
+ seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
describe_obj(m, ce->state->obj);
if (ce->ring)
@@ -2023,11 +2022,9 @@ static const char *rps_power_to_str(unsigned int power)
static int i915_rps_boost_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
struct intel_rps *rps = &dev_priv->gt_pm.rps;
u32 act_freq = rps->cur_freq;
intel_wakeref_t wakeref;
- struct drm_file *file;
with_intel_runtime_pm_if_in_use(dev_priv, wakeref) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -2061,22 +2058,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
intel_gpu_freq(dev_priv, rps->efficient_freq),
intel_gpu_freq(dev_priv, rps->boost_freq));
- mutex_lock(&dev->filelist_mutex);
- list_for_each_entry_reverse(file, &dev->filelist, lhead) {
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(file->pid, PIDTYPE_PID);
- seq_printf(m, "%s [%d]: %d boosts\n",
- task ? task->comm : "<unknown>",
- task ? task->pid : -1,
- atomic_read(&file_priv->rps_client.boosts));
- rcu_read_unlock();
- }
- seq_printf(m, "Kernel (anonymous) boosts: %d\n",
- atomic_read(&rps->boosts));
- mutex_unlock(&dev->filelist_mutex);
+ seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts));
if (INTEL_GEN(dev_priv) >= 6 &&
rps->enabled &&
@@ -2084,12 +2066,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
u32 rpup, rpupei;
u32 rpdown, rpdownei;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK;
rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK;
rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK;
rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(rps->power.mode));
@@ -2112,8 +2094,8 @@ static int i915_llc(struct seq_file *m, void *data)
const bool edram = INTEL_GEN(dev_priv) > 8;
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv)));
- seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
- intel_uncore_edram_size(dev_priv)/1024/1024);
+ seq_printf(m, "%s: %uMB\n", edram ? "eDRAM" : "eLLC",
+ dev_priv->edram_size_mb);
return 0;
}
@@ -2270,7 +2252,7 @@ static int i915_guc_stage_pool(struct seq_file *m, void *data)
const struct intel_guc *guc = &dev_priv->guc;
struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
struct intel_guc_client *client = guc->execbuf_client;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int index;
if (!USES_GUC_SUBMISSION(dev_priv))
@@ -2607,7 +2589,6 @@ static int
i915_edp_psr_debug_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
- struct drm_modeset_acquire_ctx ctx;
intel_wakeref_t wakeref;
int ret;
@@ -2618,18 +2599,7 @@ i915_edp_psr_debug_set(void *data, u64 val)
wakeref = intel_runtime_pm_get(dev_priv);
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
-retry:
- ret = intel_psr_set_debugfs_mode(dev_priv, &ctx, val);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret)
- goto retry;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
+ ret = intel_psr_debug_set(dev_priv, val);
intel_runtime_pm_put(dev_priv, wakeref);
@@ -2686,8 +2656,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
seq_printf(m, "Runtime power status: %s\n",
enableddisabled(!dev_priv->power_domains.wakeref));
- seq_printf(m, "GPU idle: %s (epoch %u)\n",
- yesno(!dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
seq_printf(m, "IRQs disabled: %s\n",
yesno(!intel_irqs_enabled(dev_priv)));
#ifdef CONFIG_PM
@@ -2904,7 +2873,6 @@ static void intel_connector_info(struct seq_file *m,
if (connector->status == connector_status_disconnected)
return;
- seq_printf(m, "\tname: %s\n", connector->display_info.name);
seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
connector->display_info.width_mm,
connector->display_info.height_mm);
@@ -3123,8 +3091,7 @@ static int i915_engine_info(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(dev_priv);
- seq_printf(m, "GT awake? %s (epoch %u)\n",
- yesno(dev_priv->gt.awake), dev_priv->gt.epoch);
+ seq_printf(m, "GT awake? %s\n", yesno(dev_priv->gt.awake));
seq_printf(m, "Global active requests: %d\n",
dev_priv->gt.active_requests);
seq_printf(m, "CS timestamp frequency: %u kHz\n",
@@ -3211,7 +3178,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
- const struct i915_wa_list *wal = &i915->engine[RCS]->ctx_wa_list;
+ const struct i915_wa_list *wal = &i915->engine[RCS0]->ctx_wa_list;
struct i915_wa *wa;
unsigned int i;
@@ -3865,11 +3832,18 @@ static const struct file_operations i915_cur_wm_latency_fops = {
static int
i915_wedged_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
-
- *val = i915_terminally_wedged(&dev_priv->gpu_error);
+ int ret = i915_terminally_wedged(data);
- return 0;
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
}
static int
@@ -3877,16 +3851,9 @@ i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- /*
- * There is no safeguard against this debugfs entry colliding
- * with the hangcheck calling same i915_handle_error() in
- * parallel, causing an explosion. For now we assume that the
- * test harness is responsible enough not to inject gpu hangs
- * while it is writing to 'i915_wedged'
- */
-
- if (i915_reset_backoff(&i915->gpu_error))
- return -EAGAIN;
+ /* Flush any previous reset before applying for a new one */
+ wait_event(i915->gpu_error.reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags));
i915_handle_error(i915, val, I915_ERROR_CAPTURE,
"Manually set wedged engine mask = %llx", val);
@@ -3927,12 +3894,9 @@ static int
i915_drop_caches_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- intel_wakeref_t wakeref;
- int ret = 0;
DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n",
val, val & DROP_ALL);
- wakeref = intel_runtime_pm_get(i915);
if (val & DROP_RESET_ACTIVE &&
wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT))
@@ -3941,9 +3905,11 @@ i915_drop_caches_set(void *data, u64 val)
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
if (val & (DROP_ACTIVE | DROP_RETIRE | DROP_RESET_SEQNO)) {
+ int ret;
+
ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
if (ret)
- goto out;
+ return ret;
if (val & DROP_ACTIVE)
ret = i915_gem_wait_for_idle(i915,
@@ -3957,7 +3923,7 @@ i915_drop_caches_set(void *data, u64 val)
mutex_unlock(&i915->drm.struct_mutex);
}
- if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(&i915->gpu_error))
+ if (val & DROP_RESET_ACTIVE && i915_terminally_wedged(i915))
i915_handle_error(i915, ALL_ENGINES, 0, NULL);
fs_reclaim_acquire(GFP_KERNEL);
@@ -3982,10 +3948,7 @@ i915_drop_caches_set(void *data, u64 val)
if (val & DROP_FREED)
i915_gem_drain_freed_objects(i915);
-out:
- intel_runtime_pm_put(i915, wakeref);
-
- return ret;
+ return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops,
@@ -4293,7 +4256,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
return 0;
file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915);
- intel_uncore_forcewake_user_get(i915);
+ intel_uncore_forcewake_user_get(&i915->uncore);
return 0;
}
@@ -4305,7 +4268,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file)
if (INTEL_GEN(i915) < 6)
return 0;
- intel_uncore_forcewake_user_put(i915);
+ intel_uncore_forcewake_user_put(&i915->uncore);
intel_runtime_pm_put(i915,
(intel_wakeref_t)(uintptr_t)file->private_data);
@@ -4858,6 +4821,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
yesno(crtc_state->dsc_params.compression_enable));
seq_printf(m, "DSC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
seq_printf(m, "FEC_Sink_Support: %s\n",
yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 9df65d386d11..1ad88e6d7c04 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -48,12 +48,19 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
-#include "i915_trace.h"
#include "i915_pmu.h"
-#include "i915_reset.h"
#include "i915_query.h"
+#include "i915_reset.h"
+#include "i915_trace.h"
#include "i915_vgpu.h"
+#include "intel_audio.h"
+#include "intel_cdclk.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "intel_uc.h"
#include "intel_workarounds.h"
@@ -188,6 +195,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n");
WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
+ case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+ DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
+ WARN_ON(!IS_COFFEELAKE(dev_priv));
+ /* CometPoint is CNP Compatible */
+ return PCH_CNP;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
DRM_DEBUG_KMS("Found Ice Lake PCH\n");
WARN_ON(!IS_ICELAKE(dev_priv));
@@ -219,20 +231,20 @@ intel_virt_detect_pch(const struct drm_i915_private *dev_priv)
* make an educated guess as to which PCH is really there.
*/
- if (IS_GEN(dev_priv, 5))
- id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
- else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
- id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ if (IS_ICELAKE(dev_priv))
+ id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_CANNONLAKE(dev_priv) || IS_COFFEELAKE(dev_priv))
+ id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
+ else if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv))
+ id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
id = INTEL_PCH_LPT_DEVICE_ID_TYPE;
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- id = INTEL_PCH_SPT_DEVICE_ID_TYPE;
- else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv))
- id = INTEL_PCH_CNP_DEVICE_ID_TYPE;
- else if (IS_ICELAKE(dev_priv))
- id = INTEL_PCH_ICP_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
+ id = INTEL_PCH_CPT_DEVICE_ID_TYPE;
+ else if (IS_GEN(dev_priv, 5))
+ id = INTEL_PCH_IBX_DEVICE_ID_TYPE;
if (id)
DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id);
@@ -330,16 +342,16 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = dev_priv->overlay ? 1 : 0;
break;
case I915_PARAM_HAS_BSD:
- value = !!dev_priv->engine[VCS];
+ value = !!dev_priv->engine[VCS0];
break;
case I915_PARAM_HAS_BLT:
- value = !!dev_priv->engine[BCS];
+ value = !!dev_priv->engine[BCS0];
break;
case I915_PARAM_HAS_VEBOX:
- value = !!dev_priv->engine[VECS];
+ value = !!dev_priv->engine[VECS0];
break;
case I915_PARAM_HAS_BSD2:
- value = !!dev_priv->engine[VCS2];
+ value = !!dev_priv->engine[VCS1];
break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
@@ -348,10 +360,10 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
value = HAS_WT(dev_priv);
break;
case I915_PARAM_HAS_ALIASING_PPGTT:
- value = min_t(int, INTEL_PPGTT(dev_priv), I915_GEM_PPGTT_FULL);
+ value = INTEL_PPGTT(dev_priv);
break;
case I915_PARAM_HAS_SEMAPHORES:
- value = 0;
+ value = !!(dev_priv->caps.scheduler & I915_SCHEDULER_CAP_SEMAPHORES);
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
@@ -714,8 +726,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
return 0;
cleanup_gem:
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
i915_gem_fini(dev_priv);
cleanup_modeset:
intel_modeset_cleanup(dev);
@@ -864,15 +875,19 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv)
if (i915_inject_load_failure())
return -ENODEV;
+ intel_device_info_subplatform_init(dev_priv);
+
+ intel_uncore_init_early(&dev_priv->uncore);
+
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
- spin_lock_init(&dev_priv->uncore.lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->av_mutex);
mutex_init(&dev_priv->wm.wm_mutex);
mutex_init(&dev_priv->pps_mutex);
+ mutex_init(&dev_priv->hdcp_comp_mutex);
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(dev_priv);
@@ -930,46 +945,6 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_engines_cleanup(dev_priv);
}
-static int i915_mmio_setup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- int mmio_bar;
- int mmio_size;
-
- mmio_bar = IS_GEN(dev_priv, 2) ? 1 : 0;
- /*
- * Before gen4, the registers and the GTT are behind different BARs.
- * However, from gen4 onwards, the registers and the GTT are shared
- * in the same BAR, so we want to restrict this ioremap from
- * clobbering the GTT which we want ioremap_wc instead. Fortunately,
- * the register BAR remains the same size for all the earlier
- * generations up to Ironlake.
- */
- if (INTEL_GEN(dev_priv) < 5)
- mmio_size = 512 * 1024;
- else
- mmio_size = 2 * 1024 * 1024;
- dev_priv->regs = pci_iomap(pdev, mmio_bar, mmio_size);
- if (dev_priv->regs == NULL) {
- DRM_ERROR("failed to map registers\n");
-
- return -EIO;
- }
-
- /* Try to make sure MCHBAR is enabled before poking at it */
- intel_setup_mchbar(dev_priv);
-
- return 0;
-}
-
-static void i915_mmio_cleanup(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
-
- intel_teardown_mchbar(dev_priv);
- pci_iounmap(pdev, dev_priv->regs);
-}
-
/**
* i915_driver_init_mmio - setup device MMIO
* @dev_priv: device private
@@ -989,15 +964,16 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
if (i915_get_bridge_dev(dev_priv))
return -EIO;
- ret = i915_mmio_setup(dev_priv);
+ ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
goto err_bridge;
- intel_uncore_init(dev_priv);
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev_priv);
intel_device_info_init_mmio(dev_priv);
- intel_uncore_prune(dev_priv);
+ intel_uncore_prune_mmio_domains(&dev_priv->uncore);
intel_uc_init_mmio(dev_priv);
@@ -1010,8 +986,8 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
return 0;
err_uncore:
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
err_bridge:
pci_dev_put(dev_priv->bridge_dev);
@@ -1024,8 +1000,8 @@ err_bridge:
*/
static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
{
- intel_uncore_fini(dev_priv);
- i915_mmio_cleanup(dev_priv);
+ intel_teardown_mchbar(dev_priv);
+ intel_uncore_fini_mmio(&dev_priv->uncore);
pci_dev_put(dev_priv->bridge_dev);
}
@@ -1034,110 +1010,180 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-static enum dram_rank skl_get_dimm_rank(u8 size, u32 rank)
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
{
- if (size == 0)
- return I915_DRAM_RANK_INVALID;
- if (rank == SKL_DRAM_RANK_SINGLE)
- return I915_DRAM_RANK_SINGLE;
- else if (rank == SKL_DRAM_RANK_DUAL)
- return I915_DRAM_RANK_DUAL;
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
- return I915_DRAM_RANK_INVALID;
+ return str[type];
}
-static bool
-skl_is_16gb_dimm(enum dram_rank rank, u8 size, u8 width)
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
{
- if (rank == I915_DRAM_RANK_SINGLE && width == 8 && size == 16)
- return true;
- else if (rank == I915_DRAM_RANK_DUAL && width == 8 && size == 32)
- return true;
- else if (rank == SKL_DRAM_RANK_SINGLE && width == 16 && size == 8)
- return true;
- else if (rank == SKL_DRAM_RANK_DUAL && width == 16 && size == 16)
- return true;
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
- return false;
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
}
-static int
-skl_dram_get_channel_info(struct dram_channel_info *ch, u32 val)
+static int skl_get_dimm_width(u16 val)
{
- u32 tmp_l, tmp_s;
- u32 s_val = val >> SKL_DRAM_S_SHIFT;
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
- if (!val)
- return -EINVAL;
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
- tmp_l = val & SKL_DRAM_SIZE_MASK;
- tmp_s = s_val & SKL_DRAM_SIZE_MASK;
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(dev_priv) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
+ channel, 'S', val >> 16);
- if (tmp_l == 0 && tmp_s == 0)
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ DRM_DEBUG_KMS("CH%u not populated\n", channel);
return -EINVAL;
+ }
- ch->l_info.size = tmp_l;
- ch->s_info.size = tmp_s;
-
- tmp_l = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- tmp_s = (s_val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- ch->l_info.width = (1 << tmp_l) * 8;
- ch->s_info.width = (1 << tmp_s) * 8;
-
- tmp_l = val & SKL_DRAM_RANK_MASK;
- tmp_s = s_val & SKL_DRAM_RANK_MASK;
- ch->l_info.rank = skl_get_dimm_rank(ch->l_info.size, tmp_l);
- ch->s_info.rank = skl_get_dimm_rank(ch->s_info.size, tmp_s);
-
- if (ch->l_info.rank == I915_DRAM_RANK_DUAL ||
- ch->s_info.rank == I915_DRAM_RANK_DUAL)
- ch->rank = I915_DRAM_RANK_DUAL;
- else if (ch->l_info.rank == I915_DRAM_RANK_SINGLE &&
- ch->s_info.rank == I915_DRAM_RANK_SINGLE)
- ch->rank = I915_DRAM_RANK_DUAL;
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
else
- ch->rank = I915_DRAM_RANK_SINGLE;
+ ch->ranks = 1;
- ch->is_16gb_dimm = skl_is_16gb_dimm(ch->l_info.rank, ch->l_info.size,
- ch->l_info.width) ||
- skl_is_16gb_dimm(ch->s_info.rank, ch->s_info.size,
- ch->s_info.width);
+ ch->is_16gb_dimm =
+ skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
- DRM_DEBUG_KMS("(size:width:rank) L(%dGB:X%d:%s) S(%dGB:X%d:%s)\n",
- ch->l_info.size, ch->l_info.width,
- ch->l_info.rank ? "dual" : "single",
- ch->s_info.size, ch->s_info.width,
- ch->s_info.rank ? "dual" : "single");
+ DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
return 0;
}
static bool
-intel_is_dram_symmetric(u32 val_ch0, u32 val_ch1,
- struct dram_channel_info *ch0)
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
{
- return (val_ch0 == val_ch1 &&
- (ch0->s_info.size == 0 ||
- (ch0->l_info.size == ch0->s_info.size &&
- ch0->l_info.width == ch0->s_info.width &&
- ch0->l_info.rank == ch0->s_info.rank)));
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
}
static int
skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0, ch1;
- u32 val_ch0, val_ch1;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
int ret;
- val_ch0 = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch0, val_ch0);
+ val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
if (ret == 0)
dram_info->num_channels++;
- val_ch1 = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(&ch1, val_ch1);
+ val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
if (ret == 0)
dram_info->num_channels++;
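
Note: the DIMM helpers introduced in the hunk above reduce to simple arithmetic — the number of DRAM devices on a DIMM is ranks * 64 / width, and a DIMM uses 16Gb parts when 8 * size_in_GB / num_devices == 16; the BXT path later in this patch goes the other way and computes total GB as Gbit_per_device * num_devices / 8. The following is a minimal standalone C sketch of that arithmetic, illustrative only and not part of the patch; it takes already-decoded size/width/ranks values rather than the real MCHBAR register fields.

#include <stdio.h>

struct dimm_info {
        unsigned int size_gb;   /* total GB on the DIMM */
        unsigned int width;     /* device width: x8, x16, x32 */
        unsigned int ranks;     /* 1 or 2 */
};

/* ranks * 64 data bits per rank / device width = number of DRAM devices */
static unsigned int dimm_num_devices(const struct dimm_info *dimm)
{
        return dimm->width ? dimm->ranks * 64 / dimm->width : 0;
}

/* 8 * total GB / devices = Gbit per device; 16Gb parts need extra WM latency */
static int dimm_is_16gbit(const struct dimm_info *dimm)
{
        unsigned int devices = dimm_num_devices(dimm);

        return devices && 8 * dimm->size_gb / devices == 16;
}

int main(void)
{
        /* single-rank x8 16 GB DIMM: 64/8 = 8 devices, 8*16/8 = 16 Gbit */
        struct dimm_info a = { .size_gb = 16, .width = 8, .ranks = 1 };
        /* dual-rank x8 32 GB DIMM: 128/8 = 16 devices, 8*32/16 = 16 Gbit */
        struct dimm_info b = { .size_gb = 32, .width = 8, .ranks = 2 };
        /* single-rank x8 8 GB DIMM: 8 devices, 8*8/8 = 8 Gbit -> not 16Gb */
        struct dimm_info c = { .size_gb = 8, .width = 8, .ranks = 1 };

        printf("a: %u devices, 16Gb: %d\n", dimm_num_devices(&a), dimm_is_16gbit(&a));
        printf("b: %u devices, 16Gb: %d\n", dimm_num_devices(&b), dimm_is_16gbit(&b));
        printf("c: %u devices, 16Gb: %d\n", dimm_num_devices(&c), dimm_is_16gbit(&c));
        return 0;
}
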
@@ -1151,28 +1197,47 @@ skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
* will be same as if single rank memory, so consider single rank
* memory.
*/
- if (ch0.rank == I915_DRAM_RANK_SINGLE ||
- ch1.rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
else
- dram_info->rank = max(ch0.rank, ch1.rank);
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
+ if (dram_info->ranks == 0) {
DRM_INFO("couldn't get memory rank information\n");
return -EINVAL;
}
dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
- dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
- val_ch1,
- &ch0);
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
- DRM_DEBUG_KMS("memory configuration is %sSymmetric memory\n",
- dev_priv->dram_info.symmetric_memory ? "" : "not ");
+ DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
return 0;
}
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
static int
skl_get_dram_info(struct drm_i915_private *dev_priv)
{
@@ -1180,6 +1245,9 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
u32 mem_freq_khz, val;
int ret;
+ dram_info->type = skl_get_dram_type(dev_priv);
+ DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
+
ret = skl_dram_get_channels_info(dev_priv);
if (ret)
return ret;
@@ -1200,6 +1268,85 @@ skl_get_dram_info(struct drm_i915_private *dev_priv)
return 0;
}
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
+ u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
static int
bxt_get_dram_info(struct drm_i915_private *dev_priv)
{
@@ -1228,57 +1375,44 @@ bxt_get_dram_info(struct drm_i915_private *dev_priv)
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- u8 size, width;
- enum dram_rank rank;
- u32 tmp;
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
if (val == 0xFFFFFFFF)
continue;
dram_info->num_channels++;
- tmp = val & BXT_DRAM_RANK_MASK;
-
- if (tmp == BXT_DRAM_RANK_SINGLE)
- rank = I915_DRAM_RANK_SINGLE;
- else if (tmp == BXT_DRAM_RANK_DUAL)
- rank = I915_DRAM_RANK_DUAL;
- else
- rank = I915_DRAM_RANK_INVALID;
-
- tmp = val & BXT_DRAM_SIZE_MASK;
- if (tmp == BXT_DRAM_SIZE_4GB)
- size = 4;
- else if (tmp == BXT_DRAM_SIZE_6GB)
- size = 6;
- else if (tmp == BXT_DRAM_SIZE_8GB)
- size = 8;
- else if (tmp == BXT_DRAM_SIZE_12GB)
- size = 12;
- else if (tmp == BXT_DRAM_SIZE_16GB)
- size = 16;
- else
- size = 0;
-
- tmp = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
- width = (1 << tmp) * 8;
- DRM_DEBUG_KMS("dram size:%dGB width:X%d rank:%s\n", size,
- width, rank == I915_DRAM_RANK_SINGLE ? "single" :
- rank == I915_DRAM_RANK_DUAL ? "dual" : "unknown");
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ WARN_ON(type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
/*
* If any of the channel is single rank channel,
* worst case output will be same as if single rank
* memory, so consider single rank memory.
*/
- if (dram_info->rank == I915_DRAM_RANK_INVALID)
- dram_info->rank = rank;
- else if (rank == I915_DRAM_RANK_SINGLE)
- dram_info->rank = I915_DRAM_RANK_SINGLE;
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
}
- if (dram_info->rank == I915_DRAM_RANK_INVALID) {
- DRM_INFO("couldn't get memory rank information\n");
+ if (dram_info->type == INTEL_DRAM_UNKNOWN ||
+ dram_info->ranks == 0) {
+ DRM_INFO("couldn't get memory information\n");
return -EINVAL;
}
@@ -1290,14 +1424,8 @@ static void
intel_get_dram_info(struct drm_i915_private *dev_priv)
{
struct dram_info *dram_info = &dev_priv->dram_info;
- char bandwidth_str[32];
int ret;
- dram_info->valid = false;
- dram_info->rank = I915_DRAM_RANK_INVALID;
- dram_info->bandwidth_kbps = 0;
- dram_info->num_channels = 0;
-
/*
* Assume 16Gb DIMMs are present until proven otherwise.
* This is only used for the level 0 watermark latency
@@ -1305,28 +1433,61 @@ intel_get_dram_info(struct drm_i915_private *dev_priv)
*/
dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
- if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 9)
return;
- /* Need to calculate bandwidth only for Gen9 */
- if (IS_BROXTON(dev_priv))
+ if (IS_GEN9_LP(dev_priv))
ret = bxt_get_dram_info(dev_priv);
- else if (IS_GEN(dev_priv, 9))
- ret = skl_get_dram_info(dev_priv);
else
- ret = skl_dram_get_channels_info(dev_priv);
+ ret = skl_get_dram_info(dev_priv);
if (ret)
return;
- if (dram_info->bandwidth_kbps)
- sprintf(bandwidth_str, "%d KBps", dram_info->bandwidth_kbps);
+ DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps,
+ dram_info->num_channels);
+
+ DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
+{
+ const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ const unsigned int sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
+static void edram_detect(struct drm_i915_private *dev_priv)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(dev_priv) ||
+ IS_BROADWELL(dev_priv) ||
+ INTEL_GEN(dev_priv) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The needed capability bits for size calculation are not there with
+ * pre gen9 so return 128MB always.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ dev_priv->edram_size_mb = 128;
else
- sprintf(bandwidth_str, "unknown");
- DRM_DEBUG_KMS("DRAM bandwidth:%s, total-channels: %u\n",
- bandwidth_str, dram_info->num_channels);
- DRM_DEBUG_KMS("DRAM rank: %s rank 16GB-dimm:%s\n",
- (dram_info->rank == I915_DRAM_RANK_DUAL) ?
- "dual" : "single", yesno(dram_info->is_16gb_dimm));
+ dev_priv->edram_size_mb =
+ gen9_edram_size_mb(dev_priv, edram_cap);
+
+ DRM_INFO("Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
}
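
Note: edram_detect()/gen9_edram_size_mb() above boil down to one multiplication — eDRAM size in MB is banks * ways * sets, with the way and set counts looked up from small tables indexed by fields of HSW_EDRAM_CAP. Below is a standalone sketch of just that calculation, illustrative only and not part of the patch; the register field extraction is left out and the indices are passed in directly.

#include <stdio.h>

/* Sketch of the gen9+ eDRAM size calculation: total MB is the product of
 * banks, ways and sets, where ways/sets come from capability-indexed tables. */
static unsigned int edram_size_mb(unsigned int num_banks,
                                  unsigned int ways_idx,
                                  unsigned int sets_idx)
{
        static const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
        static const unsigned int sets[4] = { 1, 1, 2, 2 };

        if (ways_idx >= 8 || sets_idx >= 4)
                return 0;

        return num_banks * ways[ways_idx] * sets[sets_idx];
}

int main(void)
{
        /* e.g. 8 banks, ways index 3 (16 ways), sets index 0 (1 set) -> 128 MB */
        printf("%u MB\n", edram_size_mb(8, 3, 0));
        /* 4 banks, 16 ways, 2 sets -> 128 MB as well */
        printf("%u MB\n", edram_size_mb(4, 3, 2));
        return 0;
}
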
/**
@@ -1348,7 +1509,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (HAS_PPGTT(dev_priv)) {
if (intel_vgpu_active(dev_priv) &&
- !intel_vgpu_has_full_48bit_ppgtt(dev_priv)) {
+ !intel_vgpu_has_full_ppgtt(dev_priv)) {
i915_report_error(dev_priv,
"incompatible vGPU found, support for isolated ppGTT required\n");
return -ENXIO;
@@ -1371,6 +1532,9 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
+ /* needs to be done before ggtt probe */
+ edram_detect(dev_priv);
+
i915_perf_init(dev_priv);
ret = i915_ggtt_probe_hw(dev_priv);
@@ -1606,10 +1770,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
if (drm_debug & DRM_UT_DRIVER) {
struct drm_printer p = drm_debug_printer("i915 device info:");
- drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s gen=%i\n",
+ drm_printf(&p, "pciid=0x%04x rev=0x%02x platform=%s (subplatform=0x%x) gen=%i\n",
INTEL_DEVID(dev_priv),
INTEL_REVID(dev_priv),
intel_platform_name(INTEL_INFO(dev_priv)->platform),
+ intel_subplatform(RUNTIME_INFO(dev_priv),
+ INTEL_INFO(dev_priv)->platform),
INTEL_GEN(dev_priv));
intel_device_info_dump_flags(INTEL_INFO(dev_priv), &p);
@@ -1652,8 +1818,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(device_info, match_info, sizeof(*device_info));
RUNTIME_INFO(i915)->device_id = pdev->device;
- BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
- BITS_PER_TYPE(device_info->platform_mask));
BUG_ON(device_info->gen > BITS_PER_TYPE(device_info->gen_mask));
return i915;
@@ -1750,11 +1914,17 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_unregister(dev_priv);
+ /*
+ * After unregistering the device to prevent any new users, cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ i915_gem_set_wedged(dev_priv);
+
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
- if (i915_gem_suspend(dev_priv))
- DRM_ERROR("failed to idle hardware; continuing to unload!\n");
+ i915_gem_suspend(dev_priv);
drm_atomic_helper_shutdown(dev);
@@ -1862,7 +2032,6 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
static int i915_drm_prepare(struct drm_device *dev)
{
struct drm_i915_private *i915 = to_i915(dev);
- int err;
/*
* NB intel_display_suspend() may issue new requests after we've
@@ -1870,12 +2039,9 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
- err = i915_gem_suspend(i915);
- if (err)
- dev_err(&i915->drm.pdev->dev,
- "GEM idle failed, suspend/resume might fail\n");
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static int i915_drm_suspend(struct drm_device *dev)
@@ -1945,7 +2111,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
i915_gem_suspend_late(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
intel_power_domains_suspend(dev_priv,
get_suspend_mode(dev_priv, hibernation));
@@ -2141,7 +2307,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_resume_early(dev_priv);
+ intel_uncore_resume_early(&dev_priv->uncore);
+
+ i915_check_and_clear_faults(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 || IS_GEN9_LP(dev_priv)) {
gen9_sanitize_dc_state(dev_priv);
@@ -2545,7 +2713,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
if (!force_on)
return 0;
- err = intel_wait_for_register(dev_priv,
+ err = intel_wait_for_register(&dev_priv->uncore,
VLV_GTLC_SURVIVABILITY_REG,
VLV_GFX_CLK_STATUS_BIT,
VLV_GFX_CLK_STATUS_BIT,
@@ -2711,7 +2879,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_disable_interrupts(dev_priv);
- intel_uncore_suspend(dev_priv);
+ intel_uncore_suspend(&dev_priv->uncore);
ret = 0;
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2728,7 +2896,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2745,7 +2913,7 @@ static int intel_runtime_suspend(struct device *kdev)
enable_rpm_wakeref_asserts(dev_priv);
intel_runtime_pm_cleanup(dev_priv);
- if (intel_uncore_arm_unclaimed_mmio_detection(dev_priv))
+ if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
DRM_ERROR("Unclaimed access detected prior to suspending\n");
dev_priv->runtime_pm.suspended = true;
@@ -2773,7 +2941,7 @@ static int intel_runtime_suspend(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D1);
}
- assert_forcewakes_inactive(dev_priv);
+ assert_forcewakes_inactive(&dev_priv->uncore);
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
@@ -2799,7 +2967,7 @@ static int intel_runtime_resume(struct device *kdev)
intel_opregion_notify_adapter(dev_priv, PCI_D0);
dev_priv->runtime_pm.suspended = false;
- if (intel_uncore_unclaimed_mmio(dev_priv))
+ if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
if (INTEL_GEN(dev_priv) >= 11) {
@@ -2825,7 +2993,7 @@ static int intel_runtime_resume(struct device *kdev)
ret = vlv_resume_prepare(dev_priv, true);
}
- intel_uncore_runtime_resume(dev_priv);
+ intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2969,7 +3137,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER),
DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE_EXT, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a67a63b5aa84..066fd2a12851 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,6 +55,7 @@
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include "i915_fixed.h"
#include "i915_params.h"
@@ -65,13 +66,14 @@
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
+#include "intel_frontbuffer.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
+#include "intel_uc.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
-#include "intel_uc.h"
#include "i915_gem.h"
#include "i915_gem_context.h"
@@ -91,8 +93,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20190207"
-#define DRIVER_TIMESTAMP 1549572331
+#define DRIVER_DATE "20190417"
+#define DRIVER_TIMESTAMP 1555492067
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -215,11 +217,12 @@ struct drm_i915_file_private {
*/
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
} mm;
+
struct idr context_idr;
+ struct mutex context_idr_lock; /* guards context_idr */
- struct intel_rps_client {
- atomic_t boosts;
- } rps_client;
+ struct idr vm_idr;
+ struct mutex vm_idr_lock; /* guards vm_idr */
unsigned int bsd_engine;
@@ -280,7 +283,8 @@ struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
struct intel_cdclk_state *cdclk_state);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
@@ -323,6 +327,7 @@ struct drm_i915_display_funcs {
/* display clock increase/decrease */
/* pll clock increase/decrease */
+ int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
* vblank evasion. The registers should then latch during the
@@ -371,14 +376,6 @@ enum i915_cache_level {
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
-enum fb_op_origin {
- ORIGIN_GTT,
- ORIGIN_CPU,
- ORIGIN_CS,
- ORIGIN_FLIP,
- ORIGIN_DIRTYFB,
-};
-
struct intel_fbc {
/* This is always the inner lock when overlapping with struct_mutex and
* it's the outer lock when overlapping with stolen_lock. */
@@ -508,7 +505,7 @@ struct i915_psr {
u32 debug;
bool sink_support;
- bool prepared, enabled;
+ bool enabled;
struct intel_dp *dp;
enum pipe pipe;
bool active;
@@ -526,16 +523,22 @@ struct i915_psr {
u16 su_x_granularity;
};
+/*
+ * Sorted by south display engine compatibility.
+ * If the new PCH comes with a south display engine that is not
+ * inherited from the latest item, please do not add it to the
+ * end. Instead, add it right after its "parent" PCH.
+ */
enum intel_pch {
+ PCH_NOP = -1, /* PCH without south display */
PCH_NONE = 0, /* No PCH present */
PCH_IBX, /* Ibexpeak PCH */
PCH_CPT, /* Cougarpoint/Pantherpoint PCH */
PCH_LPT, /* Lynxpoint/Wildcatpoint PCH */
PCH_SPT, /* Sunrisepoint PCH */
PCH_KBP, /* Kaby Lake PCH */
- PCH_CNP, /* Cannon Lake PCH */
+ PCH_CNP, /* Cannon/Comet Lake PCH */
PCH_ICP, /* Ice Lake PCH */
- PCH_NOP, /* PCH without south display */
};
enum intel_sbi_destination {
@@ -949,6 +952,7 @@ struct ddi_vbt_port_info {
#define HDMI_LEVEL_SHIFT_UNKNOWN 0xff
u8 hdmi_level_shift;
+ u8 present:1;
u8 supports_dvi:1;
u8 supports_hdmi:1;
u8 supports_dp:1;
@@ -1009,6 +1013,7 @@ struct intel_vbt_data {
enum psr_lines_to_wait lines_to_wait;
int tp1_wakeup_time_us;
int tp2_tp3_wakeup_time_us;
+ int psr2_tp2_tp3_wakeup_time_us;
} psr;
struct {
@@ -1130,6 +1135,7 @@ struct skl_wm_level {
u16 plane_res_b;
u8 plane_res_l;
bool plane_en;
+ bool ignore_lines;
};
/* Stores plane specific WM parameters */
@@ -1200,7 +1206,11 @@ enum intel_pipe_crc_source {
INTEL_PIPE_CRC_SOURCE_NONE,
INTEL_PIPE_CRC_SOURCE_PLANE1,
INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PF,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
INTEL_PIPE_CRC_SOURCE_PIPE,
/* TV/DP on pre-gen5/vlv can't use the pipe source. */
INTEL_PIPE_CRC_SOURCE_TV,
@@ -1468,13 +1478,6 @@ struct intel_cdclk_state {
struct drm_i915_private {
struct drm_device drm;
- struct kmem_cache *objects;
- struct kmem_cache *vmas;
- struct kmem_cache *luts;
- struct kmem_cache *requests;
- struct kmem_cache *dependencies;
- struct kmem_cache *priorities;
-
const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
struct intel_driver_caps caps;
@@ -1503,8 +1506,6 @@ struct drm_i915_private {
*/
resource_size_t stolen_usable_size; /* Total size minus reserved ranges */
- void __iomem *regs;
-
struct intel_uncore uncore;
struct i915_virtual_gpu vgpu;
@@ -1622,6 +1623,8 @@ struct drm_i915_private {
struct intel_cdclk_state actual;
/* The current hardware cdclk state */
struct intel_cdclk_state hw;
+
+ int force_min_cdclk;
} cdclk;
/**
@@ -1700,8 +1703,11 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;
- /* Cannot be determined by PCIID. You must always read a register. */
- u32 edram_cap;
+ /*
+ * edram size in MB.
+ * Cannot be determined by PCIID. You must always read a register.
+ */
+ u32 edram_size_mb;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1741,6 +1747,7 @@ struct drm_i915_private {
*
*/
struct mutex av_mutex;
+ int audio_power_refcount;
struct {
struct mutex mutex;
@@ -1831,13 +1838,16 @@ struct drm_i915_private {
bool valid;
bool is_16gb_dimm;
u8 num_channels;
- enum dram_rank {
- I915_DRAM_RANK_INVALID = 0,
- I915_DRAM_RANK_SINGLE,
- I915_DRAM_RANK_DUAL
- } rank;
+ u8 ranks;
u32 bandwidth_kbps;
bool symmetric_memory;
+ enum intel_dram_type {
+ INTEL_DRAM_UNKNOWN,
+ INTEL_DRAM_DDR3,
+ INTEL_DRAM_DDR4,
+ INTEL_DRAM_LPDDR3,
+ INTEL_DRAM_LPDDR4
+ } type;
} dram_info;
struct i915_runtime_pm runtime_pm;
@@ -1985,7 +1995,6 @@ struct drm_i915_private {
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
- void (*resume)(struct drm_i915_private *);
void (*cleanup_engine)(struct intel_engine_cs *engine);
struct i915_gt_timelines {
@@ -1997,6 +2006,7 @@ struct drm_i915_private {
struct list_head hwsp_free_list;
} timelines;
+ intel_engine_mask_t active_engines;
struct list_head active_rings;
struct list_head closed_vma;
u32 active_requests;
@@ -2011,12 +2021,6 @@ struct drm_i915_private {
intel_wakeref_t awake;
/**
- * The number of times we have woken up.
- */
- unsigned int epoch;
-#define I915_EPOCH_INVALID 0
-
- /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -2039,6 +2043,14 @@ struct drm_i915_private {
struct i915_vma *scratch;
} gt;
+ /* For i945gm vblank irq vs. C3 workaround */
+ struct {
+ struct work_struct work;
+ struct pm_qos_request pm_qos;
+ u8 c3_disable_latency;
+ u8 enabled;
+ } i945gm_vblank;
+
/* perform PHY state sanity checks? */
bool chv_phy_assert[2];
@@ -2055,18 +2067,25 @@ struct drm_i915_private {
struct i915_pmu pmu;
+ struct i915_hdcp_comp_master *hdcp_master;
+ bool hdcp_comp_added;
+
+ /* Mutex to protect the above hdcp component related values. */
+ struct mutex hdcp_comp_mutex;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
*/
};
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
struct dram_channel_info {
- struct info {
- u8 size, width;
- enum dram_rank rank;
- } l_info, s_info;
- enum dram_rank rank;
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
bool is_16gb_dimm;
};
@@ -2095,6 +2114,11 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
return container_of(huc, struct drm_i915_private, huc);
}
+static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
+{
+ return container_of(uncore, struct drm_i915_private, uncore);
+}
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -2104,7 +2128,7 @@ static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
- for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->ring_mask; \
+ for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
(tmp__) ? \
((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
0;)
@@ -2274,7 +2298,69 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_REVID(p, since, until) \
(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
-#define IS_PLATFORM(dev_priv, p) (INTEL_INFO(dev_priv)->platform_mask & BIT(p))
+static __always_inline unsigned int
+__platform_mask_index(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ /* Expand the platform_mask array if this fails. */
+ BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
+ pbits * ARRAY_SIZE(info->platform_mask));
+
+ return p / pbits;
+}
+
+static __always_inline unsigned int
+__platform_mask_bit(const struct intel_runtime_info *info,
+ enum intel_platform p)
+{
+ const unsigned int pbits =
+ BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;
+
+ return p % pbits + INTEL_SUBPLATFORM_BITS;
+}
+
+static inline u32
+intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
+{
+ const unsigned int pi = __platform_mask_index(info, p);
+
+ return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS;
+}
+
+static __always_inline bool
+IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+
+ return info->platform_mask[pi] & BIT(pb);
+}
+
+static __always_inline bool
+IS_SUBPLATFORM(const struct drm_i915_private *i915,
+ enum intel_platform p, unsigned int s)
+{
+ const struct intel_runtime_info *info = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(info, p);
+ const unsigned int pb = __platform_mask_bit(info, p);
+ const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
+ const u32 mask = info->platform_mask[pi];
+
+ BUILD_BUG_ON(!__builtin_constant_p(p));
+ BUILD_BUG_ON(!__builtin_constant_p(s));
+ BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);
+
+ /* Shift and test on the MSB position so sign flag can be used. */
+ return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
+}
+
+#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
#define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G)
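
Note: the IS_PLATFORM()/IS_SUBPLATFORM() helpers above pack a platform bit and a few subplatform bits into the same mask word and test both with a single AND by shifting each onto the MSB. The following freestanding sketch demonstrates that bit trick; the choice of 3 subplatform bits and the bit positions used in main() are assumptions for the example, not the driver's real values.

#include <stdint.h>
#include <stdio.h>

#define SUBPLATFORM_BITS 3u   /* assumed width of the subplatform field */

static int is_subplatform(uint32_t mask, unsigned int platform_bit,
                          unsigned int subplatform_bit)
{
        const unsigned int msb = 31;
        /* platform bits are stored above the subplatform field */
        const unsigned int pb = platform_bit + SUBPLATFORM_BITS;

        /* shift pb and subplatform_bit onto the MSB, AND, then test the MSB */
        return !!(((mask << (msb - pb)) &
                   (mask << (msb - subplatform_bit))) & (1u << msb));
}

int main(void)
{
        /* platform bit 2 (stored at position 2 + SUBPLATFORM_BITS) plus
         * subplatform bit 1 set in the same mask word */
        uint32_t mask = (1u << (2 + SUBPLATFORM_BITS)) | (1u << 1);

        printf("%d\n", is_subplatform(mask, 2, 1)); /* 1: both bits set */
        printf("%d\n", is_subplatform(mask, 2, 0)); /* 0: subplatform 0 clear */
        printf("%d\n", is_subplatform(mask, 3, 1)); /* 0: platform 3 clear */
        return 0;
}
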
@@ -2289,11 +2375,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_G45(dev_priv) IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv) IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv) (IS_G45(dev_priv) || IS_GM45(dev_priv))
-#define IS_PINEVIEW_G(dev_priv) (INTEL_DEVID(dev_priv) == 0xa001)
-#define IS_PINEVIEW_M(dev_priv) (INTEL_DEVID(dev_priv) == 0xa011)
#define IS_PINEVIEW(dev_priv) IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv) IS_PLATFORM(dev_priv, INTEL_G33)
-#define IS_IRONLAKE_M(dev_priv) (INTEL_DEVID(dev_priv) == 0x0046)
+#define IS_IRONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
+#define IS_IRONLAKE_M(dev_priv) \
+ (IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv) IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv) (IS_IVYBRIDGE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
@@ -2308,46 +2394,35 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_COFFEELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ICELAKE)
-#define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile)
+#define IS_ELKHARTLAKE(dev_priv) IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
(INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
-#define IS_BDW_ULT(dev_priv) (IS_BROADWELL(dev_priv) && \
- ((INTEL_DEVID(dev_priv) & 0xf) == 0x6 || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xb || \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe))
-/* ULX machines are also considered ULT. */
-#define IS_BDW_ULX(dev_priv) (IS_BROADWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xf) == 0xe)
+#define IS_BDW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
+#define IS_BDW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv) (IS_BROADWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_HSW_ULT(dev_priv) (IS_HASWELL(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0A00)
+#define IS_HSW_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv) (IS_HASWELL(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
-#define IS_HSW_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x0A0E || \
- INTEL_DEVID(dev_priv) == 0x0A1E)
-#define IS_SKL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x1906 || \
- INTEL_DEVID(dev_priv) == 0x1913 || \
- INTEL_DEVID(dev_priv) == 0x1916 || \
- INTEL_DEVID(dev_priv) == 0x1921 || \
- INTEL_DEVID(dev_priv) == 0x1926)
-#define IS_SKL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x190E || \
- INTEL_DEVID(dev_priv) == 0x1915 || \
- INTEL_DEVID(dev_priv) == 0x191E)
-#define IS_KBL_ULT(dev_priv) (INTEL_DEVID(dev_priv) == 0x5906 || \
- INTEL_DEVID(dev_priv) == 0x5913 || \
- INTEL_DEVID(dev_priv) == 0x5916 || \
- INTEL_DEVID(dev_priv) == 0x5921 || \
- INTEL_DEVID(dev_priv) == 0x5926)
-#define IS_KBL_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x590E || \
- INTEL_DEVID(dev_priv) == 0x5915 || \
- INTEL_DEVID(dev_priv) == 0x591E)
-#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
- INTEL_DEVID(dev_priv) == 0x87C0 || \
- INTEL_DEVID(dev_priv) == 0x87CA)
+#define IS_HSW_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
+#define IS_SKL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_SKL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_KBL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
+#define IS_KBL_ULX(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
+#define IS_AML_ULX(dev_priv) \
+ (IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
@@ -2358,16 +2433,16 @@ static inline unsigned int i915_sg_segment_size(void)
INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv) (IS_KABYLAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CFL_ULT(dev_priv) (IS_COFFEELAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x00F0) == 0x00A0)
+#define IS_CFL_ULT(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv) (IS_COFFEELAKE(dev_priv) && \
INTEL_INFO(dev_priv)->gt == 3)
-#define IS_CNL_WITH_PORT_F(dev_priv) (IS_CANNONLAKE(dev_priv) && \
- (INTEL_DEVID(dev_priv) & 0x0004) == 0x0004)
-#define IS_ICL_WITH_PORT_F(dev_priv) (IS_ICELAKE(dev_priv) && \
- INTEL_DEVID(dev_priv) != 0x8A51)
+#define IS_CNL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
+#define IS_ICL_WITH_PORT_F(dev_priv) \
+ IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)
@@ -2426,28 +2501,22 @@ static inline unsigned int i915_sg_segment_size(void)
#define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))
-#define ENGINE_MASK(id) BIT(id)
-#define RENDER_RING ENGINE_MASK(RCS)
-#define BSD_RING ENGINE_MASK(VCS)
-#define BLT_RING ENGINE_MASK(BCS)
-#define VEBOX_RING ENGINE_MASK(VECS)
-#define BSD2_RING ENGINE_MASK(VCS2)
-#define BSD3_RING ENGINE_MASK(VCS3)
-#define BSD4_RING ENGINE_MASK(VCS4)
-#define VEBOX2_RING ENGINE_MASK(VECS2)
-#define ALL_ENGINES (~0)
-
-#define HAS_ENGINE(dev_priv, id) \
- (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
-
-#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
-#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
-#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
-#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
+#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))
+
+#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({ \
+ unsigned int first__ = (first); \
+ unsigned int count__ = (count); \
+ (INTEL_INFO(dev_priv)->engine_mask & \
+ GENMASK(first__ + count__ - 1, first__)) >> first__; \
+})
+#define VDBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
+#define VEBOX_MASK(dev_priv) \
+ ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
#define HAS_LLC(dev_priv) (INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv) (INTEL_INFO(dev_priv)->has_snoop)
-#define HAS_EDRAM(dev_priv) (!!((dev_priv)->edram_cap & EDRAM_ENABLED))
+#define HAS_EDRAM(dev_priv) ((dev_priv)->edram_size_mb)
#define HAS_WT(dev_priv) ((IS_HASWELL(dev_priv) || \
IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))
@@ -2462,13 +2531,11 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)
-#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt)
+#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)
-#define HAS_FULL_48BIT_PPGTT(dev_priv) \
- (INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL_4LVL)
#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
GEM_BUG_ON((sizes) == 0); \
@@ -2512,6 +2579,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr)
+#define HAS_TRANSCODER_EDP(dev_priv) (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)
#define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv) (INTEL_INFO(dev_priv)->has_rc6p)
@@ -2558,6 +2626,7 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_KBP_DEVICE_ID_TYPE 0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE 0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80
+#define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000
@@ -2567,8 +2636,6 @@ static inline unsigned int i915_sg_segment_size(void)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
-#define HAS_PCH_CNP_LP(dev_priv) \
- (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
@@ -2800,8 +2867,6 @@ void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv);
-void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
@@ -2844,6 +2909,7 @@ static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
int pass = 2;
do {
rcu_barrier();
+ i915_gem_drain_freed_objects(i915);
drain_workqueue(i915->wq);
} while (--pass);
}
@@ -2974,6 +3040,14 @@ i915_coherent_map_type(struct drm_i915_private *i915)
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
enum i915_map_type type);
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size);
+static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
+{
+ __i915_gem_object_flush_map(obj, 0, obj->base.size);
+}
+
/**
* i915_gem_object_unpin_map - releases an earlier mapping
* @obj: the object to unmap
@@ -3002,7 +3076,12 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+static inline int __must_check
+i915_mutex_lock_interruptible(struct drm_device *dev)
+{
+ return mutex_lock_interruptible(&dev->struct_mutex);
+}
+
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -3016,22 +3095,14 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine);
-
-static inline bool i915_reset_backoff(struct i915_gpu_error *error)
-{
- return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
-}
-
-static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
+static inline bool __i915_wedged(struct i915_gpu_error *error)
{
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
-static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_failed(struct drm_i915_private *i915)
{
- return i915_reset_backoff(error) | i915_terminally_wedged(error);
+ return __i915_wedged(&i915->gpu_error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3056,14 +3127,13 @@ void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
unsigned int flags, long timeout);
-int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
+void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps);
+ long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
unsigned int flags,
const struct i915_sched_attr *attr);
@@ -3106,7 +3176,6 @@ struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv);
void i915_gem_restore_fences(struct drm_i915_private *dev_priv);
void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
@@ -3142,7 +3211,7 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
u32 *reg_state);
/* i915_gem_evict.c */
@@ -3457,18 +3526,21 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}
-#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
+#define __I915_REG_OP(op__, dev_priv__, ...) \
+ intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)
+
+#define I915_READ8(reg__) __I915_REG_OP(read8, dev_priv, (reg__))
+#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))
-#define I915_READ16(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+#define I915_READ16(reg__) __I915_REG_OP(read16, dev_priv, (reg__))
+#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
+#define I915_READ16_NOTRACE(reg__) __I915_REG_OP(read16_notrace, dev_priv, (reg__))
+#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))
-#define I915_READ(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
-#define I915_WRITE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg) dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val) dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__))
+#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
+#define I915_READ_NOTRACE(reg__) __I915_REG_OP(read_notrace, dev_priv, (reg__))
+#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))
/* Be very careful with read/write 64-bit values. On 32-bit machines, they
* will be implemented using 2 32-bit writes in an arbitrary order with
@@ -3484,46 +3556,12 @@ static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
*
* You have been warned.
*/
-#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
-
-#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
- u32 upper, lower, old_upper, loop = 0; \
- upper = I915_READ(upper_reg); \
- do { \
- old_upper = upper; \
- lower = I915_READ(lower_reg); \
- upper = I915_READ(upper_reg); \
- } while (upper != old_upper && loop++ < 2); \
- (u64)upper << 32 | lower; })
-
-#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
-#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
-
-#define __raw_read(x, s) \
-static inline uint##x##_t __raw_i915_read##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg) \
-{ \
- return read##s(dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-
-#define __raw_write(x, s) \
-static inline void __raw_i915_write##x(const struct drm_i915_private *dev_priv, \
- i915_reg_t reg, uint##x##_t val) \
-{ \
- write##s(val, dev_priv->regs + i915_mmio_reg_offset(reg)); \
-}
-__raw_read(8, b)
-__raw_read(16, w)
-__raw_read(32, l)
-__raw_read(64, q)
-
-__raw_write(8, b)
-__raw_write(16, w)
-__raw_write(32, l)
-__raw_write(64, q)
+#define I915_READ64(reg__) __I915_REG_OP(read64, dev_priv, (reg__))
+#define I915_READ64_2x32(lower_reg__, upper_reg__) \
+ __I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
-#undef __raw_read
-#undef __raw_write
+#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__))
+#define POSTING_READ16(reg__) __I915_REG_OP(posting_read16, dev_priv, (reg__))
/* These are untraced mmio-accessors that are only valid to be used inside
* critical sections, such as inside IRQ handlers, where forcewake is explicitly
@@ -3551,10 +3589,10 @@ __raw_write(64, q)
* therefore generally be serialised, by either the dev_priv->uncore.lock or
* a more localised lock guarding all access to that bank of registers.
*/
-#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
-#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
-#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
-#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
+#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
+#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
+#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
+#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
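The torn-read hazard that the 64-bit comment above warns about is easiest to see in the removed I915_READ64_2x32 body: sample the upper half before and after the lower half and retry if it changed, so a carry between the two 32-bit reads cannot produce a mixed value. A minimal userspace sketch of the same retry pattern (the fake register halves are stand-ins, not driver API):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a 64-bit hardware counter exposed as two 32-bit registers. */
static uint64_t fake_reg = 0x00000002fffffff0ULL;

static uint32_t read_lower(void) { return (uint32_t)fake_reg; }
static uint32_t read_upper(void) { return (uint32_t)(fake_reg >> 32); }

static uint64_t read64_2x32(void)
{
	uint32_t upper, lower, old_upper;
	int loop = 0;

	upper = read_upper();
	do {
		old_upper = upper;
		lower = read_lower();
		upper = read_upper();	/* re-check: did a carry race with us? */
	} while (upper != old_upper && loop++ < 2);

	return ((uint64_t)upper << 32) | lower;
}

int main(void)
{
	printf("0x%016llx\n", (unsigned long long)read64_2x32());
	return 0;
}

After this patch the same logic lives behind __I915_REG_OP(read64_2x32, ...), i.e. in the intel_uncore helpers rather than open-coded in this header.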
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3c724cc949a5..ad01c92aaf74 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,6 +42,7 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gemfs.h"
+#include "i915_globals.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
@@ -49,6 +50,7 @@
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
+#include "intel_pm.h"
#include "intel_workarounds.h"
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@ -100,48 +102,7 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->mm.object_stat_lock);
}
-static int
-i915_gem_wait_for_error(struct i915_gpu_error *error)
-{
- int ret;
-
- might_sleep();
-
- /*
- * Only wait 10 seconds for the gpu reset to complete to avoid hanging
- * userspace. If it takes that long something really bad is going on and
- * we should simply try to bail out and fail as gracefully as possible.
- */
- ret = wait_event_interruptible_timeout(error->reset_queue,
- !i915_reset_backoff(error),
- I915_RESET_TIMEOUT);
- if (ret == 0) {
- DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
- return -EIO;
- } else if (ret < 0) {
- return ret;
- } else {
- return 0;
- }
-}
-
-int i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- int ret;
-
- ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
- if (ret)
- return ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static u32 __i915_gem_park(struct drm_i915_private *i915)
+static void __i915_gem_park(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
@@ -152,9 +113,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
if (!i915->gt.awake)
- return I915_EPOCH_INVALID;
-
- GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+ return;
/*
* Be paranoid and flush a concurrent interrupt to make sure
@@ -183,7 +142,7 @@ static u32 __i915_gem_park(struct drm_i915_private *i915)
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
- return i915->gt.epoch;
+ i915_globals_park();
}
void i915_gem_park(struct drm_i915_private *i915)
@@ -225,8 +184,7 @@ void i915_gem_unpark(struct drm_i915_private *i915)
i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
GEM_BUG_ON(!i915->gt.awake);
- if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
- i915->gt.epoch = 1;
+ i915_globals_unpark();
intel_enable_gt_powersave(i915);
i915_update_gfx_val(i915);
@@ -351,7 +309,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
obj->cache_dirty = true;
}
-static void
+void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
@@ -459,8 +417,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
struct i915_request *rq;
@@ -478,27 +435,6 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
if (i915_request_completed(rq))
goto out;
- /*
- * This client is about to stall waiting for the GPU. In many cases
- * this is undesirable and limits the throughput of the system, as
- * many clients cannot continue processing user input/output whilst
- * blocked. RPS autotuning may take tens of milliseconds to respond
- * to the GPU load and thus incurs additional latency for the client.
- * We can circumvent that by promoting the GPU frequency to maximum
- * before we wait. This makes the GPU throttle up much more quickly
- * (good for benchmarks and user experience, e.g. window animations),
- * but at a cost of spending more power processing the workload
- * (bad for battery). Not all clients even want their results
- * immediately and for them we should just let the GPU select its own
- * frequency to maximise efficiency. To prevent a single client from
- * forcing the clocks too high for the whole system, we only allow
- * each client to waitboost once in a busy period.
- */
- if (rps_client && !i915_request_started(rq)) {
- if (INTEL_GEN(rq->i915) >= 6)
- gen6_rps_boost(rq, rps_client);
- }
-
timeout = i915_request_wait(rq, flags, timeout);
out:
@@ -511,8 +447,7 @@ out:
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
@@ -530,8 +465,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (i = 0; i < count; i++) {
timeout = i915_gem_object_wait_fence(shared[i],
- flags, timeout,
- rps_client);
+ flags, timeout);
if (timeout < 0)
break;
@@ -557,8 +491,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
}
if (excl && timeout >= 0)
- timeout = i915_gem_object_wait_fence(excl, flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_fence(excl, flags, timeout);
dma_fence_put(excl);
@@ -652,30 +585,19 @@ i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
* @obj: i915 gem object
* @flags: how to wait (under a lock, for all rendering or just for writes etc)
* @timeout: how long to wait
- * @rps_client: client (user process) to charge for any waitboosting
*/
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
unsigned int flags,
- long timeout,
- struct intel_rps_client *rps_client)
+ long timeout)
{
might_sleep();
GEM_BUG_ON(timeout < 0);
- timeout = i915_gem_object_wait_reservation(obj->resv,
- flags, timeout,
- rps_client);
+ timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
return timeout < 0 ? timeout : 0;
}
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
- struct drm_i915_file_private *fpriv = file->driver_priv;
-
- return &fpriv->rps_client;
-}
-
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -698,28 +620,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
return 0;
}
-void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
-{
- return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
-}
-
-void i915_gem_object_free(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- kmem_cache_free(dev_priv->objects, obj);
-}
-
static int
i915_gem_create(struct drm_file *file,
struct drm_i915_private *dev_priv,
- u64 size,
+ u64 *size_p,
u32 *handle_p)
{
struct drm_i915_gem_object *obj;
- int ret;
u32 handle;
+ u64 size;
+ int ret;
- size = roundup(size, PAGE_SIZE);
+ size = round_up(*size_p, PAGE_SIZE);
if (size == 0)
return -EINVAL;
@@ -735,6 +647,7 @@ i915_gem_create(struct drm_file *file,
return ret;
*handle_p = handle;
+ *size_p = size;
return 0;
}
@@ -747,7 +660,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, to_i915(dev),
- args->size, &args->handle);
+ &args->size, &args->handle);
}
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@ -772,7 +685,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
i915_gem_flush_free_objects(dev_priv);
return i915_gem_create(file, dev_priv,
- args->size, &args->handle);
+ &args->size, &args->handle);
}
static inline enum fb_op_origin
@@ -881,8 +794,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -934,8 +846,7 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -1197,8 +1108,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -1497,8 +1407,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto err;
@@ -1578,17 +1487,37 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
return -EINVAL;
- /* Having something in the write domain implies it's in the read
+ /*
+ * Having something in the write domain implies it's in the read
* domain, and only that read domain. Enforce that in the request.
*/
- if (write_domain != 0 && read_domains != write_domain)
+ if (write_domain && read_domains != write_domain)
return -EINVAL;
+ if (!read_domains)
+ return 0;
+
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
- /* Try to flush the object off the GPU without holding the lock.
+ /*
+ * Already in the desired write domain? Nothing for us to do!
+ *
+ * We apply a little bit of cunning here to catch a broader set of
+ * no-ops. If obj->write_domain is set, we must be in the same
+ * obj->read_domains, and only that domain. Therefore, if that
+ * obj->write_domain matches the request read_domains, we are
+ * already in the same read/write domain and can skip the operation,
+ * without having to further check the requested write_domain.
+ */
+ if (READ_ONCE(obj->write_domain) == read_domains) {
+ err = 0;
+ goto out;
+ }
+
+ /*
+ * Try to flush the object off the GPU without holding the lock.
* We will repeat the flush holding the lock in the normal manner
* to catch cases where we are gazumped.
*/
@@ -1596,8 +1525,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
(write_domain ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (err)
goto out;
@@ -1808,6 +1736,9 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
* 2 - Recognise WC as a separate cache domain so that we can flush the
* delayed writes via GTT before performing direct access via WC.
*
+ * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+ * pagefault; swapin remains transparent.
+ *
* Restrictions:
*
* * snoopable objects cannot be accessed via the GTT. It can cause machine
@@ -1835,7 +1766,7 @@ static unsigned int tile_row_pages(const struct drm_i915_gem_object *obj)
*/
int i915_gem_mmap_gtt_version(void)
{
- return 2;
+ return 3;
}
static inline struct i915_ggtt_view
@@ -1891,6 +1822,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ int srcu;
int ret;
/* Sanity check that we allow writing into this object */
@@ -1902,27 +1834,21 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
trace_i915_gem_object_fault(obj, page_offset, true, write);
- /* Try to flush the object off the GPU first without holding the lock.
- * Upon acquiring the lock, we will perform our sanity checks and then
- * repeat the flush holding the lock in the normal manner to catch cases
- * where we are gazumped.
- */
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
- if (ret)
- goto err;
-
ret = i915_gem_object_pin_pages(obj);
if (ret)
goto err;
wakeref = intel_runtime_pm_get(dev_priv);
+ srcu = i915_reset_trylock(dev_priv);
+ if (srcu < 0) {
+ ret = srcu;
+ goto err_rpm;
+ }
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto err_rpm;
+ goto err_reset;
/* Access to snoopable pages through the GTT is incoherent. */
if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
@@ -1930,7 +1856,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
-
/* Now pin it into the GTT as needed */
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
PIN_MAPPABLE |
@@ -1964,10 +1889,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
- if (ret)
- goto err_unpin;
-
ret = i915_vma_pin_fence(vma);
if (ret)
goto err_unpin;
@@ -1995,6 +1916,8 @@ err_unpin:
__i915_vma_unpin(vma);
err_unlock:
mutex_unlock(&dev->struct_mutex);
+err_reset:
+ i915_reset_unlock(dev_priv, srcu);
err_rpm:
intel_runtime_pm_put(dev_priv, wakeref);
i915_gem_object_unpin_pages(obj);
@@ -2007,7 +1930,7 @@ err:
* fail). But any other -EIO isn't ours (e.g. swap in failure)
* and so needs to be reported.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error))
+ if (!i915_terminally_wedged(dev_priv))
return VM_FAULT_SIGBUS;
/* else: fall through */
case -EAGAIN:
@@ -2280,7 +2203,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
struct page *page;
__i915_gem_object_release_shmem(obj, pages, true);
-
i915_gem_gtt_finish_pages(obj, pages);
if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2488,7 +2410,7 @@ rebuild_st:
do {
cond_resched();
page = shmem_read_mapping_page_gfp(mapping, i, gfp);
- if (likely(!IS_ERR(page)))
+ if (!IS_ERR(page))
break;
if (!*s) {
@@ -2622,6 +2544,14 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
lockdep_assert_held(&obj->mm.lock);
+ /* Make the pages coherent with the GPU (flushing any swapin). */
+ if (obj->cache_dirty) {
+ obj->write_domain = 0;
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(pages);
+ obj->cache_dirty = false;
+ }
+
obj->mm.get_page.sg_pos = pages->sgl;
obj->mm.get_page.sg_idx = 0;
@@ -2823,6 +2753,33 @@ err_unlock:
goto out_unlock;
}
+void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ unsigned long size)
+{
+ enum i915_map_type has_type;
+ void *ptr;
+
+ GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+ offset, size, obj->base.size));
+
+ obj->mm.dirty = true;
+
+ if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+ return;
+
+ ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+ if (has_type == I915_MAP_WC)
+ return;
+
+ drm_clflush_virt_range(ptr + offset, size);
+ if (size == obj->base.size) {
+ obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+ obj->cache_dirty = false;
+ }
+}
+
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg)
@@ -2832,7 +2789,11 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
u64 remain, offset;
unsigned int pg;
- /* Before we instantiate/pin the backing store for our use, we
+ /* Caller already validated user args */
+ GEM_BUG_ON(!access_ok(user_data, arg->size));
+
+ /*
+ * Before we instantiate/pin the backing store for our use, we
* can prepopulate the shmemfs filp efficiently using a write into
* the pagecache. We avoid the penalty of instantiating all the
* pages, important if the user is just writing to a few and never
@@ -2846,7 +2807,8 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (obj->mm.madv != I915_MADV_WILLNEED)
return -EFAULT;
- /* Before the pages are instantiated the object is treated as being
+ /*
+ * Before the pages are instantiated the object is treated as being
* in the CPU domain. The pages will be clflushed as required before
* use, and we can freely write into the pages directly. If userspace
* races pwrite with any other operation; corruption will ensue -
@@ -2862,20 +2824,32 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
struct page *page;
void *data, *vaddr;
int err;
+ char c;
len = PAGE_SIZE - pg;
if (len > remain)
len = remain;
+ /* Prefault the user page to reduce potential recursion */
+ err = __get_user(c, user_data);
+ if (err)
+ return err;
+
+ err = __get_user(c, user_data + len - 1);
+ if (err)
+ return err;
+
err = pagecache_write_begin(obj->base.filp, mapping,
offset, len, 0,
&page, &data);
if (err < 0)
return err;
- vaddr = kmap(page);
- unwritten = copy_from_user(vaddr + pg, user_data, len);
- kunmap(page);
+ vaddr = kmap_atomic(page);
+ unwritten = __copy_from_user_inatomic(vaddr + pg,
+ user_data,
+ len);
+ kunmap_atomic(vaddr);
err = pagecache_write_end(obj->base.filp, mapping,
offset, len, len - unwritten,
@@ -2883,8 +2857,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
+ /* We don't handle -EFAULT, leave it to the caller to check */
if (unwritten)
- return -EFAULT;
+ return -ENODEV;
remain -= len;
user_data += len;
@@ -2895,51 +2870,6 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
-static bool match_ring(struct i915_request *rq)
-{
- struct drm_i915_private *dev_priv = rq->i915;
- u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
-
- return ring == i915_ggtt_offset(rq->ring->vma);
-}
-
-struct i915_request *
-i915_gem_find_active_request(struct intel_engine_cs *engine)
-{
- struct i915_request *request, *active = NULL;
- unsigned long flags;
-
- /*
- * We are called by the error capture, reset and to dump engine
- * state at random points in time. In particular, note that neither is
- * crucially ordered with an interrupt. After a hang, the GPU is dead
- * and we assume that no more writes can happen (we waited long enough
- * for all writes that were in transaction to be flushed) - adding an
- * extra delay for a recent interrupt is pointless. Hence, we do
- * not need an engine->irq_seqno_barrier() before the seqno reads.
- * At all other times, we must assume the GPU is still running, but
- * we only care about the snapshot of this moment.
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
- list_for_each_entry(request, &engine->timeline.requests, link) {
- if (i915_request_completed(request))
- continue;
-
- if (!i915_request_started(request))
- break;
-
- /* More than one preemptible request may match! */
- if (!match_ring(request))
- break;
-
- active = request;
- break;
- }
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-
- return active;
-}
-
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -2964,180 +2894,105 @@ i915_gem_retire_work_handler(struct work_struct *work)
round_jiffies_up_relative(HZ));
}
-static void shrink_caches(struct drm_i915_private *i915)
+static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+ unsigned long mask)
{
- /*
- * kmem_cache_shrink() discards empty slabs and reorders partially
- * filled slabs to prioritise allocating from the mostly full slabs,
- * with the aim of reducing fragmentation.
- */
- kmem_cache_shrink(i915->priorities);
- kmem_cache_shrink(i915->dependencies);
- kmem_cache_shrink(i915->requests);
- kmem_cache_shrink(i915->luts);
- kmem_cache_shrink(i915->vmas);
- kmem_cache_shrink(i915->objects);
-}
-
-struct sleep_rcu_work {
- union {
- struct rcu_head rcu;
- struct work_struct work;
- };
- struct drm_i915_private *i915;
- unsigned int epoch;
-};
+ bool result = true;
-static inline bool
-same_epoch(struct drm_i915_private *i915, unsigned int epoch)
-{
/*
- * There is a small chance that the epoch wrapped since we started
- * sleeping. If we assume that epoch is at least a u32, then it will
- * take at least 2^32 * 100ms for it to wrap, or about 326 years.
+ * Even if we fail to switch, give whatever is running a small chance
+ * to save itself before we report the failure. Yes, this may be a
+ * false positive due to e.g. ENOMEM, caveat emptor!
*/
- return epoch == READ_ONCE(i915->gt.epoch);
-}
-
-static void __sleep_work(struct work_struct *work)
-{
- struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
- struct drm_i915_private *i915 = s->i915;
- unsigned int epoch = s->epoch;
-
- kfree(s);
- if (same_epoch(i915, epoch))
- shrink_caches(i915);
-}
+ if (i915_gem_switch_to_kernel_context(i915, mask))
+ result = false;
-static void __sleep_rcu(struct rcu_head *rcu)
-{
- struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
- struct drm_i915_private *i915 = s->i915;
-
- destroy_rcu_head(&s->rcu);
+ if (i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_FOR_IDLE_BOOST,
+ I915_GEM_IDLE_TIMEOUT))
+ result = false;
+
+ if (!result) {
+ if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
+ dev_err(i915->drm.dev,
+ "Failed to idle engines, declaring wedged!\n");
+ GEM_TRACE_DUMP();
+ }
- if (same_epoch(i915, s->epoch)) {
- INIT_WORK(&s->work, __sleep_work);
- queue_work(i915->wq, &s->work);
- } else {
- kfree(s);
+ /* Forcibly cancel outstanding work and leave the gpu quiet. */
+ i915_gem_set_wedged(i915);
}
-}
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
- return (READ_ONCE(i915->gt.active_requests) ||
- work_pending(&i915->gt.idle_work.work));
+ i915_retire_requests(i915); /* ensure we flush after wedging */
+ return result;
}
-static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+static bool load_power_context(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ /* Force loading the kernel context on all engines */
+ if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
+ return false;
- if (i915_terminally_wedged(&i915->gpu_error))
- return;
+ /*
+ * Immediately park the GPU so that we enable powersaving and
+ * treat it as idle. The next time we issue a request, we will
+ * unpark and start using the engine->pinned_default_state, otherwise
+ * it is in limbo and an early reset may fail.
+ */
+ __i915_gem_park(i915);
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
- GEM_BUG_ON(engine->last_retired_context !=
- to_intel_context(i915->kernel_context, engine));
- }
+ return true;
}
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), gt.idle_work.work);
- unsigned int epoch = I915_EPOCH_INVALID;
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
bool rearm_hangcheck;
- if (!READ_ONCE(dev_priv->gt.awake))
+ if (!READ_ONCE(i915->gt.awake))
return;
- if (READ_ONCE(dev_priv->gt.active_requests))
+ if (READ_ONCE(i915->gt.active_requests))
return;
- /*
- * Flush out the last user context, leaving only the pinned
- * kernel context resident. When we are idling on the kernel_context,
- * no more new requests (with a context switch) are emitted and we
- * can finally rest. A consequence is that the idle work handler is
- * always called at least twice before idling (and if the system is
- * idle that implies a round trip through the retire worker).
- */
- mutex_lock(&dev_priv->drm.struct_mutex);
- i915_gem_switch_to_kernel_context(dev_priv);
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
- READ_ONCE(dev_priv->gt.active_requests));
-
- /*
- * Wait for last execlists context complete, but bail out in case a
- * new request is submitted. As we don't trust the hardware, we
- * continue on if the wait times out. This is necessary to allow
- * the machine to suspend even if the hardware dies, and we will
- * try to recover in resume (after depriving the hardware of power,
- * it may be in a better mmod).
- */
- __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
- intel_engines_are_idle(dev_priv),
- I915_IDLE_ENGINES_TIMEOUT * 1000,
- 10, 500);
-
rearm_hangcheck =
- cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+ cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
- if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+ if (!mutex_trylock(&i915->drm.struct_mutex)) {
/* Currently busy, come back later */
- mod_delayed_work(dev_priv->wq,
- &dev_priv->gt.idle_work,
+ mod_delayed_work(i915->wq,
+ &i915->gt.idle_work,
msecs_to_jiffies(50));
goto out_rearm;
}
/*
- * New request retired after this work handler started, extend active
- * period until next instance of the work.
+ * Flush out the last user context, leaving only the pinned
+ * kernel context resident. Should anything unfortunate happen
+ * while we are idle (such as the GPU being power cycled), no users
+ * will be harmed.
*/
- if (new_requests_since_last_retire(dev_priv))
- goto out_unlock;
+ if (!work_pending(&i915->gt.idle_work.work) &&
+ !i915->gt.active_requests) {
+ ++i915->gt.active_requests; /* don't requeue idle */
- epoch = __i915_gem_park(dev_priv);
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
- assert_kernel_context_is_current(dev_priv);
+ if (!--i915->gt.active_requests) {
+ __i915_gem_park(i915);
+ rearm_hangcheck = false;
+ }
+ }
- rearm_hangcheck = false;
-out_unlock:
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ mutex_unlock(&i915->drm.struct_mutex);
out_rearm:
if (rearm_hangcheck) {
- GEM_BUG_ON(!dev_priv->gt.awake);
- i915_queue_hangcheck(dev_priv);
- }
-
- /*
- * When we are idle, it is an opportune time to reap our caches.
- * However, we have many objects that utilise RCU and the ordered
- * i915->wq that this work is executing on. To try and flush any
- * pending frees now we are idle, we first wait for an RCU grace
- * period, and then queue a task (that will run last on the wq) to
- * shrink and re-optimize the caches.
- */
- if (same_epoch(dev_priv, epoch)) {
- struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
- if (s) {
- init_rcu_head(&s->rcu);
- s->i915 = dev_priv;
- s->epoch = epoch;
- call_rcu(&s->rcu, __sleep_rcu);
- }
+ GEM_BUG_ON(!i915->gt.awake);
+ i915_queue_hangcheck(i915);
}
}
@@ -3171,7 +3026,7 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
list_del(&lut->obj_link);
list_del(&lut->ctx_link);
- kmem_cache_free(i915->luts, lut);
+ i915_lut_handle_free(lut);
__i915_gem_object_release_unless_active(obj);
}
@@ -3234,8 +3089,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_PRIORITY |
I915_WAIT_ALL,
- to_wait_timeout(args->timeout_ns),
- to_rps_client(file));
+ to_wait_timeout(args->timeout_ns));
if (args->timeout_ns > 0) {
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
@@ -3304,7 +3158,7 @@ wait_for_timelines(struct drm_i915_private *i915,
* stalls, so allow the gpu to boost to maximum clocks.
*/
if (flags & I915_WAIT_FOR_IDLE_BOOST)
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
timeout = i915_request_wait(rq, flags, timeout);
i915_request_put(rq);
@@ -3340,19 +3194,11 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915,
lockdep_assert_held(&i915->drm.struct_mutex);
- if (GEM_SHOW_DEBUG() && !timeout) {
- /* Presume that timeout was non-zero to begin with! */
- dev_warn(&i915->drm.pdev->dev,
- "Missed idle-completion interrupt!\n");
- GEM_TRACE_DUMP();
- }
-
err = wait_for_engines(i915);
if (err)
return err;
i915_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests);
}
return 0;
@@ -3399,8 +3245,7 @@ i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3462,8 +3307,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3578,8 +3422,7 @@ restart:
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3717,8 +3560,7 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
goto out;
@@ -3844,8 +3686,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED |
(write ? I915_WAIT_ALL : 0),
- MAX_SCHEDULE_TIMEOUT,
- NULL);
+ MAX_SCHEDULE_TIMEOUT);
if (ret)
return ret;
@@ -3891,8 +3732,9 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
long ret;
/* ABI: return -EIO if already wedged */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
- return -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
+ return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@ -3968,7 +3810,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
}
vma = i915_vma_instance(obj, vm, view);
- if (unlikely(IS_ERR(vma)))
+ if (IS_ERR(vma))
return vma;
if (i915_vma_misplaced(vma, size, alignment, flags)) {
@@ -4000,22 +3842,19 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
return vma;
}
-static __always_inline unsigned int __busy_read_flag(unsigned int id)
+static __always_inline u32 __busy_read_flag(u8 id)
{
- /* Note that we could alias engines in the execbuf API, but
- * that would be very unwise as it prevents userspace from
- * fine control over engine selection. Ahem.
- *
- * This should be something like EXEC_MAX_ENGINE instead of
- * I915_NUM_ENGINES.
- */
- BUILD_BUG_ON(I915_NUM_ENGINES > 16);
- return 0x10000 << id;
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffff0000u;
+
+ GEM_BUG_ON(id >= 16);
+ return 0x10000u << id;
}
-static __always_inline unsigned int __busy_write_id(unsigned int id)
+static __always_inline u32 __busy_write_id(u8 id)
{
- /* The uABI guarantees an active writer is also amongst the read
+ /*
+ * The uABI guarantees an active writer is also amongst the read
* engines. This would be true if we accessed the activity tracking
* under the lock, but as we perform the lookup of the object and
* its activity locklessly we can not guarantee that the last_write
@@ -4023,16 +3862,19 @@ static __always_inline unsigned int __busy_write_id(unsigned int id)
* last_read - hence we always set both read and write busy for
* last_write.
*/
- return id | __busy_read_flag(id);
+ if (id == (u8)I915_ENGINE_CLASS_INVALID)
+ return 0xffffffffu;
+
+ return (id + 1) | __busy_read_flag(id);
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence,
- unsigned int (*flag)(unsigned int id))
+__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
{
- struct i915_request *rq;
+ const struct i915_request *rq;
- /* We have to check the current hw status of the fence as the uABI
+ /*
+ * We have to check the current hw status of the fence as the uABI
* guarantees forward progress. We could rely on the idle worker
* to eventually flush us, but to minimise latency just ask the
* hardware.
@@ -4043,11 +3885,13 @@ __busy_set_if_active(const struct dma_fence *fence,
return 0;
/* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, struct i915_request, fence);
+ rq = container_of(fence, const struct i915_request, fence);
if (i915_request_completed(rq))
return 0;
- return flag(rq->engine->uabi_id);
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
}
static __always_inline unsigned int
@@ -4081,7 +3925,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
if (!obj)
goto out;
- /* A discrepancy here is that we do not report the status of
+ /*
+ * A discrepancy here is that we do not report the status of
* non-i915 fences, i.e. even though we may report the object as idle,
* a call to set-domain may still stall waiting for foreign rendering.
* This also means that wait-ioctl may report an object as busy,
@@ -4281,7 +4126,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return ERR_PTR(-ENOMEM);
@@ -4414,7 +4259,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(i915, obj->base.size);
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -4537,7 +4382,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
/*
* As we have just resumed the machine and woken the device up from
@@ -4545,7 +4390,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
* back to defaults, recovering from whatever wedged state we left it
* in and so worth trying to use the device once more.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
i915_gem_unset_wedged(i915);
/*
@@ -4558,7 +4403,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
*/
intel_engines_sanitize(i915, false);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(i915, wakeref);
mutex_lock(&i915->drm.struct_mutex);
@@ -4566,15 +4411,13 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
mutex_unlock(&i915->drm.struct_mutex);
}
-int i915_gem_suspend(struct drm_i915_private *i915)
+void i915_gem_suspend(struct drm_i915_private *i915)
{
intel_wakeref_t wakeref;
- int ret;
GEM_TRACE("\n");
wakeref = intel_runtime_pm_get(i915);
- intel_suspend_gt_powersave(i915);
flush_workqueue(i915->wq);
@@ -4589,22 +4432,7 @@ int i915_gem_suspend(struct drm_i915_private *i915)
* state. Fortunately, the kernel_context is disposable and we do
* not rely on its state.
*/
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- ret = i915_gem_switch_to_kernel_context(i915);
- if (ret)
- goto err_unlock;
-
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_FOR_IDLE_BOOST,
- MAX_SCHEDULE_TIMEOUT);
- if (ret && ret != -EIO)
- goto err_unlock;
-
- assert_kernel_context_is_current(i915);
- }
- i915_retire_requests(i915); /* ensure we flush after wedging */
+ switch_to_kernel_context_sync(i915, i915->gt.active_engines);
mutex_unlock(&i915->drm.struct_mutex);
i915_reset_flush(i915);
@@ -4617,23 +4445,15 @@ int i915_gem_suspend(struct drm_i915_private *i915)
*/
drain_delayed_work(&i915->gt.idle_work);
- intel_uc_suspend(i915);
-
/*
* Assert that we successfully flushed all the work and
* reset the GPU back to its idle, low power state.
*/
- WARN_ON(i915->gt.awake);
- if (WARN_ON(!intel_engines_are_idle(i915)))
- i915_gem_set_wedged(i915); /* no hope, discard everything */
+ GEM_BUG_ON(i915->gt.awake);
- intel_runtime_pm_put(i915, wakeref);
- return 0;
+ intel_uc_suspend(i915);
-err_unlock:
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
- return ret;
}
void i915_gem_suspend_late(struct drm_i915_private *i915)
@@ -4683,7 +4503,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
WARN_ON(i915->gt.awake);
mutex_lock(&i915->drm.struct_mutex);
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
i915_gem_restore_gtt_mappings(i915);
i915_gem_restore_fences(i915);
@@ -4693,7 +4513,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
- i915->gt.resume(i915);
+ intel_gt_resume(i915);
if (i915_gem_init_hw(i915))
goto err_wedged;
@@ -4701,17 +4521,18 @@ void i915_gem_resume(struct drm_i915_private *i915)
intel_uc_resume(i915);
/* Always reload a context for powersaving. */
- if (i915_gem_switch_to_kernel_context(i915))
+ if (!load_power_context(i915))
goto err_wedged;
out_unlock:
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
mutex_unlock(&i915->drm.struct_mutex);
return;
err_wedged:
- if (!i915_terminally_wedged(&i915->gpu_error)) {
- DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+ if (!i915_reset_failed(i915)) {
+ dev_err(i915->drm.dev,
+ "Failed to re-initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(i915);
}
goto out_unlock;
@@ -4781,6 +4602,8 @@ static int __i915_gem_restart_engines(void *data)
}
}
+ intel_engines_set_scheduler_caps(i915);
+
return 0;
}
@@ -4791,7 +4614,7 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
dev_priv->gt.last_init_time = ktime_get();
/* Double layer security blanket, see i915_gem_init() */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
@@ -4816,10 +4639,9 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
init_unused_rings(dev_priv);
BUG_ON(!dev_priv->kernel_context);
- if (i915_terminally_wedged(&dev_priv->gpu_error)) {
- ret = -EIO;
+ ret = i915_terminally_wedged(dev_priv);
+ if (ret)
goto out;
- }
ret = i915_ppgtt_init_hw(dev_priv);
if (ret) {
@@ -4847,14 +4669,14 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
if (ret)
goto cleanup_uc;
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return 0;
cleanup_uc:
intel_uc_fini_hw(dev_priv);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -4864,7 +4686,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
struct i915_gem_context *ctx;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- int err;
+ int err = 0;
/*
* As we reset the gpu during very early sanitisation, the current
@@ -4897,36 +4719,27 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
}
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- goto err_active;
-
- if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
- i915_gem_set_wedged(i915);
- err = -EIO; /* Caller will declare us wedged */
+ /* Flush the default context image to memory, and enable powersaving. */
+ if (!load_power_context(i915)) {
+ err = -EIO;
goto err_active;
}
- assert_kernel_context_is_current(i915);
-
- /*
- * Immediately park the GPU so that we enable powersaving and
- * treat it as idle. The next time we issue a request, we will
- * unpark and start using the engine->pinned_default_state, otherwise
- * it is in limbo and an early reset may fail.
- */
- __i915_gem_park(i915);
-
for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
struct i915_vma *state;
void *vaddr;
- GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+ ce = intel_context_lookup(ctx, engine);
+ if (!ce)
+ continue;
- state = to_intel_context(ctx, engine)->state;
+ state = ce->state;
if (!state)
continue;
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
/*
* As we will hold a reference to the logical state, it will
* not be torn down with the context, and importantly the
@@ -4944,6 +4757,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
goto err_active;
engine->default_state = i915_gem_object_get(state->obj);
+ i915_gem_object_set_cache_coherency(engine->default_state,
+ I915_CACHE_LLC);
/* Check we can acquire the image of the context state */
vaddr = i915_gem_object_pin_map(engine->default_state,
@@ -4982,19 +4797,10 @@ out_ctx:
err_active:
/*
* If we have to abandon now, we expect the engines to be idle
- * and ready to be torn-down. First try to flush any remaining
- * request, ensure we are pointing at the kernel context and
- * then remove it.
+ * and ready to be torn-down. The quickest way we can accomplish
+ * this is by declaring ourselves wedged.
*/
- if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
- goto out_ctx;
-
- if (WARN_ON(i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT)))
- goto out_ctx;
-
- i915_gem_contexts_lost(i915);
+ i915_gem_set_wedged(i915);
goto out_ctx;
}
@@ -5047,13 +4853,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
- if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
- dev_priv->gt.resume = intel_lr_context_resume;
+ if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
- } else {
- dev_priv->gt.resume = intel_legacy_submission_resume;
+ else
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
- }
i915_timelines_init(dev_priv);
@@ -5076,7 +4879,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
* just magically go away.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = i915_gem_init_ggtt(dev_priv);
if (ret) {
@@ -5138,7 +4941,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
goto err_init_hw;
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
return 0;
@@ -5152,7 +4955,7 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
err_init_hw:
mutex_unlock(&dev_priv->drm.struct_mutex);
- WARN_ON(i915_gem_suspend(dev_priv));
+ i915_gem_suspend(dev_priv);
i915_gem_suspend_late(dev_priv);
i915_gem_drain_workqueue(dev_priv);
@@ -5173,7 +4976,7 @@ err_scratch:
i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
mutex_unlock(&dev_priv->drm.struct_mutex);
err_uc_misc:
@@ -5192,7 +4995,7 @@ err_uc_misc:
* wedged. But we only want to do this where the GPU is angry,
* for all other failure, such as an allocation failure, bail.
*/
- if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+ if (!i915_reset_failed(dev_priv)) {
i915_load_error(dev_priv,
"Failed to initialize GPU, declaring it wedged!\n");
i915_gem_set_wedged(dev_priv);
@@ -5305,36 +5108,7 @@ static void i915_gem_init__mm(struct drm_i915_private *i915)
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- int err = -ENOMEM;
-
- dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->objects)
- goto err_out;
-
- dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->vmas)
- goto err_objects;
-
- dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
- if (!dev_priv->luts)
- goto err_vmas;
-
- dev_priv->requests = KMEM_CACHE(i915_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!dev_priv->requests)
- goto err_luts;
-
- dev_priv->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!dev_priv->dependencies)
- goto err_requests;
-
- dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!dev_priv->priorities)
- goto err_dependencies;
+ int err;
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
@@ -5348,6 +5122,7 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
mutex_init(&dev_priv->gpu_error.wedge_mutex);
+ init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
@@ -5358,19 +5133,6 @@ int i915_gem_init_early(struct drm_i915_private *dev_priv)
DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
return 0;
-
-err_dependencies:
- kmem_cache_destroy(dev_priv->dependencies);
-err_requests:
- kmem_cache_destroy(dev_priv->requests);
-err_luts:
- kmem_cache_destroy(dev_priv->luts);
-err_vmas:
- kmem_cache_destroy(dev_priv->vmas);
-err_objects:
- kmem_cache_destroy(dev_priv->objects);
-err_out:
- return err;
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
@@ -5380,15 +5142,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
WARN_ON(dev_priv->mm.object_count);
- kmem_cache_destroy(dev_priv->priorities);
- kmem_cache_destroy(dev_priv->dependencies);
- kmem_cache_destroy(dev_priv->requests);
- kmem_cache_destroy(dev_priv->luts);
- kmem_cache_destroy(dev_priv->vmas);
- kmem_cache_destroy(dev_priv->objects);
-
- /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
- rcu_barrier();
+ cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
i915_gemfs_fini(dev_priv);
}
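
The i915_gem_init_early()/i915_gem_cleanup_early() hunks above drop the per-device kmem_cache fields; allocation moves behind module-scope helpers instead (the LUT cache reappears further down as global.slab_luts with i915_lut_handle_alloc()/i915_lut_handle_free()). Below is a rough, hypothetical userspace sketch of that wrapper pattern only -- a file-scope pool plus tiny alloc/free helpers so call sites no longer need the device pointer -- not the driver's slab code; all names in it are invented.

/* lut_pool.c -- illustration of the alloc/free wrapper pattern, not driver code. */
#include <stdio.h>
#include <stdlib.h>

struct lut_handle {                    /* stand-in for the per-object lookup entry */
	unsigned int handle;
	void *obj;
};

/* One module-scope pool replaces a per-device cache pointer. */
static struct {
	size_t live;                   /* number of outstanding allocations */
} lut_pool;

static struct lut_handle *lut_handle_alloc(void)
{
	struct lut_handle *lut = calloc(1, sizeof(*lut));

	if (lut)
		lut_pool.live++;
	return lut;
}

static void lut_handle_free(struct lut_handle *lut)
{
	if (!lut)
		return;
	lut_pool.live--;
	free(lut);
}

int main(void)
{
	struct lut_handle *lut = lut_handle_alloc();

	if (!lut)
		return 1;
	lut->handle = 42;
	lut_handle_free(lut);
	printf("outstanding: %zu\n", lut_pool.live);
	return 0;
}

Call sites only ever see the two helpers, which is what lets the cache itself move out of dev_priv.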
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index b0e4b976880c..9074eb1e843f 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -73,14 +73,14 @@ struct drm_i915_private;
#define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif
-#define I915_NUM_ENGINES 8
+#define I915_GEM_IDLE_TIMEOUT (HZ / 5)
void i915_gem_park(struct drm_i915_private *i915);
void i915_gem_unpark(struct drm_i915_private *i915);
static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
{
- if (atomic_inc_return(&t->count) == 1)
+ if (!atomic_fetch_inc(&t->count))
tasklet_unlock_wait(t);
}
@@ -89,4 +89,9 @@ static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
return !atomic_read(&t->count);
}
+static inline bool __tasklet_enable(struct tasklet_struct *t)
+{
+ return atomic_dec_and_test(&t->count);
+}
+
#endif /* __I915_GEM_H__ */
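
The __tasklet_disable_sync_once() change above keys the wait off the old counter value (!atomic_fetch_inc()) rather than the post-increment result, and the new __tasklet_enable() pairs it with atomic_dec_and_test(). A minimal C11 sketch of that fetch-op pairing, assuming a plain userspace counter rather than the kernel tasklet API:

/* disable_count.c -- standalone illustration of the counting idiom. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int disable_count;               /* 0 means "enabled" */

static void wait_for_quiescence(void)
{
	/* stand-in for tasklet_unlock_wait(); nothing to wait for here */
}

/* Only the first disabler (old count 0) waits for in-flight work. */
static void disable_sync_once(void)
{
	if (atomic_fetch_add(&disable_count, 1) == 0)
		wait_for_quiescence();
}

/* True only when the last disable is dropped, i.e. the count returns to 0. */
static bool do_enable(void)
{
	return atomic_fetch_sub(&disable_count, 1) == 1;
}

int main(void)
{
	disable_sync_once();
	disable_sync_once();
	printf("re-enabled after first put: %d\n", do_enable()); /* 0 */
	printf("re-enabled after last put:  %d\n", do_enable()); /* 1 */
	return 0;
}

Testing the fetched (old) value keeps both transitions -- 0 to 1 on disable, 1 to 0 on enable -- visible to exactly one caller.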
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 280813a4bf82..dd728b26b5aa 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,12 +88,32 @@
#include <linux/log2.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_trace.h"
+#include "i915_user_extensions.h"
#include "intel_lrc_reg.h"
#include "intel_workarounds.h"
+#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE (1 << 1)
+#define I915_CONTEXT_PARAM_VM 0x9
+
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
+static struct i915_global_gem_context {
+ struct i915_global base;
+ struct kmem_cache *slab_luts;
+} global;
+
+struct i915_lut_handle *i915_lut_handle_alloc(void)
+{
+ return kmem_cache_alloc(global.slab_luts, GFP_KERNEL);
+}
+
+void i915_lut_handle_free(struct i915_lut_handle *lut)
+{
+ return kmem_cache_free(global.slab_luts, lut);
+}
+
static void lut_close(struct i915_gem_context *ctx)
{
struct i915_lut_handle *lut, *ln;
@@ -102,14 +122,17 @@ static void lut_close(struct i915_gem_context *ctx)
list_for_each_entry_safe(lut, ln, &ctx->handles_list, ctx_link) {
list_del(&lut->obj_link);
- kmem_cache_free(ctx->i915->luts, lut);
+ i915_lut_handle_free(lut);
}
+ INIT_LIST_HEAD(&ctx->handles_list);
rcu_read_lock();
radix_tree_for_each_slot(slot, &ctx->handles_vma, &iter, 0) {
struct i915_vma *vma = rcu_dereference_raw(*slot);
radix_tree_iter_delete(&ctx->handles_vma, &iter, slot);
+
+ vma->open_count--;
__i915_gem_object_release_unless_active(vma->obj);
}
rcu_read_unlock();
@@ -206,25 +229,26 @@ static void release_hw_id(struct i915_gem_context *ctx)
static void i915_gem_context_free(struct i915_gem_context *ctx)
{
- unsigned int n;
+ struct intel_context *it, *n;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ GEM_BUG_ON(!list_empty(&ctx->active_engines));
release_hw_id(ctx);
i915_ppgtt_put(ctx->ppgtt);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
- struct intel_context *ce = &ctx->__engine[n];
+ rbtree_postorder_for_each_entry_safe(it, n, &ctx->hw_contexts, node)
+ intel_context_put(it);
- if (ce->ops)
- ce->ops->destroy(ce);
- }
+ if (ctx->timeline)
+ i915_timeline_put(ctx->timeline);
kfree(ctx->name);
put_pid(ctx->pid);
list_del(&ctx->link);
+ mutex_destroy(&ctx->mutex);
kfree_rcu(ctx, rcu);
}
@@ -291,8 +315,6 @@ static void context_close(struct i915_gem_context *ctx)
* the ppgtt).
*/
lut_close(ctx);
- if (ctx->ppgtt)
- i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx);
@@ -307,7 +329,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT;
- if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
+ if (ppgtt && i915_vm_is_4lvl(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
@@ -322,134 +344,115 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
return desc;
}
-static void intel_context_retire(struct i915_active_request *active,
- struct i915_request *rq)
-{
- struct intel_context *ce =
- container_of(active, typeof(*ce), active_tracker);
-
- intel_context_unpin(ce);
-}
-
-void
-intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
-{
- ce->gem_context = ctx;
-
- INIT_LIST_HEAD(&ce->signal_link);
- INIT_LIST_HEAD(&ce->signals);
-
- /* Use the whole device by default */
- ce->sseu = intel_device_default_sseu(ctx->i915);
-
- i915_active_request_init(&ce->active_tracker,
- NULL, intel_context_retire);
-}
-
static struct i915_gem_context *
-__create_hw_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+__create_context(struct drm_i915_private *dev_priv)
{
struct i915_gem_context *ctx;
- unsigned int n;
- int ret;
+ int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (ctx == NULL)
+ if (!ctx)
return ERR_PTR(-ENOMEM);
kref_init(&ctx->ref);
list_add_tail(&ctx->link, &dev_priv->contexts.list);
ctx->i915 = dev_priv;
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
- /* Default context will never have a file_priv */
- ret = DEFAULT_CONTEXT_HANDLE;
- if (file_priv) {
- ret = idr_alloc(&file_priv->context_idr, ctx,
- DEFAULT_CONTEXT_HANDLE, 0, GFP_KERNEL);
- if (ret < 0)
- goto err_lut;
- }
- ctx->user_handle = ret;
-
- ctx->file_priv = file_priv;
- if (file_priv) {
- ctx->pid = get_task_pid(current, PIDTYPE_PID);
- ctx->name = kasprintf(GFP_KERNEL, "%s[%d]/%x",
- current->comm,
- pid_nr(ctx->pid),
- ctx->user_handle);
- if (!ctx->name) {
- ret = -ENOMEM;
- goto err_pid;
- }
- }
-
/* NB: Mark all slices as needing a remap so that when the context first
* loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
i915_gem_context_set_bannable(ctx);
+ i915_gem_context_set_recoverable(ctx);
+
ctx->ring_size = 4 * PAGE_SIZE;
ctx->desc_template =
default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
+ ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
+
return ctx;
+}
-err_pid:
- put_pid(ctx->pid);
- idr_remove(&file_priv->context_idr, ctx->user_handle);
-err_lut:
- context_close(ctx);
- return ERR_PTR(ret);
+static struct i915_hw_ppgtt *
+__set_ppgtt(struct i915_gem_context *ctx, struct i915_hw_ppgtt *ppgtt)
+{
+ struct i915_hw_ppgtt *old = ctx->ppgtt;
+
+ ctx->ppgtt = i915_ppgtt_get(ppgtt);
+ ctx->desc_template = default_desc_template(ctx->i915, ppgtt);
+
+ return old;
}
-static void __destroy_hw_context(struct i915_gem_context *ctx,
- struct drm_i915_file_private *file_priv)
+static void __assign_ppgtt(struct i915_gem_context *ctx,
+ struct i915_hw_ppgtt *ppgtt)
{
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- context_close(ctx);
+ if (ppgtt == ctx->ppgtt)
+ return;
+
+ ppgtt = __set_ppgtt(ctx, ppgtt);
+ if (ppgtt)
+ i915_ppgtt_put(ppgtt);
}
static struct i915_gem_context *
-i915_gem_create_context(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv)
+i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
{
struct i915_gem_context *ctx;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ BUILD_BUG_ON(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &
+ ~I915_CONTEXT_CREATE_FLAGS_UNKNOWN);
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE &&
+ !HAS_EXECLISTS(dev_priv))
+ return ERR_PTR(-EINVAL);
+
/* Reap the most stale context */
contexts_free_first(dev_priv);
- ctx = __create_hw_context(dev_priv, file_priv);
+ ctx = __create_context(dev_priv);
if (IS_ERR(ctx))
return ctx;
if (HAS_FULL_PPGTT(dev_priv)) {
struct i915_hw_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(dev_priv, file_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- __destroy_hw_context(ctx, file_priv);
+ context_close(ctx);
return ERR_CAST(ppgtt);
}
- ctx->ppgtt = ppgtt;
- ctx->desc_template = default_desc_template(dev_priv, ppgtt);
+ __assign_ppgtt(ctx, ppgtt);
+ i915_ppgtt_put(ppgtt);
+ }
+
+ if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) {
+ struct i915_timeline *timeline;
+
+ timeline = i915_timeline_create(dev_priv, NULL);
+ if (IS_ERR(timeline)) {
+ context_close(ctx);
+ return ERR_CAST(timeline);
+ }
+
+ ctx->timeline = timeline;
}
trace_i915_context_create(ctx);
@@ -480,10 +483,17 @@ i915_gem_context_create_gvt(struct drm_device *dev)
if (ret)
return ERR_PTR(ret);
- ctx = i915_gem_create_context(to_i915(dev), NULL);
+ ctx = i915_gem_create_context(to_i915(dev), 0);
if (IS_ERR(ctx))
goto out;
+ ret = i915_gem_context_pin_hw_id(ctx);
+ if (ret) {
+ context_close(ctx);
+ ctx = ERR_PTR(ret);
+ goto out;
+ }
+
ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
@@ -516,7 +526,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
struct i915_gem_context *ctx;
int err;
- ctx = i915_gem_create_context(i915, NULL);
+ ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx))
return ctx;
@@ -552,7 +562,7 @@ static void init_contexts(struct drm_i915_private *i915)
static bool needs_preempt_context(struct drm_i915_private *i915)
{
- return HAS_LOGICAL_RING_PREEMPTION(i915);
+ return HAS_EXECLISTS(i915);
}
int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
@@ -563,7 +573,7 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
GEM_BUG_ON(dev_priv->kernel_context);
GEM_BUG_ON(dev_priv->preempt_context);
- intel_engine_init_ctx_wa(dev_priv->engine[RCS]);
+ intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
init_contexts(dev_priv);
/* lowest priority; idle task */
@@ -624,31 +634,87 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
static int context_idr_cleanup(int id, void *p, void *data)
{
- struct i915_gem_context *ctx = p;
+ context_close(p);
+ return 0;
+}
- context_close(ctx);
+static int vm_idr_cleanup(int id, void *p, void *data)
+{
+ i915_ppgtt_put(p);
return 0;
}
+static int gem_context_register(struct i915_gem_context *ctx,
+ struct drm_i915_file_private *fpriv)
+{
+ int ret;
+
+ ctx->file_priv = fpriv;
+ if (ctx->ppgtt)
+ ctx->ppgtt->vm.file = fpriv;
+
+ ctx->pid = get_task_pid(current, PIDTYPE_PID);
+ ctx->name = kasprintf(GFP_KERNEL, "%s[%d]",
+ current->comm, pid_nr(ctx->pid));
+ if (!ctx->name) {
+ ret = -ENOMEM;
+ goto err_pid;
+ }
+
+ /* And finally expose ourselves to userspace via the idr */
+ mutex_lock(&fpriv->context_idr_lock);
+ ret = idr_alloc(&fpriv->context_idr, ctx, 0, 0, GFP_KERNEL);
+ mutex_unlock(&fpriv->context_idr_lock);
+ if (ret >= 0)
+ goto out;
+
+ kfree(fetch_and_zero(&ctx->name));
+err_pid:
+ put_pid(fetch_and_zero(&ctx->pid));
+out:
+ return ret;
+}
+
int i915_gem_context_open(struct drm_i915_private *i915,
struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_gem_context *ctx;
+ int err;
+
+ mutex_init(&file_priv->context_idr_lock);
+ mutex_init(&file_priv->vm_idr_lock);
idr_init(&file_priv->context_idr);
+ idr_init_base(&file_priv->vm_idr, 1);
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file_priv);
+ ctx = i915_gem_create_context(i915, 0);
mutex_unlock(&i915->drm.struct_mutex);
if (IS_ERR(ctx)) {
- idr_destroy(&file_priv->context_idr);
- return PTR_ERR(ctx);
+ err = PTR_ERR(ctx);
+ goto err;
}
+ err = gem_context_register(ctx, file_priv);
+ if (err < 0)
+ goto err_ctx;
+
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+ GEM_BUG_ON(err > 0);
return 0;
+
+err_ctx:
+ mutex_lock(&i915->drm.struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&i915->drm.struct_mutex);
+err:
+ idr_destroy(&file_priv->vm_idr);
+ idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+ mutex_destroy(&file_priv->context_idr_lock);
+ return err;
}
void i915_gem_context_close(struct drm_file *file)
@@ -659,6 +725,100 @@ void i915_gem_context_close(struct drm_file *file)
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
+ mutex_destroy(&file_priv->context_idr_lock);
+
+ idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
+ idr_destroy(&file_priv->vm_idr);
+ mutex_destroy(&file_priv->vm_idr_lock);
+}
+
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_vm_control *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ if (!HAS_FULL_PPGTT(i915))
+ return -ENODEV;
+
+ if (args->flags)
+ return -EINVAL;
+
+ ppgtt = i915_ppgtt_create(i915);
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ ppgtt->vm.file = file_priv;
+
+ if (args->extensions) {
+ err = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ NULL, 0,
+ ppgtt);
+ if (err)
+ goto err_put;
+ }
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ goto err_put;
+
+ err = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ if (err < 0)
+ goto err_unlock;
+
+ GEM_BUG_ON(err == 0); /* reserved for default/unassigned ppgtt */
+ ppgtt->user_handle = err;
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+
+ args->vm_id = err;
+ return 0;
+
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return err;
+}
+
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_vm_control *args = data;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+ u32 id;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->extensions)
+ return -EINVAL;
+
+ id = args->vm_id;
+ if (!id)
+ return -ENOENT;
+
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
+
+ ppgtt = idr_remove(&file_priv->vm_idr, id);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != id);
+ ppgtt->user_handle = 0;
+ }
+
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
+
+ i915_ppgtt_put(ppgtt);
+ return 0;
}
static struct i915_request *
@@ -671,10 +831,9 @@ last_request_on_engine(struct i915_timeline *timeline,
rq = i915_active_request_raw(&timeline->last_request,
&engine->i915->drm.struct_mutex);
- if (rq && rq->engine == engine) {
- GEM_TRACE("last request for %s on engine %s: %llx:%llu\n",
- timeline->name, engine->name,
- rq->fence.context, rq->fence.seqno);
+ if (rq && rq->engine->mask & engine->mask) {
+ GEM_TRACE("last request on engine %s: %llx:%llu\n",
+ engine->name, rq->fence.context, rq->fence.seqno);
GEM_BUG_ON(rq->timeline != timeline);
return rq;
}
@@ -682,81 +841,104 @@ last_request_on_engine(struct i915_timeline *timeline,
return NULL;
}
-static bool engine_has_kernel_context_barrier(struct intel_engine_cs *engine)
+struct context_barrier_task {
+ struct i915_active base;
+ void (*task)(void *data);
+ void *data;
+};
+
+static void cb_retire(struct i915_active *base)
+{
+ struct context_barrier_task *cb = container_of(base, typeof(*cb), base);
+
+ if (cb->task)
+ cb->task(cb->data);
+
+ i915_active_fini(&cb->base);
+ kfree(cb);
+}
+
+I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
+static int context_barrier_task(struct i915_gem_context *ctx,
+ intel_engine_mask_t engines,
+ int (*emit)(struct i915_request *rq, void *data),
+ void (*task)(void *data),
+ void *data)
{
- struct drm_i915_private *i915 = engine->i915;
- const struct intel_context * const ce =
- to_intel_context(i915->kernel_context, engine);
- struct i915_timeline *barrier = ce->ring->timeline;
- struct intel_ring *ring;
- bool any_active = false;
+ struct drm_i915_private *i915 = ctx->i915;
+ struct context_barrier_task *cb;
+ struct intel_context *ce, *next;
+ intel_wakeref_t wakeref;
+ int err = 0;
lockdep_assert_held(&i915->drm.struct_mutex);
- list_for_each_entry(ring, &i915->gt.active_rings, active_link) {
- struct i915_request *rq;
+ GEM_BUG_ON(!task);
- rq = last_request_on_engine(ring->timeline, engine);
- if (!rq)
- continue;
+ cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+ if (!cb)
+ return -ENOMEM;
- any_active = true;
+ i915_active_init(i915, &cb->base, cb_retire);
+ i915_active_acquire(&cb->base);
- if (rq->hw_context == ce)
+ wakeref = intel_runtime_pm_get(i915);
+ rbtree_postorder_for_each_entry_safe(ce, next, &ctx->hw_contexts, node) {
+ struct intel_engine_cs *engine = ce->engine;
+ struct i915_request *rq;
+
+ if (!(engine->mask & engines))
continue;
- /*
- * Was this request submitted after the previous
- * switch-to-kernel-context?
- */
- if (!i915_timeline_sync_is_later(barrier, &rq->fence)) {
- GEM_TRACE("%s needs barrier for %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
- return false;
+ if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
+ engine->mask)) {
+ err = -ENXIO;
+ break;
+ }
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
}
- GEM_TRACE("%s has barrier after %llx:%lld\n",
- ring->timeline->name,
- rq->fence.context,
- rq->fence.seqno);
+ err = 0;
+ if (emit)
+ err = emit(rq, data);
+ if (err == 0)
+ err = i915_active_ref(&cb->base, rq->fence.context, rq);
+
+ i915_request_add(rq);
+ if (err)
+ break;
}
+ intel_runtime_pm_put(i915, wakeref);
- /*
- * If any other timeline was still active and behind the last barrier,
- * then our last switch-to-kernel-context must still be queued and
- * will run last (leaving the engine in the kernel context when it
- * eventually idles).
- */
- if (any_active)
- return true;
+ cb->task = err ? NULL : task; /* caller needs to unwind instead */
+ cb->data = data;
- /* The engine is idle; check that it is idling in the kernel context. */
- return engine->last_retired_context == ce;
+ i915_active_release(&cb->base);
+
+ return err;
}
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
GEM_TRACE("awake?=%s\n", yesno(i915->gt.awake));
lockdep_assert_held(&i915->drm.struct_mutex);
GEM_BUG_ON(!i915->kernel_context);
- i915_retire_requests(i915);
+ /* Inoperable, so presume the GPU is safely pointing into the void! */
+ if (i915_terminally_wedged(i915))
+ return 0;
- for_each_engine(engine, i915, id) {
+ for_each_engine_masked(engine, i915, mask, mask) {
struct intel_ring *ring;
struct i915_request *rq;
- GEM_BUG_ON(!to_intel_context(i915->kernel_context, engine));
- if (engine_has_kernel_context_barrier(engine))
- continue;
-
- GEM_TRACE("emit barrier on %s\n", engine->name);
-
rq = i915_request_alloc(engine, i915->kernel_context);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -779,7 +961,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
i915_sw_fence_await_sw_fence_gfp(&rq->submit,
&prev->submit,
I915_FENCE_GFP);
- i915_timeline_sync_set(rq->timeline, &prev->fence);
}
i915_request_add(rq);
@@ -788,183 +969,173 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915)
return 0;
}
-static bool client_is_banned(struct drm_i915_file_private *file_priv)
+static int get_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
-}
-
-int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_context_create *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
int ret;
- if (!DRIVER_CAPS(dev_priv)->has_logical_contexts)
+ return -EINVAL; /* nothing to see here; please move along */
+
+ if (!ctx->ppgtt)
return -ENODEV;
- if (args->pad != 0)
- return -EINVAL;
+ /* XXX rcu acquire? */
+ ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (ret)
+ return ret;
- if (client_is_banned(file_priv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm,
- pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ ppgtt = i915_ppgtt_get(ctx->ppgtt);
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
- return -EIO;
+ ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (ret)
+ goto err_put;
+
+ if (!ppgtt->user_handle) {
+ ret = idr_alloc(&file_priv->vm_idr, ppgtt, 0, 0, GFP_KERNEL);
+ GEM_BUG_ON(!ret);
+ if (ret < 0)
+ goto err_unlock;
+
+ ppgtt->user_handle = ret;
+ i915_ppgtt_get(ppgtt);
}
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
+ args->size = 0;
+ args->value = ppgtt->user_handle;
- ctx = i915_gem_create_context(dev_priv, file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
+ ret = 0;
+err_unlock:
+ mutex_unlock(&file_priv->vm_idr_lock);
+err_put:
+ i915_ppgtt_put(ppgtt);
+ return ret;
+}
- GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
+static void set_ppgtt_barrier(void *data)
+{
+ struct i915_hw_ppgtt *old = data;
- args->ctx_id = ctx->user_handle;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ if (INTEL_GEN(old->vm.i915) < 8)
+ gen6_ppgtt_unpin_all(old);
- return 0;
+ i915_ppgtt_put(old);
}
-int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int emit_ppgtt_update(struct i915_request *rq, void *data)
{
- struct drm_i915_gem_context_destroy *args = data;
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_gem_context *ctx;
- int ret;
+ struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
+ struct intel_engine_cs *engine = rq->engine;
+ u32 base = engine->mmio_base;
+ u32 *cs;
+ int i;
- if (args->pad != 0)
- return -EINVAL;
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
+ const dma_addr_t pd_daddr = px_dma(&ppgtt->pml4);
- if (args->ctx_id == DEFAULT_CONTEXT_HANDLE)
- return -ENOENT;
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0));
+ *cs++ = lower_32_bits(pd_daddr);
- __destroy_hw_context(ctx, file_priv);
- mutex_unlock(&dev->struct_mutex);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) {
+ cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES);
+ for (i = GEN8_3LVL_PDPES; i--; ) {
+ const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+ *cs++ = lower_32_bits(pd_daddr);
+ }
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+ } else {
+ /* ppGTT is not part of the legacy context image */
+ gen6_ppgtt_pin(ppgtt);
+ }
-out:
- i915_gem_context_put(ctx);
return 0;
}
-static int get_sseu(struct i915_gem_context *ctx,
- struct drm_i915_gem_context_param *args)
+static int set_ppgtt(struct drm_i915_file_private *file_priv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_gem_context_param_sseu user_sseu;
- struct intel_engine_cs *engine;
- struct intel_context *ce;
- int ret;
-
- if (args->size == 0)
- goto out;
- else if (args->size < sizeof(user_sseu))
- return -EINVAL;
-
- if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
- sizeof(user_sseu)))
- return -EFAULT;
+ struct i915_hw_ppgtt *ppgtt, *old;
+ int err;
- if (user_sseu.flags || user_sseu.rsvd)
- return -EINVAL;
+ return -EINVAL; /* nothing to see here; please move along */
- engine = intel_engine_lookup_user(ctx->i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
- if (!engine)
+ if (args->size)
return -EINVAL;
- /* Only use for mutex here is to serialize get_param and set_param. */
- ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
- if (ret)
- return ret;
-
- ce = to_intel_context(ctx, engine);
-
- user_sseu.slice_mask = ce->sseu.slice_mask;
- user_sseu.subslice_mask = ce->sseu.subslice_mask;
- user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
- user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+ if (!ctx->ppgtt)
+ return -ENODEV;
- mutex_unlock(&ctx->i915->drm.struct_mutex);
+ if (upper_32_bits(args->value))
+ return -ENOENT;
- if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
- sizeof(user_sseu)))
- return -EFAULT;
+ err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ if (err)
+ return err;
-out:
- args->size = sizeof(user_sseu);
+ ppgtt = idr_find(&file_priv->vm_idr, args->value);
+ if (ppgtt) {
+ GEM_BUG_ON(ppgtt->user_handle != args->value);
+ i915_ppgtt_get(ppgtt);
+ }
+ mutex_unlock(&file_priv->vm_idr_lock);
+ if (!ppgtt)
+ return -ENOENT;
- return 0;
-}
+ err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex);
+ if (err)
+ goto out;
-int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
- int ret = 0;
+ if (ppgtt == ctx->ppgtt)
+ goto unlock;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
+ /* Teardown the existing obj:vma cache, it will have to be rebuilt. */
+ lut_close(ctx);
- switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
- case I915_CONTEXT_PARAM_NO_ZEROMAP:
- args->size = 0;
- args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
- break;
- case I915_CONTEXT_PARAM_GTT_SIZE:
- args->size = 0;
+ old = __set_ppgtt(ctx, ppgtt);
- if (ctx->ppgtt)
- args->value = ctx->ppgtt->vm.total;
- else if (to_i915(dev)->mm.aliasing_ppgtt)
- args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
- else
- args->value = to_i915(dev)->ggtt.vm.total;
- break;
- case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
- args->size = 0;
- args->value = i915_gem_context_no_error_capture(ctx);
- break;
- case I915_CONTEXT_PARAM_BANNABLE:
- args->size = 0;
- args->value = i915_gem_context_is_bannable(ctx);
- break;
- case I915_CONTEXT_PARAM_PRIORITY:
- args->size = 0;
- args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
- break;
- case I915_CONTEXT_PARAM_SSEU:
- ret = get_sseu(ctx, args);
- break;
- default:
- ret = -EINVAL;
- break;
+ /*
+ * We need to flush any requests using the current ppgtt before
+ * we release it as the requests do not hold a reference themselves,
+ * only indirectly through the context.
+ */
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ emit_ppgtt_update,
+ set_ppgtt_barrier,
+ old);
+ if (err) {
+ ctx->ppgtt = old;
+ ctx->desc_template = default_desc_template(ctx->i915, old);
+ i915_ppgtt_put(ppgtt);
}
- i915_gem_context_put(ctx);
- return ret;
+unlock:
+ mutex_unlock(&ctx->i915->drm.struct_mutex);
+
+out:
+ i915_ppgtt_put(ppgtt);
+ return err;
}
static int gen8_emit_rpcs_config(struct i915_request *rq,
@@ -993,39 +1164,35 @@ static int gen8_emit_rpcs_config(struct i915_request *rq,
}
static int
-gen8_modify_rpcs_gpu(struct intel_context *ce,
- struct intel_engine_cs *engine,
- struct intel_sseu sseu)
+gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
{
- struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq, *prev;
+ struct drm_i915_private *i915 = ce->engine->i915;
+ struct i915_request *rq;
intel_wakeref_t wakeref;
int ret;
- GEM_BUG_ON(!ce->pin_count);
+ lockdep_assert_held(&ce->pin_mutex);
- lockdep_assert_held(&i915->drm.struct_mutex);
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directly, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_is_pinned(ce))
+ return 0;
/* Submitting requests etc needs the hw awake. */
wakeref = intel_runtime_pm_get(i915);
- rq = i915_request_alloc(engine, i915->kernel_context);
+ rq = i915_request_alloc(ce->engine, i915->kernel_context);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
goto out_put;
}
/* Queue this switch after all other activity by this context. */
- prev = i915_active_request_raw(&ce->ring->timeline->last_request,
- &i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- ret = i915_request_await_dma_fence(rq, &prev->fence);
- if (ret < 0)
- goto out_add;
- }
-
- /* Order all following requests to be after. */
- ret = i915_timeline_set_barrier(ce->ring->timeline, rq);
+ ret = i915_active_request_set(&ce->ring->timeline->last_request, rq);
if (ret)
goto out_add;
@@ -1057,27 +1224,26 @@ __i915_gem_context_reconfigure_sseu(struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_sseu sseu)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce;
int ret = 0;
GEM_BUG_ON(INTEL_GEN(ctx->i915) < 8);
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
+
+ ce = intel_context_pin_lock(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
/* Nothing to do if unmodified. */
if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- return 0;
-
- /*
- * If context is not idle we have to submit an ordered request to modify
- * its context image via the kernel context. Pristine and idle contexts
- * will be configured on pinning.
- */
- if (ce->pin_count)
- ret = gen8_modify_rpcs_gpu(ce, engine, sseu);
+ goto unlock;
+ ret = gen8_modify_rpcs(ce, sseu);
if (!ret)
ce->sseu = sseu;
+unlock:
+ intel_context_pin_unlock(ce);
return ret;
}
@@ -1220,8 +1386,8 @@ static int set_sseu(struct i915_gem_context *ctx,
return -EINVAL;
engine = intel_engine_lookup_user(i915,
- user_sseu.engine_class,
- user_sseu.engine_instance);
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
if (!engine)
return -EINVAL;
@@ -1242,22 +1408,13 @@ static int set_sseu(struct i915_gem_context *ctx,
return 0;
}
-int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int ctx_setparam(struct drm_i915_file_private *fpriv,
+ struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- struct drm_i915_file_private *file_priv = file->driver_priv;
- struct drm_i915_gem_context_param *args = data;
- struct i915_gem_context *ctx;
int ret = 0;
- ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
- if (!ctx)
- return -ENOENT;
-
switch (args->param) {
- case I915_CONTEXT_PARAM_BAN_PERIOD:
- ret = -EINVAL;
- break;
case I915_CONTEXT_PARAM_NO_ZEROMAP:
if (args->size)
ret = -EINVAL;
@@ -1266,6 +1423,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
clear_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
break;
+
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
if (args->size)
ret = -EINVAL;
@@ -1274,6 +1432,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
else
i915_gem_context_clear_no_error_capture(ctx);
break;
+
case I915_CONTEXT_PARAM_BANNABLE:
if (args->size)
ret = -EINVAL;
@@ -1285,13 +1444,22 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
i915_gem_context_clear_bannable(ctx);
break;
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ if (args->size)
+ ret = -EINVAL;
+ else if (args->value)
+ i915_gem_context_set_recoverable(ctx);
+ else
+ i915_gem_context_clear_recoverable(ctx);
+ break;
+
case I915_CONTEXT_PARAM_PRIORITY:
{
s64 priority = args->value;
if (args->size)
ret = -EINVAL;
- else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
+ else if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY))
ret = -ENODEV;
else if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY)
@@ -1304,14 +1472,266 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
I915_USER_PRIORITY(priority);
}
break;
+
case I915_CONTEXT_PARAM_SSEU:
ret = set_sseu(ctx, args);
break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = set_ppgtt(fpriv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
break;
}
+ return ret;
+}
+
+struct create_ext {
+ struct i915_gem_context *ctx;
+ struct drm_i915_file_private *fpriv;
+};
+
+static int create_setparam(struct i915_user_extension __user *ext, void *data)
+{
+ struct drm_i915_gem_context_create_ext_setparam local;
+ const struct create_ext *arg = data;
+
+ if (copy_from_user(&local, ext, sizeof(local)))
+ return -EFAULT;
+
+ if (local.param.ctx_id)
+ return -EINVAL;
+
+ return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
+}
+
+static const i915_user_extension_fn create_extensions[] = {
+ [I915_CONTEXT_CREATE_EXT_SETPARAM] = create_setparam,
+};
+
+static bool client_is_banned(struct drm_i915_file_private *file_priv)
+{
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
+}
+
+int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct drm_i915_gem_context_create_ext *args = data;
+ struct create_ext ext_data;
+ int ret;
+
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return -ENODEV;
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_UNKNOWN)
+ return -EINVAL;
+
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ret;
+
+ ext_data.fpriv = file->driver_priv;
+ if (client_is_banned(ext_data.fpriv)) {
+ DRM_DEBUG("client %s[%d] banned from creating ctx\n",
+ current->comm,
+ pid_nr(get_task_pid(current, PIDTYPE_PID)));
+ return -EIO;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ return ret;
+
+ ext_data.ctx = i915_gem_create_context(i915, args->flags);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(ext_data.ctx))
+ return PTR_ERR(ext_data.ctx);
+
+ if (args->flags & I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS) {
+ ret = i915_user_extensions(u64_to_user_ptr(args->extensions),
+ create_extensions,
+ ARRAY_SIZE(create_extensions),
+ &ext_data);
+ if (ret)
+ goto err_ctx;
+ }
+
+ ret = gem_context_register(ext_data.ctx, ext_data.fpriv);
+ if (ret < 0)
+ goto err_ctx;
+
+ args->ctx_id = ret;
+ DRM_DEBUG("HW context %d created\n", args->ctx_id);
+
+ return 0;
+
+err_ctx:
+ mutex_lock(&dev->struct_mutex);
+ context_close(ext_data.ctx);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_context_destroy *args = data;
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct i915_gem_context *ctx;
+
+ if (args->pad != 0)
+ return -EINVAL;
+
+ if (!args->ctx_id)
+ return -ENOENT;
+
+ if (mutex_lock_interruptible(&file_priv->context_idr_lock))
+ return -EINTR;
+
+ ctx = idr_remove(&file_priv->context_idr, args->ctx_id);
+ mutex_unlock(&file_priv->context_idr_lock);
+ if (!ctx)
+ return -ENOENT;
+
+ mutex_lock(&dev->struct_mutex);
+ context_close(ctx);
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+}
+
+static int get_sseu(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ struct drm_i915_gem_context_param_sseu user_sseu;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ if (args->size == 0)
+ goto out;
+ else if (args->size < sizeof(user_sseu))
+ return -EINVAL;
+
+ if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value),
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+ if (user_sseu.flags || user_sseu.rsvd)
+ return -EINVAL;
+
+ engine = intel_engine_lookup_user(ctx->i915,
+ user_sseu.engine.engine_class,
+ user_sseu.engine.engine_instance);
+ if (!engine)
+ return -EINVAL;
+
+ ce = intel_context_pin_lock(ctx, engine); /* serialises with set_sseu */
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ user_sseu.slice_mask = ce->sseu.slice_mask;
+ user_sseu.subslice_mask = ce->sseu.subslice_mask;
+ user_sseu.min_eus_per_subslice = ce->sseu.min_eus_per_subslice;
+ user_sseu.max_eus_per_subslice = ce->sseu.max_eus_per_subslice;
+
+ intel_context_pin_unlock(ce);
+
+ if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu,
+ sizeof(user_sseu)))
+ return -EFAULT;
+
+out:
+ args->size = sizeof(user_sseu);
+
+ return 0;
+}
+
+int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret = 0;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ switch (args->param) {
+ case I915_CONTEXT_PARAM_NO_ZEROMAP:
+ args->size = 0;
+ args->value = test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags);
+ break;
+
+ case I915_CONTEXT_PARAM_GTT_SIZE:
+ args->size = 0;
+ if (ctx->ppgtt)
+ args->value = ctx->ppgtt->vm.total;
+ else if (to_i915(dev)->mm.aliasing_ppgtt)
+ args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
+ else
+ args->value = to_i915(dev)->ggtt.vm.total;
+ break;
+
+ case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
+ args->size = 0;
+ args->value = i915_gem_context_no_error_capture(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_BANNABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_bannable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_RECOVERABLE:
+ args->size = 0;
+ args->value = i915_gem_context_is_recoverable(ctx);
+ break;
+
+ case I915_CONTEXT_PARAM_PRIORITY:
+ args->size = 0;
+ args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
+ break;
+
+ case I915_CONTEXT_PARAM_SSEU:
+ ret = get_sseu(ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_VM:
+ ret = get_ppgtt(file_priv, ctx, args);
+ break;
+
+ case I915_CONTEXT_PARAM_BAN_PERIOD:
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ i915_gem_context_put(ctx);
+ return ret;
+}
+
+int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+ struct drm_i915_gem_context_param *args = data;
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
+ if (!ctx)
+ return -ENOENT;
+
+ ret = ctx_setparam(file_priv, ctx, args);
+
i915_gem_context_put(ctx);
return ret;
}
@@ -1385,3 +1805,28 @@ out_unlock:
#include "selftests/mock_context.c"
#include "selftests/i915_gem_context.c"
#endif
+
+static void i915_global_gem_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_luts);
+}
+
+static void i915_global_gem_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_luts);
+}
+
+static struct i915_global_gem_context global = { {
+ .shrink = i915_global_gem_context_shrink,
+ .exit = i915_global_gem_context_exit,
+} };
+
+int __init i915_global_gem_context_init(void)
+{
+ global.slab_luts = KMEM_CACHE(i915_lut_handle, 0);
+ if (!global.slab_luts)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
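
The new i915_gem_vm_create_ioctl()/i915_gem_vm_destroy_ioctl() above publish per-file address spaces through an idr guarded by vm_idr_lock, with ids starting at 1 so that 0 can mean "unassigned". The toy program below sketches only that publish/lookup/unpublish flow, using an ordinary array and a pthread mutex in place of the idr; every name in it is invented.

/* vm_handles.c -- toy handle table illustrating the id<->object flow. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_VMS 16

struct toy_vm {
	int refcount;
	unsigned int user_handle;              /* 0 means "no handle assigned" */
};

static struct toy_vm *vm_table[MAX_VMS];       /* index 0 stays reserved */
static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static void vm_put(struct toy_vm *vm)
{
	if (--vm->refcount == 0)
		free(vm);
}

/* Mirrors the create path: allocate, publish under the lock, return an id > 0. */
static int vm_create(unsigned int *id_out)
{
	struct toy_vm *vm = calloc(1, sizeof(*vm));
	unsigned int id;

	if (!vm)
		return -1;
	vm->refcount = 1;

	pthread_mutex_lock(&vm_lock);
	for (id = 1; id < MAX_VMS; id++) {
		if (!vm_table[id]) {
			vm_table[id] = vm;
			vm->user_handle = id;
			pthread_mutex_unlock(&vm_lock);
			*id_out = id;
			return 0;
		}
	}
	pthread_mutex_unlock(&vm_lock);
	free(vm);
	return -1;
}

/* Mirrors the destroy path: unpublish under the lock, then drop the table's reference. */
static int vm_destroy(unsigned int id)
{
	struct toy_vm *vm;

	if (!id || id >= MAX_VMS)
		return -1;

	pthread_mutex_lock(&vm_lock);
	vm = vm_table[id];
	vm_table[id] = NULL;
	pthread_mutex_unlock(&vm_lock);

	if (!vm)
		return -1;
	vm->user_handle = 0;
	vm_put(vm);
	return 0;
}

int main(void)
{
	unsigned int id;

	if (vm_create(&id) == 0) {
		printf("vm id %u\n", id);
		vm_destroy(id);
	}
	return 0;
}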
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index ca150a764c24..23dcb01bfd82 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -25,210 +25,16 @@
#ifndef __I915_GEM_CONTEXT_H__
#define __I915_GEM_CONTEXT_H__
-#include <linux/bitops.h>
-#include <linux/list.h>
-#include <linux/radix-tree.h>
+#include "i915_gem_context_types.h"
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "intel_context.h"
#include "intel_device_info.h"
-struct pid;
-
struct drm_device;
struct drm_file;
-struct drm_i915_private;
-struct drm_i915_file_private;
-struct i915_hw_ppgtt;
-struct i915_request;
-struct i915_vma;
-struct intel_ring;
-
-#define DEFAULT_CONTEXT_HANDLE 0
-
-struct intel_context;
-
-struct intel_context_ops {
- void (*unpin)(struct intel_context *ce);
- void (*destroy)(struct intel_context *ce);
-};
-
-/*
- * Powergating configuration for a particular (context,engine).
- */
-struct intel_sseu {
- u8 slice_mask;
- u8 subslice_mask;
- u8 min_eus_per_subslice;
- u8 max_eus_per_subslice;
-};
-
-/**
- * struct i915_gem_context - client state
- *
- * The struct i915_gem_context represents the combined view of the driver and
- * logical hardware state for a particular client.
- */
-struct i915_gem_context {
- /** i915: i915 device backpointer */
- struct drm_i915_private *i915;
-
- /** file_priv: owning file descriptor */
- struct drm_i915_file_private *file_priv;
-
- /**
- * @ppgtt: unique address space (GTT)
- *
- * In full-ppgtt mode, each context has its own address space ensuring
- * complete seperation of one client from all others.
- *
- * In other modes, this is a NULL pointer with the expectation that
- * the caller uses the shared global GTT.
- */
- struct i915_hw_ppgtt *ppgtt;
-
- /**
- * @pid: process id of creator
- *
- * Note that who created the context may not be the principle user,
- * as the context may be shared across a local socket. However,
- * that should only affect the default context, all contexts created
- * explicitly by the client are expected to be isolated.
- */
- struct pid *pid;
-
- /**
- * @name: arbitrary name
- *
- * A name is constructed for the context from the creator's process
- * name, pid and user handle in order to uniquely identify the
- * context in messages.
- */
- const char *name;
-
- /** link: place with &drm_i915_private.context_list */
- struct list_head link;
- struct llist_node free_link;
-
- /**
- * @ref: reference count
- *
- * A reference to a context is held by both the client who created it
- * and on each request submitted to the hardware using the request
- * (to ensure the hardware has access to the state until it has
- * finished all pending writes). See i915_gem_context_get() and
- * i915_gem_context_put() for access.
- */
- struct kref ref;
-
- /**
- * @rcu: rcu_head for deferred freeing.
- */
- struct rcu_head rcu;
-
- /**
- * @user_flags: small set of booleans controlled by the user
- */
- unsigned long user_flags;
-#define UCONTEXT_NO_ZEROMAP 0
-#define UCONTEXT_NO_ERROR_CAPTURE 1
-#define UCONTEXT_BANNABLE 2
-
- /**
- * @flags: small set of booleans
- */
- unsigned long flags;
-#define CONTEXT_BANNED 0
-#define CONTEXT_CLOSED 1
-#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
-
- /**
- * @hw_id: - unique identifier for the context
- *
- * The hardware needs to uniquely identify the context for a few
- * functions like fault reporting, PASID, scheduling. The
- * &drm_i915_private.context_hw_ida is used to assign a unqiue
- * id for the lifetime of the context.
- *
- * @hw_id_pin_count: - number of times this context had been pinned
- * for use (should be, at most, once per engine).
- *
- * @hw_id_link: - all contexts with an assigned id are tracked
- * for possible repossession.
- */
- unsigned int hw_id;
- atomic_t hw_id_pin_count;
- struct list_head hw_id_link;
-
- /**
- * @user_handle: userspace identifier
- *
- * A unique per-file identifier is generated from
- * &drm_i915_file_private.contexts.
- */
- u32 user_handle;
-
- struct i915_sched_attr sched;
-
- /** engine: per-engine logical HW state */
- struct intel_context {
- struct i915_gem_context *gem_context;
- struct intel_engine_cs *active;
- struct list_head signal_link;
- struct list_head signals;
- struct i915_vma *state;
- struct intel_ring *ring;
- u32 *lrc_reg_state;
- u64 lrc_desc;
- int pin_count;
-
- /**
- * active_tracker: Active tracker for the external rq activity
- * on this intel_context object.
- */
- struct i915_active_request active_tracker;
-
- const struct intel_context_ops *ops;
-
- /** sseu: Control eu/slice partitioning */
- struct intel_sseu sseu;
- } __engine[I915_NUM_ENGINES];
-
- /** ring_size: size for allocating the per-engine ring buffer */
- u32 ring_size;
- /** desc_template: invariant fields for the HW context descriptor */
- u32 desc_template;
-
- /** guilty_count: How many times this context has caused a GPU hang. */
- atomic_t guilty_count;
- /**
- * @active_count: How many times this context was active during a GPU
- * hang, but did not cause it.
- */
- atomic_t active_count;
-
-#define CONTEXT_SCORE_GUILTY 10
-#define CONTEXT_SCORE_BAN_THRESHOLD 40
- /** ban_score: Accumulated score of all hangs caused by this context. */
- atomic_t ban_score;
-
- /** remap_slice: Bitmask of cache lines that need remapping */
- u8 remap_slice;
-
- /** handles_vma: rbtree to look up our context specific obj/vma for
- * the user handle. (user handles are per fd, but the binding is
- * per vm, which may be one per context or shared with the global GTT)
- */
- struct radix_tree_root handles_vma;
-
- /** handles_list: reverse list of all the rbtree entries in use for
- * this context, which allows us to free all the allocations on
- * context close.
- */
- struct list_head handles_list;
-};
-
static inline bool i915_gem_context_is_closed(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_CLOSED, &ctx->flags);
@@ -270,6 +76,21 @@ static inline void i915_gem_context_clear_bannable(struct i915_gem_context *ctx)
clear_bit(UCONTEXT_BANNABLE, &ctx->user_flags);
}
+static inline bool i915_gem_context_is_recoverable(const struct i915_gem_context *ctx)
+{
+ return test_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_recoverable(struct i915_gem_context *ctx)
+{
+ set_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *ctx)
+{
+ clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
+}
+
static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
{
return test_bit(CONTEXT_BANNED, &ctx->flags);
@@ -305,45 +126,11 @@ static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx)
atomic_dec(&ctx->hw_id_pin_count);
}
-static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
-{
- return c->user_handle == DEFAULT_CONTEXT_HANDLE;
-}
-
static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
{
return !ctx->file_priv;
}
-static inline struct intel_context *
-to_intel_context(struct i915_gem_context *ctx,
- const struct intel_engine_cs *engine)
-{
- return &ctx->__engine[engine->id];
-}
-
-static inline struct intel_context *
-intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
-{
- return engine->context_pin(engine, ctx);
-}
-
-static inline void __intel_context_pin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- ce->pin_count++;
-}
-
-static inline void intel_context_unpin(struct intel_context *ce)
-{
- GEM_BUG_ON(!ce->pin_count);
- if (--ce->pin_count)
- return;
-
- GEM_BUG_ON(!ce->ops);
- ce->ops->unpin(ce);
-}
-
/* i915_gem_context.c */
int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
@@ -354,12 +141,18 @@ int i915_gem_context_open(struct drm_i915_private *i915,
void i915_gem_context_close(struct drm_file *file);
int i915_switch_context(struct i915_request *rq);
-int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv);
+int i915_gem_switch_to_kernel_context(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
void i915_gem_context_release(struct kref *ctx_ref);
struct i915_gem_context *
i915_gem_context_create_gvt(struct drm_device *dev);
+int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
@@ -386,8 +179,7 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx)
kref_put(&ctx->ref, i915_gem_context_release);
}
-void intel_context_init(struct intel_context *ce,
- struct i915_gem_context *ctx,
- struct intel_engine_cs *engine);
+struct i915_lut_handle *i915_lut_handle_alloc(void);
+void i915_lut_handle_free(struct i915_lut_handle *lut);
#endif /* !__I915_GEM_CONTEXT_H__ */
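
The header now exposes i915_gem_context_{is,set,clear}_recoverable(), thin wrappers over the new UCONTEXT_RECOVERABLE bit in ctx->user_flags. A small C11 sketch of the same set/clear/test style on an atomic flags word, standing in for the kernel's set_bit()/clear_bit()/test_bit() helpers; the bit positions echo the header, everything else is invented.

/* ctx_flags.c -- illustrative flag-bit helpers, userspace only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define UCTX_NO_ZEROMAP    0
#define UCTX_BANNABLE      2
#define UCTX_RECOVERABLE   3   /* mirrors the new UCONTEXT_RECOVERABLE bit */

struct toy_ctx {
	atomic_ulong user_flags;
};

static void ctx_set_recoverable(struct toy_ctx *ctx)
{
	atomic_fetch_or(&ctx->user_flags, 1UL << UCTX_RECOVERABLE);
}

static void ctx_clear_recoverable(struct toy_ctx *ctx)
{
	atomic_fetch_and(&ctx->user_flags, ~(1UL << UCTX_RECOVERABLE));
}

static bool ctx_is_recoverable(struct toy_ctx *ctx)
{
	return atomic_load(&ctx->user_flags) & (1UL << UCTX_RECOVERABLE);
}

int main(void)
{
	struct toy_ctx ctx;

	atomic_init(&ctx.user_flags, 0);
	ctx_set_recoverable(&ctx);
	printf("recoverable: %d\n", ctx_is_recoverable(&ctx));
	ctx_clear_recoverable(&ctx);
	printf("recoverable: %d\n", ctx_is_recoverable(&ctx));
	return 0;
}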
diff --git a/drivers/gpu/drm/i915/i915_gem_context_types.h b/drivers/gpu/drm/i915/i915_gem_context_types.h
new file mode 100644
index 000000000000..e2ec58b10fb2
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_context_types.h
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_GEM_CONTEXT_TYPES_H__
+#define __I915_GEM_CONTEXT_TYPES_H__
+
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
+#include <linux/rcupdate.h>
+#include <linux/types.h>
+
+#include "i915_scheduler.h"
+#include "intel_context_types.h"
+
+struct pid;
+
+struct drm_i915_private;
+struct drm_i915_file_private;
+struct i915_hw_ppgtt;
+struct i915_timeline;
+struct intel_ring;
+
+/**
+ * struct i915_gem_context - client state
+ *
+ * The struct i915_gem_context represents the combined view of the driver and
+ * logical hardware state for a particular client.
+ */
+struct i915_gem_context {
+ /** i915: i915 device backpointer */
+ struct drm_i915_private *i915;
+
+ /** file_priv: owning file descriptor */
+ struct drm_i915_file_private *file_priv;
+
+ struct i915_timeline *timeline;
+
+ /**
+ * @ppgtt: unique address space (GTT)
+ *
+ * In full-ppgtt mode, each context has its own address space ensuring
+ * complete separation of one client from all others.
+ *
+ * In other modes, this is a NULL pointer with the expectation that
+ * the caller uses the shared global GTT.
+ */
+ struct i915_hw_ppgtt *ppgtt;
+
+ /**
+ * @pid: process id of creator
+ *
+ * Note that who created the context may not be the principal user,
+ * as the context may be shared across a local socket. However,
+ * that should only affect the default context, all contexts created
+ * explicitly by the client are expected to be isolated.
+ */
+ struct pid *pid;
+
+ /**
+ * @name: arbitrary name
+ *
+ * A name is constructed for the context from the creator's process
+ * name, pid and user handle in order to uniquely identify the
+ * context in messages.
+ */
+ const char *name;
+
+ /** link: place with &drm_i915_private.context_list */
+ struct list_head link;
+ struct llist_node free_link;
+
+ /**
+ * @ref: reference count
+ *
+ * A reference to a context is held by both the client who created it
+ * and on each request submitted to the hardware using the request
+ * (to ensure the hardware has access to the state until it has
+ * finished all pending writes). See i915_gem_context_get() and
+ * i915_gem_context_put() for access.
+ */
+ struct kref ref;
+
+ /**
+ * @rcu: rcu_head for deferred freeing.
+ */
+ struct rcu_head rcu;
+
+ /**
+ * @user_flags: small set of booleans controlled by the user
+ */
+ unsigned long user_flags;
+#define UCONTEXT_NO_ZEROMAP 0
+#define UCONTEXT_NO_ERROR_CAPTURE 1
+#define UCONTEXT_BANNABLE 2
+#define UCONTEXT_RECOVERABLE 3
+
+ /**
+ * @flags: small set of booleans
+ */
+ unsigned long flags;
+#define CONTEXT_BANNED 0
+#define CONTEXT_CLOSED 1
+#define CONTEXT_FORCE_SINGLE_SUBMISSION 2
+
+ /**
+ * @hw_id: - unique identifier for the context
+ *
+ * The hardware needs to uniquely identify the context for a few
+ * functions like fault reporting, PASID, scheduling. The
+ * &drm_i915_private.context_hw_ida is used to assign a unique
+ * id for the lifetime of the context.
+ *
+ * @hw_id_pin_count: - number of times this context had been pinned
+ * for use (should be, at most, once per engine).
+ *
+ * @hw_id_link: - all contexts with an assigned id are tracked
+ * for possible repossession.
+ */
+ unsigned int hw_id;
+ atomic_t hw_id_pin_count;
+ struct list_head hw_id_link;
+
+ struct list_head active_engines;
+ struct mutex mutex;
+
+ struct i915_sched_attr sched;
+
+ /** hw_contexts: per-engine logical HW state */
+ struct rb_root hw_contexts;
+ spinlock_t hw_contexts_lock;
+
+ /** ring_size: size for allocating the per-engine ring buffer */
+ u32 ring_size;
+ /** desc_template: invariant fields for the HW context descriptor */
+ u32 desc_template;
+
+ /** guilty_count: How many times this context has caused a GPU hang. */
+ atomic_t guilty_count;
+ /**
+ * @active_count: How many times this context was active during a GPU
+ * hang, but did not cause it.
+ */
+ atomic_t active_count;
+
+ /**
+ * @hang_timestamp: The last time(s) this context caused a GPU hang
+ */
+ unsigned long hang_timestamp[2];
+#define CONTEXT_FAST_HANG_JIFFIES (120 * HZ) /* 3 hangs within 120s? Banned! */
+
+ /** remap_slice: Bitmask of cache lines that need remapping */
+ u8 remap_slice;
+
+ /** handles_vma: radix tree to look up our context specific obj/vma for
+ * the user handle. (user handles are per fd, but the binding is
+ * per vm, which may be one per context or shared with the global GTT)
+ */
+ struct radix_tree_root handles_vma;
+
+ /** handles_list: reverse list of all the rbtree entries in use for
+ * this context, which allows us to free all the allocations on
+ * context close.
+ */
+ struct list_head handles_list;
+};
+
+#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
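
The new types header keeps per-engine HW state in an rbtree (hw_contexts, guarded by hw_contexts_lock) rather than the old fixed __engine[] array, so entries are created on demand and found by engine. The sketch below shows only that lookup-or-insert shape; an unbalanced binary search tree stands in for the kernel rbtree, the payload fields are invented, and locking is omitted.

/* hw_contexts.c -- per-engine state keyed by engine id in a search tree. */
#include <stdio.h>
#include <stdlib.h>

struct toy_context {
	unsigned int engine_id;        /* key: which engine this state belongs to */
	unsigned int pin_count;        /* example per-engine payload */
	struct toy_context *left, *right;
};

/* Find the per-engine node, or NULL -- an intel_context_lookup()-style helper. */
static struct toy_context *ctx_lookup(struct toy_context *root, unsigned int engine_id)
{
	while (root) {
		if (engine_id < root->engine_id)
			root = root->left;
		else if (engine_id > root->engine_id)
			root = root->right;
		else
			return root;
	}
	return NULL;
}

/* Insert a freshly allocated node; if the engine is already populated, return the old one. */
static struct toy_context *ctx_insert(struct toy_context **rootp, struct toy_context *new)
{
	while (*rootp) {
		struct toy_context *cur = *rootp;

		if (new->engine_id < cur->engine_id)
			rootp = &cur->left;
		else if (new->engine_id > cur->engine_id)
			rootp = &cur->right;
		else
			return cur;
	}
	*rootp = new;
	return new;
}

int main(void)
{
	struct toy_context *root = NULL;
	unsigned int ids[] = { 3, 0, 5 };

	for (size_t i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		struct toy_context *ce = calloc(1, sizeof(*ce));

		if (!ce)
			return 1;
		ce->engine_id = ids[i];
		ctx_insert(&root, ce);
	}
	printf("engine 5 present: %d\n", ctx_lookup(root, 5) != NULL);
	printf("engine 7 present: %d\n", ctx_lookup(root, 7) != NULL);
	return 0;
}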
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 02f7298bfe57..5a101a9462d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -107,6 +107,7 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
}
@@ -300,7 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
- obj = i915_gem_object_alloc(to_i915(dev));
+ obj = i915_gem_object_alloc();
if (obj == NULL) {
ret = -ENOMEM;
goto fail_detach;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 68d74c50ac39..060f5903544a 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -38,31 +38,21 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl {
static bool ggtt_is_idle(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (i915->gt.active_requests)
- return false;
-
- for_each_engine(engine, i915, id) {
- if (!intel_engine_has_kernel_context(engine))
- return false;
- }
-
- return true;
+ return !i915->gt.active_requests;
}
static int ggtt_flush(struct drm_i915_private *i915)
{
int err;
- /* Not everything in the GGTT is tracked via vma (otherwise we
+ /*
+ * Not everything in the GGTT is tracked via vma (otherwise we
* could evict as required with minimal stalling) so we are forced
* to idle the GPU and explicitly retire outstanding requests in
* the hopes that we can then remove contexts and the like only
* bound by their active reference.
*/
- err = i915_gem_switch_to_kernel_context(i915);
+ err = i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines);
if (err)
return err;
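
ggtt_flush() now passes i915->gt.active_engines into i915_gem_switch_to_kernel_context(), which walks only the engines selected by that mask (via for_each_engine_masked). A tiny illustration of the mask walk; the engine table and names here are invented.

/* engine_mask.c -- walking only the engines named by a bitmask. */
#include <stdio.h>

#define NUM_ENGINES 4

static const char *engine_name[NUM_ENGINES] = { "rcs0", "bcs0", "vcs0", "vecs0" };

static void flush_engines(unsigned int mask)
{
	for (unsigned int id = 0; id < NUM_ENGINES; id++) {
		if (!(mask & (1u << id)))
			continue;
		/* here the driver would queue a kernel-context request */
		printf("flushing %s\n", engine_name[id]);
	}
}

int main(void)
{
	flush_engines((1u << 0) | (1u << 2));   /* only rcs0 and vcs0 */
	return 0;
}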
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 16f80a448820..c83d2a195d15 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -794,8 +794,8 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
* keeping all of their resources pinned.
*/
- ce = to_intel_context(eb->ctx, eb->engine);
- if (!ce->ring) /* first use, assume empty! */
+ ce = intel_context_lookup(eb->ctx, eb->engine);
+ if (!ce || !ce->ring) /* first use, assume empty! */
return 0;
rq = __eb_wait_for_ring(ce->ring);
@@ -849,12 +849,12 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
vma = i915_vma_instance(obj, eb->vm, NULL);
- if (unlikely(IS_ERR(vma))) {
+ if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
}
- lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+ lut = i915_lut_handle_alloc();
if (unlikely(!lut)) {
err = -ENOMEM;
goto err_obj;
@@ -862,7 +862,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
err = radix_tree_insert(handles_vma, handle, vma);
if (unlikely(err)) {
- kmem_cache_free(eb->i915->luts, lut);
+ i915_lut_handle_free(lut);
goto err_obj;
}
@@ -1001,7 +1001,10 @@ static void reloc_gpu_flush(struct reloc_cache *cache)
{
GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+
+ __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
i915_gem_object_unpin_map(cache->rq->batch->obj);
+
i915_gem_chipset_flush(cache->rq->i915);
i915_request_add(cache->rq);
@@ -1214,10 +1217,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
if (IS_ERR(cmd))
return PTR_ERR(cmd);
- err = i915_gem_object_set_to_wc_domain(obj, false);
- if (err)
- goto err_unmap;
-
batch = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
@@ -1958,7 +1957,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
u32 *cs;
int i;
- if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
+ if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -2083,11 +2082,11 @@ gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
#define I915_USER_RINGS (4)
static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
- [I915_EXEC_DEFAULT] = RCS,
- [I915_EXEC_RENDER] = RCS,
- [I915_EXEC_BLT] = BCS,
- [I915_EXEC_BSD] = VCS,
- [I915_EXEC_VEBOX] = VECS
+ [I915_EXEC_DEFAULT] = RCS0,
+ [I915_EXEC_RENDER] = RCS0,
+ [I915_EXEC_BLT] = BCS0,
+ [I915_EXEC_BSD] = VCS0,
+ [I915_EXEC_VEBOX] = VECS0
};
static struct intel_engine_cs *
@@ -2110,7 +2109,7 @@ eb_select_engine(struct drm_i915_private *dev_priv,
return NULL;
}
- if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+ if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@ -2313,10 +2312,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (args->flags & I915_EXEC_IS_PINNED)
eb.batch_flags |= I915_DISPATCH_PINNED;
- eb.engine = eb_select_engine(eb.i915, file, args);
- if (!eb.engine)
- return -EINVAL;
-
if (args->flags & I915_EXEC_FENCE_IN) {
in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
if (!in_fence)
@@ -2341,6 +2336,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_destroy;
+ eb.engine = eb_select_engine(eb.i915, file, args);
+ if (!eb.engine) {
+ err = -EINVAL;
+ goto err_engine;
+ }
+
/*
* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
@@ -2506,6 +2507,7 @@ err_unlock:
mutex_unlock(&dev->struct_mutex);
err_rpm:
intel_runtime_pm_put(eb.i915, wakeref);
+err_engine:
i915_gem_context_put(eb.ctx);
err_destroy:
eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index e037e94792f3..3084f52e3372 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -210,6 +210,7 @@ static int fence_update(struct drm_i915_fence_reg *fence,
struct i915_vma *vma)
{
intel_wakeref_t wakeref;
+ struct i915_vma *old;
int ret;
if (vma) {
@@ -229,49 +230,55 @@ static int fence_update(struct drm_i915_fence_reg *fence,
return ret;
}
- if (fence->vma) {
- struct i915_vma *old = fence->vma;
-
+ old = xchg(&fence->vma, NULL);
+ if (old) {
ret = i915_active_request_retire(&old->last_fence,
&old->obj->base.dev->struct_mutex);
- if (ret)
+ if (ret) {
+ fence->vma = old;
return ret;
+ }
i915_vma_flush_writes(old);
- }
- if (fence->vma && fence->vma != vma) {
- /* Ensure that all userspace CPU access is completed before
+ /*
+ * Ensure that all userspace CPU access is completed before
* stealing the fence.
*/
- GEM_BUG_ON(fence->vma->fence != fence);
- i915_vma_revoke_mmap(fence->vma);
-
- fence->vma->fence = NULL;
- fence->vma = NULL;
+ if (old != vma) {
+ GEM_BUG_ON(old->fence != fence);
+ i915_vma_revoke_mmap(old);
+ old->fence = NULL;
+ }
list_move(&fence->link, &fence->i915->mm.fence_list);
}
- /* We only need to update the register itself if the device is awake.
+ /*
+ * We only need to update the register itself if the device is awake.
* If the device is currently powered down, we will defer the write
* to the runtime resume, see i915_gem_restore_fences().
+ *
+ * This only works for removing the fence register, on acquisition
+ * the caller must hold the rpm wakeref. The fence register must
+ * be cleared before we can use any other fences to ensure that
+ * the new fences do not overlap the elided clears, confusing HW.
*/
wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
- if (wakeref) {
- fence_write(fence, vma);
- intel_runtime_pm_put(fence->i915, wakeref);
+ if (!wakeref) {
+ GEM_BUG_ON(vma);
+ return 0;
}
- if (vma) {
- if (fence->vma != vma) {
- vma->fence = fence;
- fence->vma = vma;
- }
+ WRITE_ONCE(fence->vma, vma);
+ fence_write(fence, vma);
+ if (vma) {
+ vma->fence = fence;
list_move_tail(&fence->link, &fence->i915->mm.fence_list);
}
+ intel_runtime_pm_put(fence->i915, wakeref);
return 0;
}
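The reworked fence_update() above follows a take-then-restore pattern: the current vma is atomically stolen from the fence with xchg(), and if retiring its last activity fails the old pointer is written back so no state is lost. A hedged, userspace-flavoured sketch of just that pattern, with C11 atomics standing in for the kernel's xchg() (not the driver's actual code):

#include <stdatomic.h>

struct vma;
struct fence_reg { _Atomic(struct vma *) vma; };

/* may fail, e.g. when waiting for outstanding work is interrupted */
static int retire_last_activity(struct vma *v) { (void)v; return 0; }

static int fence_take(struct fence_reg *f)
{
	struct vma *old = atomic_exchange(&f->vma, NULL);

	if (old) {
		int err = retire_last_activity(old);

		if (err) {
			/* put the previous owner back so the register state stays consistent */
			atomic_store(&f->vma, old);
			return err;
		}
	}
	return 0;
}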
@@ -436,32 +443,6 @@ void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
}
/**
- * i915_gem_revoke_fences - revoke fence state
- * @dev_priv: i915 device private
- *
- * Removes all GTT mmappings via the fence registers. This forces any user
- * of the fence to reacquire that fence before continuing with their access.
- * One use is during GPU reset where the fence register is lost and we need to
- * revoke concurrent userspace access via GTT mmaps until the hardware has been
- * reset and the fence registers have been restored.
- */
-void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
-{
- int i;
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
-
- GEM_BUG_ON(fence->vma && fence->vma->fence != fence);
-
- if (fence->vma)
- i915_vma_revoke_mmap(fence->vma);
- }
-}
-
-/**
* i915_gem_restore_fences - restore fence state
* @dev_priv: i915 device private
*
@@ -473,9 +454,10 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
int i;
+ rcu_read_lock(); /* keep obj alive as we dereference */
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
- struct i915_vma *vma = reg->vma;
+ struct i915_vma *vma = READ_ONCE(reg->vma);
GEM_BUG_ON(vma && vma->fence != reg);
@@ -483,18 +465,12 @@ void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
* Commit delayed tiling changes if we have an object still
* attached to the fence, otherwise just clear the fence.
*/
- if (vma && !i915_gem_object_is_tiled(vma->obj)) {
- GEM_BUG_ON(!reg->dirty);
- GEM_BUG_ON(i915_vma_has_userfault(vma));
-
- list_move(&reg->link, &dev_priv->mm.fence_list);
- vma->fence = NULL;
+ if (vma && !i915_gem_object_is_tiled(vma->obj))
vma = NULL;
- }
fence_write(reg, vma);
- reg->vma = vma;
}
+ rcu_read_unlock();
}
/**
@@ -609,8 +585,38 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
- } else if (IS_MOBILE(dev_priv) ||
- IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
+ } else if (IS_G45(dev_priv) || IS_I965G(dev_priv) || IS_G33(dev_priv)) {
+ /* The 965, G33, and newer, have a very flexible memory
+ * configuration. It will enable dual-channel mode
+ * (interleaving) on as much memory as it can, and the GPU
+ * will additionally sometimes enable different bit 6
+ * swizzling for tiled objects from the CPU.
+ *
+ * Here's what I found on the G965:
+ * slot fill memory size swizzling
+ * 0A 0B 1A 1B 1-ch 2-ch
+ * 512 0 0 0 512 0 O
+ * 512 0 512 0 16 1008 X
+ * 512 0 0 512 16 1008 X
+ * 0 512 0 512 16 1008 X
+ * 1024 1024 1024 0 2048 1024 O
+ *
+ * We could probably detect this based on either the DRB
+ * matching, which was the case for the swizzling required in
+ * the table above, or from the 1-ch value being less than
+ * the minimum size of a rank.
+ *
+ * Reports indicate that the swizzling actually
+ * varies depending upon page placement inside the
+ * channels, i.e. we see swizzled pages where the
+ * banks of memory are paired and unswizzled on the
+ * uneven portion, so leave that as unknown.
+ */
+ if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
+ swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+ swizzle_y = I915_BIT_6_SWIZZLE_9;
+ }
+ } else {
u32 dcc;
/* On 9xx chipsets, channel interleave by the CPU is
@@ -660,37 +666,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
- } else {
- /* The 965, G33, and newer, have a very flexible memory
- * configuration. It will enable dual-channel mode
- * (interleaving) on as much memory as it can, and the GPU
- * will additionally sometimes enable different bit 6
- * swizzling for tiled objects from the CPU.
- *
- * Here's what I found on the G965:
- * slot fill memory size swizzling
- * 0A 0B 1A 1B 1-ch 2-ch
- * 512 0 0 0 512 0 O
- * 512 0 512 0 16 1008 X
- * 512 0 0 512 16 1008 X
- * 0 512 0 512 16 1008 X
- * 1024 1024 1024 0 2048 1024 O
- *
- * We could probably detect this based on either the DRB
- * matching, which was the case for the swizzling required in
- * the table above, or from the 1-ch value being less than
- * the minimum size of a rank.
- *
- * Reports indicate that the swizzling actually
- * varies depending upon page placement inside the
- * channels, i.e. we see swizzled pages where the
- * banks of memory are paired and unswizzled on the
- * uneven portion, so leave that as unknown.
- */
- if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
- swizzle_x = I915_BIT_6_SWIZZLE_9_10;
- swizzle_y = I915_BIT_6_SWIZZLE_9;
- }
}
if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
@@ -790,8 +765,7 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
int i;
if (obj->bit_17 == NULL) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d646d37eec2f..8f460cc4cc1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -584,7 +584,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
* for all.
*/
size = I915_GTT_PAGE_SIZE_4K;
- if (i915_vm_is_48bit(vm) &&
+ if (i915_vm_is_4lvl(vm) &&
HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
size = I915_GTT_PAGE_SIZE_64K;
gfp |= __GFP_NOWARN;
@@ -613,7 +613,7 @@ setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
vm->scratch_page.page = page;
vm->scratch_page.daddr = addr;
- vm->scratch_page.order = order;
+ vm->scratch_order = order;
return 0;
unmap_page:
@@ -632,10 +632,11 @@ skip:
static void cleanup_scratch_page(struct i915_address_space *vm)
{
struct i915_page_dma *p = &vm->scratch_page;
+ int order = vm->scratch_order;
- dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
+ dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
PCI_DMA_BIDIRECTIONAL);
- __free_pages(p->page, p->order);
+ __free_pages(p->page, order);
}
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
@@ -726,18 +727,13 @@ static void __pdp_fini(struct i915_page_directory_pointer *pdp)
pdp->page_directory = NULL;
}
-static inline bool use_4lvl(const struct i915_address_space *vm)
-{
- return i915_vm_is_48bit(vm);
-}
-
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
@@ -766,7 +762,7 @@ static void free_pdp(struct i915_address_space *vm,
{
__pdp_fini(pdp);
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
cleanup_px(vm, pdp);
@@ -791,14 +787,15 @@ static void gen8_initialize_pml4(struct i915_address_space *vm,
memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
}
-/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
+/*
+ * PDE TLBs are a pain to invalidate on GEN8+. When we modify
* the page table structures, we mark them dirty so that
* context switching/execlist queuing code takes extra steps
* to ensure that tlbs are flushed.
*/
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
- ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->vm.i915)->ring_mask;
+ ppgtt->pd_dirty_engines = ALL_ENGINES;
}
/* Removes entries from a single page table, releasing it if it's empty.
@@ -809,8 +806,6 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
u64 start, u64 length)
{
unsigned int num_entries = gen8_pte_count(start, length);
- unsigned int pte = gen8_pte_index(start);
- unsigned int pte_end = pte + num_entries;
gen8_pte_t *vaddr;
GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -820,8 +815,7 @@ static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
return true;
vaddr = kmap_atomic_px(pt);
- while (pte < pte_end)
- vaddr[pte++] = vm->scratch_pte;
+ memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
kunmap_atomic(vaddr);
return false;
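gen8_ppgtt_clear_pt() now fills the PTE range with a single memset64() instead of an explicit store loop (the gen6 path below does the same with memset32()). A minimal stand-in showing the equivalence, assuming memset64() writes count copies of a 64-bit value:

#include <stdint.h>
#include <stddef.h>

/* stand-in for the kernel's memset64() */
static void memset64_sketch(uint64_t *s, uint64_t v, size_t count)
{
	while (count--)
		*s++ = v;
}

static void clear_ptes(uint64_t *vaddr, unsigned int first_pte,
		       unsigned int num_entries, uint64_t scratch_pte)
{
	/* previously: while (pte < pte_end) vaddr[pte++] = scratch_pte; */
	memset64_sketch(vaddr + first_pte, scratch_pte, num_entries);
}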
@@ -872,7 +866,7 @@ static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
gen8_ppgtt_pdpe_t *vaddr;
pdp->page_directory[pdpe] = pd;
- if (!use_4lvl(vm))
+ if (!i915_vm_is_4lvl(vm))
return;
vaddr = kmap_atomic_px(pdp);
@@ -937,7 +931,7 @@ static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp;
unsigned int pml4e;
- GEM_BUG_ON(!use_4lvl(vm));
+ GEM_BUG_ON(!i915_vm_is_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
GEM_BUG_ON(pdp == vm->scratch_pdp);
@@ -1219,7 +1213,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
GEM_BUG_ON(!clone->has_read_only);
- vm->scratch_page.order = clone->scratch_page.order;
+ vm->scratch_order = clone->scratch_order;
vm->scratch_pte = clone->scratch_pte;
vm->scratch_pt = clone->scratch_pt;
vm->scratch_pd = clone->scratch_pd;
@@ -1234,7 +1228,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
vm->scratch_pte =
gen8_pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC,
- PTE_READ_ONLY);
+ vm->has_read_only);
vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
@@ -1248,7 +1242,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
goto free_pt;
}
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
@@ -1258,7 +1252,7 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
@@ -1280,7 +1274,7 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
enum vgt_g2v_type msg;
int i;
- if (use_4lvl(vm)) {
+ if (i915_vm_is_4lvl(vm)) {
const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -1310,7 +1304,7 @@ static void gen8_free_scratch(struct i915_address_space *vm)
if (!vm->scratch_page.daddr)
return;
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
free_pdp(vm, vm->scratch_pdp);
free_pd(vm, vm->scratch_pd);
free_pt(vm, vm->scratch_pt);
@@ -1356,7 +1350,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (use_4lvl(vm))
+ if (i915_vm_is_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
else
gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
@@ -1519,6 +1513,23 @@ unwind:
return -ENOMEM;
}
+static void ppgtt_init(struct drm_i915_private *i915,
+ struct i915_hw_ppgtt *ppgtt)
+{
+ kref_init(&ppgtt->ref);
+
+ ppgtt->vm.i915 = i915;
+ ppgtt->vm.dma = &i915->drm.pdev->dev;
+ ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+
+ i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+
+ ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
+ ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
+ ppgtt->vm.vma_ops.clear_pages = clear_pages;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1535,19 +1546,15 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->ref);
+ ppgtt_init(i915, ppgtt);
- ppgtt->vm.i915 = i915;
- ppgtt->vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->vm.total = HAS_FULL_48BIT_PPGTT(i915) ?
- 1ULL << 48 :
- 1ULL << 32;
-
- /* From bdw, there is support for read-only pages in the PPGTT. */
- ppgtt->vm.has_read_only = true;
-
- i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
+ /*
+ * From bdw, there is hw support for read-only pages in the PPGTT.
+ *
+ * Gen11 has HSDES#:1807136187 unresolved. Disable ro support
+ * for now.
+ */
+ ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11;
/* There are only few exceptions for gen >=6. chv and bxt.
* And we are not sure about the latter so play safe for now.
@@ -1559,7 +1566,7 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_free;
- if (use_4lvl(&ppgtt->vm)) {
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
err = setup_px(&ppgtt->vm, &ppgtt->pml4);
if (err)
goto err_scratch;
@@ -1592,11 +1599,6 @@ static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
- ppgtt->vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->vm.vma_ops.clear_pages = clear_pages;
-
return ppgtt;
err_scratch:
@@ -1672,8 +1674,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
while (num_entries) {
struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
- const unsigned int end = min(pte + num_entries, GEN6_PTES);
- const unsigned int count = end - pte;
+ const unsigned int count = min(num_entries, GEN6_PTES - pte);
gen6_pte_t *vaddr;
GEM_BUG_ON(pt == vm->scratch_pt);
@@ -1693,9 +1694,7 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
*/
vaddr = kmap_atomic_px(pt);
- do {
- vaddr[pte++] = scratch_pte;
- } while (pte < end);
+ memset32(vaddr + pte, scratch_pte, count);
kunmap_atomic(vaddr);
pte = 0;
@@ -1913,7 +1912,7 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
GEM_BUG_ON(size > ggtt->vm.total);
- vma = kmem_cache_zalloc(i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (!vma)
return ERR_PTR(-ENOMEM);
@@ -1943,6 +1942,8 @@ int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
int err;
+ GEM_BUG_ON(ppgtt->base.vm.closed);
+
/*
* Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
* which will be pinned into every active context.
@@ -1981,6 +1982,17 @@ void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
i915_vma_unpin(ppgtt->vma);
}
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ if (!ppgtt->pin_count)
+ return;
+
+ ppgtt->pin_count = 0;
+ i915_vma_unpin(ppgtt->vma);
+}
+
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
@@ -1991,25 +2003,13 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- kref_init(&ppgtt->base.ref);
-
- ppgtt->base.vm.i915 = i915;
- ppgtt->base.vm.dma = &i915->drm.pdev->dev;
-
- ppgtt->base.vm.total = I915_PDES * GEN6_PTES * I915_GTT_PAGE_SIZE;
-
- i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);
+ ppgtt_init(i915, &ppgtt->base);
ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.vm.vma_ops.bind_vma = ppgtt_bind_vma;
- ppgtt->base.vm.vma_ops.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.vm.vma_ops.set_pages = ppgtt_set_pages;
- ppgtt->base.vm.vma_ops.clear_pages = clear_pages;
-
ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;
err = gen6_ppgtt_init_scratch(ppgtt);
@@ -2087,8 +2087,7 @@ __hw_ppgtt_create(struct drm_i915_private *i915)
}
struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_i915_private *i915,
- struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_hw_ppgtt *ppgtt;
@@ -2096,19 +2095,11 @@ i915_ppgtt_create(struct drm_i915_private *i915,
if (IS_ERR(ppgtt))
return ppgtt;
- ppgtt->vm.file = fpriv;
-
trace_i915_ppgtt_create(&ppgtt->vm);
return ppgtt;
}
-void i915_ppgtt_close(struct i915_address_space *vm)
-{
- GEM_BUG_ON(vm->closed);
- vm->closed = true;
-}
-
static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
struct list_head *phases[] = {
@@ -2675,7 +2666,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
struct i915_hw_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
+ ppgtt = i915_ppgtt_create(i915);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -3701,7 +3692,7 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
}
ret = 0;
- if (unlikely(IS_ERR(vma->pages))) {
+ if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 03ade71b8d9a..f597f35b109b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -213,7 +213,6 @@ struct i915_vma;
struct i915_page_dma {
struct page *page;
- int order;
union {
dma_addr_t daddr;
@@ -293,6 +292,7 @@ struct i915_address_space {
#define VM_CLASS_PPGTT 1
u64 scratch_pte;
+ int scratch_order;
struct i915_page_dma scratch_page;
struct i915_page_table *scratch_pt;
struct i915_page_directory *scratch_pd;
@@ -348,7 +348,7 @@ struct i915_address_space {
#define i915_is_ggtt(vm) ((vm)->is_ggtt)
static inline bool
-i915_vm_is_48bit(const struct i915_address_space *vm)
+i915_vm_is_4lvl(const struct i915_address_space *vm)
{
return (vm->total - 1) >> 32;
}
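The renamed helper above infers the page-table depth from the size of the address space rather than from a hard-coded 48-bit check: any VM whose last address does not fit in 32 bits needs the 4-level layout. A small stand-alone sketch of that test, with illustrative sizes only:

#include <stdint.h>
#include <stdio.h>

static int is_4lvl(uint64_t total)
{
	/* same test as i915_vm_is_4lvl(): does the last address exceed 32 bits? */
	return ((total - 1) >> 32) != 0;
}

int main(void)
{
	printf("48b ppgtt -> 4lvl: %d\n", is_4lvl(1ULL << 48)); /* 1 */
	printf("32b ppgtt -> 4lvl: %d\n", is_4lvl(1ULL << 32)); /* 0 */
	return 0;
}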
@@ -356,7 +356,7 @@ i915_vm_is_48bit(const struct i915_address_space *vm)
static inline bool
i915_vm_has_scratch_64K(struct i915_address_space *vm)
{
- return vm->scratch_page.order == get_order(I915_GTT_PAGE_SIZE_64K);
+ return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K);
}
/* The Graphics Translation Table is the way in which GEN hardware translates a
@@ -390,12 +390,14 @@ struct i915_hw_ppgtt {
struct i915_address_space vm;
struct kref ref;
- unsigned long pd_dirty_rings;
+ intel_engine_mask_t pd_dirty_engines;
union {
struct i915_pml4 pml4; /* GEN8+ & 48b PPGTT */
struct i915_page_directory_pointer pdp; /* GEN8+ */
struct i915_page_directory pd; /* GEN6-7 */
};
+
+ u32 user_handle;
};
struct gen6_hw_ppgtt {
@@ -488,7 +490,7 @@ static inline u32 gen6_pde_index(u32 addr)
static inline unsigned int
i915_pdpes_per_pdp(const struct i915_address_space *vm)
{
- if (i915_vm_is_48bit(vm))
+ if (i915_vm_is_4lvl(vm))
return GEN8_PML4ES_PER_PML4;
return GEN8_3LVL_PDPES;
@@ -603,15 +605,16 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
+
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *fpriv);
-void i915_ppgtt_close(struct i915_address_space *vm);
-static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
+
+static inline struct i915_hw_ppgtt *i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
- if (ppgtt)
- kref_get(&ppgtt->ref);
+ kref_get(&ppgtt->ref);
+ return ppgtt;
}
+
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
if (ppgtt)
@@ -620,6 +623,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin_all(struct i915_hw_ppgtt *base);
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index fddde1033e74..ab627ed1269c 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -193,7 +193,7 @@ i915_gem_object_create_internal(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.c b/drivers/gpu/drm/i915/i915_gem_object.c
index aab8cdd80e6d..ac6a5ab84586 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/i915_gem_object.c
@@ -24,6 +24,22 @@
#include "i915_drv.h"
#include "i915_gem_object.h"
+#include "i915_globals.h"
+
+static struct i915_global_object {
+ struct i915_global base;
+ struct kmem_cache *slab_objects;
+} global;
+
+struct drm_i915_gem_object *i915_gem_object_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_objects, GFP_KERNEL);
+}
+
+void i915_gem_object_free(struct drm_i915_gem_object *obj)
+{
+ return kmem_cache_free(global.slab_objects, obj);
+}
/**
* Mark up the object's coherency levels for a given cache_level
@@ -46,3 +62,29 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
obj->cache_dirty =
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+
+static void i915_global_objects_shrink(void)
+{
+ kmem_cache_shrink(global.slab_objects);
+}
+
+static void i915_global_objects_exit(void)
+{
+ kmem_cache_destroy(global.slab_objects);
+}
+
+static struct i915_global_object global = { {
+ .shrink = i915_global_objects_shrink,
+ .exit = i915_global_objects_exit,
+} };
+
+int __init i915_global_objects_init(void)
+{
+ global.slab_objects =
+ KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_objects)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
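The object slab above is the template every other i915 global follows: create a kmem_cache at init, expose thin alloc/free wrappers, and register shrink/exit hooks with i915_global_register(). A hedged kernel-style sketch of plugging another cache into the framework (the foo names are hypothetical and only illustrate the pattern):

#include <linux/init.h>
#include <linux/slab.h>

#include "i915_globals.h"

struct i915_foo { unsigned int flags; };      /* hypothetical object */

static struct i915_global_foo {
	struct i915_global base;
	struct kmem_cache *slab_foo;
} global;

struct i915_foo *i915_foo_alloc(void)
{
	return kmem_cache_zalloc(global.slab_foo, GFP_KERNEL);
}

void i915_foo_free(struct i915_foo *foo)
{
	kmem_cache_free(global.slab_foo, foo);
}

static void i915_global_foo_shrink(void)
{
	kmem_cache_shrink(global.slab_foo);
}

static void i915_global_foo_exit(void)
{
	kmem_cache_destroy(global.slab_foo);
}

static struct i915_global_foo global = { {
	.shrink = i915_global_foo_shrink,
	.exit = i915_global_foo_exit,
} };

int __init i915_global_foo_init(void)
{
	global.slab_foo = KMEM_CACHE(i915_foo, SLAB_HWCACHE_ALIGN);
	if (!global.slab_foo)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}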
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index fab040331cdb..ca93a40c0c87 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -304,6 +304,9 @@ to_intel_bo(struct drm_gem_object *gem)
return container_of(gem, struct drm_i915_gem_object, base);
}
+struct drm_i915_gem_object *i915_gem_object_alloc(void);
+void i915_gem_object_free(struct drm_i915_gem_object *obj);
+
/**
* i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
* @filp: DRM file private date
@@ -499,5 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
-#endif
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+ struct sg_table *pages,
+ bool needs_clflush);
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90baf9086d0a..9440024c763f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -42,7 +42,7 @@ struct intel_render_state {
static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct intel_engine_cs *engine)
{
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return NULL;
switch (INTEL_GEN(engine->i915)) {
@@ -164,7 +164,7 @@ static int render_state_setup(struct intel_render_state *so,
drm_clflush_virt_range(d, i * sizeof(u32));
kunmap_atomic(d);
- ret = i915_gem_object_set_to_gtt_domain(so->obj, false);
+ ret = 0;
out:
i915_gem_obj_finish_shmem_access(so->obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 74a9661479ca..0a8082cfc761 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -565,7 +565,7 @@ _i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *obj;
unsigned int cache_level;
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 16cc9ddbce34..a9b5329dae3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -301,11 +301,11 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (!obj->bit_17) {
- obj->bit_17 = kcalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT),
- sizeof(long), GFP_KERNEL);
+ obj->bit_17 = bitmap_zalloc(obj->base.size >> PAGE_SHIFT,
+ GFP_KERNEL);
}
} else {
- kfree(obj->bit_17);
+ bitmap_free(obj->bit_17);
obj->bit_17 = NULL;
}
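bitmap_zalloc()/bitmap_free() replace the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(long), ...)/kfree() pairs here and in i915_gem_fence_reg.c above. A small userspace sketch of why the two allocations come out the same size, assuming the usual BITS_TO_LONGS definition that rounds the bit count up to whole longs:

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG    (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* stand-in for the kernel's bitmap_zalloc(nbits, gfp) */
static unsigned long *bitmap_zalloc_sketch(unsigned long nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(long));
}

int main(void)
{
	unsigned long pages = 1000;   /* e.g. obj->base.size >> PAGE_SHIFT */
	unsigned long *bit_17 = bitmap_zalloc_sketch(pages);

	if (!bit_17)
		return 1;
	printf("%lu bits -> %lu longs (%lu bytes)\n",
	       pages, (unsigned long)BITS_TO_LONGS(pages),
	       (unsigned long)(BITS_TO_LONGS(pages) * sizeof(long)));
	free(bit_17);
	return 0;
}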
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 1d3f9a31ad61..215bf3fef10c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
if (!pages)
return;
- if (obj->mm.madv != I915_MADV_WILLNEED)
- obj->mm.dirty = false;
-
+ __i915_gem_object_release_shmem(obj, pages, true);
i915_gem_gtt_finish_pages(obj, pages);
for_each_sgt_page(page, sgt_iter, pages) {
@@ -795,7 +793,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENODEV;
}
- obj = i915_gem_object_alloc(dev_priv);
+ obj = i915_gem_object_alloc();
if (obj == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_globals.c b/drivers/gpu/drm/i915/i915_globals.c
new file mode 100644
index 000000000000..81e5c2ce336b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.c
@@ -0,0 +1,125 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+
+#include "i915_active.h"
+#include "i915_gem_context.h"
+#include "i915_gem_object.h"
+#include "i915_globals.h"
+#include "i915_request.h"
+#include "i915_scheduler.h"
+#include "i915_vma.h"
+
+static LIST_HEAD(globals);
+
+static atomic_t active;
+static atomic_t epoch;
+static struct park_work {
+ struct rcu_work work;
+ int epoch;
+} park;
+
+static void i915_globals_shrink(void)
+{
+ struct i915_global *global;
+
+ /*
+ * kmem_cache_shrink() discards empty slabs and reorders partially
+ * filled slabs to prioritise allocating from the mostly full slabs,
+ * with the aim of reducing fragmentation.
+ */
+ list_for_each_entry(global, &globals, link)
+ global->shrink();
+}
+
+static void __i915_globals_park(struct work_struct *work)
+{
+ /* Confirm nothing woke up in the last grace period */
+ if (park.epoch == atomic_read(&epoch))
+ i915_globals_shrink();
+}
+
+void __init i915_global_register(struct i915_global *global)
+{
+ GEM_BUG_ON(!global->shrink);
+ GEM_BUG_ON(!global->exit);
+
+ list_add_tail(&global->link, &globals);
+}
+
+static void __i915_globals_cleanup(void)
+{
+ struct i915_global *global, *next;
+
+ list_for_each_entry_safe_reverse(global, next, &globals, link)
+ global->exit();
+}
+
+static __initconst int (* const initfn[])(void) = {
+ i915_global_active_init,
+ i915_global_context_init,
+ i915_global_gem_context_init,
+ i915_global_objects_init,
+ i915_global_request_init,
+ i915_global_scheduler_init,
+ i915_global_vma_init,
+};
+
+int __init i915_globals_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(initfn); i++) {
+ int err;
+
+ err = initfn[i]();
+ if (err) {
+ __i915_globals_cleanup();
+ return err;
+ }
+ }
+
+ INIT_RCU_WORK(&park.work, __i915_globals_park);
+ return 0;
+}
+
+void i915_globals_park(void)
+{
+ /*
+ * Defer shrinking the global slab caches (and other work) until
+ * after a RCU grace period has completed with no activity. This
+ * is to try and reduce the latency impact on the consumers caused
+ * by us shrinking the caches the same time as they are trying to
+ * allocate, with the assumption being that if we idle long enough
+ * for an RCU grace period to elapse since the last use, it is likely
+ * to be longer until we need the caches again.
+ */
+ if (!atomic_dec_and_test(&active))
+ return;
+
+ park.epoch = atomic_inc_return(&epoch);
+ queue_rcu_work(system_wq, &park.work);
+}
+
+void i915_globals_unpark(void)
+{
+ atomic_inc(&epoch);
+ atomic_inc(&active);
+}
+
+void __exit i915_globals_exit(void)
+{
+ /* Flush any residual park_work */
+ atomic_inc(&epoch);
+ flush_rcu_work(&park.work);
+
+ __i915_globals_cleanup();
+
+ /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+ rcu_barrier();
+}
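i915_globals_park() above defers the slab shrinking through an RCU work item plus an epoch counter: the shrink only runs if no i915_globals_unpark() bumped the epoch while the work was pending. A simplified, single-threaded userspace sketch of that epoch check, with the RCU deferral replaced by a direct call:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int active;
static atomic_int epoch;
static int park_epoch;

static void shrink_caches(void) { puts("shrinking slab caches"); }

static void deferred_park_work(void)
{
	/* confirm nothing woke up while the work was pending */
	if (park_epoch == atomic_load(&epoch))
		shrink_caches();
}

static void park(void)
{
	if (atomic_fetch_sub(&active, 1) != 1)
		return;                           /* someone else is still active */
	park_epoch = atomic_fetch_add(&epoch, 1) + 1;
	deferred_park_work();                     /* the driver queues this as RCU work */
}

static void unpark(void)
{
	atomic_fetch_add(&epoch, 1);
	atomic_fetch_add(&active, 1);
}

int main(void)
{
	unpark();   /* device becomes busy */
	park();     /* goes idle: epoch unchanged afterwards, so the caches shrink */
	return 0;
}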
diff --git a/drivers/gpu/drm/i915/i915_globals.h b/drivers/gpu/drm/i915/i915_globals.h
new file mode 100644
index 000000000000..04c1ce107fc0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_globals.h
@@ -0,0 +1,35 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef _I915_GLOBALS_H_
+#define _I915_GLOBALS_H_
+
+typedef void (*i915_global_func_t)(void);
+
+struct i915_global {
+ struct list_head link;
+
+ i915_global_func_t shrink;
+ i915_global_func_t exit;
+};
+
+void i915_global_register(struct i915_global *global);
+
+int i915_globals_init(void);
+void i915_globals_park(void);
+void i915_globals_unpark(void);
+void i915_globals_exit(void);
+
+/* constructors */
+int i915_global_active_init(void);
+int i915_global_context_init(void);
+int i915_global_gem_context_init(void);
+int i915_global_objects_init(void);
+int i915_global_request_init(void);
+int i915_global_scheduler_init(void);
+int i915_global_vma_init(void);
+
+#endif /* _I915_GLOBALS_H_ */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index aa6791255252..f51ff683dd2e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -380,19 +380,16 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
err_printf(m, "%s [%d]:\n", name, count);
while (count--) {
- err_printf(m, " %08x_%08x %8u %02x %02x %02x",
+ err_printf(m, " %08x_%08x %8u %02x %02x",
upper_32_bits(err->gtt_offset),
lower_32_bits(err->gtt_offset),
err->size,
err->read_domains,
- err->write_domain,
- err->wseqno);
+ err->write_domain);
err_puts(m, tiling_flag(err->tiling));
err_puts(m, dirty_flag(err->dirty));
err_puts(m, purgeable_flag(err->purgeable));
err_puts(m, err->userptr ? " userptr" : "");
- err_puts(m, err->engine != -1 ? " " : "");
- err_puts(m, engine_name(m->i915, err->engine));
err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
if (err->name)
@@ -414,7 +411,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
- if (ee->engine_id != RCS || INTEL_GEN(m->i915) <= 3)
+ if (ee->engine_id != RCS0 || INTEL_GEN(m->i915) <= 3)
return;
err_printf(m, " SC_INSTDONE: 0x%08x\n",
@@ -434,11 +431,6 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
ee->instdone.row[slice][subslice]);
}
-static const char *bannable(const struct drm_i915_error_context *ctx)
-{
- return ctx->bannable ? "" : " (unbannable)";
-}
-
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
const struct drm_i915_error_request *erq,
@@ -447,9 +439,8 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
if (!erq->seqno)
return;
- err_printf(m, "%s pid %d, ban score %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
- prefix, erq->pid, erq->ban_score,
- erq->context, erq->seqno,
+ err_printf(m, "%s pid %d, seqno %8x:%08x%s%s, prio %d, emitted %dms, start %08x, head %08x, tail %08x\n",
+ prefix, erq->pid, erq->context, erq->seqno,
test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
&erq->flags) ? "!" : "",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -463,10 +454,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct drm_i915_error_context *ctx)
{
- err_printf(m, "%s%s[%d] user_handle %d hw_id %d, prio %d, ban score %d%s guilty %d active %d\n",
- header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
- ctx->sched_attr.priority, ctx->ban_score, bannable(ctx),
- ctx->guilty, ctx->active);
+ err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->hw_id,
+ ctx->sched_attr.priority, ctx->guilty, ctx->active);
}
static void error_print_engine(struct drm_i915_error_state_buf *m,
@@ -512,13 +502,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
if (INTEL_GEN(m->i915) >= 6) {
err_printf(m, " RC PSMI: 0x%08x\n", ee->rc_psmi);
err_printf(m, " FAULT_REG: 0x%08x\n", ee->fault_reg);
- err_printf(m, " SYNC_0: 0x%08x\n",
- ee->semaphore_mboxes[0]);
- err_printf(m, " SYNC_1: 0x%08x\n",
- ee->semaphore_mboxes[1]);
- if (HAS_VEBOX(m->i915))
- err_printf(m, " SYNC_2: 0x%08x\n",
- ee->semaphore_mboxes[2]);
}
if (HAS_PPGTT(m->i915)) {
err_printf(m, " GFX_MODE: 0x%08x\n", ee->vm_info.gfx_mode);
@@ -533,8 +516,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
ee->vm_info.pp_dir_base);
}
}
- err_printf(m, " seqno: 0x%08x\n", ee->seqno);
- err_printf(m, " last_seqno: 0x%08x\n", ee->last_seqno);
err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head);
err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail);
err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n",
@@ -688,16 +669,17 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (!error->engine[i].context.pid)
continue;
- err_printf(m, "Active process (on ring %s): %s [%d], score %d%s\n",
+ err_printf(m, "Active process (on ring %s): %s [%d]\n",
engine_name(m->i915, i),
error->engine[i].context.comm,
- error->engine[i].context.pid,
- error->engine[i].context.ban_score,
- bannable(&error->engine[i].context));
+ error->engine[i].context.pid);
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
+ err_printf(m, "Subplatform: 0x%x\n",
+ intel_subplatform(&error->runtime_info,
+ error->device_info.platform));
err_print_pciid(m, m->i915);
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
@@ -779,13 +761,9 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
if (obj) {
err_puts(m, m->i915->engine[i]->name);
if (ee->context.pid)
- err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d%s)",
+ err_printf(m, " (submitted by %s [%d])",
ee->context.comm,
- ee->context.pid,
- ee->context.handle,
- ee->context.hw_id,
- ee->context.ban_score,
- bannable(&ee->context));
+ ee->context.pid);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -1061,27 +1039,6 @@ i915_error_object_create(struct drm_i915_private *i915,
return dst;
}
-/* The error capture is special as tries to run underneath the normal
- * locking rules - so we use the raw version of the i915_active_request lookup.
- */
-static inline u32
-__active_get_seqno(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->global_seqno : 0;
-}
-
-static inline int
-__active_get_engine_id(struct i915_active_request *active)
-{
- struct i915_request *request;
-
- request = __i915_active_request_peek(active);
- return request ? request->engine->id : -1;
-}
-
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
{
@@ -1090,9 +1047,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
err->size = obj->base.size;
err->name = obj->base.name;
- err->wseqno = __active_get_seqno(&obj->frontbuffer_write);
- err->engine = __active_get_engine_id(&obj->frontbuffer_write);
-
err->gtt_offset = vma->node.start;
err->read_domains = obj->read_domains;
err->write_domain = obj->write_domain;
@@ -1142,7 +1096,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static u32 i915_error_generate_code(struct i915_gpu_state *error,
- unsigned long engine_mask)
+ intel_engine_mask_t engine_mask)
{
/*
* IPEHR would be an ideal way to detect errors, as it's the gross
@@ -1178,18 +1132,6 @@ static void gem_record_fences(struct i915_gpu_state *error)
error->nfence = i;
}
-static void gen6_record_semaphore_state(struct intel_engine_cs *engine,
- struct drm_i915_error_engine *ee)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- ee->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
- ee->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
- if (HAS_VEBOX(dev_priv))
- ee->semaphore_mboxes[2] =
- I915_READ(RING_SYNC_2(engine->mmio_base));
-}
-
static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
@@ -1197,44 +1139,40 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) >= 6) {
- ee->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
- if (INTEL_GEN(dev_priv) >= 8) {
+ ee->rc_psmi = ENGINE_READ(engine, RING_PSMI_CTL);
+ if (INTEL_GEN(dev_priv) >= 8)
ee->fault_reg = I915_READ(GEN8_RING_FAULT_REG);
- } else {
- gen6_record_semaphore_state(engine, ee);
+ else
ee->fault_reg = I915_READ(RING_FAULT_REG(engine));
- }
}
if (INTEL_GEN(dev_priv) >= 4) {
- ee->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
- ee->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
- ee->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
- ee->instps = I915_READ(RING_INSTPS(engine->mmio_base));
- ee->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
+ ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
+ ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
+ ee->instps = ENGINE_READ(engine, RING_INSTPS);
+ ee->bbaddr = ENGINE_READ(engine, RING_BBADDR);
if (INTEL_GEN(dev_priv) >= 8) {
- ee->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
- ee->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
+ ee->faddr |= (u64)ENGINE_READ(engine, RING_DMA_FADD_UDW) << 32;
+ ee->bbaddr |= (u64)ENGINE_READ(engine, RING_BBADDR_UDW) << 32;
}
- ee->bbstate = I915_READ(RING_BBSTATE(engine->mmio_base));
+ ee->bbstate = ENGINE_READ(engine, RING_BBSTATE);
} else {
- ee->faddr = I915_READ(DMA_FADD_I8XX);
- ee->ipeir = I915_READ(IPEIR);
- ee->ipehr = I915_READ(IPEHR);
+ ee->faddr = ENGINE_READ(engine, DMA_FADD_I8XX);
+ ee->ipeir = ENGINE_READ(engine, IPEIR);
+ ee->ipehr = ENGINE_READ(engine, IPEHR);
}
intel_engine_get_instdone(engine, &ee->instdone);
- ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
+ ee->instpm = ENGINE_READ(engine, RING_INSTPM);
ee->acthd = intel_engine_get_active_head(engine);
- ee->seqno = intel_engine_get_seqno(engine);
- ee->last_seqno = intel_engine_last_submit(engine);
- ee->start = I915_READ_START(engine);
- ee->head = I915_READ_HEAD(engine);
- ee->tail = I915_READ_TAIL(engine);
- ee->ctl = I915_READ_CTL(engine);
+ ee->start = ENGINE_READ(engine, RING_START);
+ ee->head = ENGINE_READ(engine, RING_HEAD);
+ ee->tail = ENGINE_READ(engine, RING_TAIL);
+ ee->ctl = ENGINE_READ(engine, RING_CTL);
if (INTEL_GEN(dev_priv) > 2)
- ee->mode = I915_READ_MODE(engine);
+ ee->mode = ENGINE_READ(engine, RING_MI_MODE);
if (!HWS_NEEDS_PHYSICAL(dev_priv)) {
i915_reg_t mmio;
@@ -1242,16 +1180,17 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
if (IS_GEN(dev_priv, 7)) {
switch (engine->id) {
default:
- case RCS:
+ MISSING_CASE(engine->id);
+ case RCS0:
mmio = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
mmio = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
mmio = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
mmio = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -1276,20 +1215,23 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
ee->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
- if (IS_GEN(dev_priv, 6))
+ if (IS_GEN(dev_priv, 6)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE_READ(engine));
- else if (IS_GEN(dev_priv, 7))
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ);
+ } else if (IS_GEN(dev_priv, 7)) {
ee->vm_info.pp_dir_base =
- I915_READ(RING_PP_DIR_BASE(engine));
- else if (INTEL_GEN(dev_priv) >= 8)
+ ENGINE_READ(engine, RING_PP_DIR_BASE);
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ u32 base = engine->mmio_base;
+
for (i = 0; i < 4; i++) {
ee->vm_info.pdp[i] =
- I915_READ(GEN8_RING_PDP_UDW(engine, i));
+ I915_READ(GEN8_RING_PDP_UDW(base, i));
ee->vm_info.pdp[i] <<= 32;
ee->vm_info.pdp[i] |=
- I915_READ(GEN8_RING_PDP_LDW(engine, i));
+ I915_READ(GEN8_RING_PDP_LDW(base, i));
}
+ }
}
}
@@ -1299,10 +1241,9 @@ static void record_request(struct i915_request *request,
struct i915_gem_context *ctx = request->gem_context;
erq->flags = request->fence.flags;
- erq->context = ctx->hw_id;
+ erq->context = request->fence.context;
+ erq->seqno = request->fence.seqno;
erq->sched_attr = request->sched.attr;
- erq->ban_score = atomic_read(&ctx->ban_score);
- erq->seqno = request->global_seqno;
erq->jiffies = request->emitted_jiffies;
erq->start = i915_ggtt_offset(request->ring->vma);
erq->head = request->head;
@@ -1393,11 +1334,8 @@ static void record_context(struct drm_i915_error_context *e,
rcu_read_unlock();
}
- e->handle = ctx->user_handle;
e->hw_id = ctx->hw_id;
e->sched_attr = ctx->sched;
- e->ban_score = atomic_read(&ctx->ban_score);
- e->bannable = i915_gem_context_is_bannable(ctx);
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
}
@@ -1476,7 +1414,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
error_record_engine_registers(error, engine, ee);
error_record_engine_execlists(engine, ee);
- request = i915_gem_find_active_request(engine);
+ request = intel_engine_find_active_request(engine);
if (request) {
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
@@ -1669,7 +1607,7 @@ static void capture_reg_state(struct i915_gpu_state *error)
}
if (INTEL_GEN(dev_priv) >= 5)
- error->ccid = I915_READ(CCID);
+ error->ccid = I915_READ(CCID(RENDER_RING_BASE));
/* 3: Feature specific registers */
if (IS_GEN_RANGE(dev_priv, 6, 7)) {
@@ -1697,16 +1635,17 @@ static void capture_reg_state(struct i915_gpu_state *error)
error->gtier[0] = I915_READ(GTIER);
error->ngtier = 1;
} else if (IS_GEN(dev_priv, 2)) {
- error->ier = I915_READ16(IER);
+ error->ier = I915_READ16(GEN2_IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
- error->ier = I915_READ(IER);
+ error->ier = I915_READ(GEN2_IER);
}
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
}
static const char *
-error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
+error_msg(struct i915_gpu_state *error,
+ intel_engine_mask_t engines, const char *msg)
{
int len;
int i;
@@ -1716,7 +1655,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
engines &= ~BIT(i);
len = scnprintf(error->error_msg, sizeof(error->error_msg),
- "GPU HANG: ecode %d:%lx:0x%08x",
+ "GPU HANG: ecode %d:%x:0x%08x",
INTEL_GEN(error->i915), engines,
i915_error_generate_code(error, engines));
if (engines) {
@@ -1855,7 +1794,7 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
* to pick up.
*/
void i915_capture_error_state(struct drm_i915_private *i915,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *msg)
{
static bool warned;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 53b1f22dd365..5dc761e85d9d 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -94,8 +94,6 @@ struct i915_gpu_state {
u32 cpu_ring_head;
u32 cpu_ring_tail;
- u32 last_seqno;
-
/* Register state */
u32 start;
u32 tail;
@@ -108,24 +106,19 @@ struct i915_gpu_state {
u32 bbstate;
u32 instpm;
u32 instps;
- u32 seqno;
u64 bbaddr;
u64 acthd;
u32 fault_reg;
u64 faddr;
u32 rc_psmi; /* sleep state */
- u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
struct drm_i915_error_context {
char comm[TASK_COMM_LEN];
pid_t pid;
- u32 handle;
u32 hw_id;
- int ban_score;
int active;
int guilty;
- bool bannable;
struct i915_sched_attr sched_attr;
} context;
@@ -149,7 +142,6 @@ struct i915_gpu_state {
long jiffies;
pid_t pid;
u32 context;
- int ban_score;
u32 seqno;
u32 start;
u32 head;
@@ -170,7 +162,6 @@ struct i915_gpu_state {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 wseqno;
u64 gtt_offset;
u32 read_domains;
u32 write_domain;
@@ -179,7 +170,6 @@ struct i915_gpu_state {
u32 dirty:1;
u32 purgeable:1;
u32 userptr:1;
- s32 engine:4;
u32 cache_level:3;
} *active_bo[I915_NUM_ENGINES], *pinned_bo;
u32 active_bo_count[I915_NUM_ENGINES], pinned_bo_count;
@@ -205,38 +195,12 @@ struct i915_gpu_error {
atomic_t pending_fb_pin;
/**
- * State variable controlling the reset flow and count
- *
- * This is a counter which gets incremented when reset is triggered,
- *
- * Before the reset commences, the I915_RESET_BACKOFF bit is set
- * meaning that any waiters holding onto the struct_mutex should
- * relinquish the lock immediately in order for the reset to start.
- *
- * If reset is not completed successfully, the I915_WEDGE bit is
- * set meaning that hardware is terminally sour and there is no
- * recovery. All waiters on the reset_queue will be woken when
- * that happens.
- *
- * This counter is used by the wait_seqno code to notice that reset
- * event happened and it needs to restart the entire ioctl (since most
- * likely the seqno it waited for won't ever signal anytime soon).
- *
- * This is important for lock-free wait paths, where no contended lock
- * naturally enforces the correct ordering between the bail-out of the
- * waiter and the gpu reset work code.
- */
- unsigned long reset_count;
-
- /**
* flags: Control various stages of the GPU reset
*
- * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
- * other users acquiring the struct_mutex. To do this we set the
- * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
- * and then check for that bit before acquiring the struct_mutex (in
- * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
- * secondary role in preventing two concurrent global reset attempts.
+ * #I915_RESET_BACKOFF - When we start a global reset, we need to
+ * serialise with any other users attempting to do the same, and
+ * any global resources that may be clobbered by the reset (such as
+ * FENCE registers).
*
* #I915_RESET_ENGINE[num_engines] - Since the driver doesn't need to
* acquire the struct_mutex to reset an engine, we need an explicit
@@ -255,6 +219,9 @@ struct i915_gpu_error {
#define I915_RESET_ENGINE 2
#define I915_WEDGED (BITS_PER_LONG - 1)
+ /** Number of times the device has been reset (global) */
+ u32 reset_count;
+
/** Number of times an engine has been reset */
u32 reset_engine_count[I915_NUM_ENGINES];
@@ -272,6 +239,8 @@ struct i915_gpu_error {
*/
wait_queue_head_t reset_queue;
+ struct srcu_struct reset_backoff_srcu;
+
struct i915_gpu_restart *restart;
};
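The new reset_backoff_srcu above gives readers a lightweight way to exclude themselves from a concurrent global reset. A hedged kernel-style sketch of how such an SRCU domain is typically used (illustrative only, not the driver's actual reset code):

#include <linux/srcu.h>

DEFINE_SRCU(reset_srcu);

static void reader_path(void)
{
	int idx = srcu_read_lock(&reset_srcu);

	/* ... touch state that a concurrent reset would clobber ... */

	srcu_read_unlock(&reset_srcu, idx);
}

static void reset_path(void)
{
	/* wait for every in-flight reader before stomping on the hardware */
	synchronize_srcu(&reset_srcu);

	/* ... perform the reset ... */
}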
@@ -294,7 +263,7 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
- unsigned long engine_mask,
+ intel_engine_mask_t engine_mask,
const char *error_msg);
static inline struct i915_gpu_state *
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 441d2674b272..b92cfd69134b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -28,15 +28,19 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/sysrq.h>
-#include <linux/slab.h>
#include <linux/circ_buf.h>
-#include <drm/drm_irq.h>
+#include <linux/cpuidle.h>
+#include <linux/slab.h>
+#include <linux/sysrq.h>
+
#include <drm/drm_drv.h>
+#include <drm/drm_irq.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include "intel_psr.h"
/**
* DOC: interrupt handling
@@ -132,92 +136,120 @@ static const u32 hpd_icp[HPD_NUM_PINS] = {
[HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP
};
-/* IIR can theoretically queue up two events. Be paranoid. */
-#define GEN8_IRQ_RESET_NDX(type, which) do { \
- I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), 0); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
- POSTING_READ(GEN8_##type##_IIR(which)); \
-} while (0)
-
-#define GEN3_IRQ_RESET(type) do { \
- I915_WRITE(type##IMR, 0xffffffff); \
- POSTING_READ(type##IMR); \
- I915_WRITE(type##IER, 0); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
- I915_WRITE(type##IIR, 0xffffffff); \
- POSTING_READ(type##IIR); \
-} while (0)
-
-#define GEN2_IRQ_RESET(type) do { \
- I915_WRITE16(type##IMR, 0xffff); \
- POSTING_READ16(type##IMR); \
- I915_WRITE16(type##IER, 0); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
- I915_WRITE16(type##IIR, 0xffff); \
- POSTING_READ16(type##IIR); \
-} while (0)
+static void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
+ i915_reg_t iir, i915_reg_t ier)
+{
+ intel_uncore_write(uncore, imr, 0xffffffff);
+ intel_uncore_posting_read(uncore, imr);
+
+ intel_uncore_write(uncore, ier, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+ intel_uncore_write(uncore, iir, 0xffffffff);
+ intel_uncore_posting_read(uncore, iir);
+}
+
+static void gen2_irq_reset(struct intel_uncore *uncore)
+{
+ intel_uncore_write16(uncore, GEN2_IMR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+
+ intel_uncore_write16(uncore, GEN2_IER, 0);
+
+ /* IIR can theoretically queue up two events. Be paranoid. */
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+#define GEN8_IRQ_RESET_NDX(uncore, type, which) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_reset((uncore), GEN8_##type##_IMR(which_), \
+ GEN8_##type##_IIR(which_), GEN8_##type##_IER(which_)); \
+})
+
+#define GEN3_IRQ_RESET(uncore, type) \
+ gen3_irq_reset((uncore), type##IMR, type##IIR, type##IER)
+
+#define GEN2_IRQ_RESET(uncore) \
+ gen2_irq_reset(uncore)
/*
* We should clear IMR at preinstall/uninstall, and just check at postinstall.
*/
-static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_uncore_read(uncore, reg);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
i915_mmio_reg_offset(reg), val);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
- I915_WRITE(reg, 0xffffffff);
- POSTING_READ(reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
+ intel_uncore_write(uncore, reg, 0xffffffff);
+ intel_uncore_posting_read(uncore, reg);
}
-static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
+static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
{
- u16 val = I915_READ16(reg);
+ u16 val = intel_uncore_read16(uncore, GEN2_IIR);
if (val == 0)
return;
WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
- I915_WRITE16(reg, 0xffff);
- POSTING_READ16(reg);
-}
-
-#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
- I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
- I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
- POSTING_READ(GEN8_##type##_IMR(which)); \
-} while (0)
-
-#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
- gen3_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE(type##IER, (ier_val)); \
- I915_WRITE(type##IMR, (imr_val)); \
- POSTING_READ(type##IMR); \
-} while (0)
-
-#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
- gen2_assert_iir_is_zero(dev_priv, type##IIR); \
- I915_WRITE16(type##IER, (ier_val)); \
- I915_WRITE16(type##IMR, (imr_val)); \
- POSTING_READ16(type##IMR); \
-} while (0)
+ i915_mmio_reg_offset(GEN2_IIR), val);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+ intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
+ intel_uncore_posting_read16(uncore, GEN2_IIR);
+}
+
+static void gen3_irq_init(struct intel_uncore *uncore,
+ i915_reg_t imr, u32 imr_val,
+ i915_reg_t ier, u32 ier_val,
+ i915_reg_t iir)
+{
+ gen3_assert_iir_is_zero(uncore, iir);
+
+ intel_uncore_write(uncore, ier, ier_val);
+ intel_uncore_write(uncore, imr, imr_val);
+ intel_uncore_posting_read(uncore, imr);
+}
+
+static void gen2_irq_init(struct intel_uncore *uncore,
+ u32 imr_val, u32 ier_val)
+{
+ gen2_assert_iir_is_zero(uncore);
+
+ intel_uncore_write16(uncore, GEN2_IER, ier_val);
+ intel_uncore_write16(uncore, GEN2_IMR, imr_val);
+ intel_uncore_posting_read16(uncore, GEN2_IMR);
+}
+
+#define GEN8_IRQ_INIT_NDX(uncore, type, which, imr_val, ier_val) \
+({ \
+ unsigned int which_ = which; \
+ gen3_irq_init((uncore), \
+ GEN8_##type##_IMR(which_), imr_val, \
+ GEN8_##type##_IER(which_), ier_val, \
+ GEN8_##type##_IIR(which_)); \
+})
+
+#define GEN3_IRQ_INIT(uncore, type, imr_val, ier_val) \
+ gen3_irq_init((uncore), \
+ type##IMR, imr_val, \
+ type##IER, ier_val, \
+ type##IIR)
+
+#define GEN2_IRQ_INIT(uncore, imr_val, ier_val) \
+ gen2_irq_init((uncore), imr_val, ier_val)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
@@ -268,7 +300,7 @@ static bool gen11_reset_one_iir(struct drm_i915_private * const i915,
const unsigned int bank,
const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 dw;
lockdep_assert_held(&i915->irq_lock);
@@ -365,24 +397,41 @@ static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}
-static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
+static void write_pm_imr(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_MASK;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IMR(2);
- else
- return GEN6_PMIMR;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_imr;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_MASK;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IMR(2);
+ } else {
+ reg = GEN6_PMIMR;
+ }
+
+ I915_WRITE(reg, mask);
+ POSTING_READ(reg);
}
-static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
+static void write_pm_ier(struct drm_i915_private *dev_priv)
{
- if (INTEL_GEN(dev_priv) >= 11)
- return GEN11_GPM_WGBOXPERF_INTR_ENABLE;
- else if (INTEL_GEN(dev_priv) >= 8)
- return GEN8_GT_IER(2);
- else
- return GEN6_PMIER;
+ i915_reg_t reg;
+ u32 mask = dev_priv->pm_ier;
+
+ if (INTEL_GEN(dev_priv) >= 11) {
+ reg = GEN11_GPM_WGBOXPERF_INTR_ENABLE;
+ /* pm is in upper half */
+ mask = mask << 16;
+ } else if (INTEL_GEN(dev_priv) >= 8) {
+ reg = GEN8_GT_IER(2);
+ } else {
+ reg = GEN6_PMIER;
+ }
+
+ I915_WRITE(reg, mask);
}
/**
@@ -407,8 +456,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
if (new_val != dev_priv->pm_imr) {
dev_priv->pm_imr = new_val;
- I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
- POSTING_READ(gen6_pm_imr(dev_priv));
+ write_pm_imr(dev_priv);
}
}
@@ -449,7 +497,7 @@ static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mas
lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
gen6_unmask_pm_irq(dev_priv, enable_mask);
/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}
@@ -460,7 +508,7 @@ static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_m
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
- I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
+ write_pm_ier(dev_priv);
	/* though a barrier is missing here, we don't really need one */
}
@@ -748,13 +796,21 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
POSTING_READ(reg);
}
+static bool i915_has_asle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->opregion.asle)
+ return false;
+
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
/**
* i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
* @dev_priv: i915 device private
*/
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
- if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
+ if (!i915_has_asle(dev_priv))
return;
spin_lock_irq(&dev_priv->irq_lock);
@@ -1288,6 +1344,18 @@ static void gen6_pm_rps_work(struct work_struct *work)
rps->last_adj = adj;
+ /*
+ * Limit deboosting and boosting to keep ourselves at the extremes
+ * when in the respective power modes (i.e. slowly decrease frequencies
+ * while in the HIGH_POWER zone and slowly increase frequencies while
+ * in the LOW_POWER zone). On idle, we will hit the timeout and drop
+ * to the next level quickly, and conversely if busy we expect to
+ * hit a waitboost and rapidly switch into max power.
+ */
+ if ((adj < 0 && rps->power.mode == HIGH_POWER) ||
+ (adj > 0 && rps->power.mode == LOW_POWER))
+ rps->last_adj = 0;
+
/* sysfs frequency interfaces may have snuck in while servicing the
* interrupt
*/
@@ -1415,20 +1483,20 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & ILK_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
}
static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
u32 gt_iir)
{
if (gt_iir & GT_RENDER_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (gt_iir & GT_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (gt_iir & GT_BLT_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[BCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[BCS0]);
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
@@ -1449,7 +1517,7 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_breadcrumbs_irq(engine);
- tasklet |= USES_GUC_SUBMISSION(engine->i915);
+ tasklet |= intel_engine_needs_breadcrumb_tasklet(engine);
}
if (tasklet)
@@ -1459,12 +1527,12 @@ gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
static void gen8_gt_irq_ack(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
#define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \
GEN8_GT_BCS_IRQ | \
+ GEN8_GT_VCS0_IRQ | \
GEN8_GT_VCS1_IRQ | \
- GEN8_GT_VCS2_IRQ | \
GEN8_GT_VECS_IRQ | \
GEN8_GT_PM_IRQ | \
GEN8_GT_GUC_IRQ)
@@ -1475,7 +1543,7 @@ static void gen8_gt_irq_ack(struct drm_i915_private *i915,
raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
if (likely(gt_iir[1]))
raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
@@ -1498,21 +1566,21 @@ static void gen8_gt_irq_handler(struct drm_i915_private *i915,
u32 master_ctl, u32 gt_iir[4])
{
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gen8_cs_irq_handler(i915->engine[RCS],
+ gen8_cs_irq_handler(i915->engine[RCS0],
gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[BCS],
+ gen8_cs_irq_handler(i915->engine[BCS0],
gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
}
- if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
- gen8_cs_irq_handler(i915->engine[VCS],
+ if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
+ gen8_cs_irq_handler(i915->engine[VCS0],
+ gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
+ gen8_cs_irq_handler(i915->engine[VCS1],
gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
- gen8_cs_irq_handler(i915->engine[VCS2],
- gt_iir[1] >> GEN8_VCS2_IRQ_SHIFT);
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- gen8_cs_irq_handler(i915->engine[VECS],
+ gen8_cs_irq_handler(i915->engine[VECS0],
gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
}
@@ -1693,7 +1761,9 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
{
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- u32 crcs[5];
+ u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
+
+ trace_intel_pipe_crc(crtc, crcs);
spin_lock(&pipe_crc->lock);
/*
@@ -1712,11 +1782,6 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
}
spin_unlock(&pipe_crc->lock);
- crcs[0] = crc0;
- crcs[1] = crc1;
- crcs[2] = crc2;
- crcs[3] = crc3;
- crcs[4] = crc4;
drm_crtc_add_crc_entry(&crtc->base, true,
drm_crtc_accurate_vblank_count(&crtc->base),
crcs);
@@ -1775,6 +1840,25 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
/* The RPS events need forcewake, so we add them to a work queue and mask their
* IMR bits until the work is done. Other interrupts can be processed without
* the work queue. */
+static void gen11_rps_irq_handler(struct drm_i915_private *i915, u32 pm_iir)
+{
+ struct intel_rps *rps = &i915->gt_pm.rps;
+ const u32 events = i915->pm_rps_events & pm_iir;
+
+ lockdep_assert_held(&i915->irq_lock);
+
+ if (unlikely(!events))
+ return;
+
+ gen6_mask_pm_irq(i915, events);
+
+ if (!rps->interrupts_enabled)
+ return;
+
+ rps->pm_iir |= events;
+ schedule_work(&rps->work);
+}
+
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
@@ -1792,13 +1876,11 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
if (INTEL_GEN(dev_priv) >= 8)
return;
- if (HAS_VEBOX(dev_priv)) {
- if (pm_iir & PM_VEBOX_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VECS]);
+ if (pm_iir & PM_VEBOX_USER_INTERRUPT)
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]);
- if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
- DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
- }
+ if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+ DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
}
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
@@ -2667,6 +2749,25 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
}
+static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
+{
+ u32 mask = GEN8_AUX_CHANNEL_A;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ mask |= GEN9_AUX_CHANNEL_B |
+ GEN9_AUX_CHANNEL_C |
+ GEN9_AUX_CHANNEL_D;
+
+ if (IS_CNL_WITH_PORT_F(dev_priv))
+ mask |= CNL_AUX_CHANNEL_F;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ mask |= ICL_AUX_CHANNEL_E |
+ CNL_AUX_CHANNEL_F;
+
+ return mask;
+}
+
static irqreturn_t
gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
@@ -2722,20 +2823,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PORT_IIR, iir);
ret = IRQ_HANDLED;
- tmp_mask = GEN8_AUX_CHANNEL_A;
- if (INTEL_GEN(dev_priv) >= 9)
- tmp_mask |= GEN9_AUX_CHANNEL_B |
- GEN9_AUX_CHANNEL_C |
- GEN9_AUX_CHANNEL_D;
-
- if (INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= ICL_AUX_CHANNEL_E;
-
- if (IS_CNL_WITH_PORT_F(dev_priv) ||
- INTEL_GEN(dev_priv) >= 11)
- tmp_mask |= CNL_AUX_CHANNEL_F;
-
- if (iir & tmp_mask) {
+ if (iir & gen8_de_port_aux_mask(dev_priv)) {
dp_aux_irq_handler(dev_priv);
found = true;
}
@@ -2816,11 +2904,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(SDEIIR, iir);
ret = IRQ_HANDLED;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_handler(dev_priv, iir);
- else if (HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
spt_irq_handler(dev_priv, iir);
else
cpt_irq_handler(dev_priv, iir);
@@ -2857,7 +2943,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
struct drm_i915_private *dev_priv = to_i915(arg);
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
u32 gt_iir[4];
@@ -2891,7 +2977,7 @@ static u32
gen11_gt_engine_identity(struct drm_i915_private * const i915,
const unsigned int bank, const unsigned int bit)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 timeout_ts;
u32 ident;
@@ -2926,7 +3012,7 @@ gen11_other_irq_handler(struct drm_i915_private * const i915,
const u8 instance, const u16 iir)
{
if (instance == OTHER_GTPM_INSTANCE)
- return gen6_rps_irq_handler(i915, iir);
+ return gen11_rps_irq_handler(i915, iir);
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
@@ -2975,7 +3061,7 @@ static void
gen11_gt_bank_handler(struct drm_i915_private * const i915,
const unsigned int bank)
{
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
unsigned long intr_dw;
unsigned int bit;
@@ -2983,14 +3069,8 @@ gen11_gt_bank_handler(struct drm_i915_private * const i915,
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
- if (unlikely(!intr_dw)) {
- DRM_ERROR("GT_INTR_DW%u blank!\n", bank);
- return;
- }
-
for_each_set_bit(bit, &intr_dw, 32) {
- const u32 ident = gen11_gt_engine_identity(i915,
- bank, bit);
+ const u32 ident = gen11_gt_engine_identity(i915, bank, bit);
gen11_gt_identity_handler(i915, ident);
}
@@ -3018,7 +3098,7 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
static u32
gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
{
- void __iomem * const regs = dev_priv->regs;
+ void __iomem * const regs = dev_priv->uncore.regs;
u32 iir;
if (!(master_ctl & GEN11_GU_MISC_IRQ))
@@ -3059,7 +3139,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs)
static irqreturn_t gen11_irq_handler(int irq, void *arg)
{
struct drm_i915_private * const i915 = to_i915(arg);
- void __iomem * const regs = i915->regs;
+ void __iomem * const regs = i915->uncore.regs;
u32 master_ctl;
u32 gu_misc_iir;
@@ -3112,6 +3192,16 @@ static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
+static int i945gm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ if (dev_priv->i945gm_vblank.enabled++ == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+
+ return i8xx_enable_vblank(dev, pipe);
+}
+
static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3176,6 +3266,16 @@ static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ i8xx_disable_vblank(dev, pipe);
+
+ if (--dev_priv->i945gm_vblank.enabled == 0)
+ schedule_work(&dev_priv->i945gm_vblank.work);
+}
+
static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3209,12 +3309,68 @@ static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+static void i945gm_vblank_work_func(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, struct drm_i915_private, i945gm_vblank.work);
+
+ /*
+ * Vblank interrupts fail to wake up the device from C3,
+ * hence we want to prevent C3 usage while vblank interrupts
+ * are enabled.
+ */
+ pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos,
+ READ_ONCE(dev_priv->i945gm_vblank.enabled) ?
+ dev_priv->i945gm_vblank.c3_disable_latency :
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static int cstate_disable_latency(const char *name)
+{
+ const struct cpuidle_driver *drv;
+ int i;
+
+ drv = cpuidle_get_driver();
+ if (!drv)
+ return 0;
+
+ for (i = 0; i < drv->state_count; i++) {
+ const struct cpuidle_state *state = &drv->states[i];
+
+ if (!strcmp(state->name, name))
+ return state->exit_latency ?
+ state->exit_latency - 1 : 0;
+ }
+
+ return 0;
+}
+
+static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv)
+{
+ INIT_WORK(&dev_priv->i945gm_vblank.work,
+ i945gm_vblank_work_func);
+
+ dev_priv->i945gm_vblank.c3_disable_latency =
+ cstate_disable_latency("C3");
+ pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos,
+ PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+}
+
+static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv)
+{
+ cancel_work_sync(&dev_priv->i945gm_vblank.work);
+ pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos);
+}
+
static void ibx_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (HAS_PCH_NOP(dev_priv))
return;
- GEN3_IRQ_RESET(SDE);
+ GEN3_IRQ_RESET(uncore, SDE);
if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
I915_WRITE(SERR_INT, 0xffffffff);
@@ -3242,13 +3398,17 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN3_IRQ_RESET(GT);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN3_IRQ_RESET(uncore, GT);
if (INTEL_GEN(dev_priv) >= 6)
- GEN3_IRQ_RESET(GEN6_PM);
+ GEN3_IRQ_RESET(uncore, GEN6_PM);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
else
@@ -3259,12 +3419,14 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET(VLV_);
+ GEN3_IRQ_RESET(uncore, VLV_);
dev_priv->irq_mask = ~0u;
}
static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 pipestat_mask;
u32 enable_mask;
enum pipe pipe;
@@ -3289,7 +3451,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->irq_mask = ~enable_mask;
- GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}
/* drm_dma.h hooks
@@ -3297,8 +3459,9 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
static void ironlake_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
- GEN3_IRQ_RESET(DE);
+ GEN3_IRQ_RESET(uncore, DE);
if (IS_GEN(dev_priv, 7))
I915_WRITE(GEN7_ERR_INT, 0xffffffff);
@@ -3329,18 +3492,21 @@ static void valleyview_irq_reset(struct drm_device *dev)
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
- GEN8_IRQ_RESET_NDX(GT, 0);
- GEN8_IRQ_RESET_NDX(GT, 1);
- GEN8_IRQ_RESET_NDX(GT, 2);
- GEN8_IRQ_RESET_NDX(GT, 3);
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
+ GEN8_IRQ_RESET_NDX(uncore, GT, 0);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 1);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 2);
+ GEN8_IRQ_RESET_NDX(uncore, GT, 3);
}
static void gen8_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen8_master_intr_disable(dev_priv->regs);
+ gen8_master_intr_disable(dev_priv->uncore.regs);
gen8_gt_irq_reset(dev_priv);
@@ -3350,11 +3516,11 @@ static void gen8_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_reset(dev_priv);
@@ -3380,9 +3546,10 @@ static void gen11_gt_irq_reset(struct drm_i915_private *dev_priv)
static void gen11_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
int pipe;
- gen11_master_intr_disable(dev_priv->regs);
+ gen11_master_intr_disable(dev_priv->uncore.regs);
gen11_gt_irq_reset(dev_priv);
@@ -3394,21 +3561,23 @@ static void gen11_irq_reset(struct drm_device *dev)
for_each_pipe(dev_priv, pipe)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
- GEN3_IRQ_RESET(GEN8_DE_PORT_);
- GEN3_IRQ_RESET(GEN8_DE_MISC_);
- GEN3_IRQ_RESET(GEN11_DE_HPD_);
- GEN3_IRQ_RESET(GEN11_GU_MISC_);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
+ GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);
+ GEN3_IRQ_RESET(uncore, GEN11_GU_MISC_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
- if (HAS_PCH_ICP(dev_priv))
- GEN3_IRQ_RESET(SDE);
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
+ GEN3_IRQ_RESET(uncore, SDE);
}
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
enum pipe pipe;
@@ -3420,7 +3589,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
~dev_priv->de_irq_mask[pipe] | extra_ier);
@@ -3430,6 +3599,7 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
enum pipe pipe;
spin_lock_irq(&dev_priv->irq_lock);
@@ -3440,7 +3610,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
}
for_each_pipe_masked(dev_priv, pipe, pipe_mask)
- GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
+ GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -3451,13 +3621,14 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
static void cherryview_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
I915_WRITE(GEN8_MASTER_IRQ, 0);
POSTING_READ(GEN8_MASTER_IRQ);
gen8_gt_irq_reset(dev_priv);
- GEN3_IRQ_RESET(GEN8_PCU_);
+ GEN3_IRQ_RESET(uncore, GEN8_PCU_);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
@@ -3583,7 +3754,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
gen11_hpd_detection_setup(dev_priv);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_hpd_irq_setup(dev_priv);
}
@@ -3729,7 +3900,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
else
mask = SDE_GMBUS_CPT;
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
@@ -3742,6 +3913,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 pm_irqs, gt_irqs;
pm_irqs = gt_irqs = 0;
@@ -3760,26 +3932,27 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
}
- GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
+ GEN3_IRQ_INIT(uncore, GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_GEN(dev_priv) >= 6) {
/*
* RPS interrupts will get enabled/disabled on demand when RPS
* itself is enabled/disabled.
*/
- if (HAS_VEBOX(dev_priv)) {
+ if (HAS_ENGINE(dev_priv, VECS0)) {
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT;
}
dev_priv->pm_imr = 0xffffffff;
- GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs);
+ GEN3_IRQ_INIT(uncore, GEN6_PM, dev_priv->pm_imr, pm_irqs);
}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 display_mask, extra_mask;
if (INTEL_GEN(dev_priv) >= 7) {
@@ -3798,7 +3971,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
}
if (IS_HASWELL(dev_priv)) {
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
display_mask |= DE_EDP_PSR_INT_HSW;
}
@@ -3807,7 +3980,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
ibx_irq_pre_postinstall(dev);
- GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
+ GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
+ display_mask | extra_mask);
gen5_gt_irq_postinstall(dev);
@@ -3877,35 +4051,42 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
/* These are interrupts we'll toggle with the ring mask register */
u32 gt_interrupts[] = {
- GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
+ (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
+
0,
- GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
- };
+
+ (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
+ GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ };
dev_priv->pm_ier = 0x0;
dev_priv->pm_imr = ~dev_priv->pm_ier;
- GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
- GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
/*
* RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled. Same will be the case for GuC interrupts.
*/
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
- GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 2, dev_priv->pm_imr, dev_priv->pm_ier);
+ GEN8_IRQ_INIT_NDX(uncore, GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
+
u32 de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
u32 de_pipe_enables;
u32 de_port_masked = GEN8_AUX_CHANNEL_A;
@@ -3941,7 +4122,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (IS_BROADWELL(dev_priv))
de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
- gen3_assert_iir_is_zero(dev_priv, EDP_PSR_IIR);
+ gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
for_each_pipe(dev_priv, pipe) {
@@ -3949,20 +4130,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_PIPE(pipe)))
- GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
+ GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
dev_priv->de_irq_mask[pipe],
de_pipe_enables);
}
- GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
- GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
+ GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
if (INTEL_GEN(dev_priv) >= 11) {
u32 de_hpd_masked = 0;
u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
GEN11_DE_TBT_HOTPLUG_MASK;
- GEN3_IRQ_INIT(GEN11_DE_HPD_, ~de_hpd_masked, de_hpd_enables);
+ GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
+ de_hpd_enables);
gen11_hpd_detection_setup(dev_priv);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_hpd_detection_setup(dev_priv);
@@ -3984,7 +4166,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev);
- gen8_master_intr_enable(dev_priv->regs);
+ gen8_master_intr_enable(dev_priv->uncore.regs);
return 0;
}
@@ -4025,7 +4207,7 @@ static void icp_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
- gen3_assert_iir_is_zero(dev_priv, SDEIIR);
+ gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
I915_WRITE(SDEIMR, ~mask);
icp_hpd_detection_setup(dev_priv);
@@ -4034,19 +4216,20 @@ static void icp_irq_postinstall(struct drm_device *dev)
static int gen11_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 gu_misc_masked = GEN11_GU_MISC_GSE;
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
icp_irq_postinstall(dev);
gen11_gt_irq_postinstall(dev_priv);
gen8_de_irq_postinstall(dev_priv);
- GEN3_IRQ_INIT(GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
+ GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked);
I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE);
- gen11_master_intr_enable(dev_priv->regs);
+ gen11_master_intr_enable(dev_priv->uncore.regs);
POSTING_READ(GEN11_GFX_MSTR_IRQ);
return 0;
@@ -4072,15 +4255,17 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
static void i8xx_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i9xx_pipestat_irq_reset(dev_priv);
- GEN2_IRQ_RESET();
+ GEN2_IRQ_RESET(uncore);
}
static int i8xx_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u16 enable_mask;
I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4098,7 +4283,7 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
I915_MASTER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
- GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN2_IRQ_INIT(uncore, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4202,7 +4387,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 eir = 0, eir_stuck = 0;
u16 iir;
- iir = I915_READ16(IIR);
+ iir = I915_READ16(GEN2_IIR);
if (iir == 0)
break;
@@ -4215,10 +4400,10 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE16(IIR, iir);
+ I915_WRITE16(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i8xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4234,6 +4419,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
static void i915_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
if (I915_HAS_HOTPLUG(dev_priv)) {
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4242,12 +4428,13 @@ static void i915_irq_reset(struct drm_device *dev)
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i915_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE |
@@ -4274,7 +4461,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4306,7 +4493,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4323,10 +4510,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4345,18 +4532,20 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
static void i965_irq_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
i9xx_pipestat_irq_reset(dev_priv);
- GEN3_IRQ_RESET();
+ GEN3_IRQ_RESET(uncore, GEN2_);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 enable_mask;
u32 error_mask;
@@ -4394,7 +4583,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev_priv))
enable_mask |= I915_BSD_USER_INTERRUPT;
- GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);
+ GEN3_IRQ_INIT(uncore, GEN2_, dev_priv->irq_mask, enable_mask);
/* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked check happy. */
@@ -4452,7 +4641,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
u32 hotplug_status = 0;
u32 iir;
- iir = I915_READ(IIR);
+ iir = I915_READ(GEN2_IIR);
if (iir == 0)
break;
@@ -4468,13 +4657,13 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck);
- I915_WRITE(IIR, iir);
+ I915_WRITE(GEN2_IIR, iir);
if (iir & I915_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[RCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[RCS0]);
if (iir & I915_BSD_USER_INTERRUPT)
- intel_engine_breadcrumbs_irq(dev_priv->engine[VCS]);
+ intel_engine_breadcrumbs_irq(dev_priv->engine[VCS0]);
if (iir & I915_MASTER_ERROR_INTERRUPT)
i9xx_error_irq_handler(dev_priv, eir, eir_stuck);
@@ -4503,6 +4692,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
struct intel_rps *rps = &dev_priv->gt_pm.rps;
int i;
+ if (IS_I945GM(dev_priv))
+ i945gm_vblank_work_init(dev_priv);
+
intel_hpd_init_work(dev_priv);
INIT_WORK(&rps->work, gen6_pm_rps_work);
@@ -4523,6 +4715,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
GEN6_PM_RP_DOWN_THRESHOLD |
GEN6_PM_RP_DOWN_TIMEOUT);
+	/* We share the register with the other engines */
+ if (INTEL_GEN(dev_priv) > 9)
+ GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000);
+
rps->pm_intrmsk_mbz = 0;
/*
@@ -4542,13 +4738,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 3)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
- /*
- * Opt out of the vblank disable timer on everything except gen2.
- * Gen2 doesn't have a hardware frame counter and so depends on
- * vblank interrupts to produce sane vblank seuquence numbers.
- */
- if (!IS_GEN(dev_priv, 2))
- dev->vblank_disable_immediate = true;
+ dev->vblank_disable_immediate = true;
/* Most platforms treat the display irq block as an always-on
* power domain. vlv/chv can disable it at runtime and need
@@ -4605,8 +4795,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->disable_vblank = gen8_disable_vblank;
if (IS_GEN9_LP(dev_priv))
dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
- HAS_PCH_CNP(dev_priv))
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
else
dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
@@ -4626,6 +4815,13 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
dev->driver->irq_uninstall = i8xx_irq_reset;
dev->driver->enable_vblank = i8xx_enable_vblank;
dev->driver->disable_vblank = i8xx_disable_vblank;
+ } else if (IS_I945GM(dev_priv)) {
+ dev->driver->irq_preinstall = i915_irq_reset;
+ dev->driver->irq_postinstall = i915_irq_postinstall;
+ dev->driver->irq_uninstall = i915_irq_reset;
+ dev->driver->irq_handler = i915_irq_handler;
+ dev->driver->enable_vblank = i945gm_enable_vblank;
+ dev->driver->disable_vblank = i945gm_disable_vblank;
} else if (IS_GEN(dev_priv, 3)) {
dev->driver->irq_preinstall = i915_irq_reset;
dev->driver->irq_postinstall = i915_irq_postinstall;
@@ -4656,6 +4852,9 @@ void intel_irq_fini(struct drm_i915_private *i915)
{
int i;
+ if (IS_I945GM(i915))
+ i945gm_vblank_work_fini(i915);
+
for (i = 0; i < MAX_L3_SLICES; ++i)
kfree(i915->l3_parity.remap_info[i]);
}
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 66f82f3f050f..f893c2cbce15 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -28,14 +28,45 @@
#include <drm/drm_drv.h>
-#include "i915_active.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_selftest.h"
+#include "intel_fbdev.h"
-#define PLATFORM(x) .platform = (x), .platform_mask = BIT(x)
+#define PLATFORM(x) .platform = (x)
#define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1)
-#define GEN_DEFAULT_PIPEOFFSETS \
+#define I845_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ }
+
+#define I9XX_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ }
+
+#define IVB_PIPE_OFFSETS \
+ .pipe_offsets = { \
+ [TRANSCODER_A] = PIPE_A_OFFSET, \
+ [TRANSCODER_B] = PIPE_B_OFFSET, \
+ [TRANSCODER_C] = PIPE_C_OFFSET, \
+ }, \
+ .trans_offsets = { \
+ [TRANSCODER_A] = TRANSCODER_A_OFFSET, \
+ [TRANSCODER_B] = TRANSCODER_B_OFFSET, \
+ [TRANSCODER_C] = TRANSCODER_C_OFFSET, \
+ }
+
+#define HSW_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -49,7 +80,7 @@
[TRANSCODER_EDP] = TRANSCODER_EDP_OFFSET, \
}
-#define GEN_CHV_PIPEOFFSETS \
+#define CHV_PIPE_OFFSETS \
.pipe_offsets = { \
[TRANSCODER_A] = PIPE_A_OFFSET, \
[TRANSCODER_B] = PIPE_B_OFFSET, \
@@ -61,21 +92,48 @@
[TRANSCODER_C] = CHV_TRANSCODER_C_OFFSET, \
}
-#define CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
+#define I845_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ }
+
+#define I9XX_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ }
+
+#define CHV_CURSOR_OFFSETS \
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = CURSOR_B_OFFSET, \
+ [PIPE_C] = CHV_CURSOR_C_OFFSET, \
+ }
#define IVB_CURSOR_OFFSETS \
- .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
+ .cursor_offsets = { \
+ [PIPE_A] = CURSOR_A_OFFSET, \
+ [PIPE_B] = IVB_CURSOR_B_OFFSET, \
+ [PIPE_C] = IVB_CURSOR_C_OFFSET, \
+ }
-#define BDW_COLORS \
- .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
+#define I9XX_COLORS \
+ .color = { .gamma_lut_size = 256 }
+#define I965_COLORS \
+ .color = { .gamma_lut_size = 129, \
+ .gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
+ }
+#define ILK_COLORS \
+ .color = { .gamma_lut_size = 1024 }
+#define IVB_COLORS \
+ .color = { .degamma_lut_size = 1024, .gamma_lut_size = 1024 }
#define CHV_COLORS \
.color = { .degamma_lut_size = 65, .gamma_lut_size = 257, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
.gamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING, \
}
#define GLK_COLORS \
- .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024, \
.degamma_lut_tests = DRM_COLOR_LUT_NON_DECREASING | \
DRM_COLOR_LUT_EQUAL_CHANNELS, \
}
@@ -85,7 +143,26 @@
#define GEN_DEFAULT_PAGE_SIZES \
.page_sizes = I915_GTT_PAGE_SIZE_4K
-#define GEN2_FEATURES \
+#define I830_FEATURES \
+ GEN(2), \
+ .is_mobile = 1, \
+ .num_pipes = 2, \
+ .display.has_overlay = 1, \
+ .display.cursor_needs_physical = 1, \
+ .display.overlay_needs_physical = 1, \
+ .display.has_gmch = 1, \
+ .gpu_reset_clobbers_display = true, \
+ .hws_needs_physical = 1, \
+ .unfenced_needs_alignment = 1, \
+ .engine_mask = BIT(RCS0), \
+ .has_snoop = true, \
+ .has_coherent_ggtt = false, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
+
+#define I845_FEATURES \
GEN(2), \
.num_pipes = 1, \
.display.has_overlay = 1, \
@@ -94,37 +171,32 @@
.gpu_reset_clobbers_display = true, \
.hws_needs_physical = 1, \
.unfenced_needs_alignment = 1, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = false, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I845_PIPE_OFFSETS, \
+ I845_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i830_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I830),
- .is_mobile = 1,
- .display.cursor_needs_physical = 1,
- .num_pipes = 2, /* legal, last one wins */
};
static const struct intel_device_info intel_i845g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I845G),
};
static const struct intel_device_info intel_i85x_info = {
- GEN2_FEATURES,
+ I830_FEATURES,
PLATFORM(INTEL_I85X),
- .is_mobile = 1,
- .num_pipes = 2, /* legal, last one wins */
- .display.cursor_needs_physical = 1,
.display.has_fbc = 1,
};
static const struct intel_device_info intel_i865g_info = {
- GEN2_FEATURES,
+ I845_FEATURES,
PLATFORM(INTEL_I865G),
};
@@ -133,12 +205,13 @@ static const struct intel_device_info intel_i865g_info = {
.num_pipes = 2, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I9XX_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i915g_info = {
GEN3_FEATURES,
@@ -196,7 +269,14 @@ static const struct intel_device_info intel_g33_info = {
.display.has_overlay = 1,
};
-static const struct intel_device_info intel_pineview_info = {
+static const struct intel_device_info intel_pineview_g_info = {
+ GEN3_FEATURES,
+ PLATFORM(INTEL_PINEVIEW),
+ .display.has_hotplug = 1,
+ .display.has_overlay = 1,
+};
+
+static const struct intel_device_info intel_pineview_m_info = {
GEN3_FEATURES,
PLATFORM(INTEL_PINEVIEW),
.is_mobile = 1,
@@ -210,12 +290,13 @@ static const struct intel_device_info intel_pineview_info = {
.display.has_hotplug = 1, \
.display.has_gmch = 1, \
.gpu_reset_clobbers_display = true, \
- .ring_mask = RENDER_RING, \
+ .engine_mask = BIT(RCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ I965_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_i965g_info = {
GEN4_FEATURES,
@@ -239,7 +320,7 @@ static const struct intel_device_info intel_i965gm_info = {
static const struct intel_device_info intel_g45_info = {
GEN4_FEATURES,
PLATFORM(INTEL_G45),
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -249,7 +330,7 @@ static const struct intel_device_info intel_gm45_info = {
.is_mobile = 1,
.display.has_fbc = 1,
.display.supports_tv = 1,
- .ring_mask = RENDER_RING | BSD_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0),
.gpu_reset_clobbers_display = false,
};
@@ -257,14 +338,15 @@ static const struct intel_device_info intel_gm45_info = {
GEN(5), \
.num_pipes = 2, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0), \
.has_snoop = true, \
.has_coherent_ggtt = true, \
/* ilk does support rc6, but we do not implement [power] contexts */ \
.has_rc6 = 0, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_ironlake_d_info = {
GEN5_FEATURES,
@@ -283,15 +365,17 @@ static const struct intel_device_info intel_ironlake_m_info = {
.num_pipes = 2, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_ALIASING, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
+ .ppgtt_size = 31, \
+ I9XX_PIPE_OFFSETS, \
+ I9XX_CURSOR_OFFSETS, \
+ ILK_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define SNB_D_PLATFORM \
GEN6_FEATURES, \
@@ -328,15 +412,17 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = {
.num_pipes = 3, \
.display.has_hotplug = 1, \
.display.has_fbc = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \
.has_coherent_ggtt = true, \
.has_llc = 1, \
.has_rc6 = 1, \
.has_rc6p = 1, \
- .ppgtt = INTEL_PPGTT_FULL, \
- GEN_DEFAULT_PIPEOFFSETS, \
- GEN_DEFAULT_PAGE_SIZES, \
- IVB_CURSOR_OFFSETS
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 31, \
+ IVB_PIPE_OFFSETS, \
+ IVB_CURSOR_OFFSETS, \
+ IVB_COLORS, \
+ GEN_DEFAULT_PAGE_SIZES
#define IVB_D_PLATFORM \
GEN7_FEATURES, \
@@ -386,24 +472,27 @@ static const struct intel_device_info intel_valleyview_info = {
.has_rc6 = 1,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0),
.display_mmio_offset = VLV_DISPLAY_BASE,
+ I9XX_PIPE_OFFSETS,
+ I9XX_CURSOR_OFFSETS,
+ I965_COLORS,
GEN_DEFAULT_PAGE_SIZES,
- GEN_DEFAULT_PIPEOFFSETS,
- CURSOR_OFFSETS
};
#define G75_FEATURES \
GEN7_FEATURES, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.display.has_ddi = 1, \
.has_fpga_dbg = 1, \
.display.has_psr = 1, \
.display.has_dp_mst = 1, \
	.has_rc6p = 0 /* RC6p removed by HSW */, \
+ HSW_PIPE_OFFSETS, \
.has_runtime_pm = 1
#define HSW_PLATFORM \
@@ -429,11 +518,11 @@ static const struct intel_device_info intel_haswell_gt3_info = {
#define GEN8_FEATURES \
G75_FEATURES, \
GEN(8), \
- BDW_COLORS, \
.page_sizes = I915_GTT_PAGE_SIZE_4K | \
I915_GTT_PAGE_SIZE_2M, \
.has_logical_ring_contexts = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_64bit_reloc = 1, \
.has_reset_engine = 1
@@ -462,7 +551,8 @@ static const struct intel_device_info intel_broadwell_rsvd_info = {
static const struct intel_device_info intel_broadwell_gt3_info = {
BDW_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
static const struct intel_device_info intel_cherryview_info = {
@@ -471,21 +561,22 @@ static const struct intel_device_info intel_cherryview_info = {
.num_pipes = 3,
.display.has_hotplug = 1,
.is_lp = 1,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0),
.has_64bit_reloc = 1,
.has_runtime_pm = 1,
.has_rc6 = 1,
.has_logical_ring_contexts = 1,
.display.has_gmch = 1,
- .ppgtt = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_size = 32,
.has_reset_engine = 1,
.has_snoop = true,
.has_coherent_ggtt = false,
.display_mmio_offset = VLV_DISPLAY_BASE,
- GEN_DEFAULT_PAGE_SIZES,
- GEN_CHV_PIPEOFFSETS,
- CURSOR_OFFSETS,
+ CHV_PIPE_OFFSETS,
+ CHV_CURSOR_OFFSETS,
CHV_COLORS,
+ GEN_DEFAULT_PAGE_SIZES,
};
#define GEN9_DEFAULT_PAGE_SIZES \
@@ -521,7 +612,8 @@ static const struct intel_device_info intel_skylake_gt2_info = {
#define SKL_GT3_PLUS_PLATFORM \
SKL_PLATFORM, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING
+ .engine_mask = \
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1)
static const struct intel_device_info intel_skylake_gt3_info = {
@@ -538,7 +630,7 @@ static const struct intel_device_info intel_skylake_gt4_info = {
GEN(9), \
.is_lp = 1, \
.display.has_hotplug = 1, \
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
+ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.num_pipes = 3, \
.has_64bit_reloc = 1, \
.display.has_ddi = 1, \
@@ -552,15 +644,16 @@ static const struct intel_device_info intel_skylake_gt4_info = {
.has_logical_ring_contexts = 1, \
.has_logical_ring_preemption = 1, \
.has_guc = 1, \
- .ppgtt = INTEL_PPGTT_FULL_4LVL, \
+ .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_size = 48, \
.has_reset_engine = 1, \
.has_snoop = true, \
.has_coherent_ggtt = false, \
.display.has_ipc = 1, \
- GEN9_DEFAULT_PAGE_SIZES, \
- GEN_DEFAULT_PIPEOFFSETS, \
+ HSW_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
- BDW_COLORS
+ IVB_COLORS, \
+ GEN9_DEFAULT_PAGE_SIZES
static const struct intel_device_info intel_broxton_info = {
GEN9_LP_FEATURES,
@@ -592,7 +685,8 @@ static const struct intel_device_info intel_kabylake_gt2_info = {
static const struct intel_device_info intel_kabylake_gt3_info = {
KBL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define CFL_PLATFORM \
@@ -612,7 +706,8 @@ static const struct intel_device_info intel_coffeelake_gt2_info = {
static const struct intel_device_info intel_coffeelake_gt3_info = {
CFL_PLATFORM,
.gt = 3,
- .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
+ .engine_mask =
+ BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS1),
};
#define GEN10_FEATURES \
@@ -648,13 +743,22 @@ static const struct intel_device_info intel_cannonlake_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
- .has_logical_ring_elsq = 1
+ .has_logical_ring_elsq = 1, \
+ .color = { .degamma_lut_size = 33, .gamma_lut_size = 1024 }
static const struct intel_device_info intel_icelake_11_info = {
GEN11_FEATURES,
PLATFORM(INTEL_ICELAKE),
+ .engine_mask =
+ BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
+};
+
+static const struct intel_device_info intel_elkhartlake_info = {
+ GEN11_FEATURES,
+ PLATFORM(INTEL_ELKHARTLAKE),
.is_alpha_support = 1,
- .ring_mask = RENDER_RING | BLT_RING | VEBOX_RING | BSD_RING | BSD3_RING,
+ .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VCS0),
+ .ppgtt_size = 36,
};
#undef GEN
@@ -680,7 +784,8 @@ static const struct pci_device_id pciidlist[] = {
INTEL_I965GM_IDS(&intel_i965gm_info),
INTEL_GM45_IDS(&intel_gm45_info),
INTEL_G45_IDS(&intel_g45_info),
- INTEL_PINEVIEW_IDS(&intel_pineview_info),
+ INTEL_PINEVIEW_G_IDS(&intel_pineview_g_info),
+ INTEL_PINEVIEW_M_IDS(&intel_pineview_m_info),
INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
INTEL_SNB_D_GT1_IDS(&intel_sandybridge_d_gt1_info),
@@ -722,8 +827,11 @@ static const struct pci_device_id pciidlist[] = {
INTEL_WHL_U_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_AML_CFL_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_WHL_U_GT3_IDS(&intel_coffeelake_gt3_info),
+ INTEL_CML_GT1_IDS(&intel_coffeelake_gt1_info),
+ INTEL_CML_GT2_IDS(&intel_coffeelake_gt2_info),
INTEL_CNL_IDS(&intel_cannonlake_info),
INTEL_ICL_11_IDS(&intel_icelake_11_info),
+ INTEL_EHL_IDS(&intel_elkhartlake_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -801,7 +909,9 @@ static int __init i915_init(void)
bool use_kms = true;
int err;
- i915_global_active_init();
+ err = i915_globals_init();
+ if (err)
+ return err;
err = i915_mock_selftests();
if (err)
@@ -834,7 +944,7 @@ static void __exit i915_exit(void)
return;
pci_unregister_driver(&i915_pci_driver);
- i915_global_active_exit();
+ i915_globals_exit();
}
module_init(i915_init);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 9ebf99f3d8d3..39a4804091d7 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1202,7 +1202,7 @@ static int i915_oa_read(struct i915_perf_stream *stream,
static struct intel_context *oa_pin_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx)
{
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct intel_context *ce;
int ret;
@@ -1364,7 +1364,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
free_oa_buffer(dev_priv);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
if (stream->ctx)
@@ -1509,9 +1509,7 @@ static int alloc_oa_buffer(struct drm_i915_private *dev_priv)
goto unlock;
}
- ret = i915_gem_object_set_cache_level(bo, I915_CACHE_LLC);
- if (ret)
- goto err_unref;
+ i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC);
/* PreHSW required 512K alignment, HSW requires 16M */
vma = i915_gem_object_ggtt_pin(bo, NULL, 0, SZ_16M, 0);
@@ -1629,13 +1627,14 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
* It's fine to put out-of-date values into these per-context registers
* in the case that the OA unit has been disabled.
*/
-static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
- u32 *reg_state,
- const struct i915_oa_config *oa_config)
+static void
+gen8_update_reg_state_unlocked(struct intel_context *ce,
+ u32 *reg_state,
+ const struct i915_oa_config *oa_config)
{
- struct drm_i915_private *dev_priv = ctx->i915;
- u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
- u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
+ struct drm_i915_private *i915 = ce->gem_context->i915;
+ u32 ctx_oactxctrl = i915->perf.oa.ctx_oactxctrl_offset;
+ u32 ctx_flexeu0 = i915->perf.oa.ctx_flexeu0_offset;
/* The MMIO offsets for Flex EU registers aren't contiguous */
i915_reg_t flex_regs[] = {
EU_PERF_CNTL0,
@@ -1649,8 +1648,8 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
int i;
CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL,
- (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
- (dev_priv->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
+ (i915->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
+ (i915->perf.oa.periodic ? GEN8_OA_TIMER_ENABLE : 0) |
GEN8_OA_COUNTER_RESUME);
for (i = 0; i < ARRAY_SIZE(flex_regs); i++) {
@@ -1678,10 +1677,9 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
CTX_REG(reg_state, state_offset, flex_regs[i], value);
}
- CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- gen8_make_rpcs(dev_priv,
- &to_intel_context(ctx,
- dev_priv->engine[RCS])->sseu));
+ CTX_REG(reg_state,
+ CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ gen8_make_rpcs(i915, &ce->sseu));
}
/*
@@ -1711,7 +1709,7 @@ static void gen8_update_reg_state_unlocked(struct i915_gem_context *ctx,
static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
const struct i915_oa_config *oa_config)
{
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
unsigned int map_type = i915_coherent_map_type(dev_priv);
struct i915_gem_context *ctx;
struct i915_request *rq;
@@ -1740,11 +1738,11 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
/* Update all contexts now that we've stalled the submission. */
list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 *regs;
/* OA settings will be set upon first use */
- if (!ce->state)
+ if (!ce || !ce->state)
continue;
regs = i915_gem_object_pin_map(ce->state->obj, map_type);
@@ -1754,7 +1752,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
ce->state->obj->mm.dirty = true;
regs += LRC_STATE_PN * PAGE_SIZE / sizeof(*regs);
- gen8_update_reg_state_unlocked(ctx, regs, oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, oa_config);
i915_gem_object_unpin_map(ce->state->obj);
}
@@ -1922,10 +1920,10 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream)
static void gen7_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN7_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN7_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -1933,10 +1931,10 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
static void gen8_oa_disable(struct i915_perf_stream *stream)
{
- struct drm_i915_private *dev_priv = stream->dev_priv;
+ struct intel_uncore *uncore = &stream->dev_priv->uncore;
- I915_WRITE(GEN8_OACONTROL, 0);
- if (intel_wait_for_register(dev_priv,
+ intel_uncore_write(uncore, GEN8_OACONTROL, 0);
+ if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
DRM_ERROR("wait for OA to be disabled timed out\n");
@@ -2093,7 +2091,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
* references will effectively disable RC6.
*/
stream->wakeref = intel_runtime_pm_get(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
ret = alloc_oa_buffer(dev_priv);
if (ret)
@@ -2127,7 +2125,7 @@ err_lock:
err_oa_buf_alloc:
put_oa_config(dev_priv, stream->oa_config);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_runtime_pm_put(dev_priv, stream->wakeref);
err_config:
@@ -2138,17 +2136,17 @@ err_config:
}
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- u32 *reg_state)
+ struct intel_context *ce,
+ u32 *regs)
{
struct i915_perf_stream *stream;
- if (engine->id != RCS)
+ if (engine->class != RENDER_CLASS)
return;
stream = engine->i915->perf.oa.exclusive_stream;
if (stream)
- gen8_update_reg_state_unlocked(ctx, reg_state, stream->oa_config);
+ gen8_update_reg_state_unlocked(ce, regs, stream->oa_config);
}
/**
@@ -2881,12 +2879,24 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
sysfs_attr_init(&dev_priv->perf.oa.test_config.sysfs_metric_id.attr);
- if (IS_HASWELL(dev_priv)) {
- i915_perf_load_test_config_hsw(dev_priv);
- } else if (IS_BROADWELL(dev_priv)) {
- i915_perf_load_test_config_bdw(dev_priv);
- } else if (IS_CHERRYVIEW(dev_priv)) {
- i915_perf_load_test_config_chv(dev_priv);
+ if (INTEL_GEN(dev_priv) >= 11) {
+ i915_perf_load_test_config_icl(dev_priv);
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ i915_perf_load_test_config_cnl(dev_priv);
+ } else if (IS_COFFEELAKE(dev_priv)) {
+ if (IS_CFL_GT2(dev_priv))
+ i915_perf_load_test_config_cflgt2(dev_priv);
+ if (IS_CFL_GT3(dev_priv))
+ i915_perf_load_test_config_cflgt3(dev_priv);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ i915_perf_load_test_config_glk(dev_priv);
+ } else if (IS_KABYLAKE(dev_priv)) {
+ if (IS_KBL_GT2(dev_priv))
+ i915_perf_load_test_config_kblgt2(dev_priv);
+ else if (IS_KBL_GT3(dev_priv))
+ i915_perf_load_test_config_kblgt3(dev_priv);
+ } else if (IS_BROXTON(dev_priv)) {
+ i915_perf_load_test_config_bxt(dev_priv);
} else if (IS_SKYLAKE(dev_priv)) {
if (IS_SKL_GT2(dev_priv))
i915_perf_load_test_config_sklgt2(dev_priv);
@@ -2894,25 +2904,13 @@ void i915_perf_register(struct drm_i915_private *dev_priv)
i915_perf_load_test_config_sklgt3(dev_priv);
else if (IS_SKL_GT4(dev_priv))
i915_perf_load_test_config_sklgt4(dev_priv);
- } else if (IS_BROXTON(dev_priv)) {
- i915_perf_load_test_config_bxt(dev_priv);
- } else if (IS_KABYLAKE(dev_priv)) {
- if (IS_KBL_GT2(dev_priv))
- i915_perf_load_test_config_kblgt2(dev_priv);
- else if (IS_KBL_GT3(dev_priv))
- i915_perf_load_test_config_kblgt3(dev_priv);
- } else if (IS_GEMINILAKE(dev_priv)) {
- i915_perf_load_test_config_glk(dev_priv);
- } else if (IS_COFFEELAKE(dev_priv)) {
- if (IS_CFL_GT2(dev_priv))
- i915_perf_load_test_config_cflgt2(dev_priv);
- if (IS_CFL_GT3(dev_priv))
- i915_perf_load_test_config_cflgt3(dev_priv);
- } else if (IS_CANNONLAKE(dev_priv)) {
- i915_perf_load_test_config_cnl(dev_priv);
- } else if (IS_ICELAKE(dev_priv)) {
- i915_perf_load_test_config_icl(dev_priv);
- }
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ i915_perf_load_test_config_chv(dev_priv);
+ } else if (IS_BROADWELL(dev_priv)) {
+ i915_perf_load_test_config_bdw(dev_priv);
+ } else if (IS_HASWELL(dev_priv)) {
+ i915_perf_load_test_config_hsw(dev_priv);
+ }
if (dev_priv->perf.oa.test_config.id == 0)
goto sysfs_error;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index b745c49a5af6..46a52da3db29 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -102,7 +102,7 @@ static bool pmu_needs_timer(struct drm_i915_private *i915, bool gpu_active)
*
* Use RCS as proxy for all engines.
*/
- else if (intel_engine_supports_stats(i915->engine[RCS]))
+ else if (intel_engine_supports_stats(i915->engine[RCS0]))
enable &= ~BIT(I915_SAMPLE_BUSY);
/*
@@ -149,14 +149,6 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915)
spin_unlock_irq(&i915->pmu.lock);
}
-static bool grab_forcewake(struct drm_i915_private *i915, bool fw)
-{
- if (!fw)
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
-
- return true;
-}
-
static void
add_sample(struct i915_pmu_sample *sample, u32 val)
{
@@ -169,49 +161,48 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns)
struct intel_engine_cs *engine;
enum intel_engine_id id;
intel_wakeref_t wakeref;
- bool fw = false;
+ unsigned long flags;
if ((dev_priv->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
- if (!dev_priv->gt.awake)
- return;
-
- wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
+ wakeref = 0;
+ if (READ_ONCE(dev_priv->gt.awake))
+ wakeref = intel_runtime_pm_get_if_in_use(dev_priv);
if (!wakeref)
return;
+ spin_lock_irqsave(&dev_priv->uncore.lock, flags);
for_each_engine(engine, dev_priv, id) {
- u32 current_seqno = intel_engine_get_seqno(engine);
- u32 last_seqno = intel_engine_last_submit(engine);
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
u32 val;
- val = !i915_seqno_passed(current_seqno, last_seqno);
-
- if (val)
- add_sample(&engine->pmu.sample[I915_SAMPLE_BUSY],
- period_ns);
-
- if (val && (engine->pmu.enable &
- (BIT(I915_SAMPLE_WAIT) | BIT(I915_SAMPLE_SEMA)))) {
- fw = grab_forcewake(dev_priv, fw);
-
- val = I915_READ_FW(RING_CTL(engine->mmio_base));
- } else {
- val = 0;
- }
+ val = I915_READ_FW(RING_CTL(engine->mmio_base));
+ if (val == 0) /* powerwell off => engine idle */
+ continue;
if (val & RING_WAIT)
- add_sample(&engine->pmu.sample[I915_SAMPLE_WAIT],
- period_ns);
-
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
if (val & RING_WAIT_SEMAPHORE)
- add_sample(&engine->pmu.sample[I915_SAMPLE_SEMA],
- period_ns);
- }
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
- if (fw)
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = I915_READ_FW(RING_MI_MODE(engine->mmio_base));
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+ }
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
intel_runtime_pm_put(dev_priv, wakeref);
}
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
new file mode 100644
index 000000000000..cc44ebd3b553
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -0,0 +1,42 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_PRIOLIST_TYPES_H_
+#define _I915_PRIOLIST_TYPES_H_
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+
+#include <uapi/drm/i915_drm.h>
+
+enum {
+ I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
+ I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
+ I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+ I915_PRIORITY_INVALID = INT_MIN
+};
+
+#define I915_USER_PRIORITY_SHIFT 3
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
+#define I915_PRIORITY_WAIT ((u8)BIT(0))
+#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
+#define I915_PRIORITY_NOSEMAPHORE ((u8)BIT(2))
+
+#define __NO_PREEMPTION (I915_PRIORITY_WAIT)
+
+struct i915_priolist {
+ struct list_head requests[I915_PRIORITY_COUNT];
+ struct rb_node node;
+ unsigned long used;
+ int priority;
+};
+
+#endif /* _I915_PRIOLIST_TYPES_H_ */
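
For orientation, a hedged sketch of how these priority macros are meant to compose; the constants come from the definitions above, while the surrounding usage is an assumption and not code from this patch:

/* Illustrative only -- not part of the patch. The effective priority packs
 * the user value above I915_USER_PRIORITY_SHIFT, leaving the low bits for
 * the internal hint flags (WAIT/NEWCLIENT/NOSEMAPHORE).
 */
int prio  = I915_USER_PRIORITY(I915_CONTEXT_DEFAULT_PRIORITY) | I915_PRIORITY_WAIT;
int user  = prio >> I915_USER_PRIORITY_SHIFT;  /* recovers the user priority */
int hints = prio & I915_PRIORITY_MASK;         /* internal bonus bits only */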
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
index eeaa3d506d95..969e514916ab 100644
--- a/drivers/gpu/drm/i915/i915_pvinfo.h
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -52,7 +52,7 @@ enum vgt_g2v_type {
/*
* VGT capabilities type
*/
-#define VGT_CAPS_FULL_48BIT_PPGTT BIT(2)
+#define VGT_CAPS_FULL_PPGTT BIT(2)
#define VGT_CAPS_HWSP_EMULATION BIT(3)
#define VGT_CAPS_HUGE_GTT BIT(4)
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index cbcb957b7141..782183b78f49 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -10,12 +10,34 @@
#include "i915_query.h"
#include <uapi/drm/i915_drm.h>
+static int copy_query_item(void *query_hdr, size_t query_sz,
+ u32 total_length,
+ struct drm_i915_query_item *query_item)
+{
+ if (query_item->length == 0)
+ return total_length;
+
+ if (query_item->length < total_length)
+ return -EINVAL;
+
+ if (copy_from_user(query_hdr, u64_to_user_ptr(query_item->data_ptr),
+ query_sz))
+ return -EFAULT;
+
+ if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
+ total_length))
+ return -EFAULT;
+
+ return 0;
+}
+
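As a usage note, a hedged sketch of the calling convention this helper establishes (query_topology_info() below is the real call site; hdr is a placeholder name): a zero query_item->length is a size probe and receives the required total_length back, a too-small buffer yields -EINVAL, and an unreadable user pointer yields -EFAULT.

/* Hedged sketch of a caller; see query_topology_info() below for real usage. */
ret = copy_query_item(&hdr, sizeof(hdr), total_length, query_item);
if (ret != 0)
	return ret; /* total_length for a size probe, or -EINVAL / -EFAULT */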
static int query_topology_info(struct drm_i915_private *dev_priv,
struct drm_i915_query_item *query_item)
{
const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
struct drm_i915_query_topology_info topo;
u32 slice_length, subslice_length, eu_length, total_length;
+ int ret;
if (query_item->flags != 0)
return -EINVAL;
@@ -33,23 +55,14 @@ static int query_topology_info(struct drm_i915_private *dev_priv,
total_length = sizeof(topo) + slice_length + subslice_length + eu_length;
- if (query_item->length == 0)
- return total_length;
-
- if (query_item->length < total_length)
- return -EINVAL;
-
- if (copy_from_user(&topo, u64_to_user_ptr(query_item->data_ptr),
- sizeof(topo)))
- return -EFAULT;
+ ret = copy_query_item(&topo, sizeof(topo), total_length,
+ query_item);
+ if (ret != 0)
+ return ret;
if (topo.flags != 0)
return -EINVAL;
- if (!access_ok(u64_to_user_ptr(query_item->data_ptr),
- total_length))
- return -EFAULT;
-
memset(&topo, 0, sizeof(topo));
topo.max_slices = sseu->max_slices;
topo.max_subslices = sseu->max_subslices;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 047855dd8c6b..b74824f0b5b1 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -25,6 +25,9 @@
#ifndef _I915_REG_H_
#define _I915_REG_H_
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
/**
* DOC: The i915 register macro definition style guide
*
@@ -59,15 +62,13 @@
* significant to least significant bit. Indent the register content macros
* using two extra spaces between ``#define`` and the macro name.
*
- * For bit fields, define a ``_MASK`` and a ``_SHIFT`` macro. Define bit field
- * contents so that they are already shifted in place, and can be directly
- * OR'd. For convenience, function-like macros may be used to define bit fields,
- * but do note that the macros may be needed to read as well as write the
- * register contents.
+ * Define bit fields using ``REG_GENMASK(h, l)``. Define bit field contents
+ * using ``REG_FIELD_PREP(mask, value)``. This will define the values already
+ * shifted in place, so they can be directly OR'd together. For convenience,
+ * function-like macros may be used to define bit fields, but do note that the
+ * macros may be needed to read as well as write the register contents.
*
- * Define bits using ``(1 << N)`` instead of ``BIT(N)``. We may change this in
- * the future, but this is the prevailing style. Do **not** add ``_BIT`` suffix
- * to the name.
+ * Define bits using ``REG_BIT(N)``. Do **not** add ``_BIT`` suffix to the name.
*
* Group the register and its contents together without blank lines, separate
* from other registers and their contents with one blank line.
@@ -105,17 +106,78 @@
* #define _FOO_A 0xf000
* #define _FOO_B 0xf001
* #define FOO(pipe) _MMIO_PIPE(pipe, _FOO_A, _FOO_B)
- * #define FOO_ENABLE (1 << 31)
- * #define FOO_MODE_MASK (0xf << 16)
- * #define FOO_MODE_SHIFT 16
- * #define FOO_MODE_BAR (0 << 16)
- * #define FOO_MODE_BAZ (1 << 16)
- * #define FOO_MODE_QUX_SNB (2 << 16)
+ * #define FOO_ENABLE REG_BIT(31)
+ * #define FOO_MODE_MASK REG_GENMASK(19, 16)
+ * #define FOO_MODE_BAR REG_FIELD_PREP(FOO_MODE_MASK, 0)
+ * #define FOO_MODE_BAZ REG_FIELD_PREP(FOO_MODE_MASK, 1)
+ * #define FOO_MODE_QUX_SNB REG_FIELD_PREP(FOO_MODE_MASK, 2)
*
* #define BAR _MMIO(0xb000)
* #define GEN8_BAR _MMIO(0xb888)
*/
+/**
+ * REG_BIT() - Prepare a u32 bit value
+ * @__n: 0-based bit number
+ *
+ * Local wrapper for BIT() to force u32, with compile time checks.
+ *
+ * @return: Value with bit @__n set.
+ */
+#define REG_BIT(__n) \
+ ((u32)(BIT(__n) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__n) && \
+ ((__n) < 0 || (__n) > 31))))
+
+/**
+ * REG_GENMASK() - Prepare a continuous u32 bitmask
+ * @__high: 0-based high bit
+ * @__low: 0-based low bit
+ *
+ * Local wrapper for GENMASK() to force u32, with compile time checks.
+ *
+ * @return: Continuous bitmask from @__high to @__low, inclusive.
+ */
+#define REG_GENMASK(__high, __low) \
+ ((u32)(GENMASK(__high, __low) + \
+ BUILD_BUG_ON_ZERO(__builtin_constant_p(__high) && \
+ __builtin_constant_p(__low) && \
+ ((__low) < 0 || (__high) > 31 || (__low) > (__high)))))
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
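For illustration, a hedged sketch of how the new helpers compose, reusing the hypothetical FOO register from the style guide example above (FOO_ENABLE, FOO_MODE_MASK and FOO_MODE_QUX_SNB are documentation placeholders, not real hardware):

/* Illustrative only, using the style guide's hypothetical FOO register. */
u32 val  = FOO_ENABLE | REG_FIELD_PREP(FOO_MODE_MASK, 2); /* same as FOO_ENABLE | FOO_MODE_QUX_SNB */
u32 mode = REG_FIELD_GET(FOO_MODE_MASK, val);             /* mode == 2 */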
typedef struct {
u32 reg;
} i915_reg_t;
@@ -210,14 +272,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Engine ID */
-#define RCS_HW 0
-#define VCS_HW 1
-#define BCS_HW 2
-#define VECS_HW 3
-#define VCS2_HW 4
-#define VCS3_HW 6
-#define VCS4_HW 7
-#define VECS2_HW 12
+#define RCS0_HW 0
+#define VCS0_HW 1
+#define BCS0_HW 2
+#define VECS0_HW 3
+#define VCS1_HW 4
+#define VCS2_HW 6
+#define VCS3_HW 7
+#define VECS1_HW 12
/* Engine class */
@@ -372,13 +434,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014)
#define GEN11_VECS_SFC_USAGE_BIT (1 << 0)
-#define RING_PP_DIR_BASE(engine) _MMIO((engine)->mmio_base + 0x228)
-#define RING_PP_DIR_BASE_READ(engine) _MMIO((engine)->mmio_base + 0x518)
-#define RING_PP_DIR_DCLV(engine) _MMIO((engine)->mmio_base + 0x220)
+#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228)
+#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518)
+#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220)
#define PP_DIR_DCLV_2G 0xffffffff
-#define GEN8_RING_PDP_UDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8 + 4)
-#define GEN8_RING_PDP_LDW(engine, n) _MMIO((engine)->mmio_base + 0x270 + (n) * 8)
+#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4)
+#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8)
#define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8)
#define GEN8_RPCS_ENABLE (1 << 31)
@@ -1044,7 +1106,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* See configdb bunit SB addr map */
#define BUNIT_REG_BISOC 0x11
-#define PUNIT_REG_DSPFREQ 0x36
+/* PUNIT_REG_*SSPM0 */
+#define _SSPM0_SSC(val) ((val) << 0)
+#define SSPM0_SSC_MASK _SSPM0_SSC(0x3)
+#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0)
+#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1)
+#define SSPM0_SSC_RESET _SSPM0_SSC(0x2)
+#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3)
+#define _SSPM0_SSS(val) ((val) << 24)
+#define SSPM0_SSS_MASK _SSPM0_SSS(0x3)
+#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0)
+#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1)
+#define SSPM0_SSS_RESET _SSPM0_SSS(0x2)
+#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3)
+
+/* PUNIT_REG_*SSPM1 */
+#define SSPM1_FREQSTAT_SHIFT 24
+#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT)
+#define SSPM1_FREQGUAR_SHIFT 8
+#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT)
+#define SSPM1_FREQ_SHIFT 0
+#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT)
+
+#define PUNIT_REG_VEDSSPM0 0x32
+#define PUNIT_REG_VEDSSPM1 0x33
+
+#define PUNIT_REG_DSPSSPM 0x36
#define DSPFREQSTAT_SHIFT_CHV 24
#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV)
#define DSPFREQGUAR_SHIFT_CHV 8
@@ -1069,6 +1156,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe))
#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe))
+#define PUNIT_REG_ISPSSPM0 0x39
+#define PUNIT_REG_ISPSSPM1 0x3a
+
/*
* i915_power_well_id:
*
@@ -1860,13 +1950,13 @@ enum i915_power_well_id {
#define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0
#define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port)))
#define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)))
-#define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
+#define CNL_PORT_TX_DW4_LN(ln, port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \
((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \
_CNL_PORT_TX_DW4_LN0_AE)))
#define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port))
#define ICL_PORT_TX_DW4_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(4, port))
#define ICL_PORT_TX_DW4_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, port))
-#define ICL_PORT_TX_DW4_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
+#define ICL_PORT_TX_DW4_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, port))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
#define POST_CURSOR_1_MASK (0x3F << 12)
@@ -1893,11 +1983,11 @@ enum i915_power_well_id {
#define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port))
#define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port))
#define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port))
-#define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
+#define ICL_PORT_TX_DW7_LN(ln, port) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
-#define MG_PHY_PORT_LN(port, ln, ln0p1, ln0p2, ln1p1) \
+#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \
_MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))
#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C
@@ -1908,8 +1998,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C
-#define MG_TX1_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
+#define MG_TX1_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
MG_TX_LINK_PARAMS_TX1LN1_PORT1)
@@ -1921,8 +2011,8 @@ enum i915_power_well_id {
#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC
#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC
#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC
-#define MG_TX2_LINK_PARAMS(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
+#define MG_TX2_LINK_PARAMS(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \
MG_TX_LINK_PARAMS_TX2LN0_PORT2, \
MG_TX_LINK_PARAMS_TX2LN1_PORT1)
#define CRI_USE_FS32 (1 << 5)
@@ -1935,8 +2025,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C
#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C
#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C
-#define MG_TX1_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
+#define MG_TX1_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \
MG_TX_PISO_READLOAD_TX1LN0_PORT2, \
MG_TX_PISO_READLOAD_TX1LN1_PORT1)
@@ -1948,8 +2038,8 @@ enum i915_power_well_id {
#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC
#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC
#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC
-#define MG_TX2_PISO_READLOAD(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
+#define MG_TX2_PISO_READLOAD(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \
MG_TX_PISO_READLOAD_TX2LN0_PORT2, \
MG_TX_PISO_READLOAD_TX2LN1_PORT1)
#define CRI_CALCINIT (1 << 1)
@@ -1962,8 +2052,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548
#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148
#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548
-#define MG_TX1_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
+#define MG_TX1_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \
MG_TX_SWINGCTRL_TX1LN0_PORT2, \
MG_TX_SWINGCTRL_TX1LN1_PORT1)
@@ -1975,8 +2065,8 @@ enum i915_power_well_id {
#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8
#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8
#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8
-#define MG_TX2_SWINGCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
+#define MG_TX2_SWINGCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \
MG_TX_SWINGCTRL_TX2LN0_PORT2, \
MG_TX_SWINGCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0)
@@ -1990,8 +2080,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544
#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144
#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544
-#define MG_TX1_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
+#define MG_TX1_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \
MG_TX_DRVCTRL_TX1LN0_TXPORT2, \
MG_TX_DRVCTRL_TX1LN1_TXPORT1)
@@ -2003,8 +2093,8 @@ enum i915_power_well_id {
#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4
#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4
#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4
-#define MG_TX2_DRVCTRL(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DRVCTRL_TX2LN0_PORT1, \
+#define MG_TX2_DRVCTRL(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \
MG_TX_DRVCTRL_TX2LN0_PORT2, \
MG_TX_DRVCTRL_TX2LN1_PORT1)
#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24)
@@ -2023,8 +2113,8 @@ enum i915_power_well_id {
#define MG_CLKHUB_LN1_PORT3 0x16A79C
#define MG_CLKHUB_LN0_PORT4 0x16B39C
#define MG_CLKHUB_LN1_PORT4 0x16B79C
-#define MG_CLKHUB(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_CLKHUB_LN0_PORT1, \
+#define MG_CLKHUB(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \
MG_CLKHUB_LN0_PORT2, \
MG_CLKHUB_LN1_PORT1)
#define CFG_LOW_RATE_LKREN_EN (1 << 11)
@@ -2037,8 +2127,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX1LN1_PORT3 0x16A510
#define MG_TX_DCC_TX1LN0_PORT4 0x16B110
#define MG_TX_DCC_TX1LN1_PORT4 0x16B510
-#define MG_TX1_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX1LN0_PORT1, \
+#define MG_TX1_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \
MG_TX_DCC_TX1LN0_PORT2, \
MG_TX_DCC_TX1LN1_PORT1)
#define MG_TX_DCC_TX2LN0_PORT1 0x168090
@@ -2049,8 +2139,8 @@ enum i915_power_well_id {
#define MG_TX_DCC_TX2LN1_PORT3 0x16A490
#define MG_TX_DCC_TX2LN0_PORT4 0x16B090
#define MG_TX_DCC_TX2LN1_PORT4 0x16B490
-#define MG_TX2_DCC(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_TX_DCC_TX2LN0_PORT1, \
+#define MG_TX2_DCC(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \
MG_TX_DCC_TX2LN0_PORT2, \
MG_TX_DCC_TX2LN1_PORT1)
#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25)
@@ -2065,8 +2155,8 @@ enum i915_power_well_id {
#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0
#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0
#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0
-#define MG_DP_MODE(port, ln) \
- MG_PHY_PORT_LN(port, ln, MG_DP_MODE_LN0_ACU_PORT1, \
+#define MG_DP_MODE(ln, port) \
+ MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \
MG_DP_MODE_LN0_ACU_PORT2, \
MG_DP_MODE_LN1_ACU_PORT1)
#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7)
@@ -2356,8 +2446,10 @@ enum i915_power_well_id {
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
-#define RESET_CTL_REQUEST_RESET (1 << 0)
-#define RESET_CTL_READY_TO_RESET (1 << 1)
+#define RESET_CTL_CAT_ERROR REG_BIT(2)
+#define RESET_CTL_READY_TO_RESET REG_BIT(1)
+#define RESET_CTL_REQUEST_RESET REG_BIT(0)
+
#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c)
#define HSW_GTT_CACHE_EN _MMIO(0x4024)
@@ -2478,12 +2570,12 @@ enum i915_power_well_id {
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA _MMIO(0x2088) /* 965GM+ only */
#define PWRCTX_EN (1 << 0)
-#define IPEIR _MMIO(0x2088)
-#define IPEHR _MMIO(0x208c)
+#define IPEIR(base) _MMIO((base) + 0x88)
+#define IPEHR(base) _MMIO((base) + 0x8c)
#define GEN2_INSTDONE _MMIO(0x2090)
#define NOPID _MMIO(0x2094)
#define HWSTAM _MMIO(0x2098)
-#define DMA_FADD_I8XX _MMIO(0x20d0)
+#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0)
#define RING_BBSTATE(base) _MMIO((base) + 0x110)
#define RING_BB_PPGTT (1 << 5)
#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */
@@ -2623,10 +2715,10 @@ enum i915_power_well_id {
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
#define SCPD0 _MMIO(0x209c) /* 915+ only */
-#define IER _MMIO(0x20a0)
-#define IIR _MMIO(0x20a4)
-#define IMR _MMIO(0x20a8)
-#define ISR _MMIO(0x20ac)
+#define GEN2_IER _MMIO(0x20a0)
+#define GEN2_IIR _MMIO(0x20a4)
+#define GEN2_IMR _MMIO(0x20a8)
+#define GEN2_ISR _MMIO(0x20ac)
#define VLV_GUNIT_CLOCK_GATE _MMIO(VLV_DISPLAY_BASE + 0x2060)
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
@@ -2657,7 +2749,7 @@ enum i915_power_well_id {
#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
#define INSTPM_TLB_INVALIDATE (1 << 9)
#define INSTPM_SYNC_FLUSH (1 << 5)
-#define ACTHD _MMIO(0x20c8)
+#define ACTHD(base) _MMIO((base) + 0xc8)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
@@ -3857,7 +3949,7 @@ enum i915_power_well_id {
/*
* Logical Context regs
*/
-#define CCID _MMIO(0x2180)
+#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
@@ -3989,6 +4081,15 @@ enum {
/* Pipe A CRC regs */
#define _PIPE_CRC_CTL_A 0x60050
#define PIPE_CRC_ENABLE (1 << 31)
+/* skl+ source selection */
+#define PIPE_CRC_SOURCE_PLANE_1_SKL (0 << 28)
+#define PIPE_CRC_SOURCE_PLANE_2_SKL (2 << 28)
+#define PIPE_CRC_SOURCE_DMUX_SKL (4 << 28)
+#define PIPE_CRC_SOURCE_PLANE_3_SKL (6 << 28)
+#define PIPE_CRC_SOURCE_PLANE_4_SKL (7 << 28)
+#define PIPE_CRC_SOURCE_PLANE_5_SKL (5 << 28)
+#define PIPE_CRC_SOURCE_PLANE_6_SKL (3 << 28)
+#define PIPE_CRC_SOURCE_PLANE_7_SKL (1 << 28)
/* ivb+ source selection */
#define PIPE_CRC_SOURCE_PRIMARY_IVB (0 << 29)
#define PIPE_CRC_SOURCE_SPRITE_IVB (1 << 29)
@@ -4110,42 +4211,6 @@ enum {
#define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC)
#define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A)
-/* VLV eDP PSR registers */
-#define _PSRCTLA (VLV_DISPLAY_BASE + 0x60090)
-#define _PSRCTLB (VLV_DISPLAY_BASE + 0x61090)
-#define VLV_EDP_PSR_ENABLE (1 << 0)
-#define VLV_EDP_PSR_RESET (1 << 1)
-#define VLV_EDP_PSR_MODE_MASK (7 << 2)
-#define VLV_EDP_PSR_MODE_HW_TIMER (1 << 3)
-#define VLV_EDP_PSR_MODE_SW_TIMER (1 << 2)
-#define VLV_EDP_PSR_SINGLE_FRAME_UPDATE (1 << 7)
-#define VLV_EDP_PSR_ACTIVE_ENTRY (1 << 8)
-#define VLV_EDP_PSR_SRC_TRANSMITTER_STATE (1 << 9)
-#define VLV_EDP_PSR_DBL_FRAME (1 << 10)
-#define VLV_EDP_PSR_FRAME_COUNT_MASK (0xff << 16)
-#define VLV_EDP_PSR_IDLE_FRAME_SHIFT 16
-#define VLV_PSRCTL(pipe) _MMIO_PIPE(pipe, _PSRCTLA, _PSRCTLB)
-
-#define _VSCSDPA (VLV_DISPLAY_BASE + 0x600a0)
-#define _VSCSDPB (VLV_DISPLAY_BASE + 0x610a0)
-#define VLV_EDP_PSR_SDP_FREQ_MASK (3 << 30)
-#define VLV_EDP_PSR_SDP_FREQ_ONCE (1 << 31)
-#define VLV_EDP_PSR_SDP_FREQ_EVFRAME (1 << 30)
-#define VLV_VSCSDP(pipe) _MMIO_PIPE(pipe, _VSCSDPA, _VSCSDPB)
-
-#define _PSRSTATA (VLV_DISPLAY_BASE + 0x60094)
-#define _PSRSTATB (VLV_DISPLAY_BASE + 0x61094)
-#define VLV_EDP_PSR_LAST_STATE_MASK (7 << 3)
-#define VLV_EDP_PSR_CURR_STATE_MASK 7
-#define VLV_EDP_PSR_DISABLED (0 << 0)
-#define VLV_EDP_PSR_INACTIVE (1 << 0)
-#define VLV_EDP_PSR_IN_TRANS_TO_ACTIVE (2 << 0)
-#define VLV_EDP_PSR_ACTIVE_NORFB_UP (3 << 0)
-#define VLV_EDP_PSR_ACTIVE_SF_UPDATE (4 << 0)
-#define VLV_EDP_PSR_EXIT (5 << 0)
-#define VLV_EDP_PSR_IN_TRANS (1 << 7)
-#define VLV_PSRSTAT(pipe) _MMIO_PIPE(pipe, _PSRSTATA, _PSRSTATB)
-
/* HSW+ eDP PSR registers */
#define HSW_EDP_PSR_BASE 0x64800
#define BDW_EDP_PSR_BASE 0x6f800
@@ -4168,6 +4233,7 @@ enum {
#define EDP_PSR_TP2_TP3_TIME_100us (1 << 8)
#define EDP_PSR_TP2_TP3_TIME_2500us (2 << 8)
#define EDP_PSR_TP2_TP3_TIME_0us (3 << 8)
+#define EDP_PSR_TP4_TIME_0US (3 << 6) /* ICL+ */
#define EDP_PSR_TP1_TIME_500us (0 << 4)
#define EDP_PSR_TP1_TIME_100us (1 << 4)
#define EDP_PSR_TP1_TIME_2500us (2 << 4)
@@ -4612,13 +4678,14 @@ enum {
#define VIDEO_DIP_ENABLE (1 << 31)
#define VIDEO_DIP_PORT(port) ((port) << 29)
#define VIDEO_DIP_PORT_MASK (3 << 29)
-#define VIDEO_DIP_ENABLE_GCP (1 << 25)
+#define VIDEO_DIP_ENABLE_GCP (1 << 25) /* ilk+ */
#define VIDEO_DIP_ENABLE_AVI (1 << 21)
#define VIDEO_DIP_ENABLE_VENDOR (2 << 21)
-#define VIDEO_DIP_ENABLE_GAMUT (4 << 21)
+#define VIDEO_DIP_ENABLE_GAMUT (4 << 21) /* ilk+ */
#define VIDEO_DIP_ENABLE_SPD (8 << 21)
#define VIDEO_DIP_SELECT_AVI (0 << 19)
#define VIDEO_DIP_SELECT_VENDOR (1 << 19)
+#define VIDEO_DIP_SELECT_GAMUT (2 << 19)
#define VIDEO_DIP_SELECT_SPD (3 << 19)
#define VIDEO_DIP_SELECT_MASK (3 << 19)
#define VIDEO_DIP_FREQ_ONCE (0 << 16)
@@ -4653,18 +4720,17 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
-#define PP_ON (1 << 31)
+#define PP_ON REG_BIT(31)
#define _PP_CONTROL_1 0xc7204
#define _PP_CONTROL_2 0xc7304
#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
_PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK (0x1f << 4)
-#define POWER_CYCLE_DELAY_SHIFT 4
-#define VDD_OVERRIDE_FORCE (1 << 3)
-#define BACKLIGHT_ENABLE (1 << 2)
-#define PWR_DOWN_ON_RESET (1 << 1)
-#define PWR_STATE_TARGET (1 << 0)
+#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define VDD_OVERRIDE_FORCE REG_BIT(3)
+#define BACKLIGHT_ENABLE REG_BIT(2)
+#define PWR_DOWN_ON_RESET REG_BIT(1)
+#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4672,62 +4738,53 @@ enum {
* - pipe enabled
* - LVDS/DVOB/DVOC on
*/
-#define PP_READY (1 << 30)
-#define PP_SEQUENCE_NONE (0 << 28)
-#define PP_SEQUENCE_POWER_UP (1 << 28)
-#define PP_SEQUENCE_POWER_DOWN (2 << 28)
-#define PP_SEQUENCE_MASK (3 << 28)
-#define PP_SEQUENCE_SHIFT 28
-#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
-#define PP_SEQUENCE_STATE_MASK 0x0000000f
-#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
-#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
-#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
-#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
-#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
-#define PP_SEQUENCE_STATE_RESET (0xf << 0)
+#define PP_READY REG_BIT(30)
+#define PP_SEQUENCE_MASK REG_GENMASK(29, 28)
+#define PP_SEQUENCE_NONE REG_FIELD_PREP(PP_SEQUENCE_MASK, 0)
+#define PP_SEQUENCE_POWER_UP REG_FIELD_PREP(PP_SEQUENCE_MASK, 1)
+#define PP_SEQUENCE_POWER_DOWN REG_FIELD_PREP(PP_SEQUENCE_MASK, 2)
+#define PP_CYCLE_DELAY_ACTIVE REG_BIT(27)
+#define PP_SEQUENCE_STATE_MASK REG_GENMASK(3, 0)
+#define PP_SEQUENCE_STATE_OFF_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x0)
+#define PP_SEQUENCE_STATE_OFF_S0_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x1)
+#define PP_SEQUENCE_STATE_OFF_S0_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x2)
+#define PP_SEQUENCE_STATE_OFF_S0_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x3)
+#define PP_SEQUENCE_STATE_ON_IDLE REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x8)
+#define PP_SEQUENCE_STATE_ON_S1_1 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0x9)
+#define PP_SEQUENCE_STATE_ON_S1_2 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xa)
+#define PP_SEQUENCE_STATE_ON_S1_3 REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xb)
+#define PP_SEQUENCE_STATE_RESET REG_FIELD_PREP(PP_SEQUENCE_STATE_MASK, 0xf)
#define _PP_CONTROL 0x61204
#define PP_CONTROL(pps_idx) _MMIO_PPS(pps_idx, _PP_CONTROL)
-#define PANEL_UNLOCK_REGS (0xabcd << 16)
-#define PANEL_UNLOCK_MASK (0xffff << 16)
-#define BXT_POWER_CYCLE_DELAY_MASK 0x1f0
-#define BXT_POWER_CYCLE_DELAY_SHIFT 4
-#define EDP_FORCE_VDD (1 << 3)
-#define EDP_BLC_ENABLE (1 << 2)
-#define PANEL_POWER_RESET (1 << 1)
-#define PANEL_POWER_ON (1 << 0)
+#define PANEL_UNLOCK_MASK REG_GENMASK(31, 16)
+#define PANEL_UNLOCK_REGS REG_FIELD_PREP(PANEL_UNLOCK_MASK, 0xabcd)
+#define BXT_POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
+#define EDP_FORCE_VDD REG_BIT(3)
+#define EDP_BLC_ENABLE REG_BIT(2)
+#define PANEL_POWER_RESET REG_BIT(1)
+#define PANEL_POWER_ON REG_BIT(0)
#define _PP_ON_DELAYS 0x61208
#define PP_ON_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_ON_DELAYS)
-#define PANEL_PORT_SELECT_SHIFT 30
-#define PANEL_PORT_SELECT_MASK (3 << 30)
-#define PANEL_PORT_SELECT_LVDS (0 << 30)
-#define PANEL_PORT_SELECT_DPA (1 << 30)
-#define PANEL_PORT_SELECT_DPC (2 << 30)
-#define PANEL_PORT_SELECT_DPD (3 << 30)
-#define PANEL_PORT_SELECT_VLV(port) ((port) << 30)
-#define PANEL_POWER_UP_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_UP_DELAY_SHIFT 16
-#define PANEL_LIGHT_ON_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_ON_DELAY_SHIFT 0
+#define PANEL_PORT_SELECT_MASK REG_GENMASK(31, 30)
+#define PANEL_PORT_SELECT_LVDS REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 0)
+#define PANEL_PORT_SELECT_DPA REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 1)
+#define PANEL_PORT_SELECT_DPC REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 2)
+#define PANEL_PORT_SELECT_DPD REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, 3)
+#define PANEL_PORT_SELECT_VLV(port) REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, port)
+#define PANEL_POWER_UP_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_ON_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_OFF_DELAYS 0x6120C
#define PP_OFF_DELAYS(pps_idx) _MMIO_PPS(pps_idx, _PP_OFF_DELAYS)
-#define PANEL_POWER_DOWN_DELAY_MASK 0x1fff0000
-#define PANEL_POWER_DOWN_DELAY_SHIFT 16
-#define PANEL_LIGHT_OFF_DELAY_MASK 0x1fff
-#define PANEL_LIGHT_OFF_DELAY_SHIFT 0
+#define PANEL_POWER_DOWN_DELAY_MASK REG_GENMASK(28, 16)
+#define PANEL_LIGHT_OFF_DELAY_MASK REG_GENMASK(12, 0)
#define _PP_DIVISOR 0x61210
#define PP_DIVISOR(pps_idx) _MMIO_PPS(pps_idx, _PP_DIVISOR)
-#define PP_REFERENCE_DIVIDER_MASK 0xffffff00
-#define PP_REFERENCE_DIVIDER_SHIFT 8
-#define PANEL_POWER_CYCLE_DELAY_MASK 0x1f
-#define PANEL_POWER_CYCLE_DELAY_SHIFT 0
+#define PP_REFERENCE_DIVIDER_MASK REG_GENMASK(31, 8)
+#define PANEL_POWER_CYCLE_DELAY_MASK REG_GENMASK(4, 0)
/* Panel fitting */
#define PFIT_CONTROL _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x61230)
@@ -5590,9 +5647,15 @@ enum {
#define PIPECONF_SINGLE_WIDE 0
#define PIPECONF_PIPE_UNLOCKED 0
#define PIPECONF_PIPE_LOCKED (1 << 25)
-#define PIPECONF_PALETTE 0
-#define PIPECONF_GAMMA (1 << 24)
#define PIPECONF_FORCE_BORDER (1 << 25)
+#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */
+#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */
+#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */
+#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */
+#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */
+#define PIPECONF_GAMMA_MODE_SHIFT 24
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
@@ -5698,6 +5761,10 @@ enum {
#define PIPEFRAMEPIXEL(pipe) _MMIO_PIPE2(pipe, _PIPEAFRAMEPIXEL)
#define PIPESTAT(pipe) _MMIO_PIPE2(pipe, _PIPEASTAT)
+#define _PIPEAGCMAX 0x70010
+#define _PIPEBGCMAX 0x71010
+#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
+
#define _PIPE_MISC_A 0x70030
#define _PIPE_MISC_B 0x71030
#define PIPEMISC_YUV420_ENABLE (1 << 27)
@@ -5998,6 +6065,7 @@ enum {
#define _CUR_WM_TRANS_A_0 0x70168
#define _CUR_WM_TRANS_B_0 0x71168
#define PLANE_WM_EN (1 << 31)
+#define PLANE_WM_IGNORE_LINES (1 << 30)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK 0x1f
#define PLANE_WM_BLOCKS_MASK 0x7ff /* skl+: 10 bits, icl+ 11 bits */
@@ -6124,7 +6192,7 @@ enum {
#define MCURSOR_PIPE_SELECT_SHIFT 28
#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
-#define MCURSOR_PIPE_CSC_ENABLE (1 << 24)
+#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define MCURSOR_ROTATE_180 (1 << 15)
#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14)
#define _CURABASE 0x70084
@@ -6179,7 +6247,7 @@ enum {
#define DISPPLANE_RGBA888 (0xf << 26)
#define DISPPLANE_STEREO_ENABLE (1 << 25)
#define DISPPLANE_STEREO_DISABLE 0
-#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24)
+#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT)
@@ -6557,13 +6625,22 @@ enum {
#define PLANE_CTL_FORMAT_YUV422 (0 << 24)
#define PLANE_CTL_FORMAT_NV12 (1 << 24)
#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24)
+#define PLANE_CTL_FORMAT_P010 (3 << 24)
#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24)
+#define PLANE_CTL_FORMAT_P012 (5 << 24)
#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24)
+#define PLANE_CTL_FORMAT_P016 (7 << 24)
#define PLANE_CTL_FORMAT_AYUV (8 << 24)
#define PLANE_CTL_FORMAT_INDEXED (12 << 24)
#define PLANE_CTL_FORMAT_RGB_565 (14 << 24)
#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23)
#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */
+#define PLANE_CTL_FORMAT_Y210 (1 << 23)
+#define PLANE_CTL_FORMAT_Y212 (3 << 23)
+#define PLANE_CTL_FORMAT_Y216 (5 << 23)
+#define PLANE_CTL_FORMAT_Y410 (7 << 23)
+#define PLANE_CTL_FORMAT_Y412 (9 << 23)
+#define PLANE_CTL_FORMAT_Y416 (0xb << 23)
#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21)
#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21)
@@ -7102,14 +7179,25 @@ enum {
#define _LGC_PALETTE_B 0x4a800
#define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4)
+/* ilk/snb precision palette */
+#define _PREC_PALETTE_A 0x4b000
+#define _PREC_PALETTE_B 0x4c000
+#define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4)
+
+#define _PREC_PIPEAGCMAX 0x4d000
+#define _PREC_PIPEBGCMAX 0x4d010
+#define PREC_PIPEGCMAX(pipe, i) _MMIO(_PIPE(pipe, _PREC_PIPEAGCMAX, _PREC_PIPEBGCMAX) + (i) * 4)
+
#define _GAMMA_MODE_A 0x4a480
#define _GAMMA_MODE_B 0x4ac80
#define GAMMA_MODE(pipe) _MMIO_PIPE(pipe, _GAMMA_MODE_A, _GAMMA_MODE_B)
-#define GAMMA_MODE_MODE_MASK (3 << 0)
-#define GAMMA_MODE_MODE_8BIT (0 << 0)
-#define GAMMA_MODE_MODE_10BIT (1 << 0)
-#define GAMMA_MODE_MODE_12BIT (2 << 0)
-#define GAMMA_MODE_MODE_SPLIT (3 << 0)
+#define PRE_CSC_GAMMA_ENABLE (1 << 31)
+#define POST_CSC_GAMMA_ENABLE (1 << 30)
+#define GAMMA_MODE_MODE_MASK (3 << 0)
+#define GAMMA_MODE_MODE_8BIT (0 << 0)
+#define GAMMA_MODE_MODE_10BIT (1 << 0)
+#define GAMMA_MODE_MODE_12BIT (2 << 0)
+#define GAMMA_MODE_MODE_SPLIT (3 << 0)
/* DMC/CSR */
#define CSR_PROGRAM(i) _MMIO(0x80000 + (i) * 4)
@@ -7204,8 +7292,8 @@ enum {
#define GEN8_GT_VECS_IRQ (1 << 6)
#define GEN8_GT_GUC_IRQ (1 << 5)
#define GEN8_GT_PM_IRQ (1 << 4)
-#define GEN8_GT_VCS2_IRQ (1 << 3)
-#define GEN8_GT_VCS1_IRQ (1 << 2)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
#define GEN8_GT_BCS_IRQ (1 << 1)
#define GEN8_GT_RCS_IRQ (1 << 0)
@@ -7226,8 +7314,8 @@ enum {
#define GEN8_RCS_IRQ_SHIFT 0
#define GEN8_BCS_IRQ_SHIFT 16
-#define GEN8_VCS1_IRQ_SHIFT 0
-#define GEN8_VCS2_IRQ_SHIFT 16
+#define GEN8_VCS0_IRQ_SHIFT 0 /* NB: VCS1 in bspec! */
+#define GEN8_VCS1_IRQ_SHIFT 16 /* NB: VCS2 in bspec! */
#define GEN8_VECS_IRQ_SHIFT 0
#define GEN8_WD_IRQ_SHIFT 16
@@ -7613,13 +7701,13 @@ enum {
#define GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE (1 << 2)
/*GEN11 chicken */
-#define _PIPEA_CHICKEN 0x70038
-#define _PIPEB_CHICKEN 0x71038
-#define _PIPEC_CHICKEN 0x72038
-#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
-#define PM_FILL_MAINTAIN_DBUF_FULLNESS (1 << 0)
-#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
- _PIPEB_CHICKEN)
+#define _PIPEA_CHICKEN 0x70038
+#define _PIPEB_CHICKEN 0x71038
+#define _PIPEC_CHICKEN 0x72038
+#define PIPE_CHICKEN(pipe) _MMIO_PIPE(pipe, _PIPEA_CHICKEN,\
+ _PIPEB_CHICKEN)
+#define PIXEL_ROUNDING_TRUNC_FB_PASSTHRU (1 << 15)
+#define PER_PIXEL_ALPHA_BYPASS_EN (1 << 7)
/* PCH */
@@ -8089,10 +8177,11 @@ enum {
#define _ICL_VIDEO_DIP_PPS_ECC_B 0x613D4
#define HSW_TVIDEO_DIP_CTL(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_CTL_A)
+#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
#define HSW_TVIDEO_DIP_AVI_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_AVI_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VS_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VS_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_SPD_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_SPD_DATA_A + (i) * 4)
-#define HSW_TVIDEO_DIP_GCP(trans) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GCP_A)
+#define HSW_TVIDEO_DIP_GMP_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_GMP_DATA_A + (i) * 4)
#define HSW_TVIDEO_DIP_VSC_DATA(trans, i) _MMIO_TRANS2(trans, _HSW_VIDEO_DIP_VSC_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_DATA(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_DATA_A + (i) * 4)
#define ICL_VIDEO_DIP_PPS_ECC(trans, i) _MMIO_TRANS2(trans, _ICL_VIDEO_DIP_PPS_ECC_A + (i) * 4)
@@ -8600,8 +8689,9 @@ enum {
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
#define GEN9_PG_ENABLE _MMIO(0xA210)
-#define GEN9_RENDER_PG_ENABLE (1 << 0)
-#define GEN9_MEDIA_PG_ENABLE (1 << 1)
+#define GEN9_RENDER_PG_ENABLE REG_BIT(0)
+#define GEN9_MEDIA_PG_ENABLE REG_BIT(1)
+#define GEN11_MEDIA_SAMPLER_PG_ENABLE REG_BIT(2)
#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
@@ -8616,6 +8706,11 @@ enum {
#define GEN6_PMIER _MMIO(0x4402C)
#define GEN6_PM_MBOX_EVENT (1 << 25)
#define GEN6_PM_THERMAL_EVENT (1 << 24)
+
+/*
+ * For Gen11 these are in the upper word of the GPM_WGBOXPERF
+ * registers. Shifting is handled on accessing the imr and ier.
+ */
#define GEN6_PM_RP_DOWN_TIMEOUT (1 << 6)
#define GEN6_PM_RP_UP_THRESHOLD (1 << 5)
#define GEN6_PM_RP_DOWN_THRESHOLD (1 << 4)
@@ -9741,7 +9836,7 @@ enum skl_power_gate {
#define DPLL_CFGCR1_KDIV(x) ((x) << 6)
#define DPLL_CFGCR1_KDIV_1 (1 << 6)
#define DPLL_CFGCR1_KDIV_2 (2 << 6)
-#define DPLL_CFGCR1_KDIV_4 (4 << 6)
+#define DPLL_CFGCR1_KDIV_3 (4 << 6)
#define DPLL_CFGCR1_PDIV_MASK (0xf << 2)
#define DPLL_CFGCR1_PDIV_SHIFT (2)
#define DPLL_CFGCR1_PDIV(x) ((x) << 2)
@@ -9810,16 +9905,29 @@ enum skl_power_gate {
#define BXT_DRAM_WIDTH_X64 (0x3 << 4)
#define BXT_DRAM_SIZE_MASK (0x7 << 6)
#define BXT_DRAM_SIZE_SHIFT 6
-#define BXT_DRAM_SIZE_4GB (0x0 << 6)
-#define BXT_DRAM_SIZE_6GB (0x1 << 6)
-#define BXT_DRAM_SIZE_8GB (0x2 << 6)
-#define BXT_DRAM_SIZE_12GB (0x3 << 6)
-#define BXT_DRAM_SIZE_16GB (0x4 << 6)
+#define BXT_DRAM_SIZE_4GBIT (0x0 << 6)
+#define BXT_DRAM_SIZE_6GBIT (0x1 << 6)
+#define BXT_DRAM_SIZE_8GBIT (0x2 << 6)
+#define BXT_DRAM_SIZE_12GBIT (0x3 << 6)
+#define BXT_DRAM_SIZE_16GBIT (0x4 << 6)
+#define BXT_DRAM_TYPE_MASK (0x7 << 22)
+#define BXT_DRAM_TYPE_SHIFT 22
+#define BXT_DRAM_TYPE_DDR3 (0x0 << 22)
+#define BXT_DRAM_TYPE_LPDDR3 (0x1 << 22)
+#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
+#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
#define SKL_REQ_DATA_MASK (0xF << 0)
+#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
+#define SKL_DRAM_DDR_TYPE_MASK (0x3 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR4 (0 << 0)
+#define SKL_DRAM_DDR_TYPE_DDR3 (1 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR3 (2 << 0)
+#define SKL_DRAM_DDR_TYPE_LPDDR4 (3 << 0)
+
#define SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x500C)
#define SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5010)
#define SKL_DRAM_S_SHIFT 16
@@ -9831,8 +9939,21 @@ enum skl_power_gate {
#define SKL_DRAM_WIDTH_X32 (0x2 << 8)
#define SKL_DRAM_RANK_MASK (0x1 << 10)
#define SKL_DRAM_RANK_SHIFT 10
-#define SKL_DRAM_RANK_SINGLE (0x0 << 10)
-#define SKL_DRAM_RANK_DUAL (0x1 << 10)
+#define SKL_DRAM_RANK_1 (0x0 << 10)
+#define SKL_DRAM_RANK_2 (0x1 << 10)
+#define SKL_DRAM_RANK_MASK (0x1 << 10)
+#define CNL_DRAM_SIZE_MASK 0x7F
+#define CNL_DRAM_WIDTH_MASK (0x3 << 7)
+#define CNL_DRAM_WIDTH_SHIFT 7
+#define CNL_DRAM_WIDTH_X8 (0x0 << 7)
+#define CNL_DRAM_WIDTH_X16 (0x1 << 7)
+#define CNL_DRAM_WIDTH_X32 (0x2 << 7)
+#define CNL_DRAM_RANK_MASK (0x3 << 9)
+#define CNL_DRAM_RANK_SHIFT 9
+#define CNL_DRAM_RANK_1 (0x0 << 9)
+#define CNL_DRAM_RANK_2 (0x1 << 9)
+#define CNL_DRAM_RANK_3 (0x2 << 9)
+#define CNL_DRAM_RANK_4 (0x3 << 9)
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
@@ -9877,10 +9998,14 @@ enum skl_power_gate {
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
+
#define _PIPE_A_CSC_MODE 0x49028
-#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
-#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
-#define CSC_MODE_YUV_TO_RGB (1 << 0)
+#define ICL_CSC_ENABLE (1 << 31)
+#define ICL_OUTPUT_CSC_ENABLE (1 << 30)
+#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
+#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
+#define CSC_MODE_YUV_TO_RGB (1 << 0)
+
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
@@ -9916,6 +10041,70 @@ enum skl_power_gate {
#define PIPE_CSC_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
+/* Pipe Output CSC */
+#define _PIPE_A_OUTPUT_CSC_COEFF_RY_GY 0x49050
+#define _PIPE_A_OUTPUT_CSC_COEFF_BY 0x49054
+#define _PIPE_A_OUTPUT_CSC_COEFF_RU_GU 0x49058
+#define _PIPE_A_OUTPUT_CSC_COEFF_BU 0x4905c
+#define _PIPE_A_OUTPUT_CSC_COEFF_RV_GV 0x49060
+#define _PIPE_A_OUTPUT_CSC_COEFF_BV 0x49064
+#define _PIPE_A_OUTPUT_CSC_PREOFF_HI 0x49068
+#define _PIPE_A_OUTPUT_CSC_PREOFF_ME 0x4906c
+#define _PIPE_A_OUTPUT_CSC_PREOFF_LO 0x49070
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_HI 0x49074
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_ME 0x49078
+#define _PIPE_A_OUTPUT_CSC_POSTOFF_LO 0x4907c
+
+#define _PIPE_B_OUTPUT_CSC_COEFF_RY_GY 0x49150
+#define _PIPE_B_OUTPUT_CSC_COEFF_BY 0x49154
+#define _PIPE_B_OUTPUT_CSC_COEFF_RU_GU 0x49158
+#define _PIPE_B_OUTPUT_CSC_COEFF_BU 0x4915c
+#define _PIPE_B_OUTPUT_CSC_COEFF_RV_GV 0x49160
+#define _PIPE_B_OUTPUT_CSC_COEFF_BV 0x49164
+#define _PIPE_B_OUTPUT_CSC_PREOFF_HI 0x49168
+#define _PIPE_B_OUTPUT_CSC_PREOFF_ME 0x4916c
+#define _PIPE_B_OUTPUT_CSC_PREOFF_LO 0x49170
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_HI 0x49174
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_ME 0x49178
+#define _PIPE_B_OUTPUT_CSC_POSTOFF_LO 0x4917c
+
+#define PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe) _MMIO_PIPE(pipe,\
+ _PIPE_A_OUTPUT_CSC_COEFF_RY_GY,\
+ _PIPE_B_OUTPUT_CSC_COEFF_RY_GY)
+#define PIPE_CSC_OUTPUT_COEFF_BY(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BY, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BY)
+#define PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RU_GU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RU_GU)
+#define PIPE_CSC_OUTPUT_COEFF_BU(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BU, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BU)
+#define PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_RV_GV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_RV_GV)
+#define PIPE_CSC_OUTPUT_COEFF_BV(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_COEFF_BV, \
+ _PIPE_B_OUTPUT_CSC_COEFF_BV)
+#define PIPE_CSC_OUTPUT_PREOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_HI)
+#define PIPE_CSC_OUTPUT_PREOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_ME)
+#define PIPE_CSC_OUTPUT_PREOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_PREOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_PREOFF_LO)
+#define PIPE_CSC_OUTPUT_POSTOFF_HI(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_HI, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_HI)
+#define PIPE_CSC_OUTPUT_POSTOFF_ME(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_ME, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_ME)
+#define PIPE_CSC_OUTPUT_POSTOFF_LO(pipe) _MMIO_PIPE(pipe, \
+ _PIPE_A_OUTPUT_CSC_POSTOFF_LO, \
+ _PIPE_B_OUTPUT_CSC_POSTOFF_LO)
+
/* pipe degamma/gamma LUTs on IVB+ */
#define _PAL_PREC_INDEX_A 0x4A400
#define _PAL_PREC_INDEX_B 0x4AC00
@@ -9924,6 +10113,7 @@ enum skl_power_gate {
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
+#define PAL_PREC_INDEX_VALUE(x) ((x) << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -9941,6 +10131,7 @@ enum skl_power_gate {
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+#define PREC_PAL_EXT2_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT2_GC_MAX_A, _PAL_PREC_EXT2_GC_MAX_B) + (i) * 4)
#define _PRE_CSC_GAMC_INDEX_A 0x4A484
#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index c2a5c48c7541..b836721d3b13 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -22,15 +22,31 @@
*
*/
-#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
+#include <linux/irq_work.h>
+#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
-#include "i915_drv.h"
#include "i915_active.h"
+#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_reset.h"
+#include "intel_pm.h"
+
+struct execute_cb {
+ struct list_head link;
+ struct irq_work work;
+ struct i915_sw_fence *fence;
+};
+
+static struct i915_global_request {
+ struct i915_global base;
+ struct kmem_cache *slab_requests;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_execute_cbs;
+} global;
static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
@@ -51,7 +67,7 @@ static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
return "signaled";
- return to_request(fence)->timeline->name;
+ return to_request(fence)->gem_context->name ?: "[i915]";
}
static bool i915_fence_signaled(struct dma_fence *fence)
@@ -68,7 +84,9 @@ static signed long i915_fence_wait(struct dma_fence *fence,
bool interruptible,
signed long timeout)
{
- return i915_request_wait(to_request(fence), interruptible, timeout);
+ return i915_request_wait(to_request(fence),
+ interruptible | I915_WAIT_PRIORITY,
+ timeout);
}
static void i915_fence_release(struct dma_fence *fence)
@@ -83,8 +101,9 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&rq->submit);
+ i915_sw_fence_fini(&rq->semaphore);
- kmem_cache_free(rq->i915->requests, rq);
+ kmem_cache_free(global.slab_requests, rq);
}
const struct dma_fence_ops i915_fence_ops = {
@@ -150,7 +169,6 @@ static void advance_ring(struct i915_request *request)
* is just about to be. Either works, if we miss the last two
* noops - they are safe to be replayed on a reset.
*/
- GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
tail = READ_ONCE(request->tail);
list_del(&ring->active_link);
} else {
@@ -177,12 +195,10 @@ static void free_capture_list(struct i915_request *request)
static void __retire_engine_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s(%s) fence %llx:%lld, current %d\n",
__func__, engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!i915_request_completed(rq));
@@ -241,12 +257,10 @@ static void i915_request_retire(struct i915_request *request)
{
struct i915_active_request *active, *next;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
request->engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(request->engine));
+ hwsp_seqno(request));
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
@@ -288,15 +302,13 @@ static void i915_request_retire(struct i915_request *request)
i915_request_remove_from_client(request);
- /* Retirement decays the ban score as it is a sign of ctx progress */
- atomic_dec_if_positive(&request->gem_context->ban_score);
intel_context_unpin(request->hw_context);
__retire_engine_upto(request->engine, request);
unreserve_gt(request->i915);
- i915_sched_node_fini(request->i915, &request->sched);
+ i915_sched_node_fini(&request->sched);
i915_request_put(request);
}
@@ -305,12 +317,10 @@ void i915_request_retire_upto(struct i915_request *rq)
struct intel_ring *ring = rq->ring;
struct i915_request *tmp;
- GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
rq->engine->name,
rq->fence.context, rq->fence.seqno,
- rq->global_seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
lockdep_assert_held(&rq->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_request_completed(rq));
@@ -326,9 +336,67 @@ void i915_request_retire_upto(struct i915_request *rq)
} while (tmp != rq);
}
-static u32 timeline_get_seqno(struct i915_timeline *tl)
+static void irq_execute_cb(struct irq_work *wrk)
+{
+ struct execute_cb *cb = container_of(wrk, typeof(*cb), work);
+
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+}
+
+static void __notify_execute_cb(struct i915_request *rq)
+{
+ struct execute_cb *cb;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (list_empty(&rq->execute_cb))
+ return;
+
+ list_for_each_entry(cb, &rq->execute_cb, link)
+ irq_work_queue(&cb->work);
+
+ /*
+ * XXX Rollback on __i915_request_unsubmit()
+ *
+ * In the future, perhaps when we have an active time-slicing scheduler,
+ * it will be interesting to unsubmit parallel execution and remove
+ * busywaits from the GPU until their master is restarted. This is
+ * quite hairy: we have to carefully roll back the fence and do a
+ * preempt-to-idle cycle on the target engine, all the while the
+ * master execute_cb may refire.
+ */
+ INIT_LIST_HEAD(&rq->execute_cb);
+}
+
+static int
+i915_request_await_execution(struct i915_request *rq,
+ struct i915_request *signal,
+ gfp_t gfp)
{
- return tl->seqno += 1 + tl->has_initial_breadcrumb;
+ struct execute_cb *cb;
+
+ if (i915_request_is_active(signal))
+ return 0;
+
+ cb = kmem_cache_alloc(global.slab_execute_cbs, gfp);
+ if (!cb)
+ return -ENOMEM;
+
+ cb->fence = &rq->submit;
+ i915_sw_fence_await(cb->fence);
+ init_irq_work(&cb->work, irq_execute_cb);
+
+ spin_lock_irq(&signal->lock);
+ if (i915_request_is_active(signal)) {
+ i915_sw_fence_complete(cb->fence);
+ kmem_cache_free(global.slab_execute_cbs, cb);
+ } else {
+ list_add_tail(&cb->link, &signal->execute_cb);
+ }
+ spin_unlock_irq(&signal->lock);
+
+ return 0;
}
static void move_to_timeline(struct i915_request *request,
@@ -342,42 +410,33 @@ static void move_to_timeline(struct i915_request *request,
spin_unlock(&request->timeline->lock);
}
-static u32 next_global_seqno(struct i915_timeline *tl)
-{
- if (!++tl->seqno)
- ++tl->seqno;
- return tl->seqno;
-}
-
void __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- u32 seqno;
- GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld -> current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- engine->timeline.seqno + 1,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(request->global_seqno);
-
- seqno = next_global_seqno(&engine->timeline);
- GEM_BUG_ON(!seqno);
- GEM_BUG_ON(intel_engine_signaled(engine, seqno));
+ if (i915_gem_context_is_banned(request->gem_context))
+ i915_request_skip(request, -EIO);
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
- request->global_seqno = seqno;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!i915_request_enable_breadcrumb(request))
intel_engine_queue_breadcrumbs(engine);
+
+ __notify_execute_cb(request);
+
spin_unlock(&request->lock);
engine->emit_fini_breadcrumb(request,
@@ -406,12 +465,10 @@ void __i915_request_unsubmit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
- GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
+ GEM_TRACE("%s fence %llx:%lld, current %d\n",
engine->name,
request->fence.context, request->fence.seqno,
- request->global_seqno,
- hwsp_seqno(request),
- intel_engine_get_seqno(engine));
+ hwsp_seqno(request));
GEM_BUG_ON(!irqs_disabled());
lockdep_assert_held(&engine->timeline.lock);
@@ -420,18 +477,25 @@ void __i915_request_unsubmit(struct i915_request *request)
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
- GEM_BUG_ON(!request->global_seqno);
- GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
- GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
- engine->timeline.seqno--;
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- request->global_seqno = 0;
+
+ /*
+ * As we do not allow WAIT to preempt inflight requests,
+ * once we have executed a request, along with triggering
+ * any execution callbacks, we must preserve its ordering
+ * within the non-preemptible FIFO.
+ */
+ BUILD_BUG_ON(__NO_PREEMPTION & ~I915_PRIORITY_MASK); /* only internal */
+ request->sched.attr.priority |= __NO_PREEMPTION;
+
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
i915_request_cancel_breadcrumb(request);
+
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+
spin_unlock(&request->lock);
/* Transfer back from the global per-engine timeline to per-context */
@@ -489,6 +553,36 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static int __i915_sw_fence_call
+semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_request *request =
+ container_of(fence, typeof(*request), semaphore);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ /*
+ * We only check a small portion of our dependencies
+ * and so cannot guarantee that there remains no
+ * semaphore chain across all. Instead of opting
+ * for the full NOSEMAPHORE boost, we go for the
+ * smaller (but still preempting) boost of
+ * NEWCLIENT. This will be enough to boost over
+ * a busywaiting request (as that cannot be
+ * NEWCLIENT) without accidentally boosting
+ * a busywait over real work elsewhere.
+ */
+ i915_schedule_bump_priority(request, I915_PRIORITY_NEWCLIENT);
+ break;
+
+ case FENCE_FREE:
+ i915_request_put(request);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static void ring_retire_requests(struct intel_ring *ring)
{
struct i915_request *rq, *rn;
@@ -518,12 +612,7 @@ i915_request_alloc_slow(struct intel_context *ce)
ring_retire_requests(ring);
out:
- return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
-}
-
-static int add_timeline_barrier(struct i915_request *rq)
-{
- return i915_request_await_active_request(rq, &rq->timeline->barrier);
+ return kmem_cache_alloc(global.slab_requests, GFP_KERNEL);
}
/**
@@ -539,8 +628,10 @@ struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
struct drm_i915_private *i915 = engine->i915;
- struct i915_request *rq;
struct intel_context *ce;
+ struct i915_timeline *tl;
+ struct i915_request *rq;
+ u32 seqno;
int ret;
lockdep_assert_held(&i915->drm.struct_mutex);
@@ -556,8 +647,9 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged.
*/
- if (i915_terminally_wedged(&i915->gpu_error))
- return ERR_PTR(-EIO);
+ ret = i915_terminally_wedged(i915);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Pinning the contexts may generate requests in order to acquire
@@ -569,6 +661,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
return ERR_CAST(ce);
reserve_gt(i915);
+ mutex_lock(&ce->ring->timeline->mutex);
/* Move our oldest request to the slab-cache (if not in use!) */
rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -605,7 +698,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*
* Do not use kmem_cache_zalloc() here!
*/
- rq = kmem_cache_alloc(i915->requests,
+ rq = kmem_cache_alloc(global.slab_requests,
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
if (unlikely(!rq)) {
rq = i915_request_alloc_slow(ce);
@@ -615,32 +708,36 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
}
}
- rq->rcustate = get_state_synchronize_rcu();
-
INIT_LIST_HEAD(&rq->active_list);
+ INIT_LIST_HEAD(&rq->execute_cb);
+
+ tl = ce->ring->timeline;
+ ret = i915_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
rq->i915 = i915;
rq->engine = engine;
rq->gem_context = ctx;
rq->hw_context = ce;
rq->ring = ce->ring;
- rq->timeline = ce->ring->timeline;
+ rq->timeline = tl;
GEM_BUG_ON(rq->timeline == &engine->timeline);
- rq->hwsp_seqno = rq->timeline->hwsp_seqno;
+ rq->hwsp_seqno = tl->hwsp_seqno;
+ rq->hwsp_cacheline = tl->hwsp_cacheline;
+ rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
spin_lock_init(&rq->lock);
- dma_fence_init(&rq->fence,
- &i915_fence_ops,
- &rq->lock,
- rq->timeline->fence_context,
- timeline_get_seqno(rq->timeline));
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
+ tl->fence_context, seqno);
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);
+ i915_sw_fence_init(&i915_request_get(rq)->semaphore, semaphore_notify);
i915_sched_node_init(&rq->sched);
/* No zalloc, must clear what we need by hand */
- rq->global_seqno = 0;
rq->file_priv = NULL;
rq->batch = NULL;
rq->capture_list = NULL;
@@ -668,10 +765,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
*/
rq->head = rq->ring->emit;
- ret = add_timeline_barrier(rq);
- if (ret)
- goto err_unwind;
-
ret = engine->request_alloc(rq);
if (ret)
goto err_unwind;
@@ -682,7 +775,10 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
/* Check that we didn't interrupt ourselves with a new request */
+ lockdep_assert_held(&rq->timeline->mutex);
GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
+ rq->cookie = lockdep_pin_lock(&rq->timeline->mutex);
+
return rq;
err_unwind:
@@ -693,14 +789,76 @@ err_unwind:
GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
- kmem_cache_free(i915->requests, rq);
+err_free:
+ kmem_cache_free(global.slab_requests, rq);
err_unreserve:
+ mutex_unlock(&ce->ring->timeline->mutex);
unreserve_gt(i915);
intel_context_unpin(ce);
return ERR_PTR(ret);
}
static int
+emit_semaphore_wait(struct i915_request *to,
+ struct i915_request *from,
+ gfp_t gfp)
+{
+ u32 hwsp_offset;
+ u32 *cs;
+ int err;
+
+ GEM_BUG_ON(!from->timeline->has_initial_breadcrumb);
+ GEM_BUG_ON(INTEL_GEN(to->i915) < 8);
+
+ /* Just emit the first semaphore we see as request space is limited. */
+ if (to->sched.semaphores & from->engine->mask)
+ return i915_sw_fence_await_dma_fence(&to->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+
+ err = i915_sw_fence_await_dma_fence(&to->semaphore,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (err < 0)
+ return err;
+
+ /* We need to pin the signaler's HWSP until we are finished reading. */
+ err = i915_timeline_read_hwsp(from, to, &hwsp_offset);
+ if (err)
+ return err;
+
+ /* Only submit our spinner after the signaler is running! */
+ err = i915_request_await_execution(to, from, gfp);
+ if (err)
+ return err;
+
+ cs = intel_ring_begin(to, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /*
+ * Using greater-than-or-equal here means we have to worry
+ * about seqno wraparound. To side step that issue, we swap
+ * the timeline HWSP upon wrapping, so that everyone listening
+ * for the old (pre-wrap) values does not see much smaller
+ * (post-wrap) values than they were expecting (and so wait
+ * forever).
+ */
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = from->fence.seqno;
+ *cs++ = hwsp_offset;
+ *cs++ = 0;
+
+ intel_ring_advance(to, cs);
+ to->sched.semaphores |= from->engine->mask;
+ to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ return 0;
+}
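
The MI_SEMAPHORE_SAD_GTE_SDD poll emitted above is a plain unsigned greater-or-equal compare in hardware, which is why the comment worries about seqno wraparound and the driver instead swaps the HWSP cacheline when the timeline wraps. For contrast, software seqno comparisons usually tolerate wrap with the signed-difference idiom; a standalone sketch of that idiom (not part of this patch):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a has passed b": interpret the unsigned difference as signed,
 * valid while the two seqnos stay within 2^31 of each other. */
static int seqno_passed(uint32_t a, uint32_t b)
{
    return (int32_t)(a - b) >= 0;
}

int main(void)
{
    uint32_t pre_wrap = 0xfffffffeu, post_wrap = 2;

    /* Naive unsigned compare (what the GTE semaphore does) never succeeds
     * once the seqno wraps: 2 >= 0xfffffffe is false. */
    printf("naive:     %d\n", post_wrap >= pre_wrap);
    /* The signed-difference form still orders post_wrap after pre_wrap. */
    printf("wrap-safe: %d\n", seqno_passed(post_wrap, pre_wrap));
    return 0;
}
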
+
+static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
int ret;
@@ -712,9 +870,7 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
return 0;
if (to->engine->schedule) {
- ret = i915_sched_node_add_dependency(to->i915,
- &to->sched,
- &from->sched);
+ ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
if (ret < 0)
return ret;
}
@@ -723,6 +879,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
+ } else if (intel_engine_has_semaphores(to->engine) &&
+ to->gem_context->sched.priority >= I915_PRIORITY_NORMAL) {
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
} else {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
@@ -873,6 +1032,60 @@ void i915_request_skip(struct i915_request *rq, int error)
memset(vaddr + head, 0, rq->postfix - head);
}
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct i915_timeline *timeline = rq->timeline;
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * If we consider the case of a virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing) and this
+ * precludes optimising to use semaphore serialisation of a single
+ * timeline across engines.
+ */
+ prev = i915_active_request_raw(&timeline->last_request,
+ &rq->i915->drm.struct_mutex);
+ if (prev && !i915_request_completed(prev)) {
+ if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+ else
+ __i915_sw_fence_await_dma_fence(&rq->submit,
+ &prev->fence,
+ &rq->dmaq);
+ if (rq->engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+
+ spin_lock_irq(&timeline->lock);
+ list_add_tail(&rq->link, &timeline->requests);
+ spin_unlock_irq(&timeline->lock);
+
+ GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
+ __i915_active_request_set(&timeline->last_request, rq);
+
+ return prev;
+}
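
The is_power_of_2() test above is shorthand for "do the previous request and the new one run on the same single engine?": each engine owns one bit of ->mask, so OR-ing the two masks yields a single set bit only in that case, and the cheaper submit-fence path can be used. A standalone illustration with hypothetical engine bits (not part of the patch):

#include <stdio.h>

/* Same shape as the kernel's is_power_of_2(). */
static int is_power_of_2(unsigned long n)
{
    return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
    /* Hypothetical per-engine bits: bit 0 = rcs0, bit 1 = vcs0. */
    unsigned long rcs0 = 1UL << 0, vcs0 = 1UL << 1;

    printf("same engine:       %d\n", is_power_of_2(rcs0 | rcs0)); /* 1 */
    printf("different engines: %d\n", is_power_of_2(rcs0 | vcs0)); /* 0 */
    return 0;
}
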
+
/*
* NB: This function is not allowed to fail. Doing so would mean the
* request is not being tracked for completion but the work itself is
@@ -889,7 +1102,9 @@ void i915_request_add(struct i915_request *request)
GEM_TRACE("%s fence %llx:%lld\n",
engine->name, request->fence.context, request->fence.seqno);
- lockdep_assert_held(&request->i915->drm.struct_mutex);
+ lockdep_assert_held(&request->timeline->mutex);
+ lockdep_unpin_lock(&request->timeline->mutex, request->cookie);
+
trace_i915_request_add(request);
/*
@@ -917,37 +1132,12 @@ void i915_request_add(struct i915_request *request)
GEM_BUG_ON(IS_ERR(cs));
request->postfix = intel_ring_offset(request, cs);
- /*
- * Seal the request and mark it as pending execution. Note that
- * we may inspect this state, without holding any locks, during
- * hangcheck. Hence we apply the barrier to ensure that we do not
- * see a more recent value in the hws than we are tracking.
- */
-
- prev = i915_active_request_raw(&timeline->last_request,
- &request->i915->drm.struct_mutex);
- if (prev && !i915_request_completed(prev)) {
- i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
- &request->submitq);
- if (engine->schedule)
- __i915_sched_node_add_dependency(&request->sched,
- &prev->sched,
- &request->dep,
- 0);
- }
-
- spin_lock_irq(&timeline->lock);
- list_add_tail(&request->link, &timeline->requests);
- spin_unlock_irq(&timeline->lock);
-
- GEM_BUG_ON(timeline->seqno != request->fence.seqno);
- __i915_active_request_set(&timeline->last_request, request);
+ prev = __i915_request_add_to_timeline(request);
list_add_tail(&request->ring_link, &ring->request_list);
- if (list_is_first(&request->ring_link, &ring->request_list)) {
- GEM_TRACE("marking %s as active\n", ring->timeline->name);
+ if (list_is_first(&request->ring_link, &ring->request_list))
list_add(&ring->active_link, &request->i915->gt.active_rings);
- }
+ request->i915->gt.active_engines |= request->engine->mask;
request->emitted_jiffies = jiffies;
/*
@@ -962,11 +1152,27 @@ void i915_request_add(struct i915_request *request)
* run at the earliest possible convenience.
*/
local_bh_disable();
+ i915_sw_fence_commit(&request->semaphore);
rcu_read_lock(); /* RCU serialisation for set-wedged protection */
if (engine->schedule) {
struct i915_sched_attr attr = request->gem_context->sched;
/*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI boosting a semaphore
+ * far in the distant past over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(request->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
* Boost priorities to new clients (new request flows).
*
* Allow interactive/synchronous clients to jump ahead of
@@ -1000,6 +1206,8 @@ void i915_request_add(struct i915_request *request)
*/
if (prev && i915_request_completed(prev))
i915_request_retire_upto(prev);
+
+ mutex_unlock(&request->timeline->mutex);
}
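
The attr.priority |= I915_PRIORITY_NOSEMAPHORE above works because the scheduler packs small boost flags into the bits below the user-visible priority, so OR-ing a flag breaks ties between requests at the same user priority without ever overtaking a genuinely higher one. A standalone sketch of that packing (the bit positions here are illustrative, not the driver's actual layout):

#include <stdio.h>

/* Illustrative layout: user priority in the high bits, boost flags below. */
#define USER_PRIORITY_SHIFT  2
#define USER_PRIORITY(x)     ((x) << USER_PRIORITY_SHIFT)
#define PRIORITY_NOSEMAPHORE (1 << 1) /* hypothetical flag bit */
#define PRIORITY_WAIT        (1 << 0) /* hypothetical flag bit */

int main(void)
{
    int plain   = USER_PRIORITY(0);
    int boosted = USER_PRIORITY(0) | PRIORITY_NOSEMAPHORE;
    int higher  = USER_PRIORITY(1);

    printf("boosted beats plain:  %d\n", boosted > plain);  /* 1: wins the tie */
    printf("boosted beats higher: %d\n", boosted > higher); /* 0: never leapfrogs */
    return 0;
}
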
static unsigned long local_clock_us(unsigned int *cpu)
@@ -1136,8 +1344,25 @@ long i915_request_wait(struct i915_request *rq,
if (__i915_spin_request(rq, state, 5))
goto out;
- if (flags & I915_WAIT_PRIORITY)
+ /*
+ * This client is about to stall waiting for the GPU. In many cases
+ * this is undesirable and limits the throughput of the system, as
+ * many clients cannot continue processing user input/output whilst
+ * blocked. RPS autotuning may take tens of milliseconds to respond
+ * to the GPU load and thus incurs additional latency for the client.
+ * We can circumvent that by promoting the GPU frequency to maximum
+ * before we sleep. This makes the GPU throttle up much more quickly
+ * (good for benchmarks and user experience, e.g. window animations),
+ * but at a cost of spending more power processing the workload
+ * (bad for battery).
+ */
+ if (flags & I915_WAIT_PRIORITY) {
+ if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6)
+ gen6_rps_boost(rq);
+ local_bh_disable(); /* suspend tasklets for reprioritisation */
i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);
+ local_bh_enable(); /* kick tasklets en masse */
+ }
wait.tsk = current;
if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
@@ -1179,11 +1404,66 @@ void i915_retire_requests(struct drm_i915_private *i915)
if (!i915->gt.active_requests)
return;
- list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
+ list_for_each_entry_safe(ring, tmp,
+ &i915->gt.active_rings, active_link) {
+ intel_ring_get(ring); /* last rq holds reference! */
ring_retire_requests(ring);
+ intel_ring_put(ring);
+ }
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif
+
+static void i915_global_request_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_execute_cbs);
+ kmem_cache_shrink(global.slab_requests);
+}
+
+static void i915_global_request_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_execute_cbs);
+ kmem_cache_destroy(global.slab_requests);
+}
+
+static struct i915_global_request global = { {
+ .shrink = i915_global_request_shrink,
+ .exit = i915_global_request_exit,
+} };
+
+int __init i915_global_request_init(void)
+{
+ global.slab_requests = KMEM_CACHE(i915_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_requests)
+ return -ENOMEM;
+
+ global.slab_execute_cbs = KMEM_CACHE(execute_cb,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_TYPESAFE_BY_RCU);
+ if (!global.slab_execute_cbs)
+ goto err_requests;
+
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!global.slab_dependencies)
+ goto err_execute_cbs;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_execute_cbs:
+ kmem_cache_destroy(global.slab_execute_cbs);
+err_requests:
+ kmem_cache_destroy(global.slab_requests);
+ return -ENOMEM;
+}
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 40f3e8dcbdd5..a982664618c2 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,9 +26,11 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/lockdep.h>
#include "i915_gem.h"
#include "i915_scheduler.h"
+#include "i915_selftest.h"
#include "i915_sw_fence.h"
#include <uapi/drm/i915_drm.h>
@@ -37,6 +39,7 @@ struct drm_file;
struct drm_i915_gem_object;
struct i915_request;
struct i915_timeline;
+struct i915_timeline_cacheline;
struct i915_capture_list {
struct i915_capture_list *next;
@@ -119,6 +122,15 @@ struct i915_request {
unsigned long rcustate;
/*
+ * We pin the timeline->mutex while constructing the request to
+ * ensure that no caller accidentally drops it during construction.
+ * The timeline->mutex must be held to ensure that only this caller
+ * can use the ring and manipulate the associated timeline during
+ * construction.
+ */
+ struct pin_cookie cookie;
+
+ /*
* Fences for the various phases in the request's lifetime.
*
* The submit fence is used to await upon all of the request's
@@ -126,7 +138,12 @@ struct i915_request {
* It is used by the driver to then queue the request for execution.
*/
struct i915_sw_fence submit;
- wait_queue_entry_t submitq;
+ union {
+ wait_queue_entry_t submitq;
+ struct i915_sw_dma_fence_cb dmaq;
+ };
+ struct list_head execute_cb;
+ struct i915_sw_fence semaphore;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -147,13 +164,15 @@ struct i915_request {
*/
const u32 *hwsp_seqno;
- /**
- * GEM sequence number associated with this request on the
- * global execution timeline. It is zero when the request is not
- * on the HW queue (i.e. not on the engine timeline list).
- * Its value is guarded by the timeline spinlock.
+ /*
+ * If we need to access the timeline's seqno for this request in
+ * another request, we need to keep a read reference to this associated
+ * cacheline, so that we do not free and recycle it before the foreign
+ * observers have completed. Hence, we keep a pointer to the cacheline
+ * inside the timeline's HWSP vma, but it is only valid while this
+ * request has not completed and guarded by the timeline mutex.
*/
- u32 global_seqno;
+ struct i915_timeline_cacheline *hwsp_cacheline;
/** Position in the ring of the start of the request */
u32 head;
@@ -204,6 +223,11 @@ struct i915_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_link;
+
+ I915_SELFTEST_DECLARE(struct {
+ struct list_head link;
+ unsigned long delay;
+ } mock;)
};
#define I915_FENCE_GFP (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -247,30 +271,6 @@ i915_request_put(struct i915_request *rq)
dma_fence_put(&rq->fence);
}
-/**
- * i915_request_global_seqno - report the current global seqno
- * @request - the request
- *
- * A request is assigned a global seqno only when it is on the hardware
- * execution queue. The global seqno can be used to maintain a list of
- * requests on the same engine in retirement order, for example for
- * constructing a priority queue for waiting. Prior to its execution, or
- * if it is subsequently removed in the event of preemption, its global
- * seqno is zero. As both insertion and removal from the execution queue
- * may operate in IRQ context, it is not guarded by the usual struct_mutex
- * BKL. Instead those relying on the global seqno must be prepared for its
- * value to change between reads. Only when the request is complete can
- * the global seqno be stable (due to the memory barriers on submitting
- * the commands to the hardware to write the breadcrumb, if the HWS shows
- * that it has passed the global seqno and the global seqno is unchanged
- * after the read, it is indeed complete).
- */
-static inline u32
-i915_request_global_seqno(const struct i915_request *request)
-{
- return READ_ONCE(request->global_seqno);
-}
-
int i915_request_await_object(struct i915_request *to,
struct drm_i915_gem_object *obj,
bool write);
@@ -358,10 +358,27 @@ static inline bool __i915_request_has_started(const struct i915_request *rq)
* i915_request_started - check if the request has begun being executed
* @rq: the request
*
- * Returns true if the request has been submitted to hardware, and the hardware
- * has advanced passed the end of the previous request and so should be either
- * currently processing the request (though it may be preempted and so
- * not necessarily the next request to complete) or have completed the request.
+ * If the timeline is not using initial breadcrumbs, a request is
+ * considered started if the previous request on its timeline (i.e.
+ * context) has been signaled.
+ *
+ * If the timeline is using semaphores, it will also be emitting an
+ * "initial breadcrumb" after the semaphores are complete and just before
+ * it began executing the user payload. A request can therefore be active
+ * on the HW and not yet started as it is still busywaiting on its
+ * dependencies (via HW semaphores).
+ *
+ * If the request has started, its dependencies will have been signaled
+ * (either by fences or by semaphores) and it will have begun processing
+ * the user payload.
+ *
+ * However, even if a request has started, it may have been preempted and
+ * so no longer active, or it may have already completed.
+ *
+ * See also i915_request_is_active().
+ *
+ * Returns true if the request has begun executing the user payload, or
+ * has completed.
*/
static inline bool i915_request_started(const struct i915_request *rq)
{
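
The submitq/dmaq union introduced above relies on a request waiting on its predecessor through exactly one of two mechanisms (a sw_fence wait-queue entry when both live on the same engine, a dma-fence callback otherwise), so the two embedded nodes can share storage. A standalone sketch of the same space-saving idea with simplified stand-in node types (not the driver's):

#include <stdio.h>

struct wq_node { void *prev, *next, *func; };  /* stand-in for submitq */
struct cb_node { void *node, *func; };         /* stand-in for dmaq */

struct fake_request {
    int same_engine;        /* selects which member is live */
    union {                 /* only one is ever used per request */
        struct wq_node submitq;
        struct cb_node dmaq;
    };
};

int main(void)
{
    /* The union costs max(member sizes), not their sum (ignoring padding). */
    printf("with union:    %zu bytes\n", sizeof(struct fake_request));
    printf("without union: %zu bytes\n",
           sizeof(int) + sizeof(struct wq_node) + sizeof(struct cb_node));
    return 0;
}
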
diff --git a/drivers/gpu/drm/i915/i915_reset.c b/drivers/gpu/drm/i915/i915_reset.c
index 0e0ddf2e6815..677d59304e78 100644
--- a/drivers/gpu/drm/i915/i915_reset.c
+++ b/drivers/gpu/drm/i915/i915_reset.c
@@ -18,28 +18,39 @@
/* XXX How to handle concurrent GGTT updates using tiling registers? */
#define RESET_UNDER_STOP_MACHINE 0
+static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw(uncore, reg, 0, set);
+}
+
+static void rmw_clear(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw(uncore, reg, clr, 0);
+}
+
+static void rmw_set_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 set)
+{
+ intel_uncore_rmw_fw(uncore, reg, 0, set);
+}
+
+static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr)
+{
+ intel_uncore_rmw_fw(uncore, reg, clr, 0);
+}
+
static void engine_skip_context(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
struct i915_gem_context *hung_ctx = rq->gem_context;
- struct i915_timeline *timeline = rq->timeline;
lockdep_assert_held(&engine->timeline.lock);
- GEM_BUG_ON(timeline == &engine->timeline);
-
- spin_lock(&timeline->lock);
-
- if (i915_request_is_active(rq)) {
- list_for_each_entry_continue(rq,
- &engine->timeline.requests, link)
- if (rq->gem_context == hung_ctx)
- i915_request_skip(rq, -EIO);
- }
- list_for_each_entry(rq, &timeline->requests, link)
- i915_request_skip(rq, -EIO);
+ if (!i915_request_is_active(rq))
+ return;
- spin_unlock(&timeline->lock);
+ list_for_each_entry_continue(rq, &engine->timeline.requests, link)
+ if (rq->gem_context == hung_ctx)
+ i915_request_skip(rq, -EIO);
}
static void client_mark_guilty(struct drm_i915_file_private *file_priv,
@@ -68,23 +79,29 @@ static void client_mark_guilty(struct drm_i915_file_private *file_priv,
static bool context_mark_guilty(struct i915_gem_context *ctx)
{
- unsigned int score;
- bool banned, bannable;
+ unsigned long prev_hang;
+ bool banned;
+ int i;
atomic_inc(&ctx->guilty_count);
- bannable = i915_gem_context_is_bannable(ctx);
- score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
-
- /* Cool contexts don't accumulate client ban score */
- if (!bannable)
+ /* Cool contexts are too cool to be banned! (Used for reset testing.) */
+ if (!i915_gem_context_is_bannable(ctx))
return false;
+ /* Record the timestamp for the last N hangs */
+ prev_hang = ctx->hang_timestamp[0];
+ for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp) - 1; i++)
+ ctx->hang_timestamp[i] = ctx->hang_timestamp[i + 1];
+ ctx->hang_timestamp[i] = jiffies;
+
+ /* If we have hung N+1 times in rapid succession, we ban the context! */
+ banned = !i915_gem_context_is_recoverable(ctx);
+ if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
+ banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count),
- score);
+ DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
i915_gem_context_set_banned(ctx);
}
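
context_mark_guilty() above keeps a short ring of hang timestamps and bans the context once it hangs N+1 times inside CONTEXT_FAST_HANG_JIFFIES, i.e. in rapid succession. A standalone model of the same sliding-window test, with made-up history depth and window (not the driver's values):

#include <stdio.h>
#include <time.h>

#define HANG_HISTORY   2     /* remember the last N hangs (illustrative) */
#define FAST_HANG_SECS 120   /* "rapid succession" window (illustrative) */

static time_t hang_timestamp[HANG_HISTORY];

/* Record a hang and report whether the oldest remembered hang is still
 * inside the window, i.e. HANG_HISTORY + 1 hangs came too quickly. */
static int mark_hang_and_check_ban(time_t now)
{
    time_t prev = hang_timestamp[0];
    int i;

    for (i = 0; i < HANG_HISTORY - 1; i++)
        hang_timestamp[i] = hang_timestamp[i + 1];
    hang_timestamp[i] = now;

    return prev && now < prev + FAST_HANG_SECS;
}

int main(void)
{
    time_t t = time(NULL);

    printf("hang 1: banned=%d\n", mark_hang_and_check_ban(t));
    printf("hang 2: banned=%d\n", mark_hang_and_check_ban(t + 1));
    printf("hang 3: banned=%d\n", mark_hang_and_check_ban(t + 2)); /* banned */
    return 0;
}
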
@@ -101,6 +118,12 @@ static void context_mark_innocent(struct i915_gem_context *ctx)
void i915_reset_request(struct i915_request *rq, bool guilty)
{
+ GEM_TRACE("%s rq=%llx:%lld, guilty? %s\n",
+ rq->engine->name,
+ rq->fence.context,
+ rq->fence.seqno,
+ yesno(guilty));
+
lockdep_assert_held(&rq->engine->timeline.lock);
GEM_BUG_ON(i915_request_completed(rq));
@@ -116,38 +139,43 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
static void gen3_stop_engine(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
+ GEM_TRACE("%s\n", engine->name);
+
if (intel_engine_stop_cs(engine))
- DRM_DEBUG_DRIVER("%s: timed out on STOP_RING\n", engine->name);
+ GEM_TRACE("%s: timed out on STOP_RING\n", engine->name);
- I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base)));
- POSTING_READ_FW(RING_HEAD(base)); /* paranoia */
+ intel_uncore_write_fw(uncore,
+ RING_HEAD(base),
+ intel_uncore_read_fw(uncore, RING_TAIL(base)));
+ intel_uncore_posting_read_fw(uncore, RING_HEAD(base)); /* paranoia */
- I915_WRITE_FW(RING_HEAD(base), 0);
- I915_WRITE_FW(RING_TAIL(base), 0);
- POSTING_READ_FW(RING_TAIL(base));
+ intel_uncore_write_fw(uncore, RING_HEAD(base), 0);
+ intel_uncore_write_fw(uncore, RING_TAIL(base), 0);
+ intel_uncore_posting_read_fw(uncore, RING_TAIL(base));
/* The ring must be empty before it is disabled */
- I915_WRITE_FW(RING_CTL(base), 0);
+ intel_uncore_write_fw(uncore, RING_CTL(base), 0);
/* Check acts as a post */
- if (I915_READ_FW(RING_HEAD(base)) != 0)
- DRM_DEBUG_DRIVER("%s: ring head not parked\n",
- engine->name);
+ if (intel_uncore_read_fw(uncore, RING_HEAD(base)))
+ GEM_TRACE("%s: ring head [%x] not parked\n",
+ engine->name,
+ intel_uncore_read_fw(uncore, RING_HEAD(base)));
}
static void i915_stop_engines(struct drm_i915_private *i915,
- unsigned int engine_mask)
+ intel_engine_mask_t engine_mask)
{
struct intel_engine_cs *engine;
- enum intel_engine_id id;
+ intel_engine_mask_t tmp;
if (INTEL_GEN(i915) < 3)
return;
- for_each_engine_masked(engine, i915, engine_mask, id)
+ for_each_engine_masked(engine, i915, engine_mask, tmp)
gen3_stop_engine(engine);
}
@@ -160,7 +188,7 @@ static bool i915_in_reset(struct pci_dev *pdev)
}
static int i915_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -189,7 +217,7 @@ static bool g4x_reset_complete(struct pci_dev *pdev)
}
static int g33_do_reset(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct pci_dev *pdev = i915->drm.pdev;
@@ -198,17 +226,17 @@ static int g33_do_reset(struct drm_i915_private *i915,
return wait_for_atomic(g4x_reset_complete(pdev), 50);
}
-static int g4x_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int g4x_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
/* WaVcpClkGateDisableForMediaReset:ctg,elk */
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_set_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
pci_write_config_byte(pdev, I915_GDRST,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
@@ -229,21 +257,22 @@ static int g4x_do_reset(struct drm_i915_private *dev_priv,
out:
pci_write_config_byte(pdev, I915_GDRST, 0);
- I915_WRITE_FW(VDECCLK_GATE_D,
- I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
- POSTING_READ_FW(VDECCLK_GATE_D);
+ rmw_clear_fw(uncore, VDECCLK_GATE_D, VCP_UNIT_CLOCK_GATE_DISABLE);
+ intel_uncore_posting_read_fw(uncore, VDECCLK_GATE_D);
return ret;
}
-static int ironlake_do_reset(struct drm_i915_private *dev_priv,
- unsigned int engine_mask,
+static int ironlake_do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
+ struct intel_uncore *uncore = &i915->uncore;
int ret;
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -252,8 +281,9 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
goto out;
}
- I915_WRITE_FW(ILK_GDSR, ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
- ret = __intel_wait_for_register_fw(dev_priv, ILK_GDSR,
+ intel_uncore_write_fw(uncore, ILK_GDSR,
+ ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
+ ret = __intel_wait_for_register_fw(uncore, ILK_GDSR,
ILK_GRDOM_RESET_ENABLE, 0,
5000, 0,
NULL);
@@ -263,15 +293,16 @@ static int ironlake_do_reset(struct drm_i915_private *dev_priv,
}
out:
- I915_WRITE_FW(ILK_GDSR, 0);
- POSTING_READ_FW(ILK_GDSR);
+ intel_uncore_write_fw(uncore, ILK_GDSR, 0);
+ intel_uncore_posting_read_fw(uncore, ILK_GDSR);
return ret;
}
/* Reset the hardware domains (GENX_GRDOM_*) specified by mask */
-static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
+static int gen6_hw_domain_reset(struct drm_i915_private *i915,
u32 hw_domain_mask)
{
+ struct intel_uncore *uncore = &i915->uncore;
int err;
/*
@@ -279,10 +310,10 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
* for fifo space for the write or forcewake the chip for
* the read
*/
- I915_WRITE_FW(GEN6_GDRST, hw_domain_mask);
+ intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
/* Wait for the device to ack the reset requests */
- err = __intel_wait_for_register_fw(dev_priv,
+ err = __intel_wait_for_register_fw(uncore,
GEN6_GDRST, hw_domain_mask, 0,
500, 0,
NULL);
@@ -294,36 +325,38 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
}
static int gen6_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN6_GRDOM_RENDER,
- [BCS] = GEN6_GRDOM_BLT,
- [VCS] = GEN6_GRDOM_MEDIA,
- [VCS2] = GEN8_GRDOM_MEDIA2,
- [VECS] = GEN6_GRDOM_VECS,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN6_GRDOM_RENDER,
+ [BCS0] = GEN6_GRDOM_BLT,
+ [VCS0] = GEN6_GRDOM_MEDIA,
+ [VCS1] = GEN8_GRDOM_MEDIA2,
+ [VECS0] = GEN6_GRDOM_VECS,
};
u32 hw_mask;
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN6_GRDOM_FULL;
} else {
- unsigned int tmp;
+ intel_engine_mask_t tmp;
hw_mask = 0;
- for_each_engine_masked(engine, i915, engine_mask, tmp)
+ for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
+ }
}
return gen6_hw_domain_reset(i915, hw_mask);
}
-static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static u32 gen11_lock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock, sfc_forced_lock_ack;
u32 sfc_forced_lock_bit, sfc_forced_lock_ack_bit;
i915_reg_t sfc_usage;
@@ -370,10 +403,9 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
* ends up being locked to the engine we want to reset, we have to reset
* it as well (we will unlock it once the reset sequence is completed).
*/
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) | sfc_forced_lock_bit);
+ rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
sfc_forced_lock_ack,
sfc_forced_lock_ack_bit,
sfc_forced_lock_ack_bit,
@@ -382,16 +414,16 @@ static u32 gen11_lock_sfc(struct drm_i915_private *dev_priv,
return 0;
}
- if (I915_READ_FW(sfc_usage) & sfc_usage_bit)
+ if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)
return sfc_reset_bit;
return 0;
}
-static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
- struct intel_engine_cs *engine)
+static void gen11_unlock_sfc(struct intel_engine_cs *engine)
{
- u8 vdbox_sfc_access = RUNTIME_INFO(dev_priv)->vdbox_sfc_access;
+ struct intel_uncore *uncore = engine->uncore;
+ u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access;
i915_reg_t sfc_forced_lock;
u32 sfc_forced_lock_bit;
@@ -413,38 +445,36 @@ static void gen11_unlock_sfc(struct drm_i915_private *dev_priv,
return;
}
- I915_WRITE_FW(sfc_forced_lock,
- I915_READ_FW(sfc_forced_lock) & ~sfc_forced_lock_bit);
+ rmw_clear_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit);
}
static int gen11_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
- const u32 hw_engine_mask[I915_NUM_ENGINES] = {
- [RCS] = GEN11_GRDOM_RENDER,
- [BCS] = GEN11_GRDOM_BLT,
- [VCS] = GEN11_GRDOM_MEDIA,
- [VCS2] = GEN11_GRDOM_MEDIA2,
- [VCS3] = GEN11_GRDOM_MEDIA3,
- [VCS4] = GEN11_GRDOM_MEDIA4,
- [VECS] = GEN11_GRDOM_VECS,
- [VECS2] = GEN11_GRDOM_VECS2,
+ const u32 hw_engine_mask[] = {
+ [RCS0] = GEN11_GRDOM_RENDER,
+ [BCS0] = GEN11_GRDOM_BLT,
+ [VCS0] = GEN11_GRDOM_MEDIA,
+ [VCS1] = GEN11_GRDOM_MEDIA2,
+ [VCS2] = GEN11_GRDOM_MEDIA3,
+ [VCS3] = GEN11_GRDOM_MEDIA4,
+ [VECS0] = GEN11_GRDOM_VECS,
+ [VECS1] = GEN11_GRDOM_VECS2,
};
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
u32 hw_mask;
int ret;
- BUILD_BUG_ON(VECS2 + 1 != I915_NUM_ENGINES);
-
if (engine_mask == ALL_ENGINES) {
hw_mask = GEN11_GRDOM_FULL;
} else {
hw_mask = 0;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
+ GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask));
hw_mask |= hw_engine_mask[engine->id];
- hw_mask |= gen11_lock_sfc(i915, engine);
+ hw_mask |= gen11_lock_sfc(engine);
}
}
@@ -452,46 +482,62 @@ static int gen11_reset_engines(struct drm_i915_private *i915,
if (engine_mask != ALL_ENGINES)
for_each_engine_masked(engine, i915, engine_mask, tmp)
- gen11_unlock_sfc(i915, engine);
+ gen11_unlock_sfc(engine);
return ret;
}
static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
+ const i915_reg_t reg = RING_RESET_CTL(engine->mmio_base);
+ u32 request, mask, ack;
int ret;
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
+ ack = intel_uncore_read_fw(uncore, reg);
+ if (ack & RESET_CTL_CAT_ERROR) {
+ /*
+ * For catastrophic errors, the ready-for-reset sequence
+ * needs to be bypassed: HAS#396813
+ */
+ request = RESET_CTL_CAT_ERROR;
+ mask = RESET_CTL_CAT_ERROR;
+
+ /* Catastrophic errors need to be cleared by HW */
+ ack = 0;
+ } else if (!(ack & RESET_CTL_READY_TO_RESET)) {
+ request = RESET_CTL_REQUEST_RESET;
+ mask = RESET_CTL_READY_TO_RESET;
+ ack = RESET_CTL_READY_TO_RESET;
+ } else {
+ return 0;
+ }
- ret = __intel_wait_for_register_fw(dev_priv,
- RING_RESET_CTL(engine->mmio_base),
- RESET_CTL_READY_TO_RESET,
- RESET_CTL_READY_TO_RESET,
- 700, 0,
- NULL);
+ intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
+ 700, 0, NULL);
if (ret)
- DRM_ERROR("%s: reset request timeout\n", engine->name);
+ DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ intel_uncore_write_fw(engine->uncore,
+ RING_RESET_CTL(engine->mmio_base),
+ _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct drm_i915_private *i915,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry)
{
struct intel_engine_cs *engine;
const bool reset_non_ready = retry >= 1;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
int ret;
for_each_engine_masked(engine, i915, engine_mask, tmp) {
@@ -527,14 +573,11 @@ skip_reset:
}
typedef int (*reset_func)(struct drm_i915_private *,
- unsigned int engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned int retry);
static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
{
- if (!i915_modparams.reset)
- return NULL;
-
if (INTEL_GEN(i915) >= 8)
return gen8_reset_engines;
else if (INTEL_GEN(i915) >= 6)
@@ -551,7 +594,8 @@ static reset_func intel_get_gpu_reset(struct drm_i915_private *i915)
return NULL;
}
-int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask)
{
const int retries = engine_mask == ALL_ENGINES ? RESET_MAX_RETRIES : 1;
reset_func reset;
@@ -566,7 +610,7 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
* If the power well sleeps during the reset, the reset
* request may be dropped and never completes (causing -EIO).
*/
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
for (retry = 0; ret == -ETIMEDOUT && retry < retries; retry++) {
/*
* We stop engines, otherwise we might get failed reset and a
@@ -582,14 +626,15 @@ int intel_gpu_reset(struct drm_i915_private *i915, unsigned int engine_mask)
*
* FIXME: Wa for more modern gens needs to be validated
*/
- i915_stop_engines(i915, engine_mask);
+ if (retry)
+ i915_stop_engines(i915, engine_mask);
GEM_TRACE("engine_mask=%x\n", engine_mask);
preempt_disable();
ret = reset(i915, engine_mask, retry);
preempt_enable();
}
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -599,6 +644,9 @@ bool intel_has_gpu_reset(struct drm_i915_private *i915)
if (USES_GUC(i915))
return false;
+ if (!i915_modparams.reset)
+ return NULL;
+
return intel_get_gpu_reset(i915);
}
@@ -615,9 +663,9 @@ int intel_reset_guc(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
ret = gen6_hw_domain_reset(i915, guc_domain);
- intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -635,10 +683,36 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
* written to the powercontext is undefined and so we may lose
* GPU state upon resume, i.e. fail to restart after a reset.
*/
- intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
engine->reset.prepare(engine);
}
+static void revoke_mmaps(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->num_fence_regs; i++) {
+ struct drm_vma_offset_node *node;
+ struct i915_vma *vma;
+ u64 vma_offset;
+
+ vma = READ_ONCE(i915->fence_regs[i].vma);
+ if (!vma)
+ continue;
+
+ if (!i915_vma_has_userfault(vma))
+ continue;
+
+ GEM_BUG_ON(vma->fence != &i915->fence_regs[i]);
+ node = &vma->obj->base.vma_node;
+ vma_offset = vma->ggtt_view.partial.offset << PAGE_SHIFT;
+ unmap_mapping_range(i915->drm.anon_inode->i_mapping,
+ drm_vma_node_offset_addr(node) + vma_offset,
+ vma->size,
+ 1);
+ }
+}
+
static void reset_prepare(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -647,10 +721,16 @@ static void reset_prepare(struct drm_i915_private *i915)
for_each_engine(engine, i915, id)
reset_prepare_engine(engine);
- intel_uc_sanitize(i915);
+ intel_uc_reset_prepare(i915);
+}
+
+static void gt_revoke(struct drm_i915_private *i915)
+{
+ revoke_mmaps(i915);
}
-static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int gt_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -665,7 +745,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
return err;
for_each_engine(engine, i915, id)
- intel_engine_reset(engine, stalled_mask & ENGINE_MASK(id));
+ intel_engine_reset(engine, stalled_mask & engine->mask);
i915_gem_restore_fences(i915);
@@ -675,7 +755,7 @@ static int gt_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
static void reset_finish_engine(struct intel_engine_cs *engine)
{
engine->reset.finish(engine);
- intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
struct i915_gpu_restart {
@@ -722,8 +802,10 @@ static void reset_finish(struct drm_i915_private *i915)
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, i915, id)
+ for_each_engine(engine, i915, id) {
reset_finish_engine(engine);
+ intel_engine_signal_breadcrumbs(engine);
+ }
}
static void reset_restart(struct drm_i915_private *i915)
@@ -761,23 +843,19 @@ static void nop_submit_request(struct i915_request *request)
spin_lock_irqsave(&engine->timeline.lock, flags);
__i915_request_submit(request);
i915_request_mark_complete(request);
- intel_engine_write_global_seqno(engine, request->global_seqno);
spin_unlock_irqrestore(&engine->timeline.lock, flags);
intel_engine_queue_breadcrumbs(engine);
}
-void i915_gem_set_wedged(struct drm_i915_private *i915)
+static void __i915_gem_set_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
enum intel_engine_id id;
- mutex_lock(&error->wedge_mutex);
- if (test_bit(I915_WEDGED, &error->flags)) {
- mutex_unlock(&error->wedge_mutex);
+ if (test_bit(I915_WEDGED, &error->flags))
return;
- }
if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(i915)) {
struct drm_printer p = drm_debug_printer(__func__);
@@ -793,11 +871,10 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* rolling the global seqno forward (since this would complete requests
* for which we haven't set the fence error to EIO yet).
*/
- for_each_engine(engine, i915, id)
- reset_prepare_engine(engine);
+ reset_prepare(i915);
/* Even if the GPU reset fails, it should still stop the engines */
- if (INTEL_GEN(i915) >= 5)
+ if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
intel_gpu_reset(i915, ALL_ENGINES);
for_each_engine(engine, i915, id) {
@@ -811,31 +888,35 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
* either this call here to intel_engine_write_global_seqno, or the one
* in nop_submit_request.
*/
- synchronize_rcu();
+ synchronize_rcu_expedited();
/* Mark all executing requests as skipped */
for_each_engine(engine, i915, id)
engine->cancel_requests(engine);
- for_each_engine(engine, i915, id) {
- reset_finish_engine(engine);
- intel_engine_signal_breadcrumbs(engine);
- }
+ reset_finish(i915);
smp_mb__before_atomic();
set_bit(I915_WEDGED, &error->flags);
GEM_TRACE("end\n");
- mutex_unlock(&error->wedge_mutex);
+}
- wake_up_all(&error->reset_queue);
+void i915_gem_set_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&error->wedge_mutex);
+ with_intel_runtime_pm(i915, wakeref)
+ __i915_gem_set_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
}
-bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
{
struct i915_gpu_error *error = &i915->gpu_error;
struct i915_timeline *tl;
- bool ret = false;
if (!test_bit(I915_WEDGED, &error->flags))
return true;
@@ -843,8 +924,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
if (!i915->gt.scratch) /* Never fully initialised, recovery impossible */
return false;
- mutex_lock(&error->wedge_mutex);
-
GEM_TRACE("start\n");
/*
@@ -860,30 +939,20 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
mutex_lock(&i915->gt.timelines.mutex);
list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
struct i915_request *rq;
- long timeout;
rq = i915_active_request_get_unlocked(&tl->last_request);
if (!rq)
continue;
/*
- * We can't use our normal waiter as we want to
- * avoid recursively trying to handle the current
- * reset. The basic dma_fence_default_wait() installs
- * a callback for dma_fence_signal(), which is
- * triggered by our nop handler (indirectly, the
- * callback enables the signaler thread which is
- * woken by the nop_submit_request() advancing the seqno
- * and when the seqno passes the fence, the signaler
- * then signals the fence waking us up).
+ * All internal dependencies (i915_requests) will have
+ * been flushed by the set-wedge, but we may be stuck waiting
+ * for external fences. These should all be capped to 10s
+ * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
+ * in the worst case.
*/
- timeout = dma_fence_default_wait(&rq->fence, true,
- MAX_SCHEDULE_TIMEOUT);
+ dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
i915_request_put(rq);
- if (timeout < 0) {
- mutex_unlock(&i915->gt.timelines.mutex);
- goto unlock;
- }
}
mutex_unlock(&i915->gt.timelines.mutex);
@@ -904,57 +973,38 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
clear_bit(I915_WEDGED, &i915->gpu_error.flags);
- ret = true;
-unlock:
- mutex_unlock(&i915->gpu_error.wedge_mutex);
- return ret;
+ return true;
}
-struct __i915_reset {
- struct drm_i915_private *i915;
- unsigned int stalled_mask;
-};
-
-static int __i915_reset__BKL(void *data)
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
- struct __i915_reset *arg = data;
- int err;
+ struct i915_gpu_error *error = &i915->gpu_error;
+ bool result;
- err = intel_gpu_reset(arg->i915, ALL_ENGINES);
- if (err)
- return err;
+ mutex_lock(&error->wedge_mutex);
+ result = __i915_gem_unset_wedged(i915);
+ mutex_unlock(&error->wedge_mutex);
- return gt_reset(arg->i915, arg->stalled_mask);
+ return result;
}
-#if RESET_UNDER_STOP_MACHINE
-/*
- * XXX An alternative to using stop_machine would be to park only the
- * processes that have a GGTT mmap. By remote parking the threads (SIGSTOP)
- * we should be able to prevent their memmory accesses via the lost fence
- * registers over the course of the reset without the potential recursive
- * of mutexes between the pagefault handler and reset.
- *
- * See igt/gem_mmap_gtt/hang
- */
-#define __do_reset(fn, arg) stop_machine(fn, arg, NULL)
-#else
-#define __do_reset(fn, arg) fn(arg)
-#endif
-
-static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
+static int do_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t stalled_mask)
{
- struct __i915_reset arg = { i915, stalled_mask };
int err, i;
- err = __do_reset(__i915_reset__BKL, &arg);
+ gt_revoke(i915);
+
+ err = intel_gpu_reset(i915, ALL_ENGINES);
for (i = 0; err && i < RESET_MAX_RETRIES; i++) {
- msleep(100);
- err = __do_reset(__i915_reset__BKL, &arg);
+ msleep(10 * (i + 1));
+ err = intel_gpu_reset(i915, ALL_ENGINES);
}
+ if (err)
+ return err;
- return err;
+ return gt_reset(i915, stalled_mask);
}
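
do_reset() above retries the full GPU reset a bounded number of times, sleeping slightly longer before each attempt (10 ms, 20 ms, ...) instead of hammering the hardware back-to-back. The same bounded linear-backoff shape as a standalone sketch (the failing operation and the limits are placeholders):

#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 3

/* Placeholder for the operation being retried: fails twice, then succeeds. */
static int try_reset(void)
{
    static int calls;
    return ++calls < 3 ? -1 : 0;
}

int main(void)
{
    int err = try_reset();
    int i;

    for (i = 0; err && i < MAX_RETRIES; i++) {
        usleep(10000 * (i + 1));    /* back off: 10 ms, 20 ms, 30 ms */
        err = try_reset();
    }
    printf("reset %s after %d retr%s\n",
           err ? "failed" : "succeeded", i, i == 1 ? "y" : "ies");
    return err ? 1 : 0;
}
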
/**
@@ -966,8 +1016,6 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* Reset the chip. Useful if a hang is detected. Marks the device as wedged
* on failure.
*
- * Caller must hold the struct_mutex.
- *
* Procedure is fairly simple:
* - reset the chip using the reset reg
* - re-init context state
@@ -977,7 +1025,7 @@ static int do_reset(struct drm_i915_private *i915, unsigned int stalled_mask)
* - re-init display
*/
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason)
{
struct i915_gpu_error *error = &i915->gpu_error;
@@ -990,7 +1038,7 @@ void i915_reset(struct drm_i915_private *i915,
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
/* Clear any previous failed attempts at recovery. Time to try again. */
- if (!i915_gem_unset_wedged(i915))
+ if (!__i915_gem_unset_wedged(i915))
return;
if (reason)
@@ -1007,11 +1055,17 @@ void i915_reset(struct drm_i915_private *i915,
goto error;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_disable_interrupts(i915);
+
if (do_reset(i915, stalled_mask)) {
dev_err(i915->drm.dev, "Failed to reset chip\n");
goto taint;
}
+ if (INTEL_INFO(i915)->gpu_reset_clobbers_display)
+ intel_runtime_pm_enable_interrupts(i915);
+
intel_overlay_reset(i915);
/*
@@ -1033,7 +1087,7 @@ void i915_reset(struct drm_i915_private *i915,
finish:
reset_finish(i915);
- if (!i915_terminally_wedged(error))
+ if (!__i915_wedged(error))
reset_restart(i915);
return;
@@ -1052,14 +1106,14 @@ taint:
*/
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
error:
- i915_gem_set_wedged(i915);
+ __i915_gem_set_wedged(i915);
goto finish;
}
static inline int intel_gt_reset_engine(struct drm_i915_private *i915,
struct intel_engine_cs *engine)
{
- return intel_gpu_reset(i915, intel_engine_flag(engine));
+ return intel_gpu_reset(i915, engine->mask);
}
/**
@@ -1144,7 +1198,12 @@ static void i915_reset_device(struct drm_i915_private *i915,
i915_wedge_on_timeout(&w, i915, 5 * HZ) {
intel_prepare_reset(i915);
+ /* Flush everyone using a resource about to be clobbered */
+ synchronize_srcu_expedited(&error->reset_backoff_srcu);
+
+ mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
+ mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
@@ -1153,44 +1212,50 @@ static void i915_reset_device(struct drm_i915_private *i915,
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_done_event);
}
-void i915_clear_error_registers(struct drm_i915_private *dev_priv)
+static void clear_register(struct intel_uncore *uncore, i915_reg_t reg)
{
+ intel_uncore_rmw(uncore, reg, 0, 0);
+}
+
+void i915_clear_error_registers(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
u32 eir;
- if (!IS_GEN(dev_priv, 2))
- I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER));
+ if (!IS_GEN(i915, 2))
+ clear_register(uncore, PGTBL_ER);
- if (INTEL_GEN(dev_priv) < 4)
- I915_WRITE(IPEIR, I915_READ(IPEIR));
+ if (INTEL_GEN(i915) < 4)
+ clear_register(uncore, IPEIR(RENDER_RING_BASE));
else
- I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965));
+ clear_register(uncore, IPEIR_I965);
- I915_WRITE(EIR, I915_READ(EIR));
- eir = I915_READ(EIR);
+ clear_register(uncore, EIR);
+ eir = intel_uncore_read(uncore, EIR);
if (eir) {
/*
* some errors might have become stuck,
* mask them.
*/
DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir);
- I915_WRITE(EMR, I915_READ(EMR) | eir);
- I915_WRITE(IIR, I915_MASTER_ERROR_INTERRUPT);
+ rmw_set(uncore, EMR, eir);
+ intel_uncore_write(uncore, GEN2_IIR,
+ I915_MASTER_ERROR_INTERRUPT);
}
- if (INTEL_GEN(dev_priv) >= 8) {
- I915_WRITE(GEN8_RING_FAULT_REG,
- I915_READ(GEN8_RING_FAULT_REG) & ~RING_FAULT_VALID);
- POSTING_READ(GEN8_RING_FAULT_REG);
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ if (INTEL_GEN(i915) >= 8) {
+ rmw_clear(uncore, GEN8_RING_FAULT_REG, RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore, GEN8_RING_FAULT_REG);
+ } else if (INTEL_GEN(i915) >= 6) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- I915_WRITE(RING_FAULT_REG(engine),
- I915_READ(RING_FAULT_REG(engine)) &
- ~RING_FAULT_VALID);
+ for_each_engine(engine, i915, id) {
+ rmw_clear(uncore,
+ RING_FAULT_REG(engine), RING_FAULT_VALID);
+ intel_uncore_posting_read(uncore,
+ RING_FAULT_REG(engine));
}
- POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
}
@@ -1208,13 +1273,14 @@ void i915_clear_error_registers(struct drm_i915_private *dev_priv)
* of a ring dump etc.).
*/
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...)
{
+ struct i915_gpu_error *error = &i915->gpu_error;
struct intel_engine_cs *engine;
intel_wakeref_t wakeref;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
char error_msg[80];
char *msg = NULL;
@@ -1237,7 +1303,7 @@ void i915_handle_error(struct drm_i915_private *i915,
*/
wakeref = intel_runtime_pm_get(i915);
- engine_mask &= INTEL_INFO(i915)->ring_mask;
+ engine_mask &= INTEL_INFO(i915)->engine_mask;
if (flags & I915_ERROR_CAPTURE) {
i915_capture_error_state(i915, engine_mask, msg);
@@ -1248,20 +1314,19 @@ void i915_handle_error(struct drm_i915_private *i915,
* Try engine reset when available. We fall back to full reset if
* single reset fails.
*/
- if (intel_has_reset_engine(i915) &&
- !i915_terminally_wedged(&i915->gpu_error)) {
+ if (intel_has_reset_engine(i915) && !__i915_wedged(error)) {
for_each_engine_masked(engine, i915, engine_mask, tmp) {
BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
+ &error->flags))
continue;
if (i915_reset_engine(engine, msg) == 0)
- engine_mask &= ~intel_engine_flag(engine);
+ engine_mask &= ~engine->mask;
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
- wake_up_bit(&i915->gpu_error.flags,
+ &error->flags);
+ wake_up_bit(&error->flags,
I915_RESET_ENGINE + engine->id);
}
}
@@ -1270,18 +1335,20 @@ void i915_handle_error(struct drm_i915_private *i915,
goto out;
/* Full reset needs the mutex, stop any other user trying to do so. */
- if (test_and_set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags)) {
- wait_event(i915->gpu_error.reset_queue,
- !test_bit(I915_RESET_BACKOFF,
- &i915->gpu_error.flags));
- goto out;
+ if (test_and_set_bit(I915_RESET_BACKOFF, &error->flags)) {
+ wait_event(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF, &error->flags));
+ goto out; /* piggy-back on the other reset */
}
+ /* Make sure i915_reset_trylock() sees the I915_RESET_BACKOFF */
+ synchronize_rcu_expedited();
+
/* Prevent any other reset-engine attempt. */
for_each_engine(engine, i915, tmp) {
while (test_and_set_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags))
- wait_on_bit(&i915->gpu_error.flags,
+ &error->flags))
+ wait_on_bit(&error->flags,
I915_RESET_ENGINE + engine->id,
TASK_UNINTERRUPTIBLE);
}
@@ -1290,16 +1357,74 @@ void i915_handle_error(struct drm_i915_private *i915,
for_each_engine(engine, i915, tmp) {
clear_bit(I915_RESET_ENGINE + engine->id,
- &i915->gpu_error.flags);
+ &error->flags);
}
- clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
- wake_up_all(&i915->gpu_error.reset_queue);
+ clear_bit(I915_RESET_BACKOFF, &error->flags);
+ wake_up_all(&error->reset_queue);
out:
intel_runtime_pm_put(i915, wakeref);
}
+int i915_reset_trylock(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+ int srcu;
+
+ might_lock(&error->reset_backoff_srcu);
+ might_sleep();
+
+ rcu_read_lock();
+ while (test_bit(I915_RESET_BACKOFF, &error->flags)) {
+ rcu_read_unlock();
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ rcu_read_lock();
+ }
+ srcu = srcu_read_lock(&error->reset_backoff_srcu);
+ rcu_read_unlock();
+
+ return srcu;
+}
+
+void i915_reset_unlock(struct drm_i915_private *i915, int tag)
+__releases(&i915->gpu_error.reset_backoff_srcu)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ srcu_read_unlock(&error->reset_backoff_srcu, tag);
+}
+
+int i915_terminally_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gpu_error *error = &i915->gpu_error;
+
+ might_sleep();
+
+ if (!__i915_wedged(error))
+ return 0;
+
+ /* Reset still in progress? Maybe we will recover? */
+ if (!test_bit(I915_RESET_BACKOFF, &error->flags))
+ return -EIO;
+
+ /* XXX intel_reset_finish() still takes struct_mutex!!! */
+ if (mutex_is_locked(&i915->drm.struct_mutex))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(error->reset_queue,
+ !test_bit(I915_RESET_BACKOFF,
+ &error->flags)))
+ return -EINTR;
+
+ return __i915_wedged(error) ? -EIO : 0;
+}
+
bool i915_reset_flush(struct drm_i915_private *i915)
{
int err;
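For reference, the retry-with-backoff structure that the new do_reset() adopts can be sketched in isolation. This is an illustrative sketch only, not part of the patch; reset_hw() and MAX_RETRIES are hypothetical stand-ins for intel_gpu_reset() and RESET_MAX_RETRIES.

    #include <linux/delay.h>

    #define MAX_RETRIES 3

    static int retry_reset(void *dev, int (*reset_hw)(void *dev))
    {
            int err, i;

            err = reset_hw(dev);
            for (i = 0; err && i < MAX_RETRIES; i++) {
                    msleep(10 * (i + 1)); /* back off a little longer each attempt */
                    err = reset_hw(dev);
            }

            return err;
    }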
diff --git a/drivers/gpu/drm/i915/i915_reset.h b/drivers/gpu/drm/i915/i915_reset.h
index f2d347f319df..3c0450289b8f 100644
--- a/drivers/gpu/drm/i915/i915_reset.h
+++ b/drivers/gpu/drm/i915/i915_reset.h
@@ -9,14 +9,18 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <linux/srcu.h>
+
+#include "intel_engine_types.h"
struct drm_i915_private;
+struct i915_request;
struct intel_engine_cs;
struct intel_guc;
__printf(4, 5)
void i915_handle_error(struct drm_i915_private *i915,
- u32 engine_mask,
+ intel_engine_mask_t engine_mask,
unsigned long flags,
const char *fmt, ...);
#define I915_ERROR_CAPTURE BIT(0)
@@ -24,7 +28,7 @@ void i915_handle_error(struct drm_i915_private *i915,
void i915_clear_error_registers(struct drm_i915_private *i915);
void i915_reset(struct drm_i915_private *i915,
- unsigned int stalled_mask,
+ intel_engine_mask_t stalled_mask,
const char *reason);
int i915_reset_engine(struct intel_engine_cs *engine,
const char *reason);
@@ -32,10 +36,16 @@ int i915_reset_engine(struct intel_engine_cs *engine,
void i915_reset_request(struct i915_request *rq, bool guilty);
bool i915_reset_flush(struct drm_i915_private *i915);
+int __must_check i915_reset_trylock(struct drm_i915_private *i915);
+void i915_reset_unlock(struct drm_i915_private *i915, int tag);
+
+int i915_terminally_wedged(struct drm_i915_private *i915);
+
bool intel_has_gpu_reset(struct drm_i915_private *i915);
bool intel_has_reset_engine(struct drm_i915_private *i915);
-int intel_gpu_reset(struct drm_i915_private *i915, u32 engine_mask);
+int intel_gpu_reset(struct drm_i915_private *i915,
+ intel_engine_mask_t engine_mask);
int intel_reset_guc(struct drm_i915_private *i915);
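A minimal sketch of how a caller might bracket access with the new i915_reset_trylock()/i915_reset_unlock() pair, assuming this header is included; the body of the critical section is hypothetical.

    static int example_reset_user(struct drm_i915_private *i915)
    {
            int srcu;

            srcu = i915_reset_trylock(i915); /* waits out any pending reset */
            if (srcu < 0)
                    return srcu;             /* -EINTR if interrupted */

            /* ... touch state that a full GPU reset would clobber ... */

            i915_reset_unlock(i915, srcu);
            return 0;
    }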
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 8bc042551692..39bc4f54e272 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -7,9 +7,16 @@
#include <linux/mutex.h>
#include "i915_drv.h"
+#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+static struct i915_global_scheduler {
+ struct i915_global base;
+ struct kmem_cache *slab_dependencies;
+ struct kmem_cache *slab_priorities;
+} global;
+
static DEFINE_SPINLOCK(schedule_lock);
static const struct i915_request *
@@ -18,6 +25,11 @@ node_to_request(const struct i915_sched_node *node)
return container_of(node, const struct i915_request, sched);
}
+static inline bool node_started(const struct i915_sched_node *node)
+{
+ return i915_request_started(node_to_request(node));
+}
+
static inline bool node_signaled(const struct i915_sched_node *node)
{
return i915_request_completed(node_to_request(node));
@@ -29,19 +41,20 @@ void i915_sched_node_init(struct i915_sched_node *node)
INIT_LIST_HEAD(&node->waiters_list);
INIT_LIST_HEAD(&node->link);
node->attr.priority = I915_PRIORITY_INVALID;
+ node->semaphores = 0;
+ node->flags = 0;
}
static struct i915_dependency *
-i915_dependency_alloc(struct drm_i915_private *i915)
+i915_dependency_alloc(void)
{
- return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
+ return kmem_cache_alloc(global.slab_dependencies, GFP_KERNEL);
}
static void
-i915_dependency_free(struct drm_i915_private *i915,
- struct i915_dependency *dep)
+i915_dependency_free(struct i915_dependency *dep)
{
- kmem_cache_free(i915->dependencies, dep);
+ kmem_cache_free(global.slab_dependencies, dep);
}
bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
@@ -51,7 +64,7 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
{
bool ret = false;
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
@@ -60,39 +73,42 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
dep->signaler = signal;
dep->flags = flags;
+ /* Keep track of whether anyone on this chain has a semaphore */
+ if (signal->flags & I915_SCHED_HAS_SEMAPHORE_CHAIN &&
+ !node_started(signal))
+ node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+
ret = true;
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
return ret;
}
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal)
{
struct i915_dependency *dep;
- dep = i915_dependency_alloc(i915);
+ dep = i915_dependency_alloc();
if (!dep)
return -ENOMEM;
if (!__i915_sched_node_add_dependency(node, signal, dep,
I915_DEPENDENCY_ALLOC))
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
return 0;
}
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node)
+void i915_sched_node_fini(struct i915_sched_node *node)
{
struct i915_dependency *dep, *tmp;
GEM_BUG_ON(!list_empty(&node->link));
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
/*
* Everyone we depended upon (the fences we wait to be signaled)
@@ -106,7 +122,7 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
/* Remove ourselves from everyone who depends upon us */
@@ -116,10 +132,10 @@ void i915_sched_node_fini(struct drm_i915_private *i915,
list_del(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
- i915_dependency_free(i915, dep);
+ i915_dependency_free(dep);
}
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
@@ -193,7 +209,7 @@ find_priolist:
if (prio == I915_PRIORITY_NORMAL) {
p = &execlists->default_priolist;
} else {
- p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
+ p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
if (unlikely(!p)) {
prio = I915_PRIORITY_NORMAL; /* recurses just once */
@@ -301,6 +317,10 @@ static void __i915_schedule(struct i915_request *rq,
list_for_each_entry(dep, &dfs, dfs_link) {
struct i915_sched_node *node = dep->signaler;
+ /* If we are already flying, we know we have no signalers */
+ if (node_started(node))
+ continue;
+
/*
* Within an engine, there can be no cycle, but we may
* refer to the same dependency chain multiple times
@@ -313,7 +333,6 @@ static void __i915_schedule(struct i915_request *rq,
if (node_signaled(p->signaler))
continue;
- GEM_BUG_ON(p->signaler->attr.priority < node->attr.priority);
if (prio > READ_ONCE(p->signaler->attr.priority))
list_move_tail(&p->dfs_link, &dfs);
}
@@ -337,7 +356,7 @@ static void __i915_schedule(struct i915_request *rq,
memset(&cache, 0, sizeof(cache));
engine = rq->engine;
- spin_lock_irq(&engine->timeline.lock);
+ spin_lock(&engine->timeline.lock);
/* Fifo and depth-first replacement ensure our deps execute before us */
list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
@@ -388,30 +407,73 @@ static void __i915_schedule(struct i915_request *rq,
tasklet_hi_schedule(&engine->execlists.tasklet);
}
- spin_unlock_irq(&engine->timeline.lock);
+ spin_unlock(&engine->timeline.lock);
}
void i915_schedule(struct i915_request *rq, const struct i915_sched_attr *attr)
{
- spin_lock(&schedule_lock);
+ spin_lock_irq(&schedule_lock);
__i915_schedule(rq, attr);
- spin_unlock(&schedule_lock);
+ spin_unlock_irq(&schedule_lock);
}
void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump)
{
struct i915_sched_attr attr;
+ unsigned long flags;
GEM_BUG_ON(bump & ~I915_PRIORITY_MASK);
if (READ_ONCE(rq->sched.attr.priority) == I915_PRIORITY_INVALID)
return;
- spin_lock_bh(&schedule_lock);
+ spin_lock_irqsave(&schedule_lock, flags);
attr = rq->sched.attr;
attr.priority |= bump;
__i915_schedule(rq, &attr);
- spin_unlock_bh(&schedule_lock);
+ spin_unlock_irqrestore(&schedule_lock, flags);
+}
+
+void __i915_priolist_free(struct i915_priolist *p)
+{
+ kmem_cache_free(global.slab_priorities, p);
+}
+
+static void i915_global_scheduler_shrink(void)
+{
+ kmem_cache_shrink(global.slab_dependencies);
+ kmem_cache_shrink(global.slab_priorities);
+}
+
+static void i915_global_scheduler_exit(void)
+{
+ kmem_cache_destroy(global.slab_dependencies);
+ kmem_cache_destroy(global.slab_priorities);
+}
+
+static struct i915_global_scheduler global = { {
+ .shrink = i915_global_scheduler_shrink,
+ .exit = i915_global_scheduler_exit,
+} };
+
+int __init i915_global_scheduler_init(void)
+{
+ global.slab_dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_dependencies)
+ return -ENOMEM;
+
+ global.slab_priorities = KMEM_CACHE(i915_priolist,
+ SLAB_HWCACHE_ALIGN);
+ if (!global.slab_priorities)
+ goto err_priorities;
+
+ i915_global_register(&global.base);
+ return 0;
+
+err_priorities:
+ kmem_cache_destroy(global.slab_dependencies);
+ return -ENOMEM;
}
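The scheduler now draws its i915_dependency and i915_priolist objects from module-wide slab caches registered with the i915_globals machinery instead of per-device caches. A hedged, stand-alone sketch of that pattern follows; the struct and function names are hypothetical, only i915_global, i915_global_register() and KMEM_CACHE() are taken from the patch.

    struct my_obj { u64 payload; };

    static struct my_global {
            struct i915_global base;   /* supplies .shrink and .exit hooks */
            struct kmem_cache *slab_objs;
    } myg;

    static void my_global_shrink(void)
    {
            kmem_cache_shrink(myg.slab_objs);
    }

    static void my_global_exit(void)
    {
            kmem_cache_destroy(myg.slab_objs);
    }

    int __init my_global_init(void)
    {
            myg.base.shrink = my_global_shrink;
            myg.base.exit = my_global_exit;

            myg.slab_objs = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN);
            if (!myg.slab_objs)
                    return -ENOMEM;

            i915_global_register(&myg.base); /* hooked into global shrink/exit */
            return 0;
    }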
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index dbe9cb7ecd82..07d243acf553 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -8,80 +8,22 @@
#define _I915_SCHEDULER_H_
#include <linux/bitops.h>
+#include <linux/list.h>
#include <linux/kernel.h>
-#include <uapi/drm/i915_drm.h>
+#include "i915_scheduler_types.h"
-struct drm_i915_private;
-struct i915_request;
-struct intel_engine_cs;
+#define priolist_for_each_request(it, plist, idx) \
+ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+ list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-enum {
- I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
- I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
- I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
-
- I915_PRIORITY_INVALID = INT_MIN
-};
-
-#define I915_USER_PRIORITY_SHIFT 2
-#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
-
-#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
-#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
-
-#define I915_PRIORITY_WAIT ((u8)BIT(0))
-#define I915_PRIORITY_NEWCLIENT ((u8)BIT(1))
-
-struct i915_sched_attr {
- /**
- * @priority: execution and service priority
- *
- * All clients are equal, but some are more equal than others!
- *
- * Requests from a context with a greater (more positive) value of
- * @priority will be executed before those with a lower @priority
- * value, forming a simple QoS.
- *
- * The &drm_i915_private.kernel_context is assigned the lowest priority.
- */
- int priority;
-};
-
-/*
- * "People assume that time is a strict progression of cause to effect, but
- * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
- * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
- *
- * Requests exist in a complex web of interdependencies. Each request
- * has to wait for some other request to complete before it is ready to be run
- * (e.g. we have to wait until the pixels have been rendering into a texture
- * before we can copy from it). We track the readiness of a request in terms
- * of fences, but we also need to keep the dependency tree for the lifetime
- * of the request (beyond the life of an individual fence). We use the tree
- * at various points to reorder the requests whilst keeping the requests
- * in order with respect to their various dependencies.
- *
- * There is no active component to the "scheduler". As we know the dependency
- * DAG of each request, we are able to insert it into a sorted queue when it
- * is ready, and are able to reorder its portion of the graph to accommodate
- * dynamic priority changes.
- */
-struct i915_sched_node {
- struct list_head signalers_list; /* those before us, we depend upon */
- struct list_head waiters_list; /* those after us, they depend upon us */
- struct list_head link;
- struct i915_sched_attr attr;
-};
-
-struct i915_dependency {
- struct i915_sched_node *signaler;
- struct list_head signal_link;
- struct list_head wait_link;
- struct list_head dfs_link;
- unsigned long flags;
-#define I915_DEPENDENCY_ALLOC BIT(0)
-};
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+ for (; \
+ (plist)->used ? (idx = __ffs((plist)->used)), 1 : 0; \
+ (plist)->used &= ~BIT(idx)) \
+ list_for_each_entry_safe(it, n, \
+ &(plist)->requests[idx], \
+ sched.link)
void i915_sched_node_init(struct i915_sched_node *node);
@@ -90,12 +32,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_dependency *dep,
unsigned long flags);
-int i915_sched_node_add_dependency(struct drm_i915_private *i915,
- struct i915_sched_node *node,
+int i915_sched_node_add_dependency(struct i915_sched_node *node,
struct i915_sched_node *signal);
-void i915_sched_node_fini(struct drm_i915_private *i915,
- struct i915_sched_node *node);
+void i915_sched_node_fini(struct i915_sched_node *node);
void i915_schedule(struct i915_request *request,
const struct i915_sched_attr *attr);
@@ -105,4 +45,11 @@ void i915_schedule_bump_priority(struct i915_request *rq, unsigned int bump);
struct list_head *
i915_sched_lookup_priolist(struct intel_engine_cs *engine, int prio);
+void __i915_priolist_free(struct i915_priolist *p);
+static inline void i915_priolist_free(struct i915_priolist *p)
+{
+ if (p->priority != I915_PRIORITY_NORMAL)
+ __i915_priolist_free(p);
+}
+
#endif /* _I915_SCHEDULER_H_ */
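The consuming iterator above only visits sub-lists whose bit is set in plist->used, clearing each bit once it has been drained. The underlying bit-scan idiom, as a hedged sketch with hypothetical names:

    static void consume_mask(struct list_head *requests, unsigned long used)
    {
            unsigned int idx;

            while (used) {
                    idx = __ffs(used);     /* index of the lowest set bit */
                    /* ... drain requests[idx] here ... */
                    used &= ~BIT(idx);     /* mark that sub-list as consumed */
            }
    }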
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
new file mode 100644
index 000000000000..f1af3916a808
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef _I915_SCHEDULER_TYPES_H_
+#define _I915_SCHEDULER_TYPES_H_
+
+#include <linux/list.h>
+
+#include "i915_priolist_types.h"
+#include "intel_engine_types.h"
+
+struct drm_i915_private;
+struct i915_request;
+struct intel_engine_cs;
+
+struct i915_sched_attr {
+ /**
+ * @priority: execution and service priority
+ *
+ * All clients are equal, but some are more equal than others!
+ *
+ * Requests from a context with a greater (more positive) value of
+ * @priority will be executed before those with a lower @priority
+ * value, forming a simple QoS.
+ *
+ * The &drm_i915_private.kernel_context is assigned the lowest priority.
+ */
+ int priority;
+};
+
+/*
+ * "People assume that time is a strict progression of cause to effect, but
+ * actually, from a nonlinear, non-subjective viewpoint, it's more like a big
+ * ball of wibbly-wobbly, timey-wimey ... stuff." -The Doctor, 2015
+ *
+ * Requests exist in a complex web of interdependencies. Each request
+ * has to wait for some other request to complete before it is ready to be run
+ * (e.g. we have to wait until the pixels have been rendered into a texture
+ * before we can copy from it). We track the readiness of a request in terms
+ * of fences, but we also need to keep the dependency tree for the lifetime
+ * of the request (beyond the life of an individual fence). We use the tree
+ * at various points to reorder the requests whilst keeping the requests
+ * in order with respect to their various dependencies.
+ *
+ * There is no active component to the "scheduler". As we know the dependency
+ * DAG of each request, we are able to insert it into a sorted queue when it
+ * is ready, and are able to reorder its portion of the graph to accommodate
+ * dynamic priority changes.
+ */
+struct i915_sched_node {
+ struct list_head signalers_list; /* those before us, we depend upon */
+ struct list_head waiters_list; /* those after us, they depend upon us */
+ struct list_head link;
+ struct i915_sched_attr attr;
+ unsigned int flags;
+#define I915_SCHED_HAS_SEMAPHORE_CHAIN BIT(0)
+ intel_engine_mask_t semaphores;
+};
+
+struct i915_dependency {
+ struct i915_sched_node *signaler;
+ struct list_head signal_link;
+ struct list_head wait_link;
+ struct list_head dfs_link;
+ unsigned long flags;
+#define I915_DEPENDENCY_ALLOC BIT(0)
+};
+
+#endif /* _I915_SCHEDULER_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index d2f2a9c2fabd..95f3dab1b229 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -25,8 +25,10 @@
*/
#include <drm/i915_drm.h>
-#include "intel_drv.h"
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
static void i915_save_display(struct drm_i915_private *dev_priv)
{
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 7c58b049ecb5..5387aafd3424 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -192,7 +192,7 @@ static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
__i915_sw_fence_notify(fence, FENCE_FREE);
}
-static void i915_sw_fence_complete(struct i915_sw_fence *fence)
+void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
@@ -202,7 +202,7 @@ static void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-static void i915_sw_fence_await(struct i915_sw_fence *fence)
+void i915_sw_fence_await(struct i915_sw_fence *fence)
{
debug_fence_assert(fence);
WARN_ON(atomic_inc_return(&fence->pending) <= 1);
@@ -359,11 +359,6 @@ int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
-struct i915_sw_dma_fence_cb {
- struct dma_fence_cb base;
- struct i915_sw_fence *fence;
-};
-
struct i915_sw_dma_fence_cb_timer {
struct i915_sw_dma_fence_cb base;
struct dma_fence *dma;
@@ -480,6 +475,40 @@ int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
return ret;
}
+static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
+ struct dma_fence_cb *data)
+{
+ struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
+
+ i915_sw_fence_complete(cb->fence);
+}
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb)
+{
+ int ret;
+
+ debug_fence_assert(fence);
+
+ if (dma_fence_is_signaled(dma))
+ return 0;
+
+ cb->fence = fence;
+ i915_sw_fence_await(fence);
+
+ ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
+ if (ret == 0) {
+ ret = 1;
+ } else {
+ i915_sw_fence_complete(fence);
+ if (ret == -ENOENT) /* fence already signaled */
+ ret = 0;
+ }
+
+ return ret;
+}
+
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
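The new __i915_sw_fence_await_dma_fence() lets the caller embed the dma-fence callback rather than allocating one, giving a fixed footprint at the cost of the timeout handling provided by i915_sw_fence_await_dma_fence(). A hedged usage sketch; the containing struct is hypothetical.

    struct my_waiter {
            struct i915_sw_fence submit;
            struct i915_sw_dma_fence_cb cb; /* storage for the await */
    };

    static int my_await(struct my_waiter *w, struct dma_fence *dma)
    {
            /*
             * Returns 0 if dma is already signaled, 1 if a callback was
             * installed, or a negative error code.
             */
            return __i915_sw_fence_await_dma_fence(&w->submit, dma, &w->cb);
    }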
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 0e055ea0179f..9cb5c3b307a6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -9,14 +9,13 @@
#ifndef _I915_SW_FENCE_H_
#define _I915_SW_FENCE_H_
+#include <linux/dma-fence.h>
#include <linux/gfp.h>
#include <linux/kref.h>
#include <linux/notifier.h> /* for NOTIFY_DONE */
#include <linux/wait.h>
struct completion;
-struct dma_fence;
-struct dma_fence_ops;
struct reservation_object;
struct i915_sw_fence {
@@ -68,10 +67,20 @@ int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
struct i915_sw_fence *after,
gfp_t gfp);
+
+struct i915_sw_dma_fence_cb {
+ struct dma_fence_cb base;
+ struct i915_sw_fence *fence;
+};
+
+int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+ struct dma_fence *dma,
+ struct i915_sw_dma_fence_cb *cb);
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
struct dma_fence *dma,
unsigned long timeout,
gfp_t gfp);
+
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
struct reservation_object *resv,
const struct dma_fence_ops *exclude,
@@ -79,6 +88,9 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
+void i915_sw_fence_await(struct i915_sw_fence *fence);
+void i915_sw_fence_complete(struct i915_sw_fence *fence);
+
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
{
return atomic_read(&fence->pending) <= 0;
diff --git a/drivers/gpu/drm/i915/i915_timeline.c b/drivers/gpu/drm/i915/i915_timeline.c
index b2202d2e58a2..5fbea0892f33 100644
--- a/drivers/gpu/drm/i915/i915_timeline.c
+++ b/drivers/gpu/drm/i915/i915_timeline.c
@@ -6,19 +6,32 @@
#include "i915_drv.h"
-#include "i915_timeline.h"
+#include "i915_active.h"
#include "i915_syncmap.h"
+#include "i915_timeline.h"
+
+#define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit)))
+#define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit))
struct i915_timeline_hwsp {
- struct i915_vma *vma;
+ struct i915_gt_timelines *gt;
struct list_head free_link;
+ struct i915_vma *vma;
u64 free_bitmap;
};
-static inline struct i915_timeline_hwsp *
-i915_timeline_hwsp(const struct i915_timeline *tl)
+struct i915_timeline_cacheline {
+ struct i915_active active;
+ struct i915_timeline_hwsp *hwsp;
+ void *vaddr;
+#define CACHELINE_BITS 6
+#define CACHELINE_FREE CACHELINE_BITS
+};
+
+static inline struct drm_i915_private *
+hwsp_to_i915(struct i915_timeline_hwsp *hwsp)
{
- return tl->hwsp_ggtt->private;
+ return container_of(hwsp->gt, struct drm_i915_private, gt.timelines);
}
static struct i915_vma *__hwsp_alloc(struct drm_i915_private *i915)
@@ -71,6 +84,7 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
vma->private = hwsp;
hwsp->vma = vma;
hwsp->free_bitmap = ~0ull;
+ hwsp->gt = gt;
spin_lock(&gt->hwsp_lock);
list_add(&hwsp->free_link, &gt->hwsp_free_list);
@@ -88,14 +102,9 @@ hwsp_alloc(struct i915_timeline *timeline, unsigned int *cacheline)
return hwsp->vma;
}
-static void hwsp_free(struct i915_timeline *timeline)
+static void __idle_hwsp_free(struct i915_timeline_hwsp *hwsp, int cacheline)
{
- struct i915_gt_timelines *gt = &timeline->i915->gt.timelines;
- struct i915_timeline_hwsp *hwsp;
-
- hwsp = i915_timeline_hwsp(timeline);
- if (!hwsp) /* leave global HWSP alone! */
- return;
+ struct i915_gt_timelines *gt = hwsp->gt;
spin_lock(&gt->hwsp_lock);
@@ -103,7 +112,8 @@ static void hwsp_free(struct i915_timeline *timeline)
if (!hwsp->free_bitmap)
list_add_tail(&hwsp->free_link, &gt->hwsp_free_list);
- hwsp->free_bitmap |= BIT_ULL(timeline->hwsp_offset / CACHELINE_BYTES);
+ GEM_BUG_ON(cacheline >= BITS_PER_TYPE(hwsp->free_bitmap));
+ hwsp->free_bitmap |= BIT_ULL(cacheline);
/* And if no one is left using it, give the page back to the system */
if (hwsp->free_bitmap == ~0ull) {
@@ -115,9 +125,78 @@ static void hwsp_free(struct i915_timeline *timeline)
spin_unlock(&gt->hwsp_lock);
}
+static void __idle_cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(!i915_active_is_idle(&cl->active));
+
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ i915_vma_put(cl->hwsp->vma);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
+ i915_active_fini(&cl->active);
+ kfree(cl);
+}
+
+static void __cacheline_retire(struct i915_active *active)
+{
+ struct i915_timeline_cacheline *cl =
+ container_of(active, typeof(*cl), active);
+
+ i915_vma_unpin(cl->hwsp->vma);
+ if (ptr_test_bit(cl->vaddr, CACHELINE_FREE))
+ __idle_cacheline_free(cl);
+}
+
+static struct i915_timeline_cacheline *
+cacheline_alloc(struct i915_timeline_hwsp *hwsp, unsigned int cacheline)
+{
+ struct i915_timeline_cacheline *cl;
+ void *vaddr;
+
+ GEM_BUG_ON(cacheline >= BIT(CACHELINE_BITS));
+
+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
+ if (!cl)
+ return ERR_PTR(-ENOMEM);
+
+ vaddr = i915_gem_object_pin_map(hwsp->vma->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ kfree(cl);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_vma_get(hwsp->vma);
+ cl->hwsp = hwsp;
+ cl->vaddr = page_pack_bits(vaddr, cacheline);
+
+ i915_active_init(hwsp_to_i915(hwsp), &cl->active, __cacheline_retire);
+
+ return cl;
+}
+
+static void cacheline_acquire(struct i915_timeline_cacheline *cl)
+{
+ if (cl && i915_active_acquire(&cl->active))
+ __i915_vma_pin(cl->hwsp->vma);
+}
+
+static void cacheline_release(struct i915_timeline_cacheline *cl)
+{
+ if (cl)
+ i915_active_release(&cl->active);
+}
+
+static void cacheline_free(struct i915_timeline_cacheline *cl)
+{
+ GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
+ cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
+
+ if (i915_active_is_idle(&cl->active))
+ __idle_cacheline_free(cl);
+}
+
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *timeline,
- const char *name,
struct i915_vma *hwsp)
{
void *vaddr;
@@ -133,37 +212,47 @@ int i915_timeline_init(struct drm_i915_private *i915,
BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
timeline->i915 = i915;
- timeline->name = name;
timeline->pin_count = 0;
timeline->has_initial_breadcrumb = !hwsp;
+ timeline->hwsp_cacheline = NULL;
- timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
if (!hwsp) {
+ struct i915_timeline_cacheline *cl;
unsigned int cacheline;
hwsp = hwsp_alloc(timeline, &cacheline);
if (IS_ERR(hwsp))
return PTR_ERR(hwsp);
+ cl = cacheline_alloc(hwsp->private, cacheline);
+ if (IS_ERR(cl)) {
+ __idle_hwsp_free(hwsp->private, cacheline);
+ return PTR_ERR(cl);
+ }
+
+ timeline->hwsp_cacheline = cl;
timeline->hwsp_offset = cacheline * CACHELINE_BYTES;
- }
- timeline->hwsp_ggtt = i915_vma_get(hwsp);
- vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
- if (IS_ERR(vaddr)) {
- hwsp_free(timeline);
- i915_vma_put(hwsp);
- return PTR_ERR(vaddr);
+ vaddr = page_mask_bits(cl->vaddr);
+ } else {
+ timeline->hwsp_offset = I915_GEM_HWS_SEQNO_ADDR;
+
+ vaddr = i915_gem_object_pin_map(hwsp->obj, I915_MAP_WB);
+ if (IS_ERR(vaddr))
+ return PTR_ERR(vaddr);
}
timeline->hwsp_seqno =
memset(vaddr + timeline->hwsp_offset, 0, CACHELINE_BYTES);
+ timeline->hwsp_ggtt = i915_vma_get(hwsp);
+ GEM_BUG_ON(timeline->hwsp_offset >= hwsp->size);
+
timeline->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
@@ -236,18 +325,19 @@ void i915_timeline_fini(struct i915_timeline *timeline)
{
GEM_BUG_ON(timeline->pin_count);
GEM_BUG_ON(!list_empty(&timeline->requests));
- GEM_BUG_ON(i915_active_request_isset(&timeline->barrier));
i915_syncmap_free(&timeline->sync);
- hwsp_free(timeline);
- i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+ if (timeline->hwsp_cacheline)
+ cacheline_free(timeline->hwsp_cacheline);
+ else
+ i915_gem_object_unpin_map(timeline->hwsp_ggtt->obj);
+
i915_vma_put(timeline->hwsp_ggtt);
}
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp)
{
struct i915_timeline *timeline;
@@ -257,7 +347,7 @@ i915_timeline_create(struct drm_i915_private *i915,
if (!timeline)
return ERR_PTR(-ENOMEM);
- err = i915_timeline_init(i915, timeline, name, global_hwsp);
+ err = i915_timeline_init(i915, timeline, global_hwsp);
if (err) {
kfree(timeline);
return ERR_PTR(err);
@@ -284,6 +374,7 @@ int i915_timeline_pin(struct i915_timeline *tl)
i915_ggtt_offset(tl->hwsp_ggtt) +
offset_in_page(tl->hwsp_offset);
+ cacheline_acquire(tl->hwsp_cacheline);
timeline_add_to_active(tl);
return 0;
@@ -293,6 +384,157 @@ unpin:
return err;
}
+static u32 timeline_advance(struct i915_timeline *tl)
+{
+ GEM_BUG_ON(!tl->pin_count);
+ GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
+
+ return tl->seqno += 1 + tl->has_initial_breadcrumb;
+}
+
+static void timeline_rollback(struct i915_timeline *tl)
+{
+ tl->seqno -= 1 + tl->has_initial_breadcrumb;
+}
+
+static noinline int
+__i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ struct i915_timeline_cacheline *cl;
+ unsigned int cacheline;
+ struct i915_vma *vma;
+ void *vaddr;
+ int err;
+
+ /*
+ * If there is an outstanding GPU reference to this cacheline,
+ * such as it being sampled by a HW semaphore on another timeline,
+ * we cannot wraparound our seqno value (the HW semaphore does
+ * a strict greater-than-or-equals compare, not i915_seqno_passed).
+ * So if the cacheline is still busy, we must detach ourselves
+ * from it and leave it inflight alongside its users.
+ *
+ * However, if nobody is watching and we can guarantee that nobody
+ * will, we could simply reuse the same cacheline.
+ *
+ * if (i915_active_request_is_signaled(&tl->last_request) &&
+ * i915_active_is_signaled(&tl->hwsp_cacheline->active))
+ * return 0;
+ *
+ * That seems unlikely for a busy timeline that needed to wrap in
+ * the first place, so just replace the cacheline.
+ */
+
+ vma = hwsp_alloc(tl, &cacheline);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_rollback;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ if (err) {
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_rollback;
+ }
+
+ cl = cacheline_alloc(vma->private, cacheline);
+ if (IS_ERR(cl)) {
+ err = PTR_ERR(cl);
+ __idle_hwsp_free(vma->private, cacheline);
+ goto err_unpin;
+ }
+ GEM_BUG_ON(cl->hwsp->vma != vma);
+
+ /*
+ * Attach the old cacheline to the current request, so that we only
+ * free it after the current request is retired, which ensures that
+ * all writes into the cacheline from previous requests are complete.
+ */
+ err = i915_active_ref(&tl->hwsp_cacheline->active,
+ tl->fence_context, rq);
+ if (err)
+ goto err_cacheline;
+
+ cacheline_release(tl->hwsp_cacheline); /* ownership now xfered to rq */
+ cacheline_free(tl->hwsp_cacheline);
+
+ i915_vma_unpin(tl->hwsp_ggtt); /* binding kept alive by old cacheline */
+ i915_vma_put(tl->hwsp_ggtt);
+
+ tl->hwsp_ggtt = i915_vma_get(vma);
+
+ vaddr = page_mask_bits(cl->vaddr);
+ tl->hwsp_offset = cacheline * CACHELINE_BYTES;
+ tl->hwsp_seqno =
+ memset(vaddr + tl->hwsp_offset, 0, CACHELINE_BYTES);
+
+ tl->hwsp_offset += i915_ggtt_offset(vma);
+
+ cacheline_acquire(cl);
+ tl->hwsp_cacheline = cl;
+
+ *seqno = timeline_advance(tl);
+ GEM_BUG_ON(i915_seqno_passed(*tl->hwsp_seqno, *seqno));
+ return 0;
+
+err_cacheline:
+ cacheline_free(cl);
+err_unpin:
+ i915_vma_unpin(vma);
+err_rollback:
+ timeline_rollback(tl);
+ return err;
+}
+
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno)
+{
+ *seqno = timeline_advance(tl);
+
+ /* Replace the HWSP on wraparound for HW semaphores */
+ if (unlikely(!*seqno && tl->hwsp_cacheline))
+ return __i915_timeline_get_seqno(tl, rq, seqno);
+
+ return 0;
+}
+
+static int cacheline_ref(struct i915_timeline_cacheline *cl,
+ struct i915_request *rq)
+{
+ return i915_active_ref(&cl->active, rq->fence.context, rq);
+}
+
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *to,
+ u32 *hwsp)
+{
+ struct i915_timeline_cacheline *cl = from->hwsp_cacheline;
+ struct i915_timeline *tl = from->timeline;
+ int err;
+
+ GEM_BUG_ON(to->timeline == tl);
+
+ mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING);
+ err = i915_request_completed(from);
+ if (!err)
+ err = cacheline_ref(cl, to);
+ if (!err) {
+ if (likely(cl == tl->hwsp_cacheline)) {
+ *hwsp = tl->hwsp_offset;
+ } else { /* across a seqno wrap, recover the original offset */
+ *hwsp = i915_ggtt_offset(cl->hwsp->vma) +
+ ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) *
+ CACHELINE_BYTES;
+ }
+ }
+ mutex_unlock(&tl->mutex);
+
+ return err;
+}
+
void i915_timeline_unpin(struct i915_timeline *tl)
{
GEM_BUG_ON(!tl->pin_count);
@@ -300,6 +542,7 @@ void i915_timeline_unpin(struct i915_timeline *tl)
return;
timeline_remove_from_active(tl);
+ cacheline_release(tl->hwsp_cacheline);
/*
* Since this timeline is idle, all barriers upon which we were waiting
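The cacheline bookkeeping above packs the cacheline index (and later a FREE flag) into the low bits of the kmapped page address, relying on CACHELINE_BYTES alignment; the driver uses its page_pack_bits()/ptr_unmask_bits() helpers for this. A self-contained sketch of the same tagging scheme under those alignment assumptions, with hypothetical names:

    #define MY_CACHELINE_BITS 6  /* 64 cachelines of 64 bytes per page */

    /* pointer must be at least BIT(MY_CACHELINE_BITS)-byte aligned */
    static void *pack(void *page_vaddr, unsigned int cacheline)
    {
            return (void *)((unsigned long)page_vaddr | cacheline);
    }

    static void *unpack_ptr(void *packed)
    {
            return (void *)((unsigned long)packed & ~(BIT(MY_CACHELINE_BITS) - 1));
    }

    static unsigned int unpack_cacheline(void *packed)
    {
            return (unsigned long)packed & (BIT(MY_CACHELINE_BITS) - 1);
    }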
diff --git a/drivers/gpu/drm/i915/i915_timeline.h b/drivers/gpu/drm/i915/i915_timeline.h
index 7bec7d2e45bf..27668a1a69a3 100644
--- a/drivers/gpu/drm/i915/i915_timeline.h
+++ b/drivers/gpu/drm/i915/i915_timeline.h
@@ -25,76 +25,14 @@
#ifndef I915_TIMELINE_H
#define I915_TIMELINE_H
-#include <linux/list.h>
-#include <linux/kref.h>
+#include <linux/lockdep.h>
#include "i915_active.h"
-#include "i915_request.h"
#include "i915_syncmap.h"
-#include "i915_utils.h"
-
-struct i915_vma;
-struct i915_timeline_hwsp;
-
-struct i915_timeline {
- u64 fence_context;
- u32 seqno;
-
- spinlock_t lock;
-#define TIMELINE_CLIENT 0 /* default subclass */
-#define TIMELINE_ENGINE 1
-
- unsigned int pin_count;
- const u32 *hwsp_seqno;
- struct i915_vma *hwsp_ggtt;
- u32 hwsp_offset;
-
- bool has_initial_breadcrumb;
-
- /**
- * List of breadcrumbs associated with GPU requests currently
- * outstanding.
- */
- struct list_head requests;
-
- /* Contains an RCU guarded pointer to the last request. No reference is
- * held to the request, users must carefully acquire a reference to
- * the request using i915_active_request_get_request_rcu(), or hold the
- * struct_mutex.
- */
- struct i915_active_request last_request;
-
- /**
- * We track the most recent seqno that we wait on in every context so
- * that we only have to emit a new await and dependency on a more
- * recent sync point. As the contexts may be executed out-of-order, we
- * have to track each individually and can not rely on an absolute
- * global_seqno. When we know that all tracked fences are completed
- * (i.e. when the driver is idle), we know that the syncmap is
- * redundant and we can discard it without loss of generality.
- */
- struct i915_syncmap *sync;
-
- /**
- * Barrier provides the ability to serialize ordering between different
- * timelines.
- *
- * Users can call i915_timeline_set_barrier which will make all
- * subsequent submissions to this timeline be executed only after the
- * barrier has been completed.
- */
- struct i915_active_request barrier;
-
- struct list_head link;
- const char *name;
- struct drm_i915_private *i915;
-
- struct kref kref;
-};
+#include "i915_timeline_types.h"
int i915_timeline_init(struct drm_i915_private *i915,
struct i915_timeline *tl,
- const char *name,
struct i915_vma *hwsp);
void i915_timeline_fini(struct i915_timeline *tl);
@@ -119,7 +57,6 @@ i915_timeline_set_subclass(struct i915_timeline *timeline,
struct i915_timeline *
i915_timeline_create(struct drm_i915_private *i915,
- const char *name,
struct i915_vma *global_hwsp);
static inline struct i915_timeline *
@@ -160,25 +97,17 @@ static inline bool i915_timeline_sync_is_later(struct i915_timeline *tl,
}
int i915_timeline_pin(struct i915_timeline *tl);
+int i915_timeline_get_seqno(struct i915_timeline *tl,
+ struct i915_request *rq,
+ u32 *seqno);
void i915_timeline_unpin(struct i915_timeline *tl);
+int i915_timeline_read_hwsp(struct i915_request *from,
+ struct i915_request *until,
+ u32 *hwsp_offset);
+
void i915_timelines_init(struct drm_i915_private *i915);
void i915_timelines_park(struct drm_i915_private *i915);
void i915_timelines_fini(struct drm_i915_private *i915);
-/**
- * i915_timeline_set_barrier - orders submission between different timelines
- * @timeline: timeline to set the barrier on
- * @rq: request after which new submissions can proceed
- *
- * Sets the passed in request as the serialization point for all subsequent
- * submissions on @timeline. Subsequent requests will not be submitted to GPU
- * until the barrier has been completed.
- */
-static inline int
-i915_timeline_set_barrier(struct i915_timeline *tl, struct i915_request *rq)
-{
- return i915_active_request_set(&tl->barrier, rq);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_timeline_types.h b/drivers/gpu/drm/i915/i915_timeline_types.h
new file mode 100644
index 000000000000..5256a0b5c5f7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_timeline_types.h
@@ -0,0 +1,70 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2016 Intel Corporation
+ */
+
+#ifndef __I915_TIMELINE_TYPES_H__
+#define __I915_TIMELINE_TYPES_H__
+
+#include <linux/list.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_private;
+struct i915_vma;
+struct i915_timeline_cacheline;
+struct i915_syncmap;
+
+struct i915_timeline {
+ u64 fence_context;
+ u32 seqno;
+
+ spinlock_t lock;
+#define TIMELINE_CLIENT 0 /* default subclass */
+#define TIMELINE_ENGINE 1
+ struct mutex mutex; /* protects the flow of requests */
+
+ unsigned int pin_count;
+ const u32 *hwsp_seqno;
+ struct i915_vma *hwsp_ggtt;
+ u32 hwsp_offset;
+
+ struct i915_timeline_cacheline *hwsp_cacheline;
+
+ bool has_initial_breadcrumb;
+
+ /**
+ * List of breadcrumbs associated with GPU requests currently
+ * outstanding.
+ */
+ struct list_head requests;
+
+ /* Contains an RCU guarded pointer to the last request. No reference is
+ * held to the request, users must carefully acquire a reference to
+ * the request using i915_active_request_get_request_rcu(), or hold the
+ * struct_mutex.
+ */
+ struct i915_active_request last_request;
+
+ /**
+ * We track the most recent seqno that we wait on in every context so
+ * that we only have to emit a new await and dependency on a more
+ * recent sync point. As the contexts may be executed out-of-order, we
+ * have to track each individually and can not rely on an absolute
+ * global_seqno. When we know that all tracked fences are completed
+ * (i.e. when the driver is idle), we know that the syncmap is
+ * redundant and we can discard it without loss of generality.
+ */
+ struct i915_syncmap *sync;
+
+ struct list_head link;
+ struct drm_i915_private *i915;
+
+ struct kref kref;
+};
+
+#endif /* __I915_TIMELINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index eab313c3163c..12893304c8f8 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -18,6 +18,87 @@
/* watermark/fifo updates */
+TRACE_EVENT(intel_pipe_enable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c enable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_disable,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(enum pipe, pipe)
+ ),
+
+ TP_fast_assign(
+ enum pipe _pipe;
+ for_each_pipe(dev_priv, _pipe) {
+ __entry->frame[_pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, _pipe);
+ __entry->scanline[_pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, _pipe));
+ }
+ __entry->pipe = pipe;
+ ),
+
+ TP_printk("pipe %c disable, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(intel_pipe_crc,
+ TP_PROTO(struct intel_crtc *crtc, const u32 *crcs),
+ TP_ARGS(crtc, crcs),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(u32, crcs, 5)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->crcs, crcs, sizeof(__entry->crcs));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u crc=%08x %08x %08x %08x %08x",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline,
+ __entry->crcs[0], __entry->crcs[1], __entry->crcs[2],
+ __entry->crcs[3], __entry->crcs[4])
+);
+
TRACE_EVENT(intel_cpu_fifo_underrun,
TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
TP_ARGS(dev_priv, pipe),
@@ -627,7 +708,6 @@ DECLARE_EVENT_CLASS(i915_request,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
),
TP_fast_assign(
@@ -637,13 +717,11 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global)
+ __entry->hw_id, __entry->ctx, __entry->seqno)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -673,7 +751,6 @@ TRACE_EVENT(i915_request_in,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, port)
__field(u32, prio)
),
@@ -685,15 +762,14 @@ TRACE_EVENT(i915_request_in,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->prio = rq->sched.attr.priority;
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, global=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->prio, __entry->global_seqno, __entry->port)
+ __entry->prio, __entry->port)
);
TRACE_EVENT(i915_request_out,
@@ -707,7 +783,6 @@ TRACE_EVENT(i915_request_out,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global_seqno)
__field(u32, completed)
),
@@ -718,14 +793,13 @@ TRACE_EVENT(i915_request_out,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global_seqno = rq->global_seqno;
__entry->completed = i915_request_completed(rq);
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, completed?=%u",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global_seqno, __entry->completed)
+ __entry->completed)
);
#else
@@ -768,7 +842,6 @@ TRACE_EVENT(i915_request_wait_begin,
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
- __field(u32, global)
__field(unsigned int, flags)
),
@@ -785,14 +858,13 @@ TRACE_EVENT(i915_request_wait_begin,
__entry->instance = rq->engine->instance;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
- __entry->global = rq->global_seqno;
__entry->flags = flags;
),
- TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, blocking=%u, flags=0x%x",
__entry->dev, __entry->class, __entry->instance,
__entry->hw_id, __entry->ctx, __entry->seqno,
- __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ !!(__entry->flags & I915_WAIT_LOCKED),
__entry->flags)
);
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c
new file mode 100644
index 000000000000..c822d0aafd2d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.c
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include <linux/nospec.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <uapi/drm/i915_drm.h>
+
+#include "i915_user_extensions.h"
+#include "i915_utils.h"
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data)
+{
+ unsigned int stackdepth = 512;
+
+ while (ext) {
+ int i, err;
+ u32 name;
+ u64 next;
+
+ if (!stackdepth--) /* recursion vs useful flexibility */
+ return -E2BIG;
+
+ err = check_user_mbz(&ext->flags);
+ if (err)
+ return err;
+
+ for (i = 0; i < ARRAY_SIZE(ext->rsvd); i++) {
+ err = check_user_mbz(&ext->rsvd[i]);
+ if (err)
+ return err;
+ }
+
+ if (get_user(name, &ext->name))
+ return -EFAULT;
+
+ err = -EINVAL;
+ if (name < count) {
+ name = array_index_nospec(name, count);
+ if (tbl[name])
+ err = tbl[name](ext, data);
+ }
+ if (err)
+ return err;
+
+ if (get_user(next, &ext->next_extension) ||
+ overflows_type(next, ext))
+ return -EFAULT;
+
+ ext = u64_to_user_ptr(next);
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/i915_user_extensions.h b/drivers/gpu/drm/i915/i915_user_extensions.h
new file mode 100644
index 000000000000..a14bf6bba9a1
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_user_extensions.h
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#ifndef I915_USER_EXTENSIONS_H
+#define I915_USER_EXTENSIONS_H
+
+struct i915_user_extension;
+
+typedef int (*i915_user_extension_fn)(struct i915_user_extension __user *ext,
+ void *data);
+
+int i915_user_extensions(struct i915_user_extension __user *ext,
+ const i915_user_extension_fn *tbl,
+ unsigned int count,
+ void *data);
+
+#endif /* I915_USER_EXTENSIONS_H */
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 540e20eb032c..2dbe8933b50a 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -105,6 +105,37 @@
__T; \
})
+/*
+ * container_of_user: Extract the superclass from a pointer to a member.
+ *
+ * Exactly like container_of() with the exception that it plays nicely
+ * with sparse for __user @ptr.
+ */
+#define container_of_user(ptr, type, member) ({ \
+ void __user *__mptr = (void __user *)(ptr); \
+ BUILD_BUG_ON_MSG(!__same_type(*(ptr), ((type *)0)->member) && \
+ !__same_type(*(ptr), void), \
+ "pointer type mismatch in container_of()"); \
+ ((type __user *)(__mptr - offsetof(type, member))); })
+
+/*
+ * check_user_mbz: Check that a user value exists and is zero
+ *
+ * Frequently in our uABI we reserve space for future extensions, and
+ * two ensure that userspace is prepared we enforce that space must
+ * be zero. (Then any future extension can safely assume a default value
+ * of 0.)
+ *
+ * check_user_mbz() combines checking that the user pointer is accessible
+ * and that the contained value is zero.
+ *
+ * Returns: -EFAULT if not accessible, -EINVAL if !zero, or 0 on success.
+ */
+#define check_user_mbz(U) ({ \
+ typeof(*(U)) mbz__; \
+ get_user(mbz__, (U)) ? -EFAULT : mbz__ ? -EINVAL : 0; \
+})
+
static inline u64 ptr_to_u64(const void *ptr)
{
return (uintptr_t)ptr;
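check_user_mbz() above folds the access check and the must-be-zero check into one step. A hedged example of validating the reserved field of a hypothetical user structure:

    struct my_user_arg {
            __u32 handle;
            __u32 rsvd;  /* must be zero for forward compatibility */
    };

    static int validate_arg(struct my_user_arg __user *arg)
    {
            /* -EFAULT if unreadable, -EINVAL if non-zero, 0 otherwise */
            return check_user_mbz(&arg->rsvd);
    }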
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 869cf4a3b6de..94d3992b599d 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -60,30 +60,31 @@
*/
void i915_check_vgpu(struct drm_i915_private *dev_priv)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 magic;
u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
- magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
+ magic = __raw_uncore_read64(uncore, vgtif_reg(magic));
if (magic != VGT_MAGIC)
return;
- version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+ version_major = __raw_uncore_read16(uncore, vgtif_reg(version_major));
if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n");
return;
}
- dev_priv->vgpu.caps = __raw_i915_read32(dev_priv, vgtif_reg(vgt_caps));
+ dev_priv->vgpu.caps = __raw_uncore_read32(uncore, vgtif_reg(vgt_caps));
dev_priv->vgpu.active = true;
DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
}
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv)
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
- return dev_priv->vgpu.caps & VGT_CAPS_FULL_48BIT_PPGTT;
+ return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
struct _balloon_info_ {
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 551acc390046..ebe1b7bced98 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -28,7 +28,7 @@
void i915_check_vgpu(struct drm_i915_private *dev_priv);
-bool intel_vgpu_has_full_48bit_ppgtt(struct drm_i915_private *dev_priv);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
static inline bool
intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 41b5bcb803cb..961268f66c63 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -25,11 +25,27 @@
#include "i915_vma.h"
#include "i915_drv.h"
+#include "i915_globals.h"
#include "intel_ringbuffer.h"
#include "intel_frontbuffer.h"
#include <drm/drm_gem.h>
+static struct i915_global_vma {
+ struct i915_global base;
+ struct kmem_cache *slab_vmas;
+} global;
+
+struct i915_vma *i915_vma_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+}
+
+void i915_vma_free(struct i915_vma *vma)
+{
+ return kmem_cache_free(global.slab_vmas, vma);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
#include <linux/stackdepot.h>
@@ -112,7 +128,7 @@ vma_create(struct drm_i915_gem_object *obj,
/* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
- vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+ vma = i915_vma_alloc();
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -187,7 +203,7 @@ vma_create(struct drm_i915_gem_object *obj,
cmp = i915_vma_compare(pos, vm, view);
if (cmp == 0) {
spin_unlock(&obj->vma.lock);
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return pos;
}
@@ -219,7 +235,7 @@ vma_create(struct drm_i915_gem_object *obj,
return vma;
err_vma:
- kmem_cache_free(vm->i915->vmas, vma);
+ i915_vma_free(vma);
return ERR_PTR(-E2BIG);
}
@@ -800,8 +816,6 @@ void i915_vma_reopen(struct i915_vma *vma)
static void __i915_vma_destroy(struct i915_vma *vma)
{
- struct drm_i915_private *i915 = vma->vm->i915;
-
GEM_BUG_ON(vma->node.allocated);
GEM_BUG_ON(vma->fence);
@@ -822,7 +836,7 @@ static void __i915_vma_destroy(struct i915_vma *vma)
i915_active_fini(&vma->active);
- kmem_cache_free(i915->vmas, vma);
+ i915_vma_free(vma);
}
void i915_vma_destroy(struct i915_vma *vma)
@@ -1038,3 +1052,28 @@ unpin:
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_vma.c"
#endif
+
+static void i915_global_vma_shrink(void)
+{
+ kmem_cache_shrink(global.slab_vmas);
+}
+
+static void i915_global_vma_exit(void)
+{
+ kmem_cache_destroy(global.slab_vmas);
+}
+
+static struct i915_global_vma global = { {
+ .shrink = i915_global_vma_shrink,
+ .exit = i915_global_vma_exit,
+} };
+
+int __init i915_global_vma_init(void)
+{
+ global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_vmas)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
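
The hunks above replace the per-device vmas kmem_cache with a driver-global slab hidden behind i915_vma_alloc()/i915_vma_free(), registered with the i915 globals so it can be shrunk or destroyed together with the rest. A minimal standalone sketch of the same wrap-the-allocator pattern follows; it substitutes calloc/free for kmem_cache_zalloc/kmem_cache_free and uses made-up names, so it only illustrates the lifecycle, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct i915_vma. */
struct vma { unsigned long start, size; };

/* Global "cache" state, analogous to the static global in i915_vma.c. */
static struct {
	size_t obj_size;
	unsigned long allocated;
} global;

static int vma_cache_init(void)
{
	global.obj_size = sizeof(struct vma);
	global.allocated = 0;
	return 0;
}

/* Zeroed allocation from the cache, like kmem_cache_zalloc(). */
static struct vma *vma_alloc(void)
{
	struct vma *v = calloc(1, global.obj_size);

	if (v)
		global.allocated++;
	return v;
}

static void vma_free(struct vma *v)
{
	if (v)
		global.allocated--;
	free(v);
}

static void vma_cache_exit(void)
{
	/* A real slab would be destroyed here; just report leaks. */
	printf("outstanding objects: %lu\n", global.allocated);
}

int main(void)
{
	struct vma *v;

	vma_cache_init();
	v = vma_alloc();
	v->start = 0x1000;
	v->size = 0x2000;
	vma_free(v);
	vma_cache_exit();
	return 0;
}
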
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 7c742027f866..6eab70953a57 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -440,4 +440,7 @@ void i915_vma_parked(struct drm_i915_private *i915);
list_for_each_entry(V, &(OBJ)->vma.list, obj_link) \
for_each_until(!i915_vma_is_ggtt(V))
+struct i915_vma *i915_vma_alloc(void);
+void i915_vma_free(struct i915_vma *vma);
+
#endif
diff --git a/drivers/gpu/drm/i915/icl_dsi.c b/drivers/gpu/drm/i915/icl_dsi.c
index 641e0778fa9c..9d962ea1e635 100644
--- a/drivers/gpu/drm/i915/icl_dsi.c
+++ b/drivers/gpu/drm/i915/icl_dsi.c
@@ -25,9 +25,13 @@
* Jani Nikula <jani.nikula@intel.com>
*/
-#include <drm/drm_mipi_dsi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "intel_connector.h"
+#include "intel_ddi.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
@@ -246,13 +250,13 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
}
@@ -399,11 +403,11 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~LOADGEN_SELECT;
I915_WRITE(ICL_PORT_TX_DW4_AUX(port), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(port, lane));
+ tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, port));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, lane), tmp);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(lane, port), tmp);
}
}
@@ -876,7 +880,8 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE,
I965_PIPECONF_ACTIVE, 10))
DRM_ERROR("DSI transcoder not enabled\n");
@@ -1054,7 +1059,8 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
I915_WRITE(PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
- if (intel_wait_for_register(dev_priv, PIPECONF(dsi_trans),
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 0, 50))
DRM_ERROR("DSI trancoder not disabled\n");
}
@@ -1146,13 +1152,11 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
intel_wakeref_t wakeref;
wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
- if (wakeref) {
- intel_display_power_put(dev_priv,
- port == PORT_A ?
- POWER_DOMAIN_PORT_DDI_A_IO :
- POWER_DOMAIN_PORT_DDI_B_IO,
- wakeref);
- }
+ intel_display_power_put(dev_priv,
+ port == PORT_A ?
+ POWER_DOMAIN_PORT_DDI_A_IO :
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ wakeref);
}
/* set mode to DDI */
@@ -1194,11 +1198,10 @@ static void gen11_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u32 pll_id;
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
- pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ pipe_config->port_clock =
+ cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
pipe_config->base.adjusted_mode.crtc_clock = intel_dsi->pclk;
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
}
@@ -1367,7 +1370,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1417,15 +1420,8 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
/* attach connector to encoder */
intel_connector_attach_encoder(intel_connector, encoder);
- /* fill mode info from VBT */
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1433,12 +1429,9 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
-
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
else
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 7cf9290ea34a..8c8fae32ec50 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -35,6 +35,8 @@
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_sprite.h"
/**
* intel_digital_connector_atomic_get_property - hook for connector->atomic_get_property.
@@ -126,6 +128,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
*/
if (new_conn_state->force_audio != old_conn_state->force_audio ||
new_conn_state->broadcast_rgb != old_conn_state->broadcast_rgb ||
+ new_conn_state->base.colorspace != old_conn_state->base.colorspace ||
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode)
@@ -234,10 +237,11 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
if (plane_state && plane_state->base.fb &&
plane_state->base.fb->format->is_yuv &&
plane_state->base.fb->format->num_planes > 1) {
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
if (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv)) {
mode = SKL_PS_SCALER_MODE_NV12;
- } else if (icl_is_hdr_plane(to_intel_plane(plane_state->base.plane))) {
+ } else if (icl_is_hdr_plane(dev_priv, plane->id)) {
/*
* On gen11+'s HDR planes we only use the scaler for
* scaling. They have a dedicated chroma upsampler, so
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index db0965904439..d11681d71add 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -35,7 +35,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
+#include "intel_atomic_plane.h"
#include "intel_drv.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
struct intel_plane *intel_plane_alloc(void)
{
@@ -121,6 +124,7 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
+ new_crtc_state->c8_planes &= ~BIT(plane->id);
new_plane_state->base.visible = false;
if (!new_plane_state->base.crtc && !old_plane_state->base.crtc)
@@ -135,9 +139,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
new_crtc_state->active_planes |= BIT(plane->id);
if (new_plane_state->base.visible &&
- new_plane_state->base.fb->format->format == DRM_FORMAT_NV12)
+ is_planar_yuv_format(new_plane_state->base.fb->format->format))
new_crtc_state->nv12_planes |= BIT(plane->id);
+ if (new_plane_state->base.visible &&
+ new_plane_state->base.fb->format->format == DRM_FORMAT_C8)
+ new_crtc_state->c8_planes |= BIT(plane->id);
+
if (new_plane_state->base.visible || old_plane_state->base.visible)
new_crtc_state->update_planes |= BIT(plane->id);
@@ -214,6 +222,35 @@ skl_next_plane_to_commit(struct intel_atomic_state *state,
return NULL;
}
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_plane(plane, crtc_state, plane_state);
+}
+
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_update_plane(&plane->base, crtc);
+ plane->update_slave(plane, crtc_state, plane_state);
+}
+
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ trace_intel_disable_plane(&plane->base, crtc);
+ plane->disable_plane(plane, crtc_state);
+}
+
void skl_update_planes_on_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -238,8 +275,7 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
} else if (new_plane_state->slave) {
struct intel_plane *master =
new_plane_state->linked_plane;
@@ -256,11 +292,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state,
new_plane_state =
intel_atomic_get_new_plane_state(state, master);
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_slave(plane, new_crtc_state, new_plane_state);
+ intel_update_slave(plane, new_crtc_state, new_plane_state);
} else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
}
}
}
@@ -280,13 +314,10 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- if (new_plane_state->base.visible) {
- trace_intel_update_plane(&plane->base, crtc);
- plane->update_plane(plane, new_crtc_state, new_plane_state);
- } else {
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, new_crtc_state);
- }
+ if (new_plane_state->base.visible)
+ intel_update_plane(plane, new_crtc_state, new_plane_state);
+ else
+ intel_disable_plane(plane, new_crtc_state);
}
}
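
The update/disable paths above are folded into intel_update_plane(), intel_update_slave() and intel_disable_plane() so that every caller emits the tracepoint and then dispatches through the per-plane hook. A standalone sketch of that trace-then-dispatch wrapper pattern, with printf standing in for the tracepoints and invented plane hooks:

#include <stdio.h>

struct plane {
	const char *name;
	/* Per-plane hooks, like intel_plane->update_plane/disable_plane. */
	void (*update)(struct plane *p);
	void (*disable)(struct plane *p);
};

/* Wrappers: trace once here instead of at every call site. */
static void plane_update(struct plane *p)
{
	printf("trace: update %s\n", p->name);	/* stands in for trace_intel_update_plane() */
	p->update(p);
}

static void plane_disable(struct plane *p)
{
	printf("trace: disable %s\n", p->name);	/* stands in for trace_intel_disable_plane() */
	p->disable(p);
}

static void skl_update(struct plane *p) { printf("%s: program plane registers\n", p->name); }
static void skl_disable(struct plane *p) { printf("%s: clear plane enable bit\n", p->name); }

int main(void)
{
	struct plane primary = { "primary", skl_update, skl_disable };

	plane_update(&primary);		/* visible plane */
	plane_disable(&primary);	/* plane turned off */
	return 0;
}
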
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.h b/drivers/gpu/drm/i915/intel_atomic_plane.h
new file mode 100644
index 000000000000..14678620440f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ATOMIC_PLANE_H__
+#define __INTEL_ATOMIC_PLANE_H__
+
+struct drm_plane;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct intel_plane_state;
+
+extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+
+void intel_update_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_update_slave(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+void intel_disable_plane(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+struct intel_plane *intel_plane_alloc(void);
+void intel_plane_free(struct intel_plane *plane);
+struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
+void intel_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void skl_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *old_plane_state,
+ struct intel_plane_state *intel_state);
+
+#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 5104c6bbd66f..bca4cc025d3d 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -21,14 +21,16 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/kernel.h>
#include <linux/component.h>
+#include <linux/kernel.h>
+
+#include <drm/drm_edid.h>
#include <drm/i915_component.h>
#include <drm/intel_lpe_audio.h>
-#include "intel_drv.h"
-#include <drm/drm_edid.h>
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_drv.h"
/**
* DOC: High Definition Audio over HDMI and Display Port
@@ -741,27 +743,91 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
-static void i915_audio_component_get_power(struct device *kdev)
+static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
+ bool enable)
{
- intel_display_power_get(kdev_to_i915(kdev), POWER_DOMAIN_AUDIO);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ int ret;
+
+ drm_modeset_acquire_init(&ctx, 0);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (WARN_ON(!state))
+ return;
+
+ state->acquire_ctx = &ctx;
+
+retry:
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
+ to_intel_atomic_state(state)->cdclk.force_min_cdclk =
+ enable ? 2 * 96000 : 0;
+
+ /*
+ * Protects dev_priv->cdclk.force_min_cdclk
+ * Need to lock this here in case we have no active pipes
+ * and thus wouldn't lock it during the commit otherwise.
+ */
+ ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
+ &ctx);
+ if (!ret)
+ ret = drm_atomic_commit(state);
+
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ WARN_ON(ret);
+
+ drm_atomic_state_put(state);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
}
-static void i915_audio_component_put_power(struct device *kdev)
+static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- intel_display_power_put_unchecked(kdev_to_i915(kdev),
- POWER_DOMAIN_AUDIO);
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ intel_wakeref_t ret;
+
+ /* Catch potential impedance mismatches before they occur! */
+ BUILD_BUG_ON(sizeof(intel_wakeref_t) > sizeof(unsigned long));
+
+ ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+
+ /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */
+ if (dev_priv->audio_power_refcount++ == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, true);
+
+ return ret;
+}
+
+static void i915_audio_component_put_power(struct device *kdev,
+ unsigned long cookie)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+
+ /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
+ if (--dev_priv->audio_power_refcount == 0)
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ glk_force_audio_cdclk(dev_priv, false);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie);
}
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
+ unsigned long cookie;
u32 tmp;
if (!IS_GEN(dev_priv, 9))
return;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
/*
* Enable/disable generating the codec wake signal, overriding the
@@ -779,7 +845,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
usleep_range(1000, 1500);
}
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
}
/* Get CDCLK in kHz */
@@ -850,12 +916,13 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
struct i915_audio_component *acomp = dev_priv->audio_component;
struct intel_encoder *encoder;
struct intel_crtc *crtc;
+ unsigned long cookie;
int err = 0;
if (!HAS_DDI(dev_priv))
return 0;
- i915_audio_component_get_power(kdev);
+ cookie = i915_audio_component_get_power(kdev);
mutex_lock(&dev_priv->av_mutex);
/* 1. get the pipe */
@@ -875,7 +942,7 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
unlock:
mutex_unlock(&dev_priv->av_mutex);
- i915_audio_component_put_power(kdev);
+ i915_audio_component_put_power(kdev, cookie);
return err;
}
@@ -980,7 +1047,7 @@ static const struct component_ops i915_audio_component_bind_ops = {
* We ignore any error during registration and continue with reduced
* functionality (i.e. without HDMI audio).
*/
-void i915_audio_component_init(struct drm_i915_private *dev_priv)
+static void i915_audio_component_init(struct drm_i915_private *dev_priv)
{
int ret;
@@ -1003,7 +1070,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
* Deregisters the audio component, breaking any existing binding to the
* corresponding snd_hda_intel driver's master component.
*/
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
+static void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
{
if (!dev_priv->audio_component_registered)
return;
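
With the change above, i915_audio_component_get_power() returns the display power wakeref as an opaque cookie and, on the first reference, forces CDCLK to 2*BCLK; the matching put drops the forcing on the last reference and releases the wakeref handed back to it. A standalone sketch of the refcount-plus-cookie idea (the counter, the fake wakeref and all names here are illustrative only):

#include <stdio.h>

static int audio_power_refcount;
static unsigned long next_wakeref = 1;

static void force_audio_cdclk(int enable)
{
	printf("CDCLK %s to 2*BCLK\n", enable ? "forced" : "released");
}

/* Returns a cookie the caller must hand back to audio_put_power(). */
static unsigned long audio_get_power(void)
{
	unsigned long cookie = next_wakeref++;	/* stands in for intel_display_power_get() */

	if (audio_power_refcount++ == 0)
		force_audio_cdclk(1);

	return cookie;
}

static void audio_put_power(unsigned long cookie)
{
	if (--audio_power_refcount == 0)
		force_audio_cdclk(0);

	printf("released wakeref %lu\n", cookie);	/* stands in for intel_display_power_put() */
}

int main(void)
{
	unsigned long a = audio_get_power();	/* first reference: forces CDCLK */
	unsigned long b = audio_get_power();	/* refcount only */

	audio_put_power(b);
	audio_put_power(a);			/* last put releases the forcing */
	return 0;
}
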
diff --git a/drivers/gpu/drm/i915/intel_audio.h b/drivers/gpu/drm/i915/intel_audio.h
new file mode 100644
index 000000000000..a3657c7a7ba2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_AUDIO_H__
+#define __INTEL_AUDIO_H__
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_encoder;
+
+void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
+void intel_audio_codec_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_audio_codec_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_deinit(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_AUDIO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4364f42cac6b..1dc8d03ff127 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -760,6 +760,31 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = psr_table->tp1_wakeup_time * 100;
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = psr_table->tp2_tp3_wakeup_time * 100;
}
+
+ if (bdb->version >= 226) {
+ u32 wakeup_time = psr_table->psr2_tp2_tp3_wakeup_time;
+
+ wakeup_time = (wakeup_time >> (2 * panel_type)) & 0x3;
+ switch (wakeup_time) {
+ case 0:
+ wakeup_time = 500;
+ break;
+ case 1:
+ wakeup_time = 100;
+ break;
+ case 3:
+ wakeup_time = 50;
+ break;
+ default:
+ case 2:
+ wakeup_time = 2500;
+ break;
+ }
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = wakeup_time;
+ } else {
+ /* Reusing PSR1 wakeup time for PSR2 in older VBTs */
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us = dev_priv->vbt.psr.tp2_tp3_wakeup_time_us;
+ }
}
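
For VBT version 226 and newer the PSR2 TP2/TP3 wakeup time is stored as a 2-bit field per panel and decoded to microseconds exactly as in the switch above. A standalone decoder with the same mapping (0 -> 500 us, 1 -> 100 us, 2 -> 2500 us, 3 -> 50 us), where panel_type selects which two bits to read:

#include <stdio.h>

/* Decode the per-panel 2-bit PSR2 TP2/TP3 wakeup-time field (VBT >= 226). */
static unsigned int psr2_tp2_tp3_wakeup_time_us(unsigned int field, int panel_type)
{
	switch ((field >> (2 * panel_type)) & 0x3) {
	case 0:
		return 500;
	case 1:
		return 100;
	case 3:
		return 50;
	default:
	case 2:
		return 2500;
	}
}

int main(void)
{
	/* Example: panels 0..3 encoded as 0b11100100 -> 500, 100, 2500, 50 us. */
	unsigned int field = 0xE4;
	int panel;

	for (panel = 0; panel < 4; panel++)
		printf("panel %d: %u us\n", panel,
		       psr2_tp2_tp3_wakeup_time_us(field, panel));
	return 0;
}
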
static void parse_dsi_backlight_ports(struct drm_i915_private *dev_priv,
@@ -1222,10 +1247,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
if (!info->alternate_ddc_pin)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_ddc_pin != i->alternate_ddc_pin)
+ if (p == port || !i->present ||
+ info->alternate_ddc_pin != i->alternate_ddc_pin)
continue;
DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
@@ -1239,8 +1265,8 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same ddc bin and
* system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dvi = false;
i->supports_hdmi = false;
@@ -1258,10 +1284,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel)
return;
- for_each_port_masked(p, (1 << port) - 1) {
+ for (p = PORT_A; p < I915_MAX_PORTS; p++) {
struct ddi_vbt_port_info *i = &dev_priv->vbt.ddi_port_info[p];
- if (info->alternate_aux_channel != i->alternate_aux_channel)
+ if (p == port || !i->present ||
+ info->alternate_aux_channel != i->alternate_aux_channel)
continue;
DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
@@ -1275,8 +1302,8 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
* port. Otherwise they share the same aux channel
* and system couldn't communicate with them separately.
*
- * Due to parsing the ports in alphabetical order,
- * a higher port will always clobber a lower one.
+ * Due to parsing the ports in child device order,
+ * a later device will always clobber an earlier one.
*/
i->supports_dp = false;
i->alternate_aux_channel = 0;
@@ -1324,48 +1351,57 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
return 0;
}
-static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
- u8 bdb_version)
+static enum port dvo_port_to_port(u8 dvo_port)
{
- struct child_device_config *it, *child = NULL;
- struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
- int i, j;
- bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
- /* Each DDI port can have more than one value on the "DVO Port" field,
+ /*
+ * Each DDI port can have more than one value on the "DVO Port" field,
* so look for all the possible values for each port.
*/
- int dvo_ports[][3] = {
- {DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
- {DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
- {DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
- {DVO_PORT_HDMID, DVO_PORT_DPD, -1},
- {DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
- {DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
+ static const int dvo_ports[][3] = {
+ [PORT_A] = { DVO_PORT_HDMIA, DVO_PORT_DPA, -1},
+ [PORT_B] = { DVO_PORT_HDMIB, DVO_PORT_DPB, -1},
+ [PORT_C] = { DVO_PORT_HDMIC, DVO_PORT_DPC, -1},
+ [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1},
+ [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE},
+ [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1},
};
+ enum port port;
+ int i;
- /*
- * Find the first child device to reference the port, report if more
- * than one found.
- */
- for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
- it = dev_priv->vbt.child_dev + i;
-
- for (j = 0; j < 3; j++) {
- if (dvo_ports[port][j] == -1)
+ for (port = PORT_A; port < ARRAY_SIZE(dvo_ports); port++) {
+ for (i = 0; i < ARRAY_SIZE(dvo_ports[port]); i++) {
+ if (dvo_ports[port][i] == -1)
break;
- if (it->dvo_port == dvo_ports[port][j]) {
- if (child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
- } else {
- child = it;
- }
- }
+ if (dvo_port == dvo_ports[port][i])
+ return port;
}
}
- if (!child)
+
+ return PORT_NONE;
+}
+
+static void parse_ddi_port(struct drm_i915_private *dev_priv,
+ const struct child_device_config *child,
+ u8 bdb_version)
+{
+ struct ddi_vbt_port_info *info;
+ bool is_dvi, is_hdmi, is_dp, is_edp, is_crt;
+ enum port port;
+
+ port = dvo_port_to_port(child->dvo_port);
+ if (port == PORT_NONE)
+ return;
+
+ info = &dev_priv->vbt.ddi_port_info[port];
+
+ if (info->present) {
+ DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
+ }
+
+ info->present = true;
is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING;
is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT;
@@ -1498,19 +1534,20 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
static void parse_ddi_ports(struct drm_i915_private *dev_priv, u8 bdb_version)
{
- enum port port;
+ const struct child_device_config *child;
+ int i;
if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv))
return;
- if (!dev_priv->vbt.child_dev_num)
- return;
-
if (bdb_version < 155)
return;
- for (port = PORT_A; port < I915_MAX_PORTS; port++)
- parse_ddi_port(dev_priv, port, bdb_version);
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ child = dev_priv->vbt.child_dev + i;
+
+ parse_ddi_port(dev_priv, child, bdb_version);
+ }
}
static void
@@ -2094,8 +2131,8 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
dvo_port = child->dvo_port;
if (dvo_port == DVO_PORT_MIPIA ||
- (dvo_port == DVO_PORT_MIPIB && IS_ICELAKE(dev_priv)) ||
- (dvo_port == DVO_PORT_MIPIC && !IS_ICELAKE(dev_priv))) {
+ (dvo_port == DVO_PORT_MIPIB && INTEL_GEN(dev_priv) >= 11) ||
+ (dvo_port == DVO_PORT_MIPIC && INTEL_GEN(dev_priv) < 11)) {
if (port)
*port = dvo_port - DVO_PORT_MIPIA;
return true;
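
The intel_bios.c rework above iterates the VBT child devices and maps each child's DVO port back to a DDI port with dvo_port_to_port(), returning PORT_NONE when no candidate matches. A standalone sketch of that reverse table lookup; the enum values are illustrative, not the real DVO_PORT_* codes:

#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_NONE };

/* Illustrative DVO port codes; the real values come from the VBT spec. */
enum { DVO_HDMIA = 1, DVO_DPA, DVO_HDMIB, DVO_DPB, DVO_HDMIC, DVO_DPC };

static enum port dvo_port_to_port(int dvo_port)
{
	/* Each DDI port can be referenced by more than one DVO port value. */
	static const int dvo_ports[][3] = {
		[PORT_A] = { DVO_HDMIA, DVO_DPA, -1 },
		[PORT_B] = { DVO_HDMIB, DVO_DPB, -1 },
		[PORT_C] = { DVO_HDMIC, DVO_DPC, -1 },
	};
	enum port port;
	int i;

	for (port = PORT_A; port < PORT_NONE; port++) {
		for (i = 0; i < 3; i++) {
			if (dvo_ports[port][i] == -1)
				break;
			if (dvo_port == dvo_ports[port][i])
				return port;
		}
	}

	return PORT_NONE;
}

int main(void)
{
	printf("DVO_DPB -> port %d (expect %d)\n", dvo_port_to_port(DVO_DPB), PORT_B);
	printf("unknown -> port %d (expect %d)\n", dvo_port_to_port(99), PORT_NONE);
	return 0;
}
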
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 09ed90c0ba00..3cbffd400b1b 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,8 +27,6 @@
#include "i915_drv.h"
-#define task_asleep(tsk) ((tsk)->state & TASK_NORMAL && !(tsk)->on_rq)
-
static void irq_enable(struct intel_engine_cs *engine)
{
if (!engine->irq_enable)
@@ -82,7 +80,7 @@ static inline bool __request_completed(const struct i915_request *rq)
return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct intel_context *ce, *cn;
@@ -146,19 +144,13 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
dma_fence_signal(&rq->fence);
i915_request_put(rq);
}
-
- return !list_empty(&signal);
}
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine)
{
- bool result;
-
local_irq_disable();
- result = intel_engine_breadcrumbs_irq(engine);
+ intel_engine_breadcrumbs_irq(engine);
local_irq_enable();
-
- return result;
}
static void signal_irq_work(struct irq_work *work)
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 15ba950dee00..ae40a8679314 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_cdclk.h"
#include "intel_drv.h"
/**
@@ -234,7 +235,8 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+ tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
+ HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
@@ -468,7 +470,7 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
cdclk_state->vco);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
mutex_unlock(&dev_priv->pcu_lock);
if (IS_VALLEYVIEW(dev_priv))
@@ -516,7 +518,8 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -543,11 +546,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK;
val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -598,7 +601,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val, cmd = cdclk_state->voltage_level;
@@ -624,11 +628,11 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
val &= ~DSPFREQGUAR_MASK_CHV;
val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
DRM_ERROR("timed out waiting for CDclk change\n");
@@ -697,7 +701,8 @@ static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
u32 val;
@@ -964,7 +969,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("DPLL0 not locked\n");
@@ -978,16 +983,17 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
DRM_ERROR("Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1123,16 +1129,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * skl_init_cdclk - Initialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for SKL and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1158,17 +1155,10 @@ void skl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * skl_uninit_cdclk - Uninitialize CDCLK on SKL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for SKL and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1176,7 +1166,7 @@ void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state);
+ skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int bxt_calc_cdclk(int min_cdclk)
@@ -1323,7 +1313,7 @@ static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
I915_WRITE(BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
1))
DRM_ERROR("timeout waiting for DE PLL unlock\n");
@@ -1344,7 +1334,7 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DE_PLL_ENABLE,
BXT_DE_PLL_LOCK,
BXT_DE_PLL_LOCK,
@@ -1355,7 +1345,8 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1408,11 +1399,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
bxt_de_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
/*
* Disable SSA Precharge when CD clock frequency < 500 MHz,
* enable otherwise.
@@ -1421,6 +1411,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
mutex_lock(&dev_priv->pcu_lock);
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1490,16 +1483,7 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-/**
- * bxt_init_cdclk - Initialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for BXT and derivatives. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -1525,17 +1509,10 @@ void bxt_init_cdclk(struct drm_i915_private *dev_priv)
}
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * bxt_uninit_cdclk - Uninitialize CDCLK on BXT
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for BXT and derivatives. This is done only
- * during the display core uninitialization sequence.
- */
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -1543,7 +1520,7 @@ void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state);
+ bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
static int cnl_calc_cdclk(int min_cdclk)
@@ -1663,7 +1640,8 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
}
static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
int cdclk = cdclk_state->cdclk;
int vco = cdclk_state->vco;
@@ -1704,13 +1682,15 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv,
cnl_cdclk_pll_enable(dev_priv, vco);
val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ if (pipe == INVALID_PIPE)
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ else
+ val |= BXT_CDCLK_CD2X_PIPE(pipe);
I915_WRITE(CDCLK_CTL, val);
+ if (pipe != INVALID_PIPE)
+ intel_wait_for_vblank(dev_priv, pipe);
+
/* inform PCU of the change */
mutex_lock(&dev_priv->pcu_lock);
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
@@ -1847,7 +1827,8 @@ static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
}
static void icl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
unsigned int cdclk = cdclk_state->cdclk;
unsigned int vco = cdclk_state->vco;
@@ -1872,6 +1853,11 @@ static void icl_set_cdclk(struct drm_i915_private *dev_priv,
if (dev_priv->cdclk.hw.vco != vco)
cnl_cdclk_pll_enable(dev_priv, vco);
+ /*
+ * On ICL CD2X_DIV can only be 1, so we'll never end up changing the
+ * divider here synchronized to a pipe while CDCLK is on, nor will we
+ * need the corresponding vblank wait.
+ */
I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE |
skl_cdclk_decimal(cdclk));
@@ -1959,16 +1945,7 @@ out:
icl_calc_voltage_level(cdclk_state->cdclk);
}
-/**
- * icl_init_cdclk - Initialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for ICL. This consists mainly of initializing
- * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This
- * is generally done only during the display core initialization sequence, after
- * which the DMC will take care of turning CDCLK off/on as needed.
- */
-void icl_init_cdclk(struct drm_i915_private *dev_priv)
+static void icl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state sanitized_state;
u32 val;
@@ -2002,17 +1979,10 @@ sanitize:
sanitized_state.voltage_level =
icl_calc_voltage_level(sanitized_state.cdclk);
- icl_set_cdclk(dev_priv, &sanitized_state);
+ icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE);
}
-/**
- * icl_uninit_cdclk - Uninitialize CDCLK on ICL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for ICL. This is done only during the display core
- * uninitialization sequence.
- */
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2020,19 +1990,10 @@ void icl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = icl_calc_voltage_level(cdclk_state.cdclk);
- icl_set_cdclk(dev_priv, &cdclk_state);
+ icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_init_cdclk - Initialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Initialize CDCLK for CNL. This is generally
- * done only during the display core initialization sequence,
- * after which the DMC will take care of turning CDCLK off/on
- * as needed.
- */
-void cnl_init_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_init_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state;
@@ -2048,17 +2009,10 @@ void cnl_init_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
}
-/**
- * cnl_uninit_cdclk - Uninitialize CDCLK on CNL
- * @dev_priv: i915 device
- *
- * Uninitialize CDCLK for CNL. This is done only
- * during the display core uninitialization sequence.
- */
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
@@ -2066,7 +2020,47 @@ void cnl_uninit_cdclk(struct drm_i915_private *dev_priv)
cdclk_state.vco = 0;
cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk);
- cnl_set_cdclk(dev_priv, &cdclk_state);
+ cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+}
+
+/**
+ * intel_cdclk_init - Initialize CDCLK
+ * @i915: i915 device
+ *
+ * Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
+ * sanitizing the state of the hardware if needed. This is generally done only
+ * during the display core initialization sequence, after which the DMC will
+ * take care of turning CDCLK off/on as needed.
+ */
+void intel_cdclk_init(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_init_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_init_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_init_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_init_cdclk(i915);
+}
+
+/**
+ * intel_cdclk_uninit - Uninitialize CDCLK
+ * @i915: i915 device
+ *
+ * Uninitialize CDCLK. This is done only during the display core
+ * uninitialization sequence.
+ */
+void intel_cdclk_uninit(struct drm_i915_private *i915)
+{
+ if (INTEL_GEN(i915) >= 11)
+ icl_uninit_cdclk(i915);
+ else if (IS_CANNONLAKE(i915))
+ cnl_uninit_cdclk(i915);
+ else if (IS_GEN9_BC(i915))
+ skl_uninit_cdclk(i915);
+ else if (IS_GEN9_LP(i915))
+ bxt_uninit_cdclk(i915);
}
/**
@@ -2086,6 +2080,28 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
+ * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
+ * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states require just a cd2x divider update, false if not.
+ */
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ /* Older hw doesn't have the capability */
+ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
+ return false;
+
+ return a->cdclk != b->cdclk &&
+ a->vco == b->vco &&
+ a->ref == b->ref;
+}
+
+/**
* intel_cdclk_changed - Determine if two CDCLK states are different
* @a: first CDCLK state
* @b: second CDCLK state
@@ -2100,6 +2116,26 @@ bool intel_cdclk_changed(const struct intel_cdclk_state *a,
a->voltage_level != b->voltage_level;
}
+/**
+ * intel_cdclk_swap_state - make atomic CDCLK configuration effective
+ * @state: atomic state
+ *
+ * This is the CDCLK version of drm_atomic_helper_swap_state() since the
+ * helper does not handle driver-specific global state.
+ *
+ * Similarly to the atomic helpers this function does a complete swap,
+ * i.e. it also puts the old state into @state. This is used by the commit
+ * code to determine how CDCLK has changed (for instance did it increase or
+ * decrease).
+ */
+void intel_cdclk_swap_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+
+ swap(state->cdclk.logical, dev_priv->cdclk.logical);
+ swap(state->cdclk.actual, dev_priv->cdclk.actual);
+}
+
void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
const char *context)
{
@@ -2113,12 +2149,14 @@ void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
* intel_set_cdclk - Push the CDCLK state to the hardware
* @dev_priv: i915 device
* @cdclk_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state)
+static void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state,
+ enum pipe pipe)
{
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
return;
@@ -2128,7 +2166,7 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+ dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
"cdclk state doesn't match!\n")) {
@@ -2137,6 +2175,46 @@ void intel_set_cdclk(struct drm_i915_private *dev_priv,
}
}
+/**
+ * intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware before updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
+/**
+ * intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @old_state: old CDCLK state
+ * @new_state: new CDCLK state
+ * @pipe: pipe with which to synchronize the update
+ *
+ * Program the hardware after updating the HW plane state based on the passed
+ * in CDCLK state, if necessary.
+ */
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe)
+{
+ if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
+ intel_set_cdclk(dev_priv, new_state, pipe);
+}
+
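
The pre/post plane-update split above encodes one rule: raise CDCLK (or apply an unsynchronized change) before the planes are reprogrammed, and lower it only afterwards, so a pipe never runs on a clock that is too low for its current configuration. A standalone sketch of that decision with simplified types:

#include <stdio.h>

#define INVALID_PIPE -1

struct cdclk_state { int cdclk; };

static void set_cdclk(const struct cdclk_state *state, int pipe)
{
	printf("programming cdclk %d kHz (sync pipe %d)\n", state->cdclk, pipe);
}

/* Raise (or unsynchronized change): program before touching the planes. */
static void set_cdclk_pre_plane_update(const struct cdclk_state *old,
				       const struct cdclk_state *new, int pipe)
{
	if (pipe == INVALID_PIPE || old->cdclk <= new->cdclk)
		set_cdclk(new, pipe);
}

/* Lower: program only after the planes have been reconfigured. */
static void set_cdclk_post_plane_update(const struct cdclk_state *old,
					const struct cdclk_state *new, int pipe)
{
	if (pipe != INVALID_PIPE && old->cdclk > new->cdclk)
		set_cdclk(new, pipe);
}

int main(void)
{
	struct cdclk_state old = { 308571 }, new = { 617143 };

	set_cdclk_pre_plane_update(&old, &new, 0);	/* increase: programmed here */
	set_cdclk_post_plane_update(&old, &new, 0);	/* no-op for an increase */
	return 0;
}
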
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
@@ -2187,19 +2265,8 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
/*
* According to BSpec, "The CD clock frequency must be at least twice
* the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
- *
- * FIXME: Check the actual, not default, BCLK being used.
- *
- * FIXME: This does not depend on ->has_audio because the higher CDCLK
- * is required for audio probe, also when there are no audio capable
- * displays connected at probe time. This leads to unnecessarily high
- * CDCLK when audio is not required.
- *
- * FIXME: This limit is only applied when there are displays connected
- * at probe time. If we probe without displays, we'll still end up using
- * the platform minimum CDCLK, failing audio probe.
*/
- if (INTEL_GEN(dev_priv) >= 9)
+ if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
/*
@@ -2239,7 +2306,7 @@ static int intel_compute_min_cdclk(struct drm_atomic_state *state)
intel_state->min_cdclk[i] = min_cdclk;
}
- min_cdclk = 0;
+ min_cdclk = intel_state->cdclk.force_min_cdclk;
for_each_pipe(dev_priv, pipe)
min_cdclk = max(intel_state->min_cdclk[pipe], min_cdclk);
@@ -2300,7 +2367,8 @@ static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
vlv_calc_voltage_level(dev_priv, cdclk);
if (!intel_state->active_crtcs) {
- cdclk = vlv_calc_cdclk(dev_priv, 0);
+ cdclk = vlv_calc_cdclk(dev_priv,
+ intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2333,7 +2401,7 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
bdw_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = bdw_calc_cdclk(0);
+ cdclk = bdw_calc_cdclk(intel_state->cdclk.force_min_cdclk);
intel_state->cdclk.actual.cdclk = cdclk;
intel_state->cdclk.actual.voltage_level =
@@ -2405,7 +2473,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
skl_calc_voltage_level(cdclk);
if (!intel_state->active_crtcs) {
- cdclk = skl_calc_cdclk(0, vco);
+ cdclk = skl_calc_cdclk(intel_state->cdclk.force_min_cdclk, vco);
intel_state->cdclk.actual.vco = vco;
intel_state->cdclk.actual.cdclk = cdclk;
@@ -2444,10 +2512,10 @@ static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
if (!intel_state->active_crtcs) {
if (IS_GEMINILAKE(dev_priv)) {
- cdclk = glk_calc_cdclk(0);
+ cdclk = glk_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = glk_de_pll_vco(dev_priv, cdclk);
} else {
- cdclk = bxt_calc_cdclk(0);
+ cdclk = bxt_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = bxt_de_pll_vco(dev_priv, cdclk);
}
@@ -2483,7 +2551,7 @@ static int cnl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = cnl_calc_cdclk(0);
+ cdclk = cnl_calc_cdclk(intel_state->cdclk.force_min_cdclk);
vco = cnl_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2519,7 +2587,7 @@ static int icl_modeset_calc_cdclk(struct drm_atomic_state *state)
cnl_compute_min_voltage_level(intel_state));
if (!intel_state->active_crtcs) {
- cdclk = icl_calc_cdclk(0, ref);
+ cdclk = icl_calc_cdclk(intel_state->cdclk.force_min_cdclk, ref);
vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk);
intel_state->cdclk.actual.vco = vco;
@@ -2560,7 +2628,7 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
*/
void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (dev_priv->cdclk.hw.ref == 24000)
dev_priv->max_cdclk_freq = 648000;
else
@@ -2668,7 +2736,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= CNP_RAWCLK_DEN(DIV_ROUND_CLOSEST(numerator * 1000,
fraction) - 1);
- if (HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
@@ -2723,7 +2791,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
*/
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
- if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv))
+ if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
dev_priv->rawclk_freq = pch_rawclk(dev_priv);
@@ -2744,18 +2812,13 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- vlv_modeset_calc_cdclk;
- } else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.set_cdclk = bdw_set_cdclk;
+ if (INTEL_GEN(dev_priv) >= 11) {
+ dev_priv->display.set_cdclk = icl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ } else if (IS_CANNONLAKE(dev_priv)) {
+ dev_priv->display.set_cdclk = cnl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- bdw_modeset_calc_cdclk;
+ cnl_modeset_calc_cdclk;
} else if (IS_GEN9_LP(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
@@ -2764,23 +2827,28 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.set_cdclk = skl_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
skl_modeset_calc_cdclk;
- } else if (IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = cnl_set_cdclk;
+ } else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.set_cdclk = bdw_set_cdclk;
dev_priv->display.modeset_calc_cdclk =
- cnl_modeset_calc_cdclk;
- } else if (IS_ICELAKE(dev_priv)) {
- dev_priv->display.set_cdclk = icl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk;
+ bdw_modeset_calc_cdclk;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = chv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = vlv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
}
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
dev_priv->display.get_cdclk = icl_get_cdclk;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.get_cdclk = cnl_get_cdclk;
- else if (IS_GEN9_BC(dev_priv))
- dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_GEN9_LP(dev_priv))
dev_priv->display.get_cdclk = bxt_get_cdclk;
+ else if (IS_GEN9_BC(dev_priv))
+ dev_priv->display.get_cdclk = skl_get_cdclk;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.get_cdclk = bdw_get_cdclk;
else if (IS_HASWELL(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_cdclk.h b/drivers/gpu/drm/i915/intel_cdclk.h
new file mode 100644
index 000000000000..4d6f7f5f8930
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cdclk.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CDCLK_H__
+#define __INTEL_CDCLK_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_cdclk_state;
+struct intel_crtc_state;
+
+int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
+void intel_cdclk_init(struct drm_i915_private *i915);
+void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+bool intel_cdclk_changed(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+void intel_cdclk_swap_state(struct intel_atomic_state *state);
+void
+intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void
+intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *old_state,
+ const struct intel_cdclk_state *new_state,
+ enum pipe pipe);
+void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
+ const char *context);
+
+#endif /* __INTEL_CDCLK_H__ */
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 71a1f12c6b2a..9093daabc290 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -22,6 +22,7 @@
*
*/
+#include "intel_color.h"
#include "intel_drv.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -40,23 +41,6 @@
#define CTM_COEFF_ABS(coeff) ((coeff) & (CTM_COEFF_SIGN - 1))
#define LEGACY_LUT_LENGTH 256
-
-/* Post offset values for RGB->YCBCR conversion */
-#define POSTOFF_RGB_TO_YUV_HI 0x800
-#define POSTOFF_RGB_TO_YUV_ME 0x100
-#define POSTOFF_RGB_TO_YUV_LO 0x800
-
-/*
- * These values are direct register values specified in the Bspec,
- * for RGB->YUV conversion matrix (colorspace BT709)
- */
-#define CSC_RGB_TO_YUV_RU_GU 0x2ba809d8
-#define CSC_RGB_TO_YUV_BU 0x37e80000
-#define CSC_RGB_TO_YUV_RY_GY 0x1e089cc0
-#define CSC_RGB_TO_YUV_BY 0xb5280000
-#define CSC_RGB_TO_YUV_RV_GV 0xbce89ad8
-#define CSC_RGB_TO_YUV_BV 0x1e080000
-
/*
* Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point
* format). This macro takes the coefficient we want transformed and the
@@ -69,10 +53,45 @@
#define ILK_CSC_COEFF_FP(coeff, fbits) \
(clamp_val(((coeff) >> (32 - (fbits) - 3)) + 4, 0, 0xfff) & 0xff8)
-#define ILK_CSC_COEFF_LIMITED_RANGE \
- ILK_CSC_COEFF_FP(CTM_COEFF_LIMITED_RANGE, 9)
-#define ILK_CSC_COEFF_1_0 \
- ((7 << 12) | ILK_CSC_COEFF_FP(CTM_COEFF_1_0, 8))
+#define ILK_CSC_COEFF_LIMITED_RANGE 0x0dc0
+#define ILK_CSC_COEFF_1_0 0x7800
+
+#define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255)
+
+static const u16 ilk_csc_off_zero[3] = {};
+
+static const u16 ilk_csc_coeff_identity[9] = {
+ ILK_CSC_COEFF_1_0, 0, 0,
+ 0, ILK_CSC_COEFF_1_0, 0,
+ 0, 0, ILK_CSC_COEFF_1_0,
+};
+
+static const u16 ilk_csc_postoff_limited_range[3] = {
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+ ILK_CSC_POSTOFF_LIMITED_RANGE,
+};
+
+static const u16 ilk_csc_coeff_limited_range[9] = {
+ ILK_CSC_COEFF_LIMITED_RANGE, 0, 0,
+ 0, ILK_CSC_COEFF_LIMITED_RANGE, 0,
+ 0, 0, ILK_CSC_COEFF_LIMITED_RANGE,
+};
+
+/*
+ * These values are direct register values specified in the Bspec,
+ * for RGB->YUV conversion matrix (colorspace BT709)
+ */
+static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = {
+ 0x1e08, 0x9cc0, 0xb528,
+ 0x2ba8, 0x09d8, 0x37e8,
+ 0xbce8, 0x9ad8, 0x1e08,
+};
+
+/* Post offset values for RGB->YCBCR conversion */
+static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = {
+ 0x0800, 0x0100, 0x0800,
+};
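For reference (illustrative, not part of the patch): once ilk_update_pipe_csc() below packs each coefficient pair into a register as coeff[n] << 16 | coeff[n + 1], these u16 tables reproduce the removed u32 register defines, e.g.:

	0x1e08 << 16 | 0x9cc0	/* == 0x1e089cc0, the old CSC_RGB_TO_YUV_RY_GY */
	0xb528 << 16		/* == 0xb5280000, the old CSC_RGB_TO_YUV_BY */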
static bool lut_is_legacy(const struct drm_property_blob *lut)
{
@@ -113,145 +132,188 @@ static u64 *ctm_mult_by_limited(u64 *result, const u64 *input)
return result;
}
-static void ilk_load_ycbcr_conversion_matrix(struct intel_crtc *crtc)
+static void ilk_update_pipe_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), CSC_RGB_TO_YUV_RU_GU);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), CSC_RGB_TO_YUV_BU);
+ I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), CSC_RGB_TO_YUV_RY_GY);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), CSC_RGB_TO_YUV_BY);
+ I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), CSC_RGB_TO_YUV_RV_GV);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), CSC_RGB_TO_YUV_BV);
+ I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), POSTOFF_RGB_TO_YUV_HI);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), POSTOFF_RGB_TO_YUV_ME);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), POSTOFF_RGB_TO_YUV_LO);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
+ if (INTEL_GEN(dev_priv) >= 7) {
+ I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ }
}
-static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void icl_update_output_csc(struct intel_crtc *crtc,
+ const u16 preoff[3],
+ const u16 coeff[9],
+ const u16 postoff[3])
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool limited_color_range = false;
enum pipe pipe = crtc->pipe;
- u16 coeffs[9] = {};
- int i;
+
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
+ I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+}
+
+static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
/*
* FIXME if there's a gamma LUT after the CSC, we should
* do the range compression using the gamma LUT instead.
*/
- if (INTEL_GEN(dev_priv) >= 8 || IS_HASWELL(dev_priv))
- limited_color_range = crtc_state->limited_color_range;
+ return crtc_state->limited_color_range &&
+ (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
+ IS_GEN_RANGE(dev_priv, 9, 10));
+}
- if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
- crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) {
- ilk_load_ycbcr_conversion_matrix(crtc);
- return;
- } else if (crtc_state->base.ctm) {
- struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
- const u64 *input;
- u64 temp[9];
+static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state,
+ u16 coeffs[9])
+{
+ const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
+ const u64 *input;
+ u64 temp[9];
+ int i;
- if (limited_color_range)
- input = ctm_mult_by_limited(temp, ctm->matrix);
- else
- input = ctm->matrix;
+ if (ilk_csc_limited_range(crtc_state))
+ input = ctm_mult_by_limited(temp, ctm->matrix);
+ else
+ input = ctm->matrix;
+
+ /*
+ * Convert fixed point S31.32 input to format supported by the
+ * hardware.
+ */
+ for (i = 0; i < 9; i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
/*
- * Convert fixed point S31.32 input to format supported by the
+ * Clamp input value to min/max supported by
* hardware.
*/
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff = ((1ULL << 63) - 1) & input[i];
-
- /*
- * Clamp input value to min/max supported by
- * hardware.
- */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
-
- /* sign bit */
- if (CTM_COEFF_NEGATIVE(input[i]))
- coeffs[i] |= 1 << 15;
-
- if (abs_coeff < CTM_COEFF_0_125)
- coeffs[i] |= (3 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 12);
- else if (abs_coeff < CTM_COEFF_0_25)
- coeffs[i] |= (2 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 11);
- else if (abs_coeff < CTM_COEFF_0_5)
- coeffs[i] |= (1 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 10);
- else if (abs_coeff < CTM_COEFF_1_0)
- coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
- else if (abs_coeff < CTM_COEFF_2_0)
- coeffs[i] |= (7 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 8);
- else
- coeffs[i] |= (6 << 12) |
- ILK_CSC_COEFF_FP(abs_coeff, 7);
- }
- } else {
- /*
- * Load an identity matrix if no coefficients are provided.
- *
- * TODO: Check what kind of values actually come out of the
- * pipe with these coeff/postoff values and adjust to get the
- * best accuracy. Perhaps we even need to take the bpc value
- * into consideration.
- */
- for (i = 0; i < 3; i++) {
- if (limited_color_range)
- coeffs[i * 3 + i] =
- ILK_CSC_COEFF_LIMITED_RANGE;
- else
- coeffs[i * 3 + i] = ILK_CSC_COEFF_1_0;
- }
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_4_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* sign bit */
+ if (CTM_COEFF_NEGATIVE(input[i]))
+ coeffs[i] |= 1 << 15;
+
+ if (abs_coeff < CTM_COEFF_0_125)
+ coeffs[i] |= (3 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 12);
+ else if (abs_coeff < CTM_COEFF_0_25)
+ coeffs[i] |= (2 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 11);
+ else if (abs_coeff < CTM_COEFF_0_5)
+ coeffs[i] |= (1 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 10);
+ else if (abs_coeff < CTM_COEFF_1_0)
+ coeffs[i] |= ILK_CSC_COEFF_FP(abs_coeff, 9);
+ else if (abs_coeff < CTM_COEFF_2_0)
+ coeffs[i] |= (7 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 8);
+ else
+ coeffs[i] |= (6 << 12) |
+ ILK_CSC_COEFF_FP(abs_coeff, 7);
}
+}
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeffs[0] << 16 | coeffs[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeffs[2] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeffs[3] << 16 | coeffs[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeffs[5] << 16);
-
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeffs[6] << 16 | coeffs[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeffs[8] << 16);
+static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
+
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero, coeff,
+ limited_color_range ?
+ ilk_csc_postoff_limited_range :
+ ilk_csc_off_zero);
+ } else if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (limited_color_range) {
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
+ } else if (crtc_state->csc_enable) {
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the degamma
+ * LUT is needed but the CSC is not, we need to load an
+ * identity matrix.
+ */
+ WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
- if (INTEL_GEN(dev_priv) > 6) {
- u16 postoff = 0;
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_identity,
+ ilk_csc_off_zero);
+ }
- if (limited_color_range)
- postoff = (16 * (1 << 12) / 255) & 0x1fff;
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+}
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
+static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(PIPE_CSC_MODE(pipe), 0);
- } else {
- u32 mode = CSC_MODE_YUV_TO_RGB;
+ if (crtc_state->base.ctm) {
+ u16 coeff[9];
- if (limited_color_range)
- mode |= CSC_BLACK_SCREEN_OFFSET;
+ ilk_csc_convert_ctm(crtc_state, coeff);
+ ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
+ coeff, ilk_csc_off_zero);
+ }
- I915_WRITE(PIPE_CSC_MODE(pipe), mode);
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_rgb_to_ycbcr,
+ ilk_csc_postoff_rgb_to_ycbcr);
+ } else if (crtc_state->limited_color_range) {
+ icl_update_output_csc(crtc, ilk_csc_off_zero,
+ ilk_csc_coeff_limited_range,
+ ilk_csc_postoff_limited_range);
}
+
+ I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
}
/*
@@ -262,7 +324,6 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 mode;
if (crtc_state->base.ctm) {
const struct drm_color_ctm *ctm = crtc_state->base.ctm->data;
@@ -296,12 +357,30 @@ static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state
I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
}
- mode = (crtc_state->base.ctm ? CGM_PIPE_MODE_CSC : 0);
- if (!crtc_state_is_legacy_gamma(crtc_state)) {
- mode |= (crtc_state->base.degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) |
- (crtc_state->base.gamma_lut ? CGM_PIPE_MODE_GAMMA : 0);
- }
- I915_WRITE(CGM_PIPE_MODE(pipe), mode);
+ I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+}
+
+/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
+static u32 i965_lut_10p6_ldw(const struct drm_color_lut *color)
+{
+ return (color->red & 0xff) << 16 |
+ (color->green & 0xff) << 8 |
+ (color->blue & 0xff);
+}
+
+/* i965+ "10.6" interpolated format "odd DW" (high 8 bits) */
+static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
+{
+ return (color->red >> 8) << 16 |
+ (color->green >> 8) << 8 |
+ (color->blue >> 8);
+}
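As a small worked example (made-up value, not from the patch), a LUT entry with red == 0x1234 contributes:

	(0x1234 & 0xff) << 16	/* == 0x340000, low 8 bits via i965_lut_10p6_ldw(), even DW */
	(0x1234 >> 8) << 16	/* == 0x120000, high 8 bits via i965_lut_10p6_udw(), odd DW */

i965_load_lut_10p6() further down writes the two halves to consecutive PALETTE dwords.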
+
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
/* Loads the legacy palette/gamma unit for the CRTC. */
@@ -334,15 +413,6 @@ static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
else
I915_WRITE(LGC_PALETTE(pipe, i), word);
}
- } else {
- for (i = 0; i < 256; i++) {
- u32 word = (i << 16) | (i << 8) | i;
-
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
}
}
@@ -351,6 +421,34 @@ static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
i9xx_load_luts_internal(crtc_state, crtc_state->base.gamma_lut);
}
+static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+}
+
+static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ u32 val;
+
+ val = I915_READ(PIPECONF(pipe));
+ val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+ I915_WRITE(PIPECONF(pipe), val);
+
+ ilk_load_csc_matrix(crtc_state);
+}
+
static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -361,106 +459,219 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
-static void bdw_load_degamma_lut(const struct intel_crtc_state *crtc_state)
+static void skl_color_commit(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
enum pipe pipe = crtc->pipe;
+ u32 val = 0;
- I915_WRITE(PREC_PAL_INDEX(pipe),
- PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
+ /*
+ * We don't (yet) allow userspace to control the pipe background color,
+ * so force it to black, but apply pipe gamma and CSC appropriately
+ * so that its handling will match how we program our planes.
+ */
+ if (crtc_state->gamma_enable)
+ val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
+ if (crtc_state->csc_enable)
+ val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
+ I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
- if (degamma_lut) {
- const struct drm_color_lut *lut = degamma_lut->data;
+ I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
- for (i = 0; i < lut_size; i++) {
- u32 word =
- drm_color_lut_extract(lut[i].red, 10) << 20 |
- drm_color_lut_extract(lut[i].green, 10) << 10 |
- drm_color_lut_extract(lut[i].blue, 10);
+ if (INTEL_GEN(dev_priv) >= 11)
+ icl_load_csc_matrix(crtc_state);
+ else
+ ilk_load_csc_matrix(crtc_state);
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+static void i965_load_lut_10p6(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ for (i = 0; i < lut_size - 1; i++) {
+ I915_WRITE(PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ I915_WRITE(PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
+
+ I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
+ I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
+ I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
}
-static void bdw_load_gamma_lut(const struct intel_crtc_state *crtc_state, u32 offset)
+static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- u32 i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ i965_load_lut_10p6(crtc, gamma_lut);
+}
+
+static void ilk_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
+ for (i = 0; i < lut_size; i++)
+ I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+}
- I915_WRITE(PREC_PAL_INDEX(pipe),
- (offset ? PAL_PREC_SPLIT_MODE : 0) |
- PAL_PREC_AUTO_INCREMENT |
- offset);
+static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
- if (gamma_lut) {
- const struct drm_color_lut *lut = gamma_lut->data;
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
+ i9xx_load_luts(crtc_state);
+ else
+ ilk_load_lut_10(crtc, gamma_lut);
+}
- for (i = 0; i < lut_size; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 10) << 20) |
- (drm_color_lut_extract(lut[i].green, 10) << 10) |
- drm_color_lut_extract(lut[i].blue, 10);
+static int ivb_lut_10_size(u32 prec_index)
+{
+ if (prec_index & PAL_PREC_SPLIT_MODE)
+ return 512;
+ else
+ return 1024;
+}
- I915_WRITE(PREC_PAL_DATA(pipe), word);
- }
+/*
+ * IVB/HSW Bspec / PAL_PREC_INDEX:
+ * "Restriction : Index auto increment mode is not
+ * supported and must not be enabled."
+ */
+static void ivb_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
- /* Program the max register to clamp values > 1.0. */
- i = lut_size - 1;
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
- drm_color_lut_extract(lut[i].red, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
- drm_color_lut_extract(lut[i].green, 16));
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2),
- drm_color_lut_extract(lut[i].blue, 16));
- } else {
- for (i = 0; i < lut_size; i++) {
- u32 v = (i * ((1 << 10) - 1)) / (lut_size - 1);
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe),
- (v << 20) | (v << 10) | v);
- }
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ }
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+}
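To make the index interpolation concrete (assuming, purely for illustration, a 1024-entry user LUT): in split gamma mode hw_lut_size is 512, so

	entry = &lut[i * (1024 - 1) / (512 - 1)];	/* i == 0 -> lut[0], i == 511 -> lut[1023] */

i.e. roughly every other user entry is skipped, which is what the "discard half the user entries" comment refers to.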
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
- I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
+/* On BDW+ the index auto increment mode actually works */
+static void bdw_load_lut_10(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob,
+ u32 prec_index)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int hw_lut_size = ivb_lut_10_size(prec_index);
+ const struct drm_color_lut *lut = blob->data;
+ int i, lut_size = drm_color_lut_size(blob);
+ enum pipe pipe = crtc->pipe;
+
+ I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
+ PAL_PREC_AUTO_INCREMENT);
+
+ for (i = 0; i < hw_lut_size; i++) {
+ /* We discard half the user entries in split gamma mode */
+ const struct drm_color_lut *entry =
+ &lut[i * (lut_size - 1) / (hw_lut_size - 1)];
+
+ I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
}
+
+ /*
+ * Reset the index, otherwise it prevents the legacy palette from
+ * being written properly.
+ */
+ I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
-/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(const struct intel_crtc_state *crtc_state)
+static void ivb_load_lut_10_max(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ /* Program the max register to clamp values > 1.0. */
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16);
+
+ /*
+ * Program the gc max 2 register to clamp values > 1.0.
+ * ToDo: Extend the ABI to be able to program values
+ * from 3.0 to 7.0.
+ */
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16);
+ I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16);
+ }
+}
+
+static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ ivb_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
} else {
- bdw_load_degamma_lut(crtc_state);
- bdw_load_gamma_lut(crtc_state,
- INTEL_INFO(dev_priv)->color.degamma_lut_size);
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ ivb_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
+
+static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ const struct drm_property_blob *degamma_lut = crtc_state->base.degamma_lut;
+
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
+ bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_SPLIT_MODE |
+ PAL_PREC_INDEX_VALUE(512));
+ } else {
+ const struct drm_property_blob *blob = gamma_lut ?: degamma_lut;
+
+ bdw_load_lut_10(crtc, blob,
+ PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -469,7 +680,8 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = 33;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ const struct drm_color_lut *lut = crtc_state->base.degamma_lut->data;
u32 i;
/*
@@ -480,39 +692,95 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ for (i = 0; i < lut_size; i++) {
+ /*
+ * First 33 entries represent the range from 0 to 1.0.
+ * The 34th and 35th entries represent the extended range
+ * inputs 3.0 and 7.0 respectively, currently clamped
+ * at 1.0. Since the precision is 16 bit, the user
+ * value can be written directly to the register.
+ * The pipe degamma table on GLK+ doesn't support
+ * different values per channel, so this just programs
+ * the green value, which is then used for red and
+ * blue as well, into the LUT registers.
+ * ToDo: Extend to a max of 7.0. This requires a 32 bit
+ * input value instead of just 16.
+ */
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < 35)
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+}
+
+static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ u32 i;
+
/*
- * FIXME: The pipe degamma table in geminilake doesn't support
- * different values per channel, so this just loads a linear table.
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
*/
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
for (i = 0; i < lut_size; i++) {
- u32 v = (i * (1 << 16)) / (lut_size - 1);
+ u32 v = (i << 16) / (lut_size - 1);
I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- glk_load_degamma_lut(crtc_state);
+ /*
+ * On GLK+ both pipe CSC and degamma LUT are controlled
+ * by csc_enable. Hence, for the cases where the CSC is
+ * needed but the degamma LUT is not, we need to load a
+ * linear degamma LUT. In fact we'll just always load
+ * the degamma LUT so that we don't have to reload
+ * it every time the pipe CSC is enabled.
+ */
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+ else
+ glk_load_degamma_lut_linear(crtc_state);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
+ if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
i9xx_load_luts(crtc_state);
} else {
- bdw_load_gamma_lut(crtc_state, 0);
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
+ }
+}
- /*
- * Reset the index, otherwise it prevents the legacy palette to be
- * written properly.
- */
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+static void icl_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (crtc_state->base.degamma_lut)
+ glk_load_degamma_lut(crtc_state);
+
+ if ((crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) ==
+ GAMMA_MODE_MODE_8BIT) {
+ i9xx_load_luts(crtc_state);
+ } else {
+ bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
+ ivb_load_lut_10_max(crtc);
}
}
@@ -527,7 +795,7 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
cherryview_load_csc_matrix(crtc_state);
if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts_internal(crtc_state, gamma_lut);
+ i9xx_load_luts(crtc_state);
return;
}
@@ -566,12 +834,6 @@ static void cherryview_load_luts(const struct intel_crtc_state *crtc_state)
I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1), word1);
}
}
-
- /*
- * Also program a linear LUT in the legacy block (behind the
- * CGM block).
- */
- i9xx_load_luts_internal(crtc_state, NULL);
}
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -585,8 +847,64 @@ void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
- if (dev_priv->display.color_commit)
- dev_priv->display.color_commit(crtc_state);
+ dev_priv->display.color_commit(crtc_state);
+}
+
+int intel_color_check(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ return dev_priv->display.color_check(crtc_state);
+}
+
+static bool need_plane_update(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+ /*
+ * On pre-SKL the pipe gamma enable and pipe csc enable for
+ * the pipe bottom color are configured via the primary plane.
+ * We have to reconfigure that even if the plane is inactive.
+ */
+ return crtc_state->active_planes & BIT(plane->id) ||
+ (INTEL_GEN(dev_priv) < 9 &&
+ plane->id == PLANE_PRIMARY);
+}
+
+static int
+intel_color_add_affected_planes(struct intel_crtc_state *new_crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_crtc_state->base.state);
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_plane *plane;
+
+ if (!new_crtc_state->base.active ||
+ drm_atomic_crtc_needs_modeset(&new_crtc_state->base))
+ return 0;
+
+ if (new_crtc_state->gamma_enable == old_crtc_state->gamma_enable &&
+ new_crtc_state->csc_enable == old_crtc_state->csc_enable)
+ return 0;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ struct intel_plane_state *plane_state;
+
+ if (!need_plane_update(plane, new_crtc_state))
+ continue;
+
+ plane_state = intel_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ new_crtc_state->update_planes |= BIT(plane->id);
+ }
+
+ return 0;
}
static int check_lut_size(const struct drm_property_blob *lut, int expected)
@@ -606,7 +924,7 @@ static int check_lut_size(const struct drm_property_blob *lut, int expected)
return 0;
}
-int intel_color_check(struct intel_crtc_state *crtc_state)
+static int check_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut;
@@ -614,17 +932,19 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
int gamma_length, degamma_length;
u32 gamma_tests, degamma_tests;
+ /* Always allow legacy gamma LUT with no further checking. */
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ /* C8 relies on its palette being stored in the legacy LUT */
+ if (crtc_state->c8_planes)
+ return -EINVAL;
+
degamma_length = INTEL_INFO(dev_priv)->color.degamma_lut_size;
gamma_length = INTEL_INFO(dev_priv)->color.gamma_lut_size;
degamma_tests = INTEL_INFO(dev_priv)->color.degamma_lut_tests;
gamma_tests = INTEL_INFO(dev_priv)->color.gamma_lut_tests;
- /* Always allow legacy gamma LUT with no further checking. */
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
- return 0;
- }
-
if (check_lut_size(degamma_lut, degamma_length) ||
check_lut_size(gamma_lut, gamma_length))
return -EINVAL;
@@ -633,12 +953,270 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
drm_color_lut_check(gamma_lut, gamma_tests))
return -EINVAL;
- if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
- else if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- crtc_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
+ return 0;
+}
+
+static u32 i9xx_gamma_mode(struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
else
- crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+ return GAMMA_MODE_MODE_10BIT; /* i965+ only */
+}
+
+static int i9xx_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = i9xx_gamma_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 chv_cgm_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 cgm_mode = 0;
+
+ if (crtc_state_is_legacy_gamma(crtc_state))
+ return 0;
+
+ if (crtc_state->base.degamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_DEGAMMA;
+ if (crtc_state->base.ctm)
+ cgm_mode |= CGM_PIPE_MODE_CSC;
+ if (crtc_state->base.gamma_lut)
+ cgm_mode |= CGM_PIPE_MODE_GAMMA;
+
+ return cgm_mode;
+}
+
+/*
+ * CHV color pipeline:
+ * u0.10 -> CGM degamma -> u0.14 -> CGM csc -> u0.14 -> CGM gamma ->
+ * u0.10 -> WGC csc -> u0.10 -> pipe gamma -> u0.10
+ *
+ * We always bypass the WGC csc and use the CGM csc
+ * instead since it has degamma and better precision.
+ */
+static int chv_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ /*
+ * Pipe gamma will be used only for the legacy LUT.
+ * Otherwise we bypass it and use the CGM gamma instead.
+ */
+ crtc_state->gamma_enable =
+ crtc_state_is_legacy_gamma(crtc_state) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->gamma_mode = GAMMA_MODE_MODE_8BIT;
+
+ crtc_state->cgm_mode = chv_cgm_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int ilk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /*
+ * We don't expose the ctm on ilk/snb currently,
+ * nor do we enable YCbCr output. Also RGB limited
+ * range output is handled by the hw automagically.
+ */
+ crtc_state->csc_enable = false;
+
+ crtc_state->gamma_mode = ilk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 ivb_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else if (crtc_state->base.gamma_lut &&
+ crtc_state->base.degamma_lut)
+ return GAMMA_MODE_MODE_SPLIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static u32 ivb_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+
+ /*
+ * CSC comes after the LUT in degamma, RGB->YCbCr,
+ * and RGB full->limited range mode.
+ */
+ if (crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ limited_color_range)
+ return 0;
+
+ return CSC_POSITION_BEFORE_GAMMA;
+}
+
+static int ivb_color_check(struct intel_crtc_state *crtc_state)
+{
+ bool limited_color_range = ilk_csc_limited_range(crtc_state);
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ (crtc_state->base.gamma_lut ||
+ crtc_state->base.degamma_lut) &&
+ !crtc_state->c8_planes;
+
+ crtc_state->csc_enable =
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || limited_color_range;
+
+ crtc_state->gamma_mode = ivb_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = ivb_csc_mode(crtc_state);
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 glk_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ if (!crtc_state->gamma_enable ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ return GAMMA_MODE_MODE_8BIT;
+ else
+ return GAMMA_MODE_MODE_10BIT;
+}
+
+static int glk_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_enable =
+ crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes;
+
+ /* On GLK+ degamma LUT is controlled by csc_enable */
+ crtc_state->csc_enable =
+ crtc_state->base.degamma_lut ||
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->base.ctm || crtc_state->limited_color_range;
+
+ crtc_state->gamma_mode = glk_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = 0;
+
+ ret = intel_color_add_affected_planes(crtc_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u32 icl_gamma_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 gamma_mode = 0;
+
+ if (crtc_state->base.degamma_lut)
+ gamma_mode |= PRE_CSC_GAMMA_ENABLE;
+
+ if (crtc_state->base.gamma_lut &&
+ !crtc_state->c8_planes)
+ gamma_mode |= POST_CSC_GAMMA_ENABLE;
+
+ if (!crtc_state->base.gamma_lut ||
+ crtc_state_is_legacy_gamma(crtc_state))
+ gamma_mode |= GAMMA_MODE_MODE_8BIT;
+ else
+ gamma_mode |= GAMMA_MODE_MODE_10BIT;
+
+ return gamma_mode;
+}
+
+static u32 icl_csc_mode(const struct intel_crtc_state *crtc_state)
+{
+ u32 csc_mode = 0;
+
+ if (crtc_state->base.ctm)
+ csc_mode |= ICL_CSC_ENABLE;
+
+ if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB ||
+ crtc_state->limited_color_range)
+ csc_mode |= ICL_OUTPUT_CSC_ENABLE;
+
+ return csc_mode;
+}
+
+static int icl_color_check(struct intel_crtc_state *crtc_state)
+{
+ int ret;
+
+ ret = check_luts(crtc_state);
+ if (ret)
+ return ret;
+
+ crtc_state->gamma_mode = icl_gamma_mode(crtc_state);
+
+ crtc_state->csc_mode = icl_csc_mode(crtc_state);
return 0;
}
@@ -646,30 +1224,55 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ bool has_ctm = INTEL_INFO(dev_priv)->color.degamma_lut_size != 0;
drm_mode_crtc_set_gamma_size(&crtc->base, 256);
- if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.load_luts = cherryview_load_luts;
- } else if (IS_HASWELL(dev_priv)) {
- dev_priv->display.load_luts = i9xx_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
- IS_BROXTON(dev_priv)) {
- dev_priv->display.load_luts = broadwell_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
- } else if (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) {
- dev_priv->display.load_luts = glk_load_luts;
- dev_priv->display.color_commit = hsw_color_commit;
+ if (HAS_GMCH(dev_priv)) {
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.color_check = chv_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = cherryview_load_luts;
+ } else if (INTEL_GEN(dev_priv) >= 4) {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i965_load_luts;
+ } else {
+ dev_priv->display.color_check = i9xx_color_check;
+ dev_priv->display.color_commit = i9xx_color_commit;
+ dev_priv->display.load_luts = i9xx_load_luts;
+ }
} else {
- dev_priv->display.load_luts = i9xx_load_luts;
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.color_check = icl_color_check;
+ else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.color_check = glk_color_check;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.color_check = ivb_color_check;
+ else
+ dev_priv->display.color_check = ilk_color_check;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ dev_priv->display.color_commit = skl_color_commit;
+ else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ dev_priv->display.color_commit = hsw_color_commit;
+ else
+ dev_priv->display.color_commit = ilk_color_commit;
+
+ if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.load_luts = icl_load_luts;
+ else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ dev_priv->display.load_luts = glk_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 8)
+ dev_priv->display.load_luts = bdw_load_luts;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ dev_priv->display.load_luts = ivb_load_luts;
+ else
+ dev_priv->display.load_luts = ilk_load_luts;
}
- /* Enable color management support when we have degamma & gamma LUTs. */
- if (INTEL_INFO(dev_priv)->color.degamma_lut_size != 0 &&
- INTEL_INFO(dev_priv)->color.gamma_lut_size != 0)
- drm_crtc_enable_color_mgmt(&crtc->base,
- INTEL_INFO(dev_priv)->color.degamma_lut_size,
- true,
- INTEL_INFO(dev_priv)->color.gamma_lut_size);
+ drm_crtc_enable_color_mgmt(&crtc->base,
+ INTEL_INFO(dev_priv)->color.degamma_lut_size,
+ has_ctm,
+ INTEL_INFO(dev_priv)->color.gamma_lut_size);
}
diff --git a/drivers/gpu/drm/i915/intel_color.h b/drivers/gpu/drm/i915/intel_color.h
new file mode 100644
index 000000000000..b8a3ce609587
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_color.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_COLOR_H__
+#define __INTEL_COLOR_H__
+
+struct intel_crtc_state;
+struct intel_crtc;
+
+void intel_color_init(struct intel_crtc *crtc);
+int intel_color_check(struct intel_crtc_state *crtc_state);
+void intel_color_commit(const struct intel_crtc_state *crtc_state);
+void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_COLOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_combo_phy.c b/drivers/gpu/drm/i915/intel_combo_phy.c
index 3d0271cebf99..2bf4359d7e41 100644
--- a/drivers/gpu/drm/i915/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/intel_combo_phy.c
@@ -239,7 +239,8 @@ void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
for_each_combo_port_reverse(dev_priv, port) {
u32 val;
- if (!icl_combo_phy_verify_state(dev_priv, port))
+ if (port == PORT_A &&
+ !icl_combo_phy_verify_state(dev_priv, port))
DRM_WARN("Port %c combo PHY HW state changed unexpectedly\n",
port_name(port));
diff --git a/drivers/gpu/drm/i915/intel_connector.c b/drivers/gpu/drm/i915/intel_connector.c
index ee16758747c5..073b6c3ab7cc 100644
--- a/drivers/gpu/drm/i915/intel_connector.c
+++ b/drivers/gpu/drm/i915/intel_connector.c
@@ -23,12 +23,17 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include <linux/slab.h>
#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
@@ -88,6 +93,8 @@ void intel_connector_destroy(struct drm_connector *connector)
kfree(intel_connector->detect_edid);
+ intel_hdcp_cleanup(intel_connector);
+
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
@@ -265,3 +272,11 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector)
connector->dev->mode_config.aspect_ratio_property,
DRM_MODE_PICTURE_ASPECT_NONE);
}
+
+void
+intel_attach_colorspace_property(struct drm_connector *connector)
+{
+ if (!drm_mode_create_colorspace_property(connector))
+ drm_object_attach_property(&connector->base,
+ connector->colorspace_property, 0);
+}
diff --git a/drivers/gpu/drm/i915/intel_connector.h b/drivers/gpu/drm/i915/intel_connector.h
new file mode 100644
index 000000000000..93a7375c8196
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_connector.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONNECTOR_H__
+#define __INTEL_CONNECTOR_H__
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct edid;
+struct i2c_adapter;
+struct intel_connector;
+struct intel_encoder;
+
+int intel_connector_init(struct intel_connector *connector);
+struct intel_connector *intel_connector_alloc(void);
+void intel_connector_free(struct intel_connector *connector);
+void intel_connector_destroy(struct drm_connector *connector);
+int intel_connector_register(struct drm_connector *connector);
+void intel_connector_unregister(struct drm_connector *connector);
+void intel_connector_attach_encoder(struct intel_connector *connector,
+ struct intel_encoder *encoder);
+bool intel_connector_get_hw_state(struct intel_connector *connector);
+enum pipe intel_connector_get_pipe(struct intel_connector *connector);
+int intel_connector_update_modes(struct drm_connector *connector,
+ struct edid *edid);
+int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
+void intel_attach_force_audio_property(struct drm_connector *connector);
+void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
+void intel_attach_aspect_ratio_property(struct drm_connector *connector);
+void intel_attach_colorspace_property(struct drm_connector *connector);
+
+#endif /* __INTEL_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context.c b/drivers/gpu/drm/i915/intel_context.c
new file mode 100644
index 000000000000..8931e0fee873
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.c
@@ -0,0 +1,269 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_gem_context.h"
+#include "i915_globals.h"
+#include "intel_context.h"
+#include "intel_ringbuffer.h"
+
+static struct i915_global_context {
+ struct i915_global base;
+ struct kmem_cache *slab_ce;
+} global;
+
+struct intel_context *intel_context_alloc(void)
+{
+ return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
+}
+
+void intel_context_free(struct intel_context *ce)
+{
+ kmem_cache_free(global.slab_ce, ce);
+}
+
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce = NULL;
+ struct rb_node *p;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ p = ctx->hw_contexts.rb_node;
+ while (p) {
+ struct intel_context *this =
+ rb_entry(p, struct intel_context, node);
+
+ if (this->engine == engine) {
+ GEM_BUG_ON(this->gem_context != ctx);
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = p->rb_right;
+ else
+ p = p->rb_left;
+ }
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce)
+{
+ struct rb_node **p, *parent;
+ int err = 0;
+
+ spin_lock(&ctx->hw_contexts_lock);
+
+ parent = NULL;
+ p = &ctx->hw_contexts.rb_node;
+ while (*p) {
+ struct intel_context *this;
+
+ parent = *p;
+ this = rb_entry(parent, struct intel_context, node);
+
+ if (this->engine == engine) {
+ err = -EEXIST;
+ ce = this;
+ break;
+ }
+
+ if (this->engine < engine)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ if (!err) {
+ rb_link_node(&ce->node, parent, p);
+ rb_insert_color(&ce->node, &ctx->hw_contexts);
+ }
+
+ spin_unlock(&ctx->hw_contexts_lock);
+
+ return ce;
+}
+
+void __intel_context_remove(struct intel_context *ce)
+{
+ struct i915_gem_context *ctx = ce->gem_context;
+
+ spin_lock(&ctx->hw_contexts_lock);
+ rb_erase(&ce->node, &ctx->hw_contexts);
+ spin_unlock(&ctx->hw_contexts_lock);
+}
+
+static struct intel_context *
+intel_context_instance(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce, *pos;
+
+ ce = intel_context_lookup(ctx, engine);
+ if (likely(ce))
+ return ce;
+
+ ce = intel_context_alloc();
+ if (!ce)
+ return ERR_PTR(-ENOMEM);
+
+ intel_context_init(ce, ctx, engine);
+
+ pos = __intel_context_insert(ctx, engine, ce);
+ if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
+ intel_context_free(ce);
+
+ GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
+ return pos;
+}
+
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+ __acquires(ce->pin_mutex)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ return ce;
+}
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_instance(ctx, engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ if (likely(atomic_inc_not_zero(&ce->pin_count)))
+ return ce;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex))
+ return ERR_PTR(-EINTR);
+
+ if (likely(!atomic_read(&ce->pin_count))) {
+ err = ce->ops->pin(ce);
+ if (err)
+ goto err;
+
+ i915_gem_context_get(ctx);
+ GEM_BUG_ON(ce->gem_context != ctx);
+
+ mutex_lock(&ctx->mutex);
+ list_add(&ce->active_link, &ctx->active_engines);
+ mutex_unlock(&ctx->mutex);
+
+ intel_context_get(ce);
+ smp_mb__before_atomic(); /* flush pin before it is visible */
+ }
+
+ atomic_inc(&ce->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
+
+ mutex_unlock(&ce->pin_mutex);
+ return ce;
+
+err:
+ mutex_unlock(&ce->pin_mutex);
+ return ERR_PTR(err);
+}
+
+void intel_context_unpin(struct intel_context *ce)
+{
+ if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+ return;
+
+ /* We may be called from inside intel_context_pin() to evict another */
+ intel_context_get(ce);
+ mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
+
+ if (likely(atomic_dec_and_test(&ce->pin_count))) {
+ ce->ops->unpin(ce);
+
+ mutex_lock(&ce->gem_context->mutex);
+ list_del(&ce->active_link);
+ mutex_unlock(&ce->gem_context->mutex);
+
+ i915_gem_context_put(ce->gem_context);
+ intel_context_put(ce);
+ }
+
+ mutex_unlock(&ce->pin_mutex);
+ intel_context_put(ce);
+}
+
+static void intel_context_retire(struct i915_active_request *active,
+ struct i915_request *rq)
+{
+ struct intel_context *ce =
+ container_of(active, typeof(*ce), active_tracker);
+
+ intel_context_unpin(ce);
+}
+
+void
+intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ kref_init(&ce->ref);
+
+ ce->gem_context = ctx;
+ ce->engine = engine;
+ ce->ops = engine->cops;
+
+ INIT_LIST_HEAD(&ce->signal_link);
+ INIT_LIST_HEAD(&ce->signals);
+
+ mutex_init(&ce->pin_mutex);
+
+ /* Use the whole device by default */
+ ce->sseu = intel_device_default_sseu(ctx->i915);
+
+ i915_active_request_init(&ce->active_tracker,
+ NULL, intel_context_retire);
+}
+
+static void i915_global_context_shrink(void)
+{
+ kmem_cache_shrink(global.slab_ce);
+}
+
+static void i915_global_context_exit(void)
+{
+ kmem_cache_destroy(global.slab_ce);
+}
+
+static struct i915_global_context global = { {
+ .shrink = i915_global_context_shrink,
+ .exit = i915_global_context_exit,
+} };
+
+int __init i915_global_context_init(void)
+{
+ global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
+ if (!global.slab_ce)
+ return -ENOMEM;
+
+ i915_global_register(&global.base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_context.h b/drivers/gpu/drm/i915/intel_context.h
new file mode 100644
index 000000000000..ebc861b1a49e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context.h
@@ -0,0 +1,87 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_H__
+#define __INTEL_CONTEXT_H__
+
+#include <linux/lockdep.h>
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct intel_context *intel_context_alloc(void);
+void intel_context_free(struct intel_context *ce);
+
+void intel_context_init(struct intel_context *ce,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_lookup - Find the matching HW context for this (ctx, engine)
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * May return NULL if the HW context hasn't been instantiated (i.e. unused).
+ */
+struct intel_context *
+intel_context_lookup(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+/**
+ * intel_context_pin_lock - Stabilises the 'pinned' status of the HW context
+ * @ctx - the parent GEM context
+ * @engine - the target HW engine
+ *
+ * Acquire a lock on the pinned status of the HW context, such that the context
+ * can neither be bound to the GPU nor unbound whilst the lock is held, i.e.
+ * intel_context_is_pinned() remains stable.
+ */
+struct intel_context *
+intel_context_pin_lock(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine);
+
+static inline bool
+intel_context_is_pinned(struct intel_context *ce)
+{
+ return atomic_read(&ce->pin_count);
+}
+
+static inline void intel_context_pin_unlock(struct intel_context *ce)
+__releases(ce->pin_mutex)
+{
+ mutex_unlock(&ce->pin_mutex);
+}
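A minimal usage sketch of the pin-lock API (hypothetical caller; ctx, engine and err stand for the caller's own i915_gem_context, intel_engine_cs and error variable):

	struct intel_context *ce;
	int err = 0;

	ce = intel_context_pin_lock(ctx, engine);	/* stabilises the pin state */
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (intel_context_is_pinned(ce))
		err = -EBUSY;	/* e.g. refuse to reconfigure a context in active use */

	intel_context_pin_unlock(ce);
	return err;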
+
+struct intel_context *
+__intel_context_insert(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context *ce);
+void
+__intel_context_remove(struct intel_context *ce);
+
+struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine);
+
+static inline void __intel_context_pin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_pinned(ce));
+ atomic_inc(&ce->pin_count);
+}
+
+void intel_context_unpin(struct intel_context *ce);
+
+static inline struct intel_context *intel_context_get(struct intel_context *ce)
+{
+ kref_get(&ce->ref);
+ return ce;
+}
+
+static inline void intel_context_put(struct intel_context *ce)
+{
+ kref_put(&ce->ref, ce->ops->destroy);
+}
+
+#endif /* __INTEL_CONTEXT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_context_types.h b/drivers/gpu/drm/i915/intel_context_types.h
new file mode 100644
index 000000000000..68b4ca1611e0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_context_types.h
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CONTEXT_TYPES__
+#define __INTEL_CONTEXT_TYPES__
+
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include "i915_active_types.h"
+
+struct i915_gem_context;
+struct i915_vma;
+struct intel_context;
+struct intel_ring;
+
+struct intel_context_ops {
+ int (*pin)(struct intel_context *ce);
+ void (*unpin)(struct intel_context *ce);
+
+ void (*reset)(struct intel_context *ce);
+ void (*destroy)(struct kref *kref);
+};
+
+/*
+ * Powergating configuration for a particular (context,engine).
+ */
+struct intel_sseu {
+ u8 slice_mask;
+ u8 subslice_mask;
+ u8 min_eus_per_subslice;
+ u8 max_eus_per_subslice;
+};
+
+struct intel_context {
+ struct kref ref;
+
+ struct i915_gem_context *gem_context;
+ struct intel_engine_cs *engine;
+ struct intel_engine_cs *active;
+
+ struct list_head active_link;
+ struct list_head signal_link;
+ struct list_head signals;
+
+ struct i915_vma *state;
+ struct intel_ring *ring;
+
+ u32 *lrc_reg_state;
+ u64 lrc_desc;
+
+ atomic_t pin_count;
+ struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
+
+ /**
+ * active_tracker: Active tracker for the external rq activity
+ * on this intel_context object.
+ */
+ struct i915_active_request active_tracker;
+
+ const struct intel_context_ops *ops;
+ struct rb_node node;
+
+ /** sseu: Control eu/slice partitioning */
+ struct intel_sseu sseu;
+};
+
+#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3716b2ee362f..b665c370111b 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -27,13 +27,18 @@
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_drv.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
@@ -435,7 +440,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000))
@@ -489,7 +494,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
I915_WRITE(crt->adpa_reg, adpa);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
1000)) {
@@ -542,7 +547,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
CRT_HOTPLUG_FORCE_DETECT,
CRT_HOTPLUG_FORCE_DETECT);
/* wait for FORCE_DETECT to go off */
- if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
diff --git a/drivers/gpu/drm/i915/intel_crt.h b/drivers/gpu/drm/i915/intel_crt.h
new file mode 100644
index 000000000000..1b3fba359efc
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_crt.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CRT_H__
+#define __INTEL_CRT_H__
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_encoder;
+struct drm_i915_private;
+
+bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t adpa_reg, enum pipe *pipe);
+void intel_crt_init(struct drm_i915_private *dev_priv);
+void intel_crt_reset(struct drm_encoder *encoder);
+
+#endif /* __INTEL_CRT_H__ */
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index e8ac04c33e29..f43c2a2563a5 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -21,9 +21,12 @@
* IN THE SOFTWARE.
*
*/
+
#include <linux/firmware.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
+#include "intel_csr.h"
/**
* DOC: csr support for dmc
@@ -486,7 +489,7 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12) {
/* Allow to load fw via parameter using the last known size */
csr->max_fw_size = GEN12_CSR_MAX_FW_SIZE;
- } else if (IS_ICELAKE(dev_priv)) {
+ } else if (IS_GEN(dev_priv, 11)) {
csr->fw_path = ICL_CSR_PATH;
csr->required_version = ICL_CSR_VERSION_REQUIRED;
csr->max_fw_size = ICL_CSR_MAX_FW_SIZE;
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/intel_csr.h
new file mode 100644
index 000000000000..17a32c1e8a35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_CSR_H__
+#define __INTEL_CSR_H__
+
+struct drm_i915_private;
+
+void intel_csr_ucode_init(struct drm_i915_private *i915);
+void intel_csr_load_program(struct drm_i915_private *i915);
+void intel_csr_ucode_fini(struct drm_i915_private *i915);
+void intel_csr_ucode_suspend(struct drm_i915_private *i915);
+void intel_csr_ucode_resume(struct drm_i915_private *i915);
+
+#endif /* __INTEL_CSR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 98cea1f4b3bf..f181c26f62fd 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -26,9 +26,19 @@
*/
#include <drm/drm_scdc_helper.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
struct ddi_buf_trans {
u32 trans1; /* balance leg enable, de-emph level */
@@ -851,7 +861,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI,
0, &n_entries);
@@ -1240,24 +1250,15 @@ static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
return (refclk * n * 100) / (p * r);
}
-static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
{
- i915_reg_t cfgcr1_reg, cfgcr2_reg;
- u32 cfgcr1_val, cfgcr2_val;
u32 p0, p1, p2, dco_freq;
- cfgcr1_reg = DPLL_CFGCR1(pll_id);
- cfgcr2_reg = DPLL_CFGCR2(pll_id);
-
- cfgcr1_val = I915_READ(cfgcr1_reg);
- cfgcr2_val = I915_READ(cfgcr2_reg);
-
- p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
- p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
- if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
else
p1 = 1;
@@ -1292,10 +1293,11 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
break;
}
- dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
+ * 24 * 1000;
- dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
- 1000) / 0x8000;
+ dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
+ * 24 * 1000) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
return 0;
@@ -1304,24 +1306,15 @@ static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
}
int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id)
+ struct intel_dpll_hw_state *pll_state)
{
- u32 cfgcr0, cfgcr1;
u32 p0, p1, p2, dco_freq, ref_clock;
- if (INTEL_GEN(dev_priv) >= 11) {
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
- } else {
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(pll_id));
- }
-
- p0 = cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
- if (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
DPLL_CFGCR1_QDIV_RATIO_SHIFT;
else
p1 = 1;
@@ -1349,16 +1342,17 @@ int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
case DPLL_CFGCR1_KDIV_2:
p2 = 2;
break;
- case DPLL_CFGCR1_KDIV_4:
- p2 = 4;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
break;
}
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- dco_freq = (cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) * ref_clock;
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
+ * ref_clock;
- dco_freq += (((cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
@@ -1390,25 +1384,21 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- enum port port)
+ const struct intel_dpll_hw_state *pll_state)
{
- enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- u32 mg_pll_div0, mg_clktop_hsclkctl;
- u32 m1, m2_int, m2_frac, div1, div2, refclk;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
u64 tmp;
- refclk = dev_priv->cdclk.hw.ref;
-
- mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- mg_clktop_hsclkctl = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ ref_clock = dev_priv->cdclk.hw.ref;
- m1 = I915_READ(MG_PLL_DIV1(tc_port)) & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
- m2_frac = (mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
- (mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
- MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+ m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ?
+ (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >>
+ MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0;
- switch (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
div1 = 2;
break;
@@ -1422,12 +1412,14 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
div1 = 7;
break;
default:
- MISSING_CASE(mg_clktop_hsclkctl);
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
return 0;
}
- div2 = (mg_clktop_hsclkctl & MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
/* div2 value of 0 is same as 1 means no div */
if (div2 == 0)
div2 = 1;
@@ -1436,8 +1428,8 @@ static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
* Adjust the original formula to delay the division by 2^22 in order to
* minimize possible rounding errors.
*/
- tmp = (u64)m1 * m2_int * refclk +
- (((u64)m1 * m2_frac * refclk) >> 22);
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
tmp = div_u64(tmp, 5 * div1 * div2);
return tmp;
@@ -1471,25 +1463,24 @@ static void icl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
enum port port = encoder->port;
- int link_clock = 0;
- u32 pll_id;
+ int link_clock;
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
if (intel_port_is_combophy(dev_priv, port)) {
- if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
- else
- link_clock = icl_calc_dp_combo_pll_link(dev_priv,
- pll_id);
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
+ enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
+ pipe_config->shared_dpll);
+
if (pll_id == DPLL_ID_ICL_TBTPLL)
link_clock = icl_calc_tbt_pll_link(dev_priv, port);
else
- link_clock = icl_calc_mg_pll_link(dev_priv, port);
+ link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
}
pipe_config->port_clock = link_clock;
+
ddi_dotclock_get(pipe_config);
}
@@ -1497,18 +1488,13 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 cfgcr0;
- enum intel_dpll_id pll_id;
-
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- cfgcr0 = I915_READ(CNL_DPLL_CFGCR0(pll_id));
-
- if (cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_id);
+ if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
+ link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
} else {
- link_clock = cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
+ link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
switch (link_clock) {
case DPLL_CFGCR0_LINK_RATE_810:
@@ -1548,22 +1534,20 @@ static void cnl_ddi_clock_get(struct intel_encoder *encoder,
}
static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
+ struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 dpll_ctl1;
- enum intel_dpll_id pll_id;
-
- pll_id = intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll);
+ struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
+ int link_clock;
- dpll_ctl1 = I915_READ(DPLL_CTRL1);
-
- if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(pll_id)) {
- link_clock = skl_calc_wrpll_link(dev_priv, pll_id);
+ /*
+ * ctrl1 register is already shifted for each pll, just use 0 to get
+ * the internal shift for each field
+ */
+ if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
+ link_clock = skl_calc_wrpll_link(pll_state);
} else {
- link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(pll_id);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(pll_id);
+ link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
switch (link_clock) {
case DPLL_CTRL1_LINK_RATE_810:
@@ -1643,24 +1627,17 @@ static void hsw_ddi_clock_get(struct intel_encoder *encoder,
ddi_dotclock_get(pipe_config);
}
-static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
+static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
{
- struct intel_dpll_hw_state *state;
struct dpll clock;
- /* For DDI ports we always use a shared PLL. */
- if (WARN_ON(!crtc_state->shared_dpll))
- return 0;
-
- state = &crtc_state->dpll_hw_state;
-
clock.m1 = 2;
- clock.m2 = (state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
return chv_calc_dpll_params(100000, &clock);
}
@@ -1668,7 +1645,8 @@ static int bxt_calc_pll_link(struct intel_crtc_state *crtc_state)
static void bxt_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- pipe_config->port_clock = bxt_calc_pll_link(pipe_config);
+ pipe_config->port_clock =
+ bxt_calc_pll_link(&pipe_config->dpll_hw_state);
ddi_dotclock_get(pipe_config);
}
@@ -1678,7 +1656,7 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_clock_get(encoder, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cnl_ddi_clock_get(encoder, pipe_config);
@@ -1911,7 +1889,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
goto out;
}
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = (enum transcoder) pipe;
@@ -1973,7 +1951,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
- if (port == PORT_A) {
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
@@ -2224,7 +2202,7 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
enum port port = encoder->port;
int n_entries;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (intel_port_is_combophy(dev_priv, port))
icl_get_combo_buf_trans(dev_priv, port, encoder->type,
intel_dp->link_rate, &n_entries);
@@ -2316,13 +2294,13 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
@@ -2378,14 +2356,14 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2447,13 +2425,13 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW7 */
@@ -2504,14 +2482,14 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln));
+ val = I915_READ(ICL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val);
+ I915_WRITE(ICL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
@@ -2554,33 +2532,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX1_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(port, ln));
+ val = I915_READ(MG_TX2_LINK_PARAMS(ln, port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(port, ln), val);
+ I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX1_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(port, ln));
+ val = I915_READ(MG_TX2_SWINGCTRL(ln, port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX1_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2588,9 +2566,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX1_DRVCTRL(ln, port), val);
- val = I915_READ(MG_TX2_DRVCTRL(port, ln));
+ val = I915_READ(MG_TX2_DRVCTRL(ln, port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2598,7 +2576,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(port, ln), val);
+ I915_WRITE(MG_TX2_DRVCTRL(ln, port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2609,17 +2587,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(port, ln));
+ val = I915_READ(MG_CLKHUB(ln, port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(port, ln), val);
+ I915_WRITE(MG_CLKHUB(ln, port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(port, ln));
+ val = I915_READ(MG_TX1_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2627,9 +2605,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(port, ln), val);
+ I915_WRITE(MG_TX1_DCC(ln, port), val);
- val = I915_READ(MG_TX2_DCC(port, ln));
+ val = I915_READ(MG_TX2_DCC(ln, port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2637,18 +2615,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(port, ln), val);
+ I915_WRITE(MG_TX2_DCC(ln, port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX1_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val);
- val = I915_READ(MG_TX2_PISO_READLOAD(port, ln));
+ val = I915_READ(MG_TX2_PISO_READLOAD(ln, port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(port, ln), val);
+ I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val);
}
}
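The (port, ln) to (ln, port) churn in the MG PHY hunks above is purely an argument-order change: the per-lane register macros now take the lane index first, matching the CNL/ICL combo PHY macros converted earlier in this file. Assuming the macro definitions in i915_reg.h are updated in the same patch (not shown here), the generated offsets are unchanged and every call site is rewritten the same way:

	/* before */
	val = I915_READ(MG_TX1_DRVCTRL(port, ln));

	/* after */
	val = I915_READ(MG_TX1_DRVCTRL(ln, port));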
@@ -2697,7 +2675,7 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &dport->base;
int level = intel_ddi_dp_level(intel_dp);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, intel_dp->link_rate,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -2866,7 +2844,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
mutex_lock(&dev_priv->dpll_lock);
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port),
icl_pll_to_ddi_clk_sel(encoder, crtc_state));
@@ -2908,7 +2886,7 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_port_is_combophy(dev_priv, port))
I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
@@ -2927,21 +2905,20 @@ static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val |= MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING;
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2960,21 +2937,20 @@ static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
- i915_reg_t mg_regs[2] = { MG_DP_MODE(port, 0), MG_DP_MODE(port, 1) };
u32 val;
- int i;
+ int ln;
if (tc_port == PORT_TC_NONE)
return;
- for (i = 0; i < ARRAY_SIZE(mg_regs); i++) {
- val = I915_READ(mg_regs[i]);
+ for (ln = 0; ln < 2; ln++) {
+ val = I915_READ(MG_DP_MODE(ln, port));
val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING |
MG_DP_MODE_CFG_TRPWR_GATING |
MG_DP_MODE_CFG_CLNPWR_GATING |
MG_DP_MODE_CFG_DIGPWR_GATING |
MG_DP_MODE_CFG_GAONPWR_GATING);
- I915_WRITE(mg_regs[i], val);
+ I915_WRITE(MG_DP_MODE(ln, port), val);
}
val = I915_READ(MG_MISC_SUS0(tc_port));
@@ -2998,8 +2974,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
if (tc_port == PORT_TC_NONE || intel_dig_port->tc_type == TC_PORT_TBT)
return;
- ln0 = I915_READ(MG_DP_MODE(port, 0));
- ln1 = I915_READ(MG_DP_MODE(port, 1));
+ ln0 = I915_READ(MG_DP_MODE(0, port));
+ ln1 = I915_READ(MG_DP_MODE(1, port));
switch (intel_dig_port->tc_type) {
case TC_PORT_TYPEC:
@@ -3049,8 +3025,8 @@ static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port)
return;
}
- I915_WRITE(MG_DP_MODE(port, 0), ln0);
- I915_WRITE(MG_DP_MODE(port, 1), ln1);
+ I915_WRITE(MG_DP_MODE(0, port), ln0);
+ I915_WRITE(MG_DP_MODE(1, port), ln1);
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
@@ -3077,7 +3053,7 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
val |= DP_TP_CTL_FEC_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
- if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_FEC_ENABLE_LIVE,
DP_TP_STATUS_FEC_ENABLE_LIVE,
1))
@@ -3125,7 +3101,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, encoder->type);
else if (IS_CANNONLAKE(dev_priv))
@@ -3174,7 +3150,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
icl_program_mg_dp_mode(dig_port);
icl_disable_phy_clock_gating(dig_port);
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_ddi_vswing_sequence(encoder, crtc_state->port_clock,
level, INTEL_OUTPUT_HDMI);
else if (IS_CANNONLAKE(dev_priv))
@@ -3555,7 +3531,9 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
- intel_psr_enable(intel_dp, crtc_state);
+ intel_ddi_set_pipe_settings(crtc_state);
+
+ intel_psr_update(intel_dp, crtc_state);
intel_edp_drrs_enable(intel_dp, crtc_state);
intel_panel_update_backlight(encoder, crtc_state, conn_state);
@@ -3710,7 +3688,7 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (IS_ICELAKE(dev_priv) && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
@@ -3763,7 +3741,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
intel_dig_port = enc_to_dig_port(&encoder->base);
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (temp & TRANS_DDI_HDMI_SCRAMBLING)
@@ -3827,6 +3808,18 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static enum intel_output_type
@@ -3855,7 +3848,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
enum port port = encoder->port;
int ret;
- if (port == PORT_A)
+ if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A)
pipe_config->cpu_transcoder = TRANSCODER_EDP;
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
@@ -3959,15 +3952,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
goto out;
}
- crtc_state->mode_changed = true;
-
- ret = drm_atomic_add_affected_connectors(state, crtc);
- if (ret)
- goto out;
-
- ret = drm_atomic_add_affected_planes(state, crtc);
- if (ret)
- goto out;
+ crtc_state->connectors_changed = true;
ret = drm_atomic_commit(state);
out:
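A recurring theme in the intel_ddi.c hunks above: the *_calc_wrpll_link() and *_ddi_clock_get() helpers no longer issue I915_READ()s against the DPLL configuration registers; they derive the port clock from the intel_dpll_hw_state snapshot already captured in the crtc state. The caller-side pattern, as in the reworked icl/cnl/skl variants:

	struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;

	/* No MMIO here: everything comes from the cached PLL state. */
	pipe_config->port_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
	ddi_dotclock_get(pipe_config);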
diff --git a/drivers/gpu/drm/i915/intel_ddi.h b/drivers/gpu/drm/i915/intel_ddi.h
new file mode 100644
index 000000000000..9cf69175942e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_ddi.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DDI_H__
+#define __INTEL_DDI_H__
+
+#include <drm/i915_drm.h>
+
+#include "intel_display.h"
+
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_dp;
+struct intel_dpll_hw_state;
+struct intel_encoder;
+
+void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+void hsw_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
+bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
+void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
+bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
+void intel_ddi_get_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
+void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
+ struct intel_crtc_state *crtc_state);
+u32 bxt_signal_levels(struct intel_dp *intel_dp);
+u32 ddi_signal_levels(struct intel_dp *intel_dp);
+u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
+ u8 voltage_swing);
+int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
+ bool enable);
+void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+ struct intel_dpll_hw_state *state);
+
+#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 855a5074ad77..6af480b95bc6 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -57,6 +57,7 @@ static const char * const platform_names[] = {
PLATFORM_NAME(COFFEELAKE),
PLATFORM_NAME(CANNONLAKE),
PLATFORM_NAME(ICELAKE),
+ PLATFORM_NAME(ELKHARTLAKE),
};
#undef PLATFORM_NAME
@@ -155,9 +156,15 @@ static void gen11_sseu_info_init(struct drm_i915_private *dev_priv)
u8 eu_en;
int s;
- sseu->max_slices = 1;
- sseu->max_subslices = 8;
- sseu->max_eus_per_subslice = 8;
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 4;
+ sseu->max_eus_per_subslice = 8;
+ } else {
+ sseu->max_slices = 1;
+ sseu->max_subslices = 8;
+ sseu->max_eus_per_subslice = 8;
+ }
s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK;
ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE);
@@ -707,6 +714,99 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
return 0;
}
+#undef INTEL_VGA_DEVICE
+#define INTEL_VGA_DEVICE(id, info) (id)
+
+static const u16 subplatform_ult_ids[] = {
+ INTEL_HSW_ULT_GT1_IDS(0),
+ INTEL_HSW_ULT_GT2_IDS(0),
+ INTEL_HSW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_GT1_IDS(0),
+ INTEL_BDW_ULT_GT2_IDS(0),
+ INTEL_BDW_ULT_GT3_IDS(0),
+ INTEL_BDW_ULT_RSVD_IDS(0),
+ INTEL_SKL_ULT_GT1_IDS(0),
+ INTEL_SKL_ULT_GT2_IDS(0),
+ INTEL_SKL_ULT_GT3_IDS(0),
+ INTEL_KBL_ULT_GT1_IDS(0),
+ INTEL_KBL_ULT_GT2_IDS(0),
+ INTEL_KBL_ULT_GT3_IDS(0),
+ INTEL_CFL_U_GT2_IDS(0),
+ INTEL_CFL_U_GT3_IDS(0),
+ INTEL_WHL_U_GT1_IDS(0),
+ INTEL_WHL_U_GT2_IDS(0),
+ INTEL_WHL_U_GT3_IDS(0)
+};
+
+static const u16 subplatform_ulx_ids[] = {
+ INTEL_HSW_ULX_GT1_IDS(0),
+ INTEL_HSW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT1_IDS(0),
+ INTEL_BDW_ULX_GT2_IDS(0),
+ INTEL_BDW_ULX_GT3_IDS(0),
+ INTEL_BDW_ULX_RSVD_IDS(0),
+ INTEL_SKL_ULX_GT1_IDS(0),
+ INTEL_SKL_ULX_GT2_IDS(0),
+ INTEL_KBL_ULX_GT1_IDS(0),
+ INTEL_KBL_ULX_GT2_IDS(0)
+};
+
+static const u16 subplatform_aml_ids[] = {
+ INTEL_AML_KBL_GT2_IDS(0),
+ INTEL_AML_CFL_GT2_IDS(0)
+};
+
+static const u16 subplatform_portf_ids[] = {
+ INTEL_CNL_PORT_F_IDS(0),
+ INTEL_ICL_PORT_F_IDS(0)
+};
+
+static bool find_devid(u16 id, const u16 *p, unsigned int num)
+{
+ for (; num; num--, p++) {
+ if (*p == id)
+ return true;
+ }
+
+ return false;
+}
+
+void intel_device_info_subplatform_init(struct drm_i915_private *i915)
+{
+ const struct intel_device_info *info = INTEL_INFO(i915);
+ const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
+ const unsigned int pi = __platform_mask_index(rinfo, info->platform);
+ const unsigned int pb = __platform_mask_bit(rinfo, info->platform);
+ u16 devid = INTEL_DEVID(i915);
+ u32 mask = 0;
+
+ /* Make sure IS_<platform> checks are working. */
+ RUNTIME_INFO(i915)->platform_mask[pi] = BIT(pb);
+
+ /* Find and mark subplatform bits based on the PCI device id. */
+ if (find_devid(devid, subplatform_ult_ids,
+ ARRAY_SIZE(subplatform_ult_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULT);
+ } else if (find_devid(devid, subplatform_ulx_ids,
+ ARRAY_SIZE(subplatform_ulx_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_ULX);
+ if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ /* ULX machines are also considered ULT. */
+ mask |= BIT(INTEL_SUBPLATFORM_ULT);
+ }
+ } else if (find_devid(devid, subplatform_aml_ids,
+ ARRAY_SIZE(subplatform_aml_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_AML);
+ } else if (find_devid(devid, subplatform_portf_ids,
+ ARRAY_SIZE(subplatform_portf_ids))) {
+ mask = BIT(INTEL_SUBPLATFORM_PORTF);
+ }
+
+ GEM_BUG_ON(mask & ~(BIT(INTEL_SUBPLATFORM_BITS) - 1));
+
+ RUNTIME_INFO(i915)->platform_mask[pi] |= mask;
+}
+
/**
* intel_device_info_runtime_init - initialize runtime info
* @dev_priv: the i915 device
@@ -738,9 +838,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
runtime->num_scalers[PIPE_C] = 1;
}
- BUILD_BUG_ON(I915_NUM_ENGINES > BITS_PER_TYPE(intel_ring_mask_t));
+ BUILD_BUG_ON(BITS_PER_TYPE(intel_engine_mask_t) < I915_NUM_ENGINES);
- if (IS_GEN(dev_priv, 11))
+ if (INTEL_GEN(dev_priv) >= 11)
for_each_pipe(dev_priv, pipe)
runtime->num_sprites[pipe] = 6;
else if (IS_GEN(dev_priv, 10) || IS_GEMINILAKE(dev_priv))
@@ -844,7 +944,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
DRM_INFO("Disabling ppGTT for VT-d support\n");
- info->ppgtt = INTEL_PPGTT_NONE;
+ info->ppgtt_type = INTEL_PPGTT_NONE;
}
/* Initialize command stream timestamp frequency */
@@ -871,23 +971,24 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse;
+ u16 vdbox_mask;
+ u16 vebox_mask;
if (INTEL_GEN(dev_priv) < 11)
return;
media_fuse = ~I915_READ(GEN11_GT_VEBOX_VDBOX_DISABLE);
- RUNTIME_INFO(dev_priv)->vdbox_enable = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
- RUNTIME_INFO(dev_priv)->vebox_enable = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
- GEN11_GT_VEBOX_DISABLE_SHIFT;
+ vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
+ vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
+ GEN11_GT_VEBOX_DISABLE_SHIFT;
- DRM_DEBUG_DRIVER("vdbox enable: %04x\n", RUNTIME_INFO(dev_priv)->vdbox_enable);
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(dev_priv, _VCS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vdbox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VCS(i));
+ if (!(BIT(i) & vdbox_mask)) {
+ info->engine_mask &= ~BIT(_VCS(i));
DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
continue;
}
@@ -899,15 +1000,20 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
+ DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
+ GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
- DRM_DEBUG_DRIVER("vebox enable: %04x\n", RUNTIME_INFO(dev_priv)->vebox_enable);
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(dev_priv, _VECS(i)))
continue;
- if (!(BIT(i) & RUNTIME_INFO(dev_priv)->vebox_enable)) {
- info->ring_mask &= ~ENGINE_MASK(_VECS(i));
+ if (!(BIT(i) & vebox_mask)) {
+ info->engine_mask &= ~BIT(_VECS(i));
DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
}
}
+ DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
+ GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
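intel_device_info_subplatform_init() above classifies the device once at probe time: the PCI device ID is looked up in the static tables and the matching subplatform bit is or-ed into platform_mask alongside the platform bit itself. A subplatform test elsewhere in the series then reduces to two bit checks; a rough sketch, assuming __platform_mask_bit() allocates platform bits above the INTEL_SUBPLATFORM_BITS field (the real accessor lives in i915_drv.h and is not part of this hunk):

	static bool example_is_subplatform(const struct drm_i915_private *i915,
					   enum intel_platform p, unsigned int s)
	{
		const struct intel_runtime_info *rinfo = RUNTIME_INFO(i915);
		const unsigned int pi = __platform_mask_index(rinfo, p);
		const unsigned int pb = __platform_mask_bit(rinfo, p);

		return (rinfo->platform_mask[pi] & BIT(pb)) &&
		       (rinfo->platform_mask[pi] & BIT(s));
	}

Usage would look like example_is_subplatform(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT).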
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index e8b8661df746..0e579f158016 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -27,6 +27,7 @@
#include <uapi/drm/i915_drm.h>
+#include "intel_engine_types.h"
#include "intel_display.h"
struct drm_printer;
@@ -73,14 +74,29 @@ enum intel_platform {
INTEL_CANNONLAKE,
/* gen11 */
INTEL_ICELAKE,
+ INTEL_ELKHARTLAKE,
INTEL_MAX_PLATFORMS
};
-enum intel_ppgtt {
+/*
+ * Subplatform bits share the same namespace per parent platform. In other words
+ * it is fine for the same bit to be used on multiple parent platforms.
+ */
+
+#define INTEL_SUBPLATFORM_BITS (3)
+
+/* HSW/BDW/SKL/KBL/CFL */
+#define INTEL_SUBPLATFORM_ULT (0)
+#define INTEL_SUBPLATFORM_ULX (1)
+#define INTEL_SUBPLATFORM_AML (2)
+
+/* CNL/ICL */
+#define INTEL_SUBPLATFORM_PORTF (0)
+
+enum intel_ppgtt_type {
INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE,
INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING,
INTEL_PPGTT_FULL = I915_GEM_PPGTT_FULL,
- INTEL_PPGTT_FULL_4LVL,
};
#define DEV_INFO_FOR_EACH_FLAG(func) \
@@ -150,19 +166,18 @@ struct sseu_dev_info {
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES];
};
-typedef u8 intel_ring_mask_t;
-
struct intel_device_info {
u16 gen_mask;
u8 gen;
u8 gt; /* GT number, 0 if undefined */
- intel_ring_mask_t ring_mask; /* Rings supported by the HW */
+ intel_engine_mask_t engine_mask; /* Engines supported by the HW */
enum intel_platform platform;
- u32 platform_mask;
- enum intel_ppgtt ppgtt;
+ enum intel_ppgtt_type ppgtt_type;
+ unsigned int ppgtt_size; /* log2, e.g. 31/32/48 bits */
+
unsigned int page_sizes; /* page sizes supported by the HW */
u32 display_mmio_offset;
@@ -195,22 +210,28 @@ struct intel_device_info {
};
struct intel_runtime_info {
+ /*
+ * Platform mask is used for optimizing or-ed IS_PLATFORM calls into
+ * single runtime conditionals, and also to provide groundwork
+ * for future per platform, or per SKU build optimizations.
+ *
+ * Array can be extended when necessary if the corresponding
+ * BUILD_BUG_ON is hit.
+ */
+ u32 platform_mask[2];
+
u16 device_id;
u8 num_sprites[I915_MAX_PIPES];
u8 num_scalers[I915_MAX_PIPES];
- u8 num_rings;
+ u8 num_engines;
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
u32 cs_timestamp_frequency_khz;
- /* Enabled (not fused off) media engine bitmasks. */
- u8 vdbox_enable;
- u8 vebox_enable;
-
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
};
@@ -269,6 +290,7 @@ static inline void sseu_set_eus(struct sseu_dev_info *sseu,
const char *intel_platform_name(enum intel_platform platform);
+void intel_device_info_subplatform_init(struct drm_i915_private *dev_priv);
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
void intel_device_info_dump_flags(const struct intel_device_info *info,
struct drm_printer *p);
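The platform_mask comment above is about letting the compiler fold chains of IS_PLATFORM() checks: with one bit per platform in a fixed-size word, an or-ed chain can collapse into a single mask test. Conceptually, with the bit positions written as placeholders for whatever __platform_mask_bit() computes:

	/* What IS_SKYLAKE() || IS_KABYLAKE() || IS_COFFEELAKE() can fold to: */
	const u32 fold = BIT(skl_bit) | BIT(kbl_bit) | BIT(cfl_bit);
	bool match = RUNTIME_INFO(i915)->platform_mask[0] & fold;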
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 421aac80a838..3bd40a4a6739 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -46,19 +46,29 @@
#include "i915_drv.h"
#include "i915_gem_clflush.h"
+#include "i915_reset.h"
#include "i915_trace.h"
+#include "intel_atomic_plane.h"
+#include "intel_color.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_dvo.h"
+#include "intel_fbc.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-
-#include "intel_drv.h"
-#include "intel_dsi.h"
-#include "intel_frontbuffer.h"
-
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_reset.h"
-#include "i915_trace.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lvds.h"
+#include "intel_pipe_crc.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sdvo.h"
+#include "intel_sprite.h"
+#include "intel_tv.h"
/* Primary plane formats for gen <= 3 */
static const u32 i8xx_primary_formats[] = {
@@ -115,8 +125,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
const struct intel_crtc_state *pipe_config);
-static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
-static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
+static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
+static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
static void intel_crtc_init_scalers(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
@@ -467,10 +477,11 @@ static const struct intel_limit intel_limits_bxt = {
};
static void
-skl_wa_clkgate(struct drm_i915_private *dev_priv, int pipe, bool enable)
+skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
{
if (enable)
I915_WRITE(CLKGATE_DIS_PSL(pipe),
+ I915_READ(CLKGATE_DIS_PSL(pipe)) |
DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
I915_WRITE(CLKGATE_DIS_PSL(pipe),
@@ -595,7 +606,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
const struct intel_crtc_state *crtc_state,
int target)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
/*
@@ -603,7 +614,7 @@ i9xx_select_p2_div(const struct intel_limit *limit,
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
return limit->p2.p2_fast;
else
return limit->p2.p2_slow;
@@ -951,14 +962,15 @@ chv_find_best_dpll(const struct intel_limit *limit,
return found;
}
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_bxt;
return chv_find_best_dpll(limit, crtc_state,
- target_clock, refclk, NULL, best_clock);
+ crtc_state->port_clock, refclk,
+ NULL, best_clock);
}
bool intel_crtc_active(struct intel_crtc *crtc)
@@ -1039,7 +1051,7 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg = PIPECONF(cpu_transcoder);
/* Wait for the Pipe State to go off */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, I965_PIPECONF_ACTIVE, 0,
100))
WARN(1, "pipe_off wait timed out\n");
@@ -1345,7 +1357,7 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
POSTING_READ(DPLL(pipe));
udelay(150);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe),
DPLL_LOCK_VLV,
DPLL_LOCK_VLV,
@@ -1398,7 +1410,7 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1))
DRM_ERROR("PLL %d failed to lock\n", pipe);
@@ -1441,17 +1453,12 @@ static void chv_enable_pll(struct intel_crtc *crtc,
}
}
-static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
+static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
{
- struct intel_crtc *crtc;
- int count = 0;
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- count += crtc->base.state->active &&
- intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
- }
+ if (IS_I830(dev_priv))
+ return false;
- return count;
+ return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
static void i9xx_enable_pll(struct intel_crtc *crtc,
@@ -1465,29 +1472,15 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
assert_pipe_disabled(dev_priv, crtc->pipe);
/* PLL is protected by panel, make sure we can write it */
- if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
+ if (i9xx_has_pps(dev_priv))
assert_panel_unlocked(dev_priv, crtc->pipe);
- /* Enable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
- /*
- * It appears to be important that we don't enable this
- * for the current pipe before otherwise configuring the
- * PLL. No idea how this should be handled if multiple
- * DVO outputs are enabled simultaneosly.
- */
- dpll |= DPLL_DVO_2X_MODE;
- I915_WRITE(DPLL(!crtc->pipe),
- I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
- }
-
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, 0);
-
+ I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
I915_WRITE(reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -1520,16 +1513,6 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- /* Disable DVO 2x clock on both PLLs if necessary */
- if (IS_I830(dev_priv) &&
- intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO) &&
- !intel_num_dvo_pipes(dev_priv)) {
- I915_WRITE(DPLL(PIPE_B),
- I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
- I915_WRITE(DPLL(PIPE_A),
- I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
- }
-
/* Don't disable pipe or pipe PLLs if needed */
if (IS_I830(dev_priv))
return;
@@ -1608,7 +1591,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
dpll_reg, port_mask, expected_mask,
1000))
WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
@@ -1658,17 +1641,18 @@ static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_s
}
val &= ~TRANS_INTERLACE_MASK;
- if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
+ if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
if (HAS_PCH_IBX(dev_priv) &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
val |= TRANS_LEGACY_INTERLACED_ILK;
else
val |= TRANS_INTERLACED;
- else
+ } else {
val |= TRANS_PROGRESSIVE;
+ }
I915_WRITE(reg, val | TRANS_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
100))
DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
@@ -1698,7 +1682,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
val |= TRANS_PROGRESSIVE;
I915_WRITE(LPT_TRANSCONF, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF,
TRANS_STATE_ENABLE,
TRANS_STATE_ENABLE,
@@ -1724,7 +1708,7 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
val &= ~TRANS_ENABLE;
I915_WRITE(reg, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
reg, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
@@ -1746,7 +1730,7 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
val &= ~TRANS_ENABLE;
I915_WRITE(LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
50))
DRM_ERROR("Failed to disable PCH transcoder\n");
@@ -1830,6 +1814,8 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
/* FIXME: assert CPU port conditions for SNB+ */
}
+ trace_intel_pipe_enable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE) {
@@ -1869,6 +1855,8 @@ static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
+ trace_intel_pipe_disable(dev_priv, pipe);
+
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
@@ -2677,6 +2665,24 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_RGB565;
case PLANE_CTL_FORMAT_NV12:
return DRM_FORMAT_NV12;
+ case PLANE_CTL_FORMAT_P010:
+ return DRM_FORMAT_P010;
+ case PLANE_CTL_FORMAT_P012:
+ return DRM_FORMAT_P012;
+ case PLANE_CTL_FORMAT_P016:
+ return DRM_FORMAT_P016;
+ case PLANE_CTL_FORMAT_Y210:
+ return DRM_FORMAT_Y210;
+ case PLANE_CTL_FORMAT_Y212:
+ return DRM_FORMAT_Y212;
+ case PLANE_CTL_FORMAT_Y216:
+ return DRM_FORMAT_Y216;
+ case PLANE_CTL_FORMAT_Y410:
+ return DRM_FORMAT_XVYU2101010;
+ case PLANE_CTL_FORMAT_Y412:
+ return DRM_FORMAT_XVYU12_16161616;
+ case PLANE_CTL_FORMAT_Y416:
+ return DRM_FORMAT_XVYU16161616;
default:
case PLANE_CTL_FORMAT_XRGB_8888:
if (rgb_order) {
@@ -2695,6 +2701,18 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
return DRM_FORMAT_XBGR2101010;
else
return DRM_FORMAT_XRGB2101010;
+ case PLANE_CTL_FORMAT_XRGB_16161616F:
+ if (rgb_order) {
+ if (alpha)
+ return DRM_FORMAT_ABGR16161616F;
+ else
+ return DRM_FORMAT_XBGR16161616F;
+ } else {
+ if (alpha)
+ return DRM_FORMAT_ARGB16161616F;
+ else
+ return DRM_FORMAT_XRGB16161616F;
+ }
}
}
@@ -2825,8 +2843,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
if (plane->id == PLANE_PRIMARY)
intel_pre_disable_primary_noatomic(&crtc->base);
- trace_intel_disable_plane(&plane->base, crtc);
- plane->disable_plane(plane, crtc_state);
+ intel_disable_plane(plane, crtc_state);
}
static void
@@ -3176,7 +3193,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
* Handle the AUX surface first since
* the main surface setup depends on it.
*/
- if (fb->format->format == DRM_FORMAT_NV12) {
+ if (is_planar_yuv_format(fb->format->format)) {
ret = skl_check_nv12_aux_surface(plane_state);
if (ret)
return ret;
@@ -3230,9 +3247,10 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dspcntr = 0;
- dspcntr |= DISPPLANE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ dspcntr |= DISPPLANE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5)
@@ -3459,7 +3477,7 @@ static void i9xx_disable_plane(struct intel_plane *plane,
*
* On pre-g4x there is no way to gamma correct the
* pipe bottom color but we'll keep on doing this
- * anyway.
+ * anyway so that the crtc state readout works correctly.
*/
dspcntr = i9xx_plane_ctl_crtc(crtc_state);
@@ -3590,6 +3608,12 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_XRGB_2101010;
case DRM_FORMAT_XBGR2101010:
return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ return PLANE_CTL_FORMAT_XRGB_16161616F;
case DRM_FORMAT_YUYV:
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
case DRM_FORMAT_YVYU:
@@ -3600,6 +3624,24 @@ static u32 skl_plane_ctl_format(u32 pixel_format)
return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
case DRM_FORMAT_NV12:
return PLANE_CTL_FORMAT_NV12;
+ case DRM_FORMAT_P010:
+ return PLANE_CTL_FORMAT_P010;
+ case DRM_FORMAT_P012:
+ return PLANE_CTL_FORMAT_P012;
+ case DRM_FORMAT_P016:
+ return PLANE_CTL_FORMAT_P016;
+ case DRM_FORMAT_Y210:
+ return PLANE_CTL_FORMAT_Y210;
+ case DRM_FORMAT_Y212:
+ return PLANE_CTL_FORMAT_Y212;
+ case DRM_FORMAT_Y216:
+ return PLANE_CTL_FORMAT_Y216;
+ case DRM_FORMAT_XVYU2101010:
+ return PLANE_CTL_FORMAT_Y410;
+ case DRM_FORMAT_XVYU12_16161616:
+ return PLANE_CTL_FORMAT_Y412;
+ case DRM_FORMAT_XVYU16161616:
+ return PLANE_CTL_FORMAT_Y416;
default:
MISSING_CASE(pixel_format);
}
@@ -3710,8 +3752,11 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return plane_ctl;
- plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
- plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
return plane_ctl;
}
@@ -3763,8 +3808,11 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return plane_color_ctl;
- plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
- plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
+ if (crtc_state->gamma_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
return plane_color_ctl;
}
@@ -3772,6 +3820,8 @@ u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *dev_priv =
+ to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
u32 plane_color_ctl = 0;
@@ -3779,7 +3829,7 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
- if (fb->format->is_yuv && !icl_is_hdr_plane(plane)) {
+ if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
else
@@ -3921,9 +3971,6 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_runtime_pm_disable_interrupts(dev_priv);
- intel_runtime_pm_enable_interrupts(dev_priv);
-
intel_pps_unlock_regs_wa(dev_priv);
intel_modeset_init_hw(dev);
intel_init_clock_gating(dev_priv);
@@ -3963,13 +4010,13 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* and rounding for per-pixel values 00 and 0xff
*/
tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
-
/*
- * W/A for underruns with linear/X-tiled with
- * WM1+ disabled.
+ * Display WA # 1605353570: icl
+ * Set the pixel rounding bit to 1 for allowing
+ * passthrough of Frame buffer pixels unmodified
+ * across pipe
*/
- tmp |= PM_FILL_MAINTAIN_DBUF_FULLNESS;
-
+ tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
I915_WRITE(PIPE_CHICKEN(pipe), tmp);
}
@@ -4008,16 +4055,6 @@ static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_sta
ironlake_pfit_disable(old_crtc_state);
}
- /*
- * We don't (yet) allow userspace to control the pipe background color,
- * so force it to black, but apply pipe gamma and CSC so that its
- * handling will match how we program our planes.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
-
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -5036,19 +5073,19 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
return 0;
}
- if (format && format->format == DRM_FORMAT_NV12 &&
+ if (format && is_planar_yuv_format(format->format) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("NV12: src dimensions not met\n");
+ DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
return -EINVAL;
}
/* range checks */
if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
- (IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) >= 11 &&
(src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
- (!IS_GEN(dev_priv, 11) &&
+ (INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
@@ -5105,14 +5142,15 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
{
struct intel_plane *intel_plane =
to_intel_plane(plane_state->base.plane);
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
struct drm_framebuffer *fb = plane_state->base.fb;
int ret;
bool force_detach = !fb || !plane_state->base.visible;
bool need_scaler = false;
/* Pre-gen11 and SDR planes always need a scaler for planar formats. */
- if (!icl_is_hdr_plane(intel_plane) &&
- fb && fb->format->format == DRM_FORMAT_NV12)
+ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
+ fb && is_planar_yuv_format(fb->format->format))
need_scaler = true;
ret = skl_update_scaler(crtc_state, force_detach,
@@ -5144,11 +5182,24 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU2101010:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
break;
default:
DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
@@ -5259,7 +5310,7 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, IPS_ENABLE,
50))
DRM_ERROR("Timed out waiting for IPS enable\n");
@@ -5284,7 +5335,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
IPS_CTL, IPS_ENABLE, 0,
100))
DRM_ERROR("Timed out waiting for IPS disable\n");
@@ -5490,7 +5541,7 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
/* Display WA 827 */
if (needs_nv12_wa(dev_priv, old_crtc_state) &&
!needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, false);
+ skl_wa_827(dev_priv, crtc->pipe, false);
}
}
@@ -5529,7 +5580,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
/* Display WA 827 */
if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
needs_nv12_wa(dev_priv, pipe_config)) {
- skl_wa_clkgate(dev_priv, crtc->pipe, true);
+ skl_wa_827(dev_priv, crtc->pipe, true);
}
/*
@@ -5603,7 +5654,7 @@ static void intel_crtc_disable_planes(struct intel_atomic_state *state,
!(update_mask & BIT(plane->id)))
continue;
- plane->disable_plane(plane, new_crtc_state);
+ intel_disable_plane(plane, new_crtc_state);
if (old_plane_state->base.visible)
fb_bits |= plane->frontbuffer_bit;
@@ -5754,6 +5805,14 @@ static void intel_encoders_update_pipe(struct drm_crtc *crtc,
}
}
+static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+
+ plane->disable_plane(plane, crtc_state);
+}
+
static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
@@ -5819,6 +5878,8 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5947,6 +6008,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma/csc for pipe bottom color */
+ if (INTEL_GEN(dev_priv) < 9)
+ intel_disable_primary_plane(pipe_config);
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(intel_crtc);
@@ -6127,7 +6191,10 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
if (port == PORT_NONE)
return false;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ return port <= PORT_C;
+
+ if (INTEL_GEN(dev_priv) >= 11)
return port <= PORT_B;
return false;
@@ -6135,7 +6202,7 @@ bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
{
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
return port >= PORT_C && port <= PORT_F;
return false;
@@ -6304,6 +6371,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
dev_priv->display.initial_watermarks(old_intel_state,
pipe_config);
@@ -6361,6 +6430,8 @@ static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(pipe_config);
intel_color_commit(pipe_config);
+ /* update DSPCNTR to configure gamma for pipe bottom color */
+ intel_disable_primary_plane(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state,
@@ -6743,7 +6814,13 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
if (!hsw_crtc_state_ips_capable(crtc_state))
return false;
- if (crtc_state->ips_force_disable)
+ /*
+ * When IPS gets enabled, the pipe CRC changes. Since IPS gets
+ * enabled and disabled dynamically based on package C states,
+ * user space can't make reliable use of the CRCs, so let's just
+ * completely disable it.
+ */
+ if (crtc_state->crc_enabled)
return false;
/* IPS should be fine as long as at least one plane is enabled. */
@@ -6818,8 +6895,7 @@ static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
int clock_limit = dev_priv->max_dotclk_freq;
@@ -6869,7 +6945,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
- intel_is_dual_link_lvds(dev)) {
+ intel_is_dual_link_lvds(dev_priv)) {
DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
@@ -7486,7 +7562,19 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
- if (!IS_I830(dev_priv) &&
+ /*
+ * Bspec:
+	 * "[Almador Errata]: For the correct operation of the muxed DVO pins
+ * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
+ * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
+ * Enable) must be set to “1” in both the DPLL A Control Register
+ * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
+ *
+	 * For simplicity we keep both bits always enabled in
+	 * both DPLLs. The spec says we should disable the DVO 2X clock
+ * when not needed, but this seems to work fine in practice.
+ */
+ if (IS_I830(dev_priv) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
dpll |= DPLL_DVO_2X_MODE;
@@ -7694,13 +7782,16 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
else
pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
- } else
+ } else {
pipeconf |= PIPECONF_PROGRESSIVE;
+ }
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
crtc_state->limited_color_range)
pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
+ pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
POSTING_READ(PIPECONF(crtc->pipe));
}
@@ -7744,8 +7835,7 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -7758,7 +7848,7 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
}
- if (intel_is_dual_link_lvds(dev))
+ if (intel_is_dual_link_lvds(dev_priv))
limit = &intel_limits_g4x_dual_channel_lvds;
else
limit = &intel_limits_g4x_single_channel_lvds;
@@ -7894,14 +7984,22 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
+static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
+{
+ if (IS_I830(dev_priv))
+ return false;
+
+ return INTEL_GEN(dev_priv) >= 4 ||
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
+}
+
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- if (INTEL_GEN(dev_priv) <= 3 &&
- (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
+ if (!i9xx_has_pfit(dev_priv))
return;
tmp = I915_READ(PFIT_CONTROL);
@@ -8108,6 +8206,24 @@ static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
pipe_config->output_format = output;
}
+static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+ u32 tmp;
+
+ tmp = I915_READ(DSPCNTR(i9xx_plane));
+
+ if (tmp & DISPPLANE_GAMMA_ENABLE)
+ crtc_state->gamma_enable = true;
+
+ if (!HAS_GMCH(dev_priv) &&
+ tmp & DISPPLANE_PIPE_CSC_ENABLE)
+ crtc_state->csc_enable = true;
+}
+
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -8153,6 +8269,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
(tmp & PIPECONF_COLOR_RANGE_SELECT))
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (INTEL_GEN(dev_priv) < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
@@ -8185,14 +8309,6 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
}
pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- /*
- * DPLL_DVO_2X_MODE must be enabled for both DPLLs
- * on 830. Filter it out here so that we don't
- * report errors due to that.
- */
- if (IS_I830(dev_priv))
- pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
-
pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
} else {
@@ -8692,6 +8808,8 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
+ val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
+
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
@@ -8772,13 +8890,11 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}
-static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
+static void ironlake_compute_dpll(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct dpll *reduced_clock)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll, fp, fp2;
int factor;
@@ -8787,10 +8903,12 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->vbt.lvds_ssc_freq == 100000) ||
- (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev)))
+ (HAS_PCH_IBX(dev_priv) &&
+ intel_is_dual_link_lvds(dev_priv)))
factor = 25;
- } else if (crtc_state->sdvo_tv_clock)
+ } else if (crtc_state->sdvo_tv_clock) {
factor = 20;
+ }
fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
@@ -8877,8 +8995,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 120000;
@@ -8896,7 +9013,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
refclk = dev_priv->vbt.lvds_ssc_freq;
}
- if (intel_is_dual_link_lvds(dev)) {
+ if (intel_is_dual_link_lvds(dev_priv)) {
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
@@ -8920,7 +9037,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
ironlake_compute_dpll(crtc, crtc_state, NULL);
- if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) {
+ if (!intel_get_shared_dpll(crtc_state, NULL)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9226,6 +9343,13 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
if (tmp & PIPECONF_COLOR_RANGE_SELECT)
pipe_config->limited_color_range = true;
+ pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
+ PIPECONF_GAMMA_MODE_SHIFT;
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ i9xx_get_pipe_color_config(pipe_config);
+
if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
@@ -9371,7 +9495,8 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
I915_WRITE(LCPLL_CTL, val);
POSTING_READ(LCPLL_CTL);
- if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
DRM_ERROR("LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
@@ -9409,7 +9534,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
* Make sure we're not on PC8 state before disabling PC8, otherwise
* we'll hang the machine. To prevent PC8 state, just enable force_wake.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -9426,7 +9551,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~LCPLL_PLL_DISABLE;
I915_WRITE(LCPLL_CTL, val);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5))
DRM_ERROR("LCPLL not locked yet\n");
@@ -9441,7 +9566,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
DRM_ERROR("Switching back to LCPLL failed\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
@@ -9510,11 +9635,11 @@ static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
to_intel_atomic_state(crtc_state->base.state);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) {
+ if (!intel_get_shared_dpll(crtc_state, encoder)) {
DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
pipe_name(crtc->pipe));
return -EINVAL;
@@ -9552,9 +9677,6 @@ static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
temp = I915_READ(DPCLKA_CFGCR0_ICL) &
DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
-
- if (WARN_ON(!intel_dpll_is_combophy(id)))
- return;
} else if (intel_port_is_tc(dev_priv, port)) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
} else {
@@ -9643,20 +9765,25 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
+ unsigned long panel_transcoder_mask = 0;
unsigned long enabled_panel_transcoders = 0;
enum transcoder panel_transcoder;
+ intel_wakeref_t wf;
u32 tmp;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
panel_transcoder_mask |=
BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ if (HAS_TRANSCODER_EDP(dev_priv))
+ panel_transcoder_mask |= BIT(TRANSCODER_EDP);
+
/*
* The pipe->transcoder mapping is fixed with the exception of the eDP
* and DSI transcoders handled below.
@@ -9713,10 +9840,13 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -9726,13 +9856,15 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- u64 *power_domain_mask)
+ u64 *power_domain_mask,
+ intel_wakeref_t *wakerefs)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum intel_display_power_domain power_domain;
- enum port port;
enum transcoder cpu_transcoder;
+ intel_wakeref_t wf;
+ enum port port;
u32 tmp;
for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
@@ -9742,10 +9874,13 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
continue;
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
/*
@@ -9786,7 +9921,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icelake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_CANNONLAKE(dev_priv))
cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
@@ -9824,6 +9959,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
@@ -9831,16 +9967,21 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_crtc_init_scalers(crtc, pipe_config);
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (!wf)
return false;
+
+ wakerefs[power_domain] = wf;
power_domain_mask = BIT_ULL(power_domain);
pipe_config->shared_dpll = NULL;
- active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
+ active = hsw_get_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs);
if (IS_GEN9_LP(dev_priv) &&
- bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
+ bxt_get_dsi_transcoder_state(crtc, pipe_config,
+ &power_domain_mask, wakerefs)) {
WARN_ON(active);
active = true;
}
@@ -9849,7 +9990,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
goto out;
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
- IS_ICELAKE(dev_priv)) {
+ INTEL_GEN(dev_priv) >= 11) {
haswell_get_ddi_port_state(crtc, pipe_config);
intel_get_pipe_timings(crtc, pipe_config);
}
@@ -9857,12 +9998,28 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
intel_get_crtc_ycbcr_config(crtc, pipe_config);
- pipe_config->gamma_mode =
- I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
+ pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+
+ pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+
+ if (INTEL_GEN(dev_priv) >= 9) {
+ u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+
+ if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
+ pipe_config->gamma_enable = true;
+
+ if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
+ pipe_config->csc_enable = true;
+ } else {
+ i9xx_get_pipe_color_config(pipe_config);
+ }
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+
+ wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
+ if (wf) {
+ wakerefs[power_domain] = wf;
power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
@@ -9894,7 +10051,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
out:
for_each_power_domain(power_domain, power_domain_mask)
- intel_display_power_put_unchecked(dev_priv, power_domain);
+ intel_display_power_put(dev_priv,
+ power_domain, wakerefs[power_domain]);
return active;
}
@@ -10030,7 +10188,12 @@ i845_cursor_max_stride(struct intel_plane *plane,
static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return CURSOR_GAMMA_ENABLE;
+ u32 cntl = 0;
+
+ if (crtc_state->gamma_enable)
+ cntl |= CURSOR_GAMMA_ENABLE;
+
+ return cntl;
}
static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
@@ -10184,9 +10347,10 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 11)
return cntl;
- cntl |= MCURSOR_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ cntl = MCURSOR_GAMMA_ENABLE;
- if (HAS_DDI(dev_priv))
+ if (crtc_state->csc_enable)
cntl |= MCURSOR_PIPE_CSC_ENABLE;
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -11134,7 +11298,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for NV12\n",
+ DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
hweight8(crtc_state->nv12_planes));
return -EINVAL;
@@ -11175,16 +11339,11 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
return ret;
}
- if (mode_changed || crtc_state->color_mgmt_changed) {
+ if (mode_changed || pipe_config->update_pipe ||
+ crtc_state->color_mgmt_changed) {
ret = intel_color_check(pipe_config);
if (ret)
return ret;
-
- /*
- * Changing color management on Intel hardware is
- * handled as part of planes update.
- */
- crtc_state->planes_changed = true;
}
ret = 0;
@@ -11355,6 +11514,16 @@ intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id,
m_n->link_m, m_n->link_n, m_n->tu);
}
+static void
+intel_dump_infoframe(struct drm_i915_private *dev_priv,
+ const union hdmi_infoframe *frame)
+{
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
+}
+
#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
static const char * const output_type_str[] = {
@@ -11458,6 +11627,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
pipe_config->has_audio, pipe_config->has_infoframe);
+ DRM_DEBUG_KMS("infoframes enabled: 0x%x\n",
+ pipe_config->infoframes.enable);
+
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
+ DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
+ if (pipe_config->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
+ intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
+
DRM_DEBUG_KMS("requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.mode);
DRM_DEBUG_KMS("adjusted mode:\n");
@@ -11606,7 +11791,7 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
saved_state->pch_pfit.force_thru = crtc_state->pch_pfit.force_thru;
- saved_state->ips_force_disable = crtc_state->ips_force_disable;
+ saved_state->crc_enabled = crtc_state->crc_enabled;
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
@@ -11825,6 +12010,37 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n,
return false;
}
+static bool
+intel_compare_infoframe(const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
+
+static void
+pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
+ bool adjust, const char *name,
+ const union hdmi_infoframe *a,
+ const union hdmi_infoframe *b)
+{
+ if (adjust) {
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
+ drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
+ drm_dbg(DRM_UT_KMS, "expected:");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
+ drm_dbg(DRM_UT_KMS, "found");
+ hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
+ } else {
+ drm_err("mismatch in %s infoframe", name);
+ drm_err("expected:");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
+ drm_err("found");
+ hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
+ }
+}
+
static void __printf(3, 4)
pipe_config_err(bool adjust, const char *name, const char *format, ...)
{
@@ -12008,7 +12224,17 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
} \
} while (0)
-#define PIPE_CONF_QUIRK(quirk) \
+#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
+ if (!intel_compare_infoframe(&current_config->infoframes.name, \
+ &pipe_config->infoframes.name)) { \
+ pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
+ &current_config->infoframes.name, \
+ &pipe_config->infoframes.name); \
+ ret = false; \
+ } \
+} while (0)
+
+#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
PIPE_CONF_CHECK_I(cpu_transcoder);
@@ -12089,6 +12315,14 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
+
+ PIPE_CONF_CHECK_X(gamma_mode);
+ if (IS_CHERRYVIEW(dev_priv))
+ PIPE_CONF_CHECK_X(cgm_mode);
+ else
+ PIPE_CONF_CHECK_X(csc_mode);
+ PIPE_CONF_CHECK_BOOL(gamma_enable);
+ PIPE_CONF_CHECK_BOOL(csc_enable);
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -12137,6 +12371,12 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
PIPE_CONF_CHECK_I(min_voltage_level);
+ PIPE_CONF_CHECK_X(infoframes.enable);
+ PIPE_CONF_CHECK_X(infoframes.gcp);
+ PIPE_CONF_CHECK_INFOFRAME(avi);
+ PIPE_CONF_CHECK_INFOFRAME(spd);
+ PIPE_CONF_CHECK_INFOFRAME(hdmi);
+
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
@@ -12171,12 +12411,15 @@ static void verify_wm_state(struct drm_crtc *crtc,
struct drm_crtc_state *new_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct skl_ddb_allocation hw_ddb, *sw_ddb;
- struct skl_pipe_wm hw_wm, *sw_wm;
- struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+ struct skl_hw_state {
+ struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
+ struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
+ struct skl_ddb_allocation ddb;
+ struct skl_pipe_wm wm;
+ } *hw;
+ struct skl_ddb_allocation *sw_ddb;
+ struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
- struct skl_ddb_entry hw_ddb_y[I915_MAX_PLANES];
- struct skl_ddb_entry hw_ddb_uv[I915_MAX_PLANES];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
const enum pipe pipe = intel_crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -12184,22 +12427,29 @@ static void verify_wm_state(struct drm_crtc *crtc,
if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
return;
- skl_pipe_wm_get_hw_state(intel_crtc, &hw_wm);
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ return;
+
+ skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
- skl_pipe_ddb_get_hw_state(intel_crtc, hw_ddb_y, hw_ddb_uv);
+ skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw_ddb);
+ skl_ddb_get_hw_state(dev_priv, &hw->ddb);
sw_ddb = &dev_priv->wm.skl_hw.ddb;
- if (INTEL_GEN(dev_priv) >= 11)
- if (hw_ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw_ddb.enabled_slices);
+ if (INTEL_GEN(dev_priv) >= 11 &&
+ hw->ddb.enabled_slices != sw_ddb->enabled_slices)
+ DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
+ sw_ddb->enabled_slices,
+ hw->ddb.enabled_slices);
+
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
- hw_plane_wm = &hw_wm.planes[plane];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[plane];
sw_plane_wm = &sw_wm->planes[plane];
/* Watermarks */
@@ -12231,7 +12481,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[plane];
+ hw_ddb_entry = &hw->ddb_y[plane];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12249,7 +12499,9 @@ static void verify_wm_state(struct drm_crtc *crtc,
* once the plane becomes visible, we can skip this check
*/
if (1) {
- hw_plane_wm = &hw_wm.planes[PLANE_CURSOR];
+ struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
+
+ hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
/* Watermarks */
@@ -12281,7 +12533,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
}
/* DDB */
- hw_ddb_entry = &hw_ddb_y[PLANE_CURSOR];
+ hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
@@ -12291,6 +12543,8 @@ static void verify_wm_state(struct drm_crtc *crtc,
hw_ddb_entry->start, hw_ddb_entry->end);
}
}
+
+ kfree(hw);
}
static void
@@ -12447,7 +12701,8 @@ intel_verify_planes(struct intel_atomic_state *state)
for_each_new_intel_plane_in_state(state, plane,
plane_state, i)
- assert_plane(plane, plane_state->base.visible);
+ assert_plane(plane, plane_state->slave ||
+ plane_state->base.visible);
}
static void
@@ -12769,10 +13024,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return -EINVAL;
}
+ /* keep the current setting */
+ if (!intel_state->cdclk.force_min_cdclk_changed)
+ intel_state->cdclk.force_min_cdclk =
+ dev_priv->cdclk.force_min_cdclk;
+
intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
intel_state->cdclk.logical = dev_priv->cdclk.logical;
intel_state->cdclk.actual = dev_priv->cdclk.actual;
+ intel_state->cdclk.pipe = INVALID_PIPE;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
if (new_crtc_state->active)
@@ -12792,6 +13053,8 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
+ enum pipe pipe;
+
ret = dev_priv->display.modeset_calc_cdclk(state);
if (ret < 0)
return ret;
@@ -12808,12 +13071,36 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
return ret;
}
+ if (is_power_of_2(intel_state->active_crtcs)) {
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+
+ pipe = ilog2(intel_state->active_crtcs);
+ crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
+ crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ if (crtc_state && needs_modeset(crtc_state))
+ pipe = INVALID_PIPE;
+ } else {
+ pipe = INVALID_PIPE;
+ }
+
/* All pipes must be switched off while we change the cdclk. */
- if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &intel_state->cdclk.actual)) {
+ if (pipe != INVALID_PIPE &&
+ intel_cdclk_needs_cd2x_update(dev_priv,
+ &dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
+ ret = intel_lock_all_pipes(state);
+ if (ret < 0)
+ return ret;
+
+ intel_state->cdclk.pipe = pipe;
+ } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
+
+ intel_state->cdclk.pipe = INVALID_PIPE;
}
DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
@@ -12822,8 +13109,6 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
intel_state->cdclk.logical.voltage_level,
intel_state->cdclk.actual.voltage_level);
- } else {
- to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
intel_modeset_clear_plls(state);
@@ -12864,7 +13149,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *crtc_state;
int ret, i;
- bool any_ms = false;
+ bool any_ms = intel_state->cdclk.force_min_cdclk_changed;
/* Catch I915_MODE_FLAG_INHERITED */
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
@@ -12989,14 +13274,14 @@ static void intel_update_crtc(struct drm_crtc *crtc,
else if (new_plane_state)
intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
- intel_begin_crtc_commit(crtc, old_crtc_state);
+ intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
if (INTEL_GEN(dev_priv) >= 9)
skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
else
i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
- intel_finish_crtc_commit(crtc, old_crtc_state);
+ intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
}
static void intel_update_crtcs(struct drm_atomic_state *state)
@@ -13224,7 +13509,10 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
+ intel_set_cdclk_pre_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -13253,6 +13541,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.update_crtcs(state);
+ if (intel_state->modeset)
+ intel_set_cdclk_post_plane_update(dev_priv,
+ &intel_state->cdclk.actual,
+ &dev_priv->cdclk.actual,
+ intel_state->cdclk.pipe);
+
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
* already, but still need the state for the delayed optimization. To
* fix this:
@@ -13313,7 +13607,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* so enable debugging for the next modeset - and hope we catch
* the culprit.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
}
@@ -13454,8 +13748,10 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_state->min_voltage_level,
sizeof(intel_state->min_voltage_level));
dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->cdclk.logical = intel_state->cdclk.logical;
- dev_priv->cdclk.actual = intel_state->cdclk.actual;
+ dev_priv->cdclk.force_min_cdclk =
+ intel_state->cdclk.force_min_cdclk;
+
+ intel_cdclk_swap_state(intel_state);
}
drm_atomic_state_get(state);
@@ -13506,7 +13802,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait,
* vblank without our intervention, so leave RPS alone.
*/
if (!i915_request_started(rq))
- gen6_rps_boost(rq, NULL);
+ gen6_rps_boost(rq);
i915_request_put(rq);
drm_crtc_vblank_put(wait->crtc);
@@ -13767,7 +14063,7 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
* or
* cdclk/crtc_clock
*/
- mult = pixel_format == DRM_FORMAT_NV12 ? 2 : 3;
+ mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
tmpclk1 = (1 << 16) * mult - 1;
tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
max_scale = min(tmpclk1, tmpclk2);
@@ -13775,39 +14071,35 @@ skl_max_scale(const struct intel_crtc_state *crtc_state,
return max_scale;
}
-static void intel_begin_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_begin_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *old_intel_cstate =
- to_intel_crtc_state(old_crtc_state);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
- struct intel_crtc_state *intel_cstate =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
- bool modeset = needs_modeset(&intel_cstate->base);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ bool modeset = needs_modeset(&new_crtc_state->base);
/* Perform vblank evasion around commit operation */
- intel_pipe_update_start(intel_cstate);
+ intel_pipe_update_start(new_crtc_state);
if (modeset)
goto out;
- if (intel_cstate->base.color_mgmt_changed ||
- intel_cstate->update_pipe)
- intel_color_commit(intel_cstate);
+ if (new_crtc_state->base.color_mgmt_changed ||
+ new_crtc_state->update_pipe)
+ intel_color_commit(new_crtc_state);
- if (intel_cstate->update_pipe)
- intel_update_pipe_config(old_intel_cstate, intel_cstate);
+ if (new_crtc_state->update_pipe)
+ intel_update_pipe_config(old_crtc_state, new_crtc_state);
else if (INTEL_GEN(dev_priv) >= 9)
- skl_detach_scalers(intel_cstate);
+ skl_detach_scalers(new_crtc_state);
out:
if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(old_intel_state,
- intel_cstate);
+ dev_priv->display.atomic_update_watermarks(state,
+ new_crtc_state);
}
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
@@ -13826,21 +14118,20 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
}
}
-static void intel_finish_crtc_commit(struct drm_crtc *crtc,
- struct drm_crtc_state *old_crtc_state)
+static void intel_finish_crtc_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_crtc_state->state);
+ struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
struct intel_crtc_state *new_crtc_state =
- intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc);
+ intel_atomic_get_new_crtc_state(state, crtc);
intel_pipe_update_end(new_crtc_state);
if (new_crtc_state->update_pipe &&
!needs_modeset(&new_crtc_state->base) &&
- old_crtc_state->mode.private_flags & I915_MODE_FLAG_INHERITED)
- intel_crtc_arm_fifo_underrun(intel_crtc, new_crtc_state);
+ old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
+ intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
/**
@@ -14039,14 +14330,11 @@ intel_legacy_cursor_update(struct drm_plane *plane,
*/
crtc_state->active_planes = new_crtc_state->active_planes;
- if (plane->state->visible) {
- trace_intel_update_plane(plane, to_intel_crtc(crtc));
- intel_plane->update_plane(intel_plane, crtc_state,
- to_intel_plane_state(plane->state));
- } else {
- trace_intel_disable_plane(plane, to_intel_crtc(crtc));
- intel_plane->disable_plane(intel_plane, crtc_state);
- }
+ if (plane->state->visible)
+ intel_update_plane(intel_plane, crtc_state,
+ to_intel_plane_state(plane->state));
+ else
+ intel_disable_plane(intel_plane, crtc_state);
intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
@@ -14496,7 +14784,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ intel_ddi_init(dev_priv, PORT_A);
+ intel_ddi_init(dev_priv, PORT_B);
+ intel_ddi_init(dev_priv, PORT_C);
+ icl_dsi_init(dev_priv);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_B);
intel_ddi_init(dev_priv, PORT_C);
@@ -15397,6 +15690,8 @@ int intel_modeset_init(struct drm_device *dev)
intel_update_czclk(dev_priv);
intel_modeset_init_hw(dev);
+ intel_hdcp_component_init(dev_priv);
+
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@@ -15472,7 +15767,7 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
- dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) |
+ dpll = DPLL_DVO_2X_MODE |
DPLL_VGA_MODE_DIS |
((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
PLL_P2_DIVIDE_BY_4 |
@@ -16254,6 +16549,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
/* flush any delayed tasks or pending work */
flush_scheduled_work();
+ intel_hdcp_component_fini(dev_priv);
+
drm_mode_config_cleanup(dev);
intel_overlay_cleanup(dev_priv);
@@ -16300,8 +16597,6 @@ struct intel_display_error_state {
u32 power_well_driver;
- int num_transcoders;
-
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -16326,6 +16621,7 @@ struct intel_display_error_state {
} plane[I915_MAX_PIPES];
struct intel_transcoder_error_state {
+ bool available;
bool power_domain_on;
enum transcoder cpu_transcoder;
@@ -16352,6 +16648,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
};
int i;
+ BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
+
if (!HAS_DISPLAY(dev_priv))
return NULL;
@@ -16392,14 +16690,13 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->pipe[i].stat = I915_READ(PIPESTAT(i));
}
- /* Note: this does not include DSI transcoders. */
- error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
- if (HAS_DDI(dev_priv))
- error->num_transcoders++; /* Account for eDP. */
-
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
+ if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ continue;
+
+ error->transcoder[i].available = true;
error->transcoder[i].power_domain_on =
__intel_display_power_is_enabled(dev_priv,
POWER_DOMAIN_TRANSCODER(cpu_transcoder));
@@ -16463,7 +16760,10 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
- for (i = 0; i < error->num_transcoders; i++) {
+ for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
+ if (!error->transcoder[i].available)
+ continue;
+
err_printf(m, "CPU transcoder: %s\n",
transcoder_name(error->transcoder[i].cpu_transcoder));
err_printf(m, " Power: %s\n",
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 48da4a969a0a..560274d1c50b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -25,22 +25,34 @@
*
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/export.h>
-#include <linux/types.h>
+#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/types.h>
#include <asm/byteorder.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
+#include "intel_psr.h"
#define DP_DPRX_ESI_LEN 14
@@ -949,8 +961,11 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
regs->pp_stat = PP_STATUS(pps_idx);
regs->pp_on = PP_ON_DELAYS(pps_idx);
regs->pp_off = PP_OFF_DELAYS(pps_idx);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv))
+
+ /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
+ if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
+ regs->pp_div = INVALID_MMIO_REG;
+ else
regs->pp_div = PP_DIVISOR(pps_idx);
}
@@ -1720,12 +1735,6 @@ void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
}
}
-struct link_config_limits {
- int min_clock, max_clock;
- int min_lane_count, max_lane_count;
- int min_bpp, max_bpp;
-};
-
static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
@@ -1788,7 +1797,7 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
}
/* Adjust link config limits based on compliance test requests. */
-static void
+void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits)
@@ -1972,6 +1981,14 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
return 0;
}
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB)
+ return 6 * 3;
+ else
+ return 8 * 3;
+}
+
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1995,7 +2012,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- limits.min_bpp = 6 * 3;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
if (intel_dp_is_edp(intel_dp)) {
@@ -2058,6 +2075,29 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ const struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+
+ if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
+ /*
+ * See:
+ * CEA-861-E - 5.1 Default Encoding Parameters
+ * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
+ */
+ return crtc_state->pipe_bpp != 18 &&
+ drm_default_rgb_quant_range(adjusted_mode) ==
+ HDMI_QUANTIZATION_RANGE_LIMITED;
+ } else {
+ return intel_conn_state->broadcast_rgb ==
+ INTEL_BROADCAST_RGB_LIMITED;
+ }
+}
+
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -2074,7 +2114,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
to_intel_digital_connector_state(conn_state);
bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_CONSTANT_N);
- int ret;
+ int ret, output_bpp;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2123,40 +2163,25 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (ret < 0)
return ret;
- if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
- /*
- * See:
- * CEA-861-E - 5.1 Default Encoding Parameters
- * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
- */
- pipe_config->limited_color_range =
- pipe_config->pipe_bpp != 18 &&
- drm_default_rgb_quant_range(adjusted_mode) ==
- HDMI_QUANTIZATION_RANGE_LIMITED;
- } else {
- pipe_config->limited_color_range =
- intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED;
- }
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
- if (!pipe_config->dsc_params.compression_enable)
- intel_link_compute_m_n(pipe_config->pipe_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ if (pipe_config->dsc_params.compression_enable)
+ output_bpp = pipe_config->dsc_params.compressed_bpp;
else
- intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
- pipe_config->lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ output_bpp = pipe_config->pipe_bpp;
+
+ intel_link_compute_m_n(output_bpp,
+ pipe_config->lane_count,
+ adjusted_mode->crtc_clock,
+ pipe_config->port_clock,
+ &pipe_config->dp_m_n,
+ constant_n);
if (intel_connector->panel.downclock_mode != NULL &&
dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
pipe_config->has_drrs = true;
- intel_link_compute_m_n(pipe_config->pipe_bpp,
+ intel_link_compute_m_n(output_bpp,
pipe_config->lane_count,
intel_connector->panel.downclock_mode->clock,
pipe_config->port_clock,
@@ -2296,7 +2321,7 @@ static void wait_panel_status(struct intel_dp *intel_dp,
I915_READ(pp_stat_reg),
I915_READ(pp_ctrl_reg));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
pp_stat_reg, mask, value,
5000))
DRM_ERROR("Panel status timeout: status %08x control %08x\n",
@@ -3885,7 +3910,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (port == PORT_A)
return;
- if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
DP_TP_STATUS_IDLE_DONE,
DP_TP_STATUS_IDLE_DONE,
1))
@@ -4731,7 +4756,7 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
intel_dp_handle_test_request(intel_dp);
if (val & DP_CP_IRQ)
- intel_hdcp_check_link(intel_dp->attached_connector);
+ intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
if (val & DP_SINK_SPECIFIC_IRQ)
DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
@@ -5574,6 +5599,18 @@ void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
edp_panel_vdd_off_sync(intel_dp);
}
+static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
+{
+ long ret;
+
+#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
+ ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
+ msecs_to_jiffies(timeout));
+
+ if (!ret)
+		DRM_DEBUG_KMS("Timed out waiting for CP_IRQ\n");
+}
+
static
int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
@@ -5798,6 +5835,336 @@ int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
return 0;
}
+struct hdcp2_dp_errata_stream_type {
+ u8 msg_id;
+ u8 stream_type;
+} __packed;
+
+static struct hdcp2_dp_msg_data {
+ u8 msg_id;
+ u32 offset;
+ bool msg_detectable;
+ u32 timeout;
+ u32 timeout2; /* Added for non_paired situation */
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
+ false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
+ false, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
+ true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
+ HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+ {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
+ false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE,
+ DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
+ 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
+ false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+/* local define to shovel this through the write_2_2 interface */
+#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
+ {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
+ DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
+ 0, 0},
+ };
+
+static inline
+int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
+ HDCP_2_2_DP_RXSTATUS_LEN);
+ if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
+		DRM_DEBUG_KMS("Read rx_status from DP/AUX failed (%zd)\n", ret);
+ return ret >= 0 ? -EIO : ret;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool *msg_ready)
+{
+ u8 rx_status;
+ int ret;
+
+ *msg_ready = false;
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret < 0)
+ return ret;
+
+ switch (msg_id) {
+ case HDCP_2_2_AKE_SEND_HPRIME:
+ if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_AKE_SEND_PAIRING_INFO:
+ if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
+ *msg_ready = true;
+ break;
+ case HDCP_2_2_REP_SEND_RECVID_LIST:
+ if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ *msg_ready = true;
+ break;
+ default:
+ DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static ssize_t
+intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ struct hdcp2_dp_msg_data *hdcp2_msg_data)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ u8 msg_id = hdcp2_msg_data->msg_id;
+ int ret, timeout;
+ bool msg_ready = false;
+
+ if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
+ timeout = hdcp2_msg_data->timeout2;
+ else
+ timeout = hdcp2_msg_data->timeout;
+
+ /*
+ * There is no way to detect the CERT, LPRIME and STREAM_READY
+	 * availability. So wait for the timeout and then read the msg.
+ */
+ if (!hdcp2_msg_data->msg_detectable) {
+ mdelay(timeout);
+ ret = 0;
+ } else {
+ /*
+		 * As we want to check the msg availability at the timeout,
+		 * ignore the timeout of the CP_IRQ wait itself.
+ */
+ intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
+ ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready);
+ if (!msg_ready)
+ ret = -ETIMEDOUT;
+ }
+
+ if (ret)
+ DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
+ hdcp2_msg_data->msg_id, ret, timeout);
+
+ return ret;
+}
+
+static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id)
+ return &hdcp2_msg_data[i];
+
+ return NULL;
+}
+
+static
+int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ struct intel_dp *dp = &intel_dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_write, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+
+ offset = hdcp2_msg_data->offset;
+
+ /* No msg_id in DP HDCP2.2 msgs */
+ bytes_to_write = size - 1;
+ byte++;
+
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ while (bytes_to_write) {
+ len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
+
+ ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
+ offset, (void *)byte, len);
+ if (ret < 0)
+ return ret;
+
+ bytes_to_write -= ret;
+ byte += ret;
+ offset += ret;
+ }
+
+ return size;
+}
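An AUX transaction carries at most DP_AUX_MAX_PAYLOAD_BYTES (16) data bytes, so the write loop above pushes the message body out in chunks of up to 16 bytes, advancing the buffer pointer and the DPCD offset by however many bytes each write reports as transferred. A minimal standalone sketch of the same chunking, using a hypothetical 100-byte body:

#include <stdio.h>

int main(void)
{
	int remaining = 100, offset = 0;	/* hypothetical message body size */

	while (remaining) {
		int len = remaining > 16 ? 16 : remaining;	/* DP_AUX_MAX_PAYLOAD_BYTES */

		printf("aux write at offset +%d: %d bytes\n", offset, len);
		remaining -= len;
		offset += len;
	}
	return 0;
}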
+
+static
+ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_info[HDCP_2_2_RXINFO_LEN];
+ u32 dev_cnt;
+ ssize_t ret;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RXINFO_OFFSET,
+ (void *)rx_info, HDCP_2_2_RXINFO_LEN);
+ if (ret != HDCP_2_2_RXINFO_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
+
+ if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
+
+ ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+
+ return ret;
+}
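The size returned above is the fixed part of RepeaterAuth_Send_ReceiverID_List plus one 5-byte receiver ID per reported device. A standalone sketch of the same arithmetic, assuming the usual drm_hdcp.h layout (1-byte msg_id, 2-byte RxInfo, 3-byte seq_num_V, 16-byte V', at most 31 devices):

#include <stdio.h>

/* Assumed HDCP 2.2 constants, mirroring drm_hdcp.h */
#define RECEIVER_ID_LEN		5
#define MAX_DEVICE_COUNT	31
#define RECEIVER_IDS_MAX_LEN	(RECEIVER_ID_LEN * MAX_DEVICE_COUNT)
/* msg_id + RxInfo + seq_num_V + V' + maximally sized ID list */
#define RECVID_LIST_MSG_MAX	(1 + 2 + 3 + 16 + RECEIVER_IDS_MAX_LEN)

int main(void)
{
	int dev_cnt = 3;
	int size = RECVID_LIST_MSG_MAX - RECEIVER_IDS_MAX_LEN +
		   dev_cnt * RECEIVER_ID_LEN;

	printf("ReceiverID_List message size for %d devices: %d bytes\n",
	       dev_cnt, size);	/* 37 */
	return 0;
}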
+
+static
+int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ unsigned int offset;
+ u8 *byte = buf;
+ ssize_t ret, bytes_to_recv, len;
+ struct hdcp2_dp_msg_data *hdcp2_msg_data;
+
+ hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
+ if (!hdcp2_msg_data)
+ return -EINVAL;
+ offset = hdcp2_msg_data->offset;
+
+ ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
+ if (ret < 0)
+ return ret;
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
+ ret = get_receiver_id_list_size(intel_dig_port);
+ if (ret < 0)
+ return ret;
+
+ size = ret;
+ }
+ bytes_to_recv = size - 1;
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
+ while (bytes_to_recv) {
+ len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
+ DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
+
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
+ (void *)byte, len);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
+ return ret;
+ }
+
+ bytes_to_recv -= ret;
+ byte += ret;
+ offset += ret;
+ }
+ byte = buf;
+ *byte = msg_id;
+
+ return size;
+}
+
+static
+int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 content_type)
+{
+ struct hdcp2_dp_errata_stream_type stream_type_msg;
+
+ if (is_repeater)
+ return 0;
+
+ /*
+ * Errata for DP: since the stream type is used for encryption, the
+ * receiver must be told the stream type so that it can decrypt the
+ * content.
+ * A repeater is told the stream type later, as part of its
+ * authentication.
+ */
+ stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
+ stream_type_msg.stream_type = content_type;
+
+ return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ sizeof(stream_type_msg));
+}
+
+static
+int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status;
+ int ret;
+
+ ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
+ if (ret)
+ return ret;
+
+ if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
+ ret = HDCP_LINK_INTEGRITY_FAILURE;
+ else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 rx_caps[3];
+ int ret;
+
+ *capable = false;
+ ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
+ DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
+ rx_caps, HDCP_2_2_RXCAPS_LEN);
+ if (ret != HDCP_2_2_RXCAPS_LEN)
+ return ret >= 0 ? -EIO : ret;
+
+ if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
+ HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
+ *capable = true;
+
+ return 0;
+}
+
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -5810,6 +6177,12 @@ static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.toggle_signalling = intel_dp_hdcp_toggle_signalling,
.check_link = intel_dp_hdcp_check_link,
.hdcp_capable = intel_dp_hdcp_capable,
+ .write_2_2_msg = intel_dp_hdcp2_write_msg,
+ .read_2_2_msg = intel_dp_hdcp2_read_msg,
+ .config_stream_type = intel_dp_hdcp2_config_stream_type,
+ .check_2_2_link = intel_dp_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_dp_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_DP,
};
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
@@ -6023,43 +6396,34 @@ static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
+ u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
intel_pps_get_registers(intel_dp, &regs);
- /* Workaround: Need to write PP_CONTROL with the unlock key as
- * the very first thing. */
pp_ctl = ironlake_get_pp_control(intel_dp);
+ /* Ensure PPS is unlocked */
+ if (!HAS_DDI(dev_priv))
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+
pp_on = I915_READ(regs.pp_on);
pp_off = I915_READ(regs.pp_off);
- if (!IS_GEN9_LP(dev_priv) && !HAS_PCH_CNP(dev_priv) &&
- !HAS_PCH_ICP(dev_priv)) {
- I915_WRITE(regs.pp_ctrl, pp_ctl);
- pp_div = I915_READ(regs.pp_div);
- }
/* Pull timing values out of registers */
- seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
+ seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
+ seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
+ seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
+ seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);
- seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ u32 pp_div;
- seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
-
- seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
+ pp_div = I915_READ(regs.pp_div);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- seq->t11_t12 = ((pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
- BXT_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
- seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
+ seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
}
}
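REG_FIELD_GET()/REG_FIELD_PREP() replace the open-coded mask-and-shift pairs; they are assumed here to behave like FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, extracting or placing a value at the bit position of a contiguous mask. A userspace sketch of the idea, with made-up masks standing in for the panel-power delay fields:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for FIELD_GET()/FIELD_PREP(); masks must be contiguous. */
#define FIELD_GET_EX(mask, val)  (((val) & (mask)) >> __builtin_ctz(mask))
#define FIELD_PREP_EX(mask, val) (((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

int main(void)
{
	const uint32_t UP_DELAY_MASK = 0x1fff0000;	/* example field, bits 16-28 */
	const uint32_t ON_DELAY_MASK = 0x00001fff;	/* example field, bits 0-12 */
	uint32_t pp_on;

	pp_on = FIELD_PREP_EX(UP_DELAY_MASK, 400) |
		FIELD_PREP_EX(ON_DELAY_MASK, 50);
	printf("t1_t3=%u t8=%u\n",
	       (unsigned int)FIELD_GET_EX(UP_DELAY_MASK, pp_on),
	       (unsigned int)FIELD_GET_EX(ON_DELAY_MASK, pp_on));
	return 0;
}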
@@ -6184,7 +6548,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
bool force_disable_vdd)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 pp_on, pp_off, pp_div, port_sel = 0;
+ u32 pp_on, pp_off, port_sel = 0;
int div = dev_priv->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
@@ -6219,23 +6583,10 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_ctrl, pp);
}
- pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
- (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
- pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
- (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
- /* Compute the divisor for the pp clock, simply match the Bspec
- * formula. */
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) {
- pp_div = I915_READ(regs.pp_ctrl);
- pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << BXT_POWER_CYCLE_DELAY_SHIFT);
- } else {
- pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
- pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
- << PANEL_POWER_CYCLE_DELAY_SHIFT);
- }
+ pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
+ pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
@@ -6262,19 +6613,29 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
I915_WRITE(regs.pp_on, pp_on);
I915_WRITE(regs.pp_off, pp_off);
- if (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_div);
- else
- I915_WRITE(regs.pp_div, pp_div);
+
+ /*
+ * Compute the divisor for the pp clock, simply match the Bspec formula.
+ */
+ if (i915_mmio_reg_valid(regs.pp_div)) {
+ I915_WRITE(regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ } else {
+ u32 pp_ctl;
+
+ pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
+ pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
+ I915_WRITE(regs.pp_ctrl, pp_ctl);
+ }
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(regs.pp_on),
I915_READ(regs.pp_off),
- (IS_GEN9_LP(dev_priv) || HAS_PCH_CNP(dev_priv) ||
- HAS_PCH_ICP(dev_priv)) ?
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
- I915_READ(regs.pp_div));
+ i915_mmio_reg_valid(regs.pp_div) ?
+ I915_READ(regs.pp_div) :
+ (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6645,9 +7006,7 @@ intel_dp_drrs_init(struct intel_connector *connector,
return NULL;
}
- downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
- &connector->base);
-
+ downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
return NULL;
@@ -6669,7 +7028,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
bool has_dpcd;
- struct drm_display_mode *scan;
enum pipe pipe = INVALID_PIPE;
intel_wakeref_t wakeref;
struct edid *edid;
@@ -6685,7 +7043,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* eDP and LVDS bail out early in this case to prevent interfering
* with an already powered-on LVDS power sequencer.
*/
- if (intel_get_lvds_encoder(&dev_priv->drm)) {
+ if (intel_get_lvds_encoder(dev_priv)) {
WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
DRM_INFO("LVDS was detected, not registering eDP\n");
@@ -6722,26 +7080,13 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
}
intel_connector->edid = edid;
- /* prefer fixed mode from EDID if available */
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- downclock_mode = intel_dp_drrs_init(
- intel_connector, fixed_mode);
- break;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
/* fallback to VBT if available for eDP */
- if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
- fixed_mode = drm_mode_duplicate(dev,
- dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- }
- }
+ if (!fixed_mode)
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/intel_dp.h b/drivers/gpu/drm/i915/intel_dp.h
new file mode 100644
index 000000000000..5e9e8d13de6e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DP_H__
+#define __INTEL_DP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_connector_state;
+struct drm_encoder;
+struct drm_i915_private;
+struct drm_modeset_acquire_ctx;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
+struct intel_encoder;
+
+struct link_config_limits {
+ int min_clock, max_clock;
+ int min_lane_count, max_lane_count;
+ int min_bpp, max_bpp;
+};
+
+void intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ struct link_config_limits *limits);
+bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_dp_min_bpp(const struct intel_crtc_state *crtc_state);
+bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t dp_reg, enum port port,
+ enum pipe *pipe);
+bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
+ enum port port);
+bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+void intel_dp_set_link_params(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count,
+ bool link_mst);
+int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
+ int link_rate, u8 lane_count);
+int intel_dp_retrain_link(struct intel_encoder *encoder,
+ struct drm_modeset_acquire_ctx *ctx);
+void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ bool enable);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
+void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
+int intel_dp_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
+ bool long_hpd);
+void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
+void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
+void intel_edp_panel_on(struct intel_dp *intel_dp);
+void intel_edp_panel_off(struct intel_dp *intel_dp);
+void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
+void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
+int intel_dp_max_link_rate(struct intel_dp *intel_dp);
+int intel_dp_max_lane_count(struct intel_dp *intel_dp);
+int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
+void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
+u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
+
+void intel_edp_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+
+void
+intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
+ u8 dp_train_pat);
+void
+intel_dp_set_signal_levels(struct intel_dp *intel_dp);
+void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
+u8
+intel_dp_voltage_max(struct intel_dp *intel_dp);
+u8
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
+void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
+ u8 *link_bw, u8 *rate_select);
+bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+bool
+intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
+u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
+ int mode_clock, int mode_hdisplay);
+u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
+ int mode_hdisplay);
+
+bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
+int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+bool intel_digital_port_connected(struct intel_encoder *encoder);
+void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *dig_port);
+
+static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
+{
+ return ~((1 << lane_count) - 1) & 0xf;
+}
+
+#endif /* __INTEL_DP_H__ */
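intel_dp_unused_lane_mask() at the end of the header returns a 4-bit mask of the lanes a link leaves idle. A small self-check of the expression, not part of the patch:

#include <assert.h>

static unsigned int unused_lane_mask(int lane_count)
{
	/* Same expression as intel_dp_unused_lane_mask() above. */
	return ~((1 << lane_count) - 1) & 0xf;
}

int main(void)
{
	assert(unused_lane_mask(1) == 0xe);	/* lanes 1-3 unused */
	assert(unused_lane_mask(2) == 0xc);	/* lanes 2-3 unused */
	assert(unused_lane_mask(4) == 0x0);	/* all four lanes in use */
	return 0;
}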
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index b59c87daa4f7..54b069333e2f 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -21,6 +21,7 @@
* IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
static void
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index fb67cd931117..8839eaea8371 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -23,78 +23,119 @@
*
*/
-#include "i915_drv.h"
-#include "intel_drv.h"
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
+#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+
+static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct link_config_limits *limits)
+{
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->base.adjusted_mode;
+ void *port = connector->port;
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ DP_DPCD_QUIRK_CONSTANT_N);
+ int bpp, slots = -EINVAL;
+
+ crtc_state->lane_count = limits->max_lane_count;
+ crtc_state->port_clock = limits->max_clock;
+
+ for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
+ crtc_state->pipe_bpp = bpp;
+
+ crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
+ crtc_state->pipe_bpp);
+
+ slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
+ port, crtc_state->pbn);
+ if (slots == -EDEADLK)
+ return slots;
+ if (slots >= 0)
+ break;
+ }
+
+ if (slots < 0) {
+ DRM_DEBUG_KMS("failed finding vcpi slots:%d\n", slots);
+ return slots;
+ }
+
+ intel_link_compute_m_n(crtc_state->pipe_bpp,
+ crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ &crtc_state->dp_m_n,
+ constant_n);
+ crtc_state->dp_m_n.tu = slots;
+
+ return 0;
+}
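The new helper pins lane count and link rate at their maxima and then searches downward over pipe_bpp in steps of 6 (2 bits per colour component times 3 components), keeping the first bpp whose PBN demand fits in the available VCPI slots. A toy sketch of that search, with a hypothetical fits_in_slots() standing in for drm_dp_atomic_find_vcpi_slots():

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical capacity check standing in for the VCPI slot allocation. */
static bool fits_in_slots(int bpp)
{
	return bpp <= 18;	/* pretend only 6 bpc fits on this link */
}

int main(void)
{
	int min_bpp = 18, max_bpp = 24, chosen = -1;

	for (int bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3) {
		printf("trying pipe_bpp=%d\n", bpp);
		if (fits_in_slots(bpp)) {
			chosen = bpp;
			break;
		}
	}
	printf("chosen pipe_bpp=%d\n", chosen);
	return 0;
}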
+
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_connector *connector = conn_state->connector;
- void *port = to_intel_connector(connector)->port;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct drm_crtc *crtc = pipe_config->base.crtc;
- struct drm_crtc_state *old_crtc_state =
- drm_atomic_get_old_crtc_state(state, crtc);
- int bpp;
- int lane_count, slots =
- to_intel_crtc_state(old_crtc_state)->dp_m_n.tu;
- const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- int mst_pbn;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
- DP_DPCD_QUIRK_CONSTANT_N);
+ struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_digital_connector_state *intel_conn_state =
+ to_intel_digital_connector_state(conn_state);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+ void *port = connector->port;
+ struct link_config_limits limits;
+ int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
pipe_config->has_pch_encoder = false;
- bpp = 24;
- if (intel_dp->compliance.test_data.bpc) {
- bpp = intel_dp->compliance.test_data.bpc * 3;
- DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
- bpp);
- }
+
+ if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio =
+ drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port);
+ else
+ pipe_config->has_audio =
+ intel_conn_state->force_audio == HDMI_AUDIO_ON;
+
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- lane_count = intel_dp_max_lane_count(intel_dp);
-
- pipe_config->lane_count = lane_count;
-
- pipe_config->pipe_bpp = bpp;
+ limits.min_clock =
+ limits.max_clock = intel_dp_max_link_rate(intel_dp);
- pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
+ limits.min_lane_count =
+ limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
- if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, port))
- pipe_config->has_audio = true;
+ limits.min_bpp = intel_dp_min_bpp(pipe_config);
+ limits.max_bpp = pipe_config->pipe_bpp;
- mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
- pipe_config->pbn = mst_pbn;
+ intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
- slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, port,
- mst_pbn);
- if (slots < 0) {
- DRM_DEBUG_KMS("failed finding vcpi slots:%d\n",
- slots);
- return slots;
- }
-
- intel_link_compute_m_n(bpp, lane_count,
- adjusted_mode->crtc_clock,
- pipe_config->port_clock,
- &pipe_config->dp_m_n,
- constant_n);
+ ret = intel_dp_mst_compute_link_config(encoder, pipe_config,
+ conn_state, &limits);
+ if (ret)
+ return ret;
- pipe_config->dp_m_n.tu = slots;
+ pipe_config->limited_color_range =
+ intel_dp_limited_color_range(pipe_config, conn_state);
if (IS_GEN9_LP(dev_priv))
pipe_config->lane_lat_optim_mask =
@@ -117,7 +158,11 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
struct drm_crtc *new_crtc = new_conn_state->crtc;
struct drm_crtc_state *crtc_state;
struct drm_dp_mst_topology_mgr *mgr;
- int ret = 0;
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(connector, new_conn_state);
+ if (ret)
+ return ret;
if (!old_conn_state->crtc)
return 0;
@@ -289,7 +334,7 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DP_TP_STATUS(port),
DP_TP_STATUS_ACT_SENT,
DP_TP_STATUS_ACT_SENT,
@@ -354,11 +399,13 @@ intel_dp_mst_detect(struct drm_connector *connector, bool force)
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.detect = intel_dp_mst_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
+ .atomic_get_property = intel_digital_connector_atomic_get_property,
+ .atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_connector_register,
.early_unregister = intel_connector_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
static int intel_dp_mst_get_modes(struct drm_connector *connector)
@@ -373,7 +420,6 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- int bpp = 24; /* MST uses fixed bpp */
int max_rate, mode_rate, max_lanes, max_link_clock;
if (drm_connector_is_unregistered(connector))
@@ -386,7 +432,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
max_lanes = intel_dp_max_lane_count(intel_dp);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
- mode_rate = intel_dp_link_required(mode->clock, bpp);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
/* TODO - validate mode against available PBN for link */
if (mode->clock < 10000)
@@ -487,6 +533,10 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
if (ret)
goto err;
+ intel_attach_force_audio_property(connector);
+ intel_attach_broadcast_rgb_property(connector);
+ drm_connector_attach_max_bpc_property(connector, 6, 12);
+
return connector;
err:
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 95cb8b154f87..ab4ac7158b79 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -21,6 +21,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -341,7 +342,7 @@ static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
enum dpio_phy phy)
{
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_PORT_REF_DW3(phy),
GRC_DONE, GRC_DONE,
10))
@@ -383,7 +384,8 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
* The flag should get set in 100us according to the HW team, but
* use 1ms due to occasional timeouts observed with that.
*/
- if (intel_wait_for_register_fw(dev_priv, BXT_PORT_CL1CM_DW0(phy),
+ if (intel_wait_for_register_fw(&dev_priv->uncore,
+ BXT_PORT_CL1CM_DW0(phy),
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 0a42d11c4c33..e01c057ce50b 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -241,11 +241,11 @@ out:
}
static struct intel_shared_dpll *
-intel_find_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_find_shared_dpll(struct intel_crtc_state *crtc_state,
enum intel_dpll_id range_min,
enum intel_dpll_id range_max)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll, *unused_pll = NULL;
struct intel_shared_dpll_state *shared_dpll;
@@ -420,9 +420,10 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
}
static struct intel_shared_dpll *
-ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+ibx_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id i;
@@ -436,7 +437,7 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc->base.base.id, crtc->base.name,
pll->info->name);
} else {
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_PCH_PLL_A,
DPLL_ID_PCH_PLL_B);
}
@@ -764,15 +765,13 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
*r2_out = best.r2;
}
-static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
- struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(struct intel_crtc_state *crtc_state)
{
struct intel_shared_dpll *pll;
u32 val;
unsigned int p, n2, r2;
- hsw_ddi_calculate_wrpll(clock * 1000, &r2, &n2, &p);
+ hsw_ddi_calculate_wrpll(crtc_state->port_clock * 1000, &r2, &n2, &p);
val = WRPLL_PLL_ENABLE | WRPLL_PLL_LCPLL |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
@@ -780,7 +779,7 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
crtc_state->dpll_hw_state.wrpll = val;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
if (!pll)
@@ -790,11 +789,12 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
}
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
+hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
+ int clock = crtc_state->port_clock;
switch (clock / 2) {
case 81000:
@@ -820,19 +820,18 @@ hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
}
static struct intel_shared_dpll *
-hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+hsw_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(clock, crtc, crtc_state);
+ pll = hsw_ddi_hdmi_get_dpll(crtc_state);
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(encoder, clock);
+ pll = hsw_ddi_dp_get_dpll(crtc_state);
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
if (WARN_ON(crtc_state->port_clock / 2 != 135000))
return NULL;
@@ -840,7 +839,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
crtc_state->dpll_hw_state.spll =
SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SPLL, DPLL_ID_SPLL);
} else {
return NULL;
@@ -961,7 +960,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(regs[id].ctl,
I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DPLL_STATUS,
DPLL_LOCK(id),
DPLL_LOCK(id),
@@ -1308,9 +1307,7 @@ skip_remaining_dividers:
return true;
}
-static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1323,7 +1320,8 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
- if (!skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params))
+ if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ &wrpll_params))
return false;
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
@@ -1346,8 +1344,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-skl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 ctrl1;
@@ -1356,7 +1353,7 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
* as the DPLL id in this function.
*/
ctrl1 = DPLL_CTRL1_OVERRIDE(0);
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
@@ -1378,44 +1375,43 @@ skl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->ctrl1 = ctrl1;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.ctrl1 = ctrl1;
+
return true;
}
static struct intel_shared_dpll *
-skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+skl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = skl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
return NULL;
}
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL0);
else
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL1,
DPLL_ID_SKL_DPLL3);
if (!pll)
@@ -1692,10 +1688,10 @@ static const struct bxt_clk_div bxt_dp_clk_val[] = {
};
static bool
-bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
+bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct dpll best_clock;
/* Calculate HDMI div */
@@ -1703,9 +1699,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
* FIXME: tie the following calculation into
* i9xx_crtc_compute_clock
*/
- if (!bxt_find_best_dpll(crtc_state, clock, &best_clock)) {
+ if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- clock, pipe_name(intel_crtc->pipe));
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1722,8 +1719,10 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc *intel_crtc,
return true;
}
-static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
+static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
+ struct bxt_clk_div *clk_div)
{
+ int clock = crtc_state->port_clock;
int i;
*clk_div = bxt_dp_clk_val[0];
@@ -1737,14 +1736,17 @@ static void bxt_ddi_dp_pll_dividers(int clock, struct bxt_clk_div *clk_div)
clk_div->vco = clock * 10 / 2 * clk_div->p1 * clk_div->p2;
}
-static bool bxt_ddi_set_dpll_hw_state(int clock,
- struct bxt_clk_div *clk_div,
- struct intel_dpll_hw_state *dpll_hw_state)
+static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
+ const struct bxt_clk_div *clk_div)
{
+ struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
+ int clock = crtc_state->port_clock;
int vco = clk_div->vco;
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
u32 lanestagger;
+ memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
+
if (vco >= 6200000 && vco <= 6700000) {
prop_coef = 4;
int_coef = 9;
@@ -1804,55 +1806,45 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
}
static bool
-bxt_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = {0};
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_dp_pll_dividers(clock, &clk_div);
+ bxt_ddi_dp_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static bool
-bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state, int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
- struct bxt_clk_div clk_div = { };
+ struct bxt_clk_div clk_div = {};
- bxt_ddi_hdmi_pll_dividers(intel_crtc, crtc_state, clock, &clk_div);
+ bxt_ddi_hdmi_pll_dividers(crtc_state, &clk_div);
- return bxt_ddi_set_dpll_hw_state(clock, &clk_div, dpll_hw_state);
+ return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
static struct intel_shared_dpll *
-bxt_get_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder)
+bxt_get_dpll(struct intel_crtc_state *crtc_state,
+ struct intel_encoder *encoder)
{
- struct intel_dpll_hw_state dpll_hw_state = { };
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
- int i, clock = crtc_state->port_clock;
+ enum intel_dpll_id id;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- !bxt_ddi_hdmi_set_dpll_hw_state(crtc, crtc_state, clock,
- &dpll_hw_state))
+ !bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
return NULL;
if (intel_crtc_has_dp_encoder(crtc_state) &&
- !bxt_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
+ !bxt_ddi_dp_set_dpll_hw_state(crtc_state))
return NULL;
- memset(&crtc_state->dpll_hw_state, 0,
- sizeof(crtc_state->dpll_hw_state));
-
- crtc_state->dpll_hw_state = dpll_hw_state;
-
/* 1:1 mapping between ports and PLLs */
- i = (enum intel_dpll_id) encoder->port;
- pll = intel_get_shared_dpll_by_id(dev_priv, i);
+ id = (enum intel_dpll_id) encoder->port;
+ pll = intel_get_shared_dpll_by_id(dev_priv, id);
DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
crtc->base.base.id, crtc->base.name, pll->info->name);
@@ -1911,8 +1903,7 @@ static void intel_ddi_pll_init(struct drm_device *dev)
struct intel_dpll_mgr {
const struct dpll_info *dpll_info;
- struct intel_shared_dpll *(*get_dpll)(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+ struct intel_shared_dpll *(*get_dpll)(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
void (*dump_hw_state)(struct drm_i915_private *dev_priv,
@@ -1986,7 +1977,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
PLL_POWER_STATE,
@@ -2027,7 +2018,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
PLL_LOCK,
@@ -2075,7 +2066,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_LOCK,
0,
@@ -2097,7 +2088,7 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
CNL_DPLL_ENABLE(id),
PLL_POWER_STATE,
0,
@@ -2242,11 +2233,11 @@ int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
}
static bool
-cnl_ddi_calculate_wrpll(int clock,
- struct drm_i915_private *dev_priv,
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *wrpll_params)
{
- u32 afe_clock = clock * 5;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ u32 afe_clock = crtc_state->port_clock * 5;
u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
@@ -2282,23 +2273,20 @@ cnl_ddi_calculate_wrpll(int clock,
ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
- cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock, pdiv, qdiv,
- kdiv);
+ cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
+ pdiv, qdiv, kdiv);
return true;
}
-static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- int clock)
+static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params wrpll_params = { 0, };
cfgcr0 = DPLL_CFGCR0_HDMI_MODE;
- if (!cnl_ddi_calculate_wrpll(clock, dev_priv, &wrpll_params))
+ if (!cnl_ddi_calculate_wrpll(crtc_state, &wrpll_params))
return false;
cfgcr0 |= DPLL_CFGCR0_DCO_FRACTION(wrpll_params.dco_fraction) |
@@ -2319,14 +2307,13 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
static bool
-cnl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0;
cfgcr0 = DPLL_CFGCR0_SSC_ENABLE;
- switch (clock / 2) {
+ switch (crtc_state->port_clock / 2) {
case 81000:
cfgcr0 |= DPLL_CFGCR0_LINK_RATE_810;
break;
@@ -2356,41 +2343,40 @@ cnl_ddi_dp_set_dpll_hw_state(int clock,
break;
}
- dpll_hw_state->cfgcr0 = cfgcr0;
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
+
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+
return true;
}
static struct intel_shared_dpll *
-cnl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+cnl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- int clock = crtc_state->port_clock;
bool bret;
- struct intel_dpll_hw_state dpll_hw_state;
-
- memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- bret = cnl_ddi_hdmi_pll_dividers(crtc, crtc_state, clock);
+ bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
return NULL;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
- bret = cnl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state);
+ bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
return NULL;
}
- crtc_state->dpll_hw_state = dpll_hw_state;
} else {
DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
crtc_state->output_types);
return NULL;
}
- pll = intel_find_shared_dpll(crtc, crtc_state,
+ pll = intel_find_shared_dpll(crtc_state,
DPLL_ID_SKL_DPLL0,
DPLL_ID_SKL_DPLL2);
if (!pll) {
@@ -2431,47 +2417,69 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dump_hw_state = cnl_dump_hw_state,
};
+struct icl_combo_pll_params {
+ int clock;
+ struct skl_wrpll_params wrpll;
+};
+
/*
 * These values are already adjusted: they're the bits we write to the
* registers, not the logical values.
*/
-static const struct skl_wrpll_params icl_dp_combo_pll_24MHz_values[] = {
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_24MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x168, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x195, .dco_fraction = 0x0000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x151, .dco_fraction = 0x4000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
+
/* Also used for 38.4 MHz values. */
-static const struct skl_wrpll_params icl_dp_combo_pll_19_2MHz_values[] = {
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
- .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
- .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
- .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2},
- { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
- .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
- .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
- { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
- .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0},
+static const struct icl_combo_pll_params icl_dp_combo_pll_19_2MHz_values[] = {
+ { 540000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [0]: 5.4 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 270000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [1]: 2.7 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 162000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [2]: 1.62 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 324000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [3]: 3.24 */
+ .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 216000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [4]: 2.16 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 1, .qdiv_ratio = 2, }, },
+ { 432000,
+ { .dco_integer = 0x1C2, .dco_fraction = 0x0000, /* [5]: 4.32 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 2, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 648000,
+ { .dco_integer = 0x1FA, .dco_fraction = 0x2000, /* [6]: 6.48 */
+ .pdiv = 0x2 /* 3 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
+ { 810000,
+ { .dco_integer = 0x1A5, .dco_fraction = 0x7000, /* [7]: 8.1 */
+ .pdiv = 0x1 /* 2 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }, },
};
static const struct skl_wrpll_params icl_tbt_pll_24MHz_values = {
@@ -2484,72 +2492,53 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = {
.pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0,
};
-static bool icl_calc_dp_combo_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
- const struct skl_wrpll_params *params;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ const struct icl_combo_pll_params *params =
+ dev_priv->cdclk.hw.ref == 24000 ?
+ icl_dp_combo_pll_24MHz_values :
+ icl_dp_combo_pll_19_2MHz_values;
+ int clock = crtc_state->port_clock;
+ int i;
- switch (clock) {
- case 540000:
- *pll_params = params[0];
- break;
- case 270000:
- *pll_params = params[1];
- break;
- case 162000:
- *pll_params = params[2];
- break;
- case 324000:
- *pll_params = params[3];
- break;
- case 216000:
- *pll_params = params[4];
- break;
- case 432000:
- *pll_params = params[5];
- break;
- case 648000:
- *pll_params = params[6];
- break;
- case 810000:
- *pll_params = params[7];
- break;
- default:
- MISSING_CASE(clock);
- return false;
+ for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
+ if (clock == params[i].clock) {
+ *pll_params = params[i].wrpll;
+ return true;
+ }
}
- return true;
+ MISSING_CASE(clock);
+ return false;
}
-static bool icl_calc_tbt_pll(struct drm_i915_private *dev_priv, int clock,
+static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct skl_wrpll_params *pll_params)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
*pll_params = dev_priv->cdclk.hw.ref == 24000 ?
icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values;
return true;
}
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+ struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 cfgcr0, cfgcr1;
struct skl_wrpll_params pll_params = { 0 };
bool ret;
if (intel_port_is_tc(dev_priv, encoder->port))
- ret = icl_calc_tbt_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(clock, dev_priv, &pll_params);
+ ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
else
- ret = icl_calc_dp_combo_pll(dev_priv, clock, &pll_params);
+ ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
if (!ret)
return false;
@@ -2563,82 +2552,16 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
DPLL_CFGCR1_PDIV(pll_params.pdiv) |
DPLL_CFGCR1_CENTRAL_FREQ_8400;
- pll_state->cfgcr0 = cfgcr0;
- pll_state->cfgcr1 = cfgcr1;
- return true;
-}
-
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id)
-{
- u32 cfgcr0, cfgcr1;
- u32 pdiv, kdiv, qdiv_mode, qdiv_ratio, dco_integer, dco_fraction;
- const struct skl_wrpll_params *params;
- int index, n_entries, link_clock;
-
- /* Read back values from DPLL CFGCR registers */
- cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(pll_id));
- cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(pll_id));
-
- dco_integer = cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK;
- dco_fraction = (cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT;
- pdiv = (cfgcr1 & DPLL_CFGCR1_PDIV_MASK) >> DPLL_CFGCR1_PDIV_SHIFT;
- kdiv = (cfgcr1 & DPLL_CFGCR1_KDIV_MASK) >> DPLL_CFGCR1_KDIV_SHIFT;
- qdiv_mode = (cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1)) >>
- DPLL_CFGCR1_QDIV_MODE_SHIFT;
- qdiv_ratio = (cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
-
- params = dev_priv->cdclk.hw.ref == 24000 ?
- icl_dp_combo_pll_24MHz_values :
- icl_dp_combo_pll_19_2MHz_values;
- n_entries = ARRAY_SIZE(icl_dp_combo_pll_24MHz_values);
-
- for (index = 0; index < n_entries; index++) {
- if (dco_integer == params[index].dco_integer &&
- dco_fraction == params[index].dco_fraction &&
- pdiv == params[index].pdiv &&
- kdiv == params[index].kdiv &&
- qdiv_mode == params[index].qdiv_mode &&
- qdiv_ratio == params[index].qdiv_ratio)
- break;
- }
+ memset(&crtc_state->dpll_hw_state, 0,
+ sizeof(crtc_state->dpll_hw_state));
- /* Map PLL Index to Link Clock */
- switch (index) {
- default:
- MISSING_CASE(index);
- /* fall through */
- case 0:
- link_clock = 540000;
- break;
- case 1:
- link_clock = 270000;
- break;
- case 2:
- link_clock = 162000;
- break;
- case 3:
- link_clock = 324000;
- break;
- case 4:
- link_clock = 216000;
- break;
- case 5:
- link_clock = 432000;
- break;
- case 6:
- link_clock = 648000;
- break;
- case 7:
- link_clock = 810000;
- break;
- }
+ crtc_state->dpll_hw_state.cfgcr0 = cfgcr0;
+ crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
- return link_clock;
+ return true;
}
+
static enum tc_port icl_pll_id_to_tc_port(enum intel_dpll_id id)
{
return id - DPLL_ID_ICL_MGPLL1;
@@ -2649,11 +2572,6 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port)
return tc_port + DPLL_ID_ICL_MGPLL1;
}
-bool intel_dpll_is_combophy(enum intel_dpll_id id)
-{
- return id == DPLL_ID_ICL_DPLL0 || id == DPLL_ID_ICL_DPLL1;
-}
-
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
u32 *target_dco_khz,
struct intel_dpll_hw_state *state)
@@ -2728,12 +2646,12 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
* The specification for this function uses real numbers, so the math had to be
* adapted to integer-only calculation, that's why it looks so different.
*/
-static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
- struct intel_encoder *encoder, int clock,
- struct intel_dpll_hw_state *pll_state)
+static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_dpll_hw_state *pll_state = &crtc_state->dpll_hw_state;
int refclk_khz = dev_priv->cdclk.hw.ref;
+ int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
u32 prop_coeff, int_coeff;
@@ -2743,6 +2661,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
bool use_ssc = false;
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
+ memset(pll_state, 0, sizeof(*pll_state));
+
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state)) {
DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
@@ -2892,23 +2812,20 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
static struct intel_shared_dpll *
-icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
+icl_get_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
struct intel_digital_port *intel_dig_port;
struct intel_shared_dpll *pll;
- struct intel_dpll_hw_state pll_state = {};
enum port port = encoder->port;
enum intel_dpll_id min, max;
- int clock = crtc_state->port_clock;
bool ret;
if (intel_port_is_combophy(dev_priv, port)) {
min = DPLL_ID_ICL_DPLL0;
max = DPLL_ID_ICL_DPLL1;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else if (intel_port_is_tc(dev_priv, port)) {
if (encoder->type == INTEL_OUTPUT_DP_MST) {
struct intel_dp_mst_encoder *mst_encoder;
@@ -2922,16 +2839,14 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
if (intel_dig_port->tc_type == TC_PORT_TBT) {
min = DPLL_ID_ICL_TBTPLL;
max = min;
- ret = icl_calc_dpll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_dpll_state(crtc_state, encoder);
} else {
enum tc_port tc_port;
tc_port = intel_port_to_tc(dev_priv, port);
min = icl_tc_port_to_pll_id(tc_port);
max = min;
- ret = icl_calc_mg_pll_state(crtc_state, encoder, clock,
- &pll_state);
+ ret = icl_calc_mg_pll_state(crtc_state);
}
} else {
MISSING_CASE(port);
@@ -2943,9 +2858,8 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return NULL;
}
- crtc_state->dpll_hw_state = pll_state;
- pll = intel_find_shared_dpll(crtc, crtc_state, min, max);
+ pll = intel_find_shared_dpll(crtc_state, min, max);
if (!pll) {
DRM_DEBUG_KMS("No PLL selected\n");
return NULL;
@@ -2956,19 +2870,72 @@ icl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
return pll;
}
-static i915_reg_t icl_pll_id_to_enable_reg(enum intel_dpll_id id)
+static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
{
- if (intel_dpll_is_combophy(id))
- return CNL_DPLL_ENABLE(id);
- else if (id == DPLL_ID_ICL_TBTPLL)
- return TBT_PLL_ENABLE;
+ const enum intel_dpll_id id = pll->info->id;
+ enum tc_port tc_port = icl_pll_id_to_tc_port(id);
+ intel_wakeref_t wakeref;
+ bool ret = false;
+ u32 val;
+
+ wakeref = intel_display_power_get_if_enabled(dev_priv,
+ POWER_DOMAIN_PLLS);
+ if (!wakeref)
+ return false;
- return MG_PLL_ENABLE(icl_pll_id_to_tc_port(id));
+ val = I915_READ(MG_PLL_ENABLE(tc_port));
+ if (!(val & PLL_ENABLE))
+ goto out;
+
+ hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
+
+ hw_state->mg_clktop2_coreclkctl1 =
+ I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ hw_state->mg_clktop2_coreclkctl1 &=
+ MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
+
+ hw_state->mg_clktop2_hsclkctl =
+ I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ hw_state->mg_clktop2_hsclkctl &=
+ MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
+
+ hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+
+ hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_tdc_coldst_bias =
+ I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+
+ if (dev_priv->cdclk.hw.ref == 38400) {
+ hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
+ hw_state->mg_pll_bias_mask = 0;
+ } else {
+ hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
+ hw_state->mg_pll_bias_mask = -1U;
+ }
+
+ hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
+ hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
+
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS, wakeref);
+ return ret;
}
static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
- struct intel_dpll_hw_state *hw_state)
+ struct intel_dpll_hw_state *hw_state,
+ i915_reg_t enable_reg)
{
const enum intel_dpll_id id = pll->info->id;
intel_wakeref_t wakeref;
@@ -2980,54 +2947,12 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(icl_pll_id_to_enable_reg(id));
+ val = I915_READ(enable_reg);
if (!(val & PLL_ENABLE))
goto out;
- if (intel_dpll_is_combophy(id) ||
- id == DPLL_ID_ICL_TBTPLL) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
- } else {
- enum tc_port tc_port = icl_pll_id_to_tc_port(id);
-
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
- hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
-
- hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
- hw_state->mg_clktop2_coreclkctl1 &=
- MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
-
- hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
- hw_state->mg_clktop2_hsclkctl &=
- MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
-
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
-
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
- hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
-
- if (dev_priv->cdclk.hw.ref == 38400) {
- hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
- hw_state->mg_pll_bias_mask = 0;
- } else {
- hw_state->mg_pll_tdc_coldst_bias_mask = -1U;
- hw_state->mg_pll_bias_mask = -1U;
- }
-
- hw_state->mg_pll_tdc_coldst_bias &= hw_state->mg_pll_tdc_coldst_bias_mask;
- hw_state->mg_pll_bias &= hw_state->mg_pll_bias_mask;
- }
+ hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
ret = true;
out:
@@ -3035,6 +2960,21 @@ out:
return ret;
}
+static bool combo_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state,
+ CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static bool tbt_pll_get_hw_state(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ struct intel_dpll_hw_state *hw_state)
+{
+ return icl_pll_get_hw_state(dev_priv, pll, hw_state, TBT_PLL_ENABLE);
+}
+
static void icl_dpll_write(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
@@ -3096,11 +3036,10 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
}
-static void icl_pll_enable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
val = I915_READ(enable_reg);
@@ -3111,37 +3050,90 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE,
- PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_POWER_STATE, PLL_POWER_STATE, 1))
+ DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+}
- if (intel_dpll_is_combophy(id) || id == DPLL_ID_ICL_TBTPLL)
- icl_dpll_write(dev_priv, pll);
- else
- icl_mg_pll_write(dev_priv, pll);
+static void icl_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
+{
+ u32 val;
+
+ val = I915_READ(enable_reg);
+ val |= PLL_ENABLE;
+ I915_WRITE(enable_reg, val);
+
+ /* Timeout is actually 600us. */
+ if (intel_wait_for_register(&dev_priv->uncore, enable_reg,
+ PLL_LOCK, PLL_LOCK, 1))
+ DRM_ERROR("PLL %d not locked\n", pll->info->id);
+}
+
+static void combo_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg = CNL_DPLL_ENABLE(pll->info->id);
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_dpll_write(dev_priv, pll);
/*
* DVFS pre sequence would be here, but in our driver the cdclk code
* paths should already be setting the appropriate voltage, hence we do
- * nothign here.
+ * nothing here.
*/
- val = I915_READ(enable_reg);
- val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ icl_pll_enable(dev_priv, pll, enable_reg);
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, PLL_LOCK,
- 1)) /* 600us actually. */
- DRM_ERROR("PLL %d not locked\n", id);
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void tbt_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_power_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ icl_dpll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, TBT_PLL_ENABLE);
+
+ /* DVFS post sequence would be here. See the comment above. */
+}
+
+static void mg_pll_enable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_power_enable(dev_priv, pll, enable_reg);
+
+ icl_mg_pll_write(dev_priv, pll);
+
+ /*
+ * DVFS pre sequence would be here, but in our driver the cdclk code
+ * paths should already be setting the appropriate voltage, hence we do
+ * nothing here.
+ */
+
+ icl_pll_enable(dev_priv, pll, enable_reg);
/* DVFS post sequence would be here. See the comment above. */
}
static void icl_pll_disable(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
+ struct intel_shared_dpll *pll,
+ i915_reg_t enable_reg)
{
- const enum intel_dpll_id id = pll->info->id;
- i915_reg_t enable_reg = icl_pll_id_to_enable_reg(id);
u32 val;
/* The first steps are done by intel_ddi_post_disable(). */
@@ -3157,8 +3149,9 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
I915_WRITE(enable_reg, val);
/* Timeout is actually 1us. */
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_LOCK, 0, 1))
- DRM_ERROR("PLL %d locked\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_LOCK, 0, 1))
+ DRM_ERROR("PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
@@ -3170,9 +3163,30 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
- if (intel_wait_for_register(dev_priv, enable_reg, PLL_POWER_STATE, 0,
- 1))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ if (intel_wait_for_register(&dev_priv->uncore,
+ enable_reg, PLL_POWER_STATE, 0, 1))
+ DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+}
+
+static void combo_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, CNL_DPLL_ENABLE(pll->info->id));
+}
+
+static void tbt_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ icl_pll_disable(dev_priv, pll, TBT_PLL_ENABLE);
+}
+
+static void mg_pll_disable(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ i915_reg_t enable_reg =
+ MG_PLL_ENABLE(icl_pll_id_to_tc_port(pll->info->id));
+
+ icl_pll_disable(dev_priv, pll, enable_reg);
}
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
@@ -3197,20 +3211,32 @@ static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
hw_state->mg_pll_tdc_coldst_bias);
}
-static const struct intel_shared_dpll_funcs icl_pll_funcs = {
- .enable = icl_pll_enable,
- .disable = icl_pll_disable,
- .get_hw_state = icl_pll_get_hw_state,
+static const struct intel_shared_dpll_funcs combo_pll_funcs = {
+ .enable = combo_pll_enable,
+ .disable = combo_pll_disable,
+ .get_hw_state = combo_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
+ .enable = tbt_pll_enable,
+ .disable = tbt_pll_disable,
+ .get_hw_state = tbt_pll_get_hw_state,
+};
+
+static const struct intel_shared_dpll_funcs mg_pll_funcs = {
+ .enable = mg_pll_enable,
+ .disable = mg_pll_disable,
+ .get_hw_state = mg_pll_get_hw_state,
};
static const struct dpll_info icl_plls[] = {
- { "DPLL 0", &icl_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
- { "DPLL 1", &icl_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
- { "TBT PLL", &icl_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
- { "MG PLL 1", &icl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
- { "MG PLL 2", &icl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
- { "MG PLL 3", &icl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
- { "MG PLL 4", &icl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 },
+ { "MG PLL 1", &mg_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 },
+ { "MG PLL 2", &mg_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 },
+ { "MG PLL 3", &mg_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 },
+ { "MG PLL 4", &mg_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 },
{ },
};
@@ -3220,6 +3246,18 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.dump_hw_state = icl_dump_hw_state,
};
+static const struct dpll_info ehl_plls[] = {
+ { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 },
+ { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr ehl_pll_mgr = {
+ .dpll_info = ehl_plls,
+ .get_dpll = icl_get_dpll,
+ .dump_hw_state = icl_dump_hw_state,
+};
+
/**
* intel_shared_dpll_init - Initialize shared DPLLs
* @dev: drm device
@@ -3233,7 +3271,9 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_ICELAKE(dev_priv))
+ if (IS_ELKHARTLAKE(dev_priv))
+ dpll_mgr = &ehl_pll_mgr;
+ else if (INTEL_GEN(dev_priv) >= 11)
dpll_mgr = &icl_pll_mgr;
else if (IS_CANNONLAKE(dev_priv))
dpll_mgr = &cnl_pll_mgr;
@@ -3271,31 +3311,29 @@ void intel_shared_dpll_init(struct drm_device *dev)
/**
* intel_get_shared_dpll - get a shared DPLL for CRTC and encoder combination
- * @crtc: CRTC
- * @crtc_state: atomic state for @crtc
+ * @crtc_state: atomic state for the crtc
* @encoder: encoder
*
* Find an appropriate DPLL for the given CRTC and encoder combination. A
- * reference from the @crtc to the returned pll is registered in the atomic
- * state. That configuration is made effective by calling
+ * reference from the @crtc_state to the returned pll is registered in the
+ * atomic state. That configuration is made effective by calling
* intel_shared_dpll_swap_state(). The reference should be released by calling
* intel_release_shared_dpll().
*
* Returns:
- * A shared DPLL to be used by @crtc and @encoder with the given @crtc_state.
+ * A shared DPLL to be used by @crtc_state and @encoder.
*/
struct intel_shared_dpll *
-intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
+intel_get_shared_dpll(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
if (WARN_ON(!dpll_mgr))
return NULL;
- return dpll_mgr->get_dpll(crtc, crtc_state, encoder);
+ return dpll_mgr->get_dpll(crtc_state, encoder);
}
/**
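As the updated kernel-doc above describes, callers now pass only the crtc_state and encoder; the CRTC is recovered internally from crtc_state->base.crtc. A short caller-side sketch (the surrounding error handling is illustrative only and not taken from this patch):

	struct intel_shared_dpll *pll;

	pll = intel_get_shared_dpll(crtc_state, encoder);
	if (!pll)
		return false; /* no suitable shared DPLL was found */

	/*
	 * The reference lives in the atomic state: it becomes effective via
	 * intel_shared_dpll_swap_state() and is dropped again with
	 * intel_release_shared_dpll(), as noted in the kernel-doc above.
	 */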
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index 40e8391a92f2..bd8124cc81ed 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -327,8 +327,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool state);
#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *state,
+struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc_state *state,
struct intel_encoder *encoder);
void intel_release_shared_dpll(struct intel_shared_dpll *dpll,
struct intel_crtc *crtc,
@@ -341,8 +340,6 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
-int icl_calc_dp_combo_pll_link(struct drm_i915_private *dev_priv,
- u32 pll_id);
int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index d5660ac1b0d6..a38b9cff5cd0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -27,22 +27,24 @@
#include <linux/async.h>
#include <linux/i2c.h>
-#include <linux/hdmi.h>
#include <linux/sched/clock.h>
#include <linux/stackdepot.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/drm_atomic.h>
+#include <drm/i915_drm.h>
+#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
+#include "i915_drv.h"
+
struct drm_printer;
/**
@@ -325,6 +327,13 @@ struct intel_panel {
struct intel_digital_port;
+enum check_link_response {
+ HDCP_LINK_PROTECTED = 0,
+ HDCP_TOPOLOGY_CHANGE,
+ HDCP_LINK_INTEGRITY_FAILURE,
+ HDCP_REAUTH_REQUEST
+};
+
/*
* This structure serves as a translation layer between the generic HDCP code
* and the bus-specific code. What that means is that HDCP over HDMI differs
@@ -397,6 +406,32 @@ struct intel_hdcp_shim {
/* Detects panel's hdcp capability. This is optional for HDMI. */
int (*hdcp_capable)(struct intel_digital_port *intel_dig_port,
bool *hdcp_capable);
+
+ /* HDCP adaptation(DP/HDMI) required on the port */
+ enum hdcp_wired_protocol protocol;
+
+ /* Detects whether sink is HDCP2.2 capable */
+ int (*hdcp_2_2_capable)(struct intel_digital_port *intel_dig_port,
+ bool *capable);
+
+ /* Write HDCP2.2 messages */
+ int (*write_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size);
+
+ /* Read HDCP2.2 messages */
+ int (*read_2_2_msg)(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size);
+
+ /*
+ * Implementation of DP HDCP2.2 Errata for the communication of stream
+ * type to Receivers. In DP HDCP2.2, Stream type is one of the inputs to
+ * the HDCP2.2 Cipher for En/De-Cryption. Not applicable for HDMI.
+ */
+ int (*config_stream_type)(struct intel_digital_port *intel_dig_port,
+ bool is_repeater, u8 type);
+
+ /* HDCP2.2 Link Integrity Check */
+ int (*check_2_2_link)(struct intel_digital_port *intel_dig_port);
};
struct intel_hdcp {
@@ -406,6 +441,50 @@ struct intel_hdcp {
u64 value;
struct delayed_work check_work;
struct work_struct prop_work;
+
+ /* HDCP1.4 Encryption status */
+ bool hdcp_encrypted;
+
+ /* HDCP2.2 related definitions */
+ /* Flag indicates whether this connector supports HDCP2.2 or not. */
+ bool hdcp2_supported;
+
+ /* HDCP2.2 Encryption status */
+ bool hdcp2_encrypted;
+
+ /*
+ * Content Stream Type defined by content owner. TYPE0(0x0) content can
+ * flow in the link protected by HDCP2.2 or HDCP1.4, whereas TYPE1(0x1)
+ * content can flow only through a link protected by HDCP2.2.
+ */
+ u8 content_type;
+ struct hdcp_port_data port_data;
+
+ bool is_paired;
+ bool is_repeater;
+
+ /*
+ * Count of ReceiverID_List received. Initialized to 0 at AKE_INIT.
+ * Incremented after processing the RepeaterAuth_Send_ReceiverID_List.
+ * When it rolls over, re-auth has to be triggered.
+ */
+ u32 seq_num_v;
+
+ /*
+ * Count of RepeaterAuth_Stream_Manage msg propagated.
+ * Initialized to 0 on AKE_INIT. Incremented after every successful
+ * transmission of RepeaterAuth_Stream_Manage message. When it rolls
+ * over, re-auth has to be triggered.
+ */
+ u32 seq_num_m;
+
+ /*
+ * Work queue to signal the CP_IRQ. Used for the waiters to read the
+ * available information from HDCP DP sink.
+ */
+ wait_queue_head_t cp_irq_queue;
+ atomic_t cp_irq_count;
+ int cp_irq_count_cached;
};
struct intel_connector {
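The seq_num_v and seq_num_m comments above state the same wrap-around rule: both counters start at 0 and a rollover forces re-authentication. A minimal sketch of that rule, using a hypothetical helper name that is not part of this patch:

static bool example_hdcp2_seq_num_v_rolled_over(struct intel_hdcp *hdcp)
{
	/* Incremented after each RepeaterAuth_Send_ReceiverID_List. */
	hdcp->seq_num_v++;

	/* Wrapping back to 0 exhausts the counter space, so the caller
	 * is expected to trigger re-authentication. */
	return hdcp->seq_num_v == 0;
}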
@@ -480,6 +559,11 @@ struct intel_atomic_state {
* state only when all crtc's are DPMS off.
*/
struct intel_cdclk_state actual;
+
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
} cdclk;
bool dpll_set, modeset;
@@ -923,7 +1007,8 @@ struct intel_crtc_state {
struct intel_link_m_n fdi_m_n;
bool ips_enabled;
- bool ips_force_disable;
+
+ bool crc_enabled;
bool enable_fbc;
@@ -944,13 +1029,30 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
u32 gamma_mode;
+ union {
+ /* CSC mode programmed on the pipe */
+ u32 csc_mode;
+
+ /* CHV CGM mode */
+ u32 cgm_mode;
+ };
+
/* bitmask of visible planes (enum plane_id) */
u8 active_planes;
u8 nv12_planes;
+ u8 c8_planes;
/* bitmask of planes that will be updated during the commit */
u8 update_planes;
+ struct {
+ u32 enable;
+ u32 gcp;
+ union hdmi_infoframe avi;
+ union hdmi_infoframe spd;
+ union hdmi_infoframe hdmi;
+ } infoframes;
+
/* HDMI scrambling status */
bool hdmi_scrambling;
@@ -963,6 +1065,12 @@ struct intel_crtc_state {
/* Output down scaling is done in LSPCON device */
bool lspcon_downsampling;
+ /* enable pipe gamma? */
+ bool gamma_enable;
+
+ /* enable pipe csc? */
+ bool csc_enable;
+
/* Display Stream compression state */
struct {
bool compression_enable;
@@ -991,9 +1099,6 @@ struct intel_crtc {
struct intel_crtc_state *config;
- /* global reset count when the last flip was submitted */
- unsigned int reset_count;
-
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1262,11 +1367,15 @@ struct intel_digital_port {
const struct intel_crtc_state *crtc_state,
unsigned int type,
const void *frame, ssize_t len);
+ void (*read_infoframe)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
void (*set_infoframes)(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
- bool (*infoframe_enabled)(struct intel_encoder *encoder,
+ u32 (*infoframes_enabled)(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config);
};
@@ -1494,6 +1603,7 @@ void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
+void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
u32 mask)
@@ -1521,85 +1631,8 @@ void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv);
void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv);
-/* intel_crt.c */
-bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t adpa_reg, enum pipe *pipe);
-void intel_crt_init(struct drm_i915_private *dev_priv);
-void intel_crt_reset(struct drm_encoder *encoder);
-
-/* intel_ddi.c */
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void hsw_fdi_link_train(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
-bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
-void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
-bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
-void intel_ddi_get_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config);
-
-void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
- bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
-u32 bxt_signal_levels(struct intel_dp *intel_dp);
-u32 ddi_signal_levels(struct intel_dp *intel_dp);
-u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
-u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
- u8 voltage_swing);
-int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
- bool enable);
-void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- enum intel_dpll_id pll_id);
-
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
-
-/* intel_audio.c */
-void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
-void intel_audio_codec_enable(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_audio_codec_disable(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
-void i915_audio_component_init(struct drm_i915_private *dev_priv);
-void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
-void intel_audio_init(struct drm_i915_private *dev_priv);
-void intel_audio_deinit(struct drm_i915_private *dev_priv);
-
-/* intel_cdclk.c */
-int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void skl_init_cdclk(struct drm_i915_private *dev_priv);
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void cnl_init_cdclk(struct drm_i915_private *dev_priv);
-void cnl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void bxt_init_cdclk(struct drm_i915_private *dev_priv);
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
-void icl_init_cdclk(struct drm_i915_private *dev_priv);
-void icl_uninit_cdclk(struct drm_i915_private *dev_priv);
-void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
-void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
-
/* intel_display.c */
+void intel_plane_destroy(struct drm_plane *plane);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -1614,6 +1647,8 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -1740,7 +1775,7 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state,
enum link_m_n_set m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
-bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
+bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
@@ -1781,106 +1816,9 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
-/* intel_connector.c */
-int intel_connector_init(struct intel_connector *connector);
-struct intel_connector *intel_connector_alloc(void);
-void intel_connector_free(struct intel_connector *connector);
-void intel_connector_destroy(struct drm_connector *connector);
-int intel_connector_register(struct drm_connector *connector);
-void intel_connector_unregister(struct drm_connector *connector);
-void intel_connector_attach_encoder(struct intel_connector *connector,
- struct intel_encoder *encoder);
-bool intel_connector_get_hw_state(struct intel_connector *connector);
-enum pipe intel_connector_get_pipe(struct intel_connector *connector);
-int intel_connector_update_modes(struct drm_connector *connector,
- struct edid *edid);
-int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-void intel_attach_force_audio_property(struct drm_connector *connector);
-void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
-void intel_attach_aspect_ratio_property(struct drm_connector *connector);
-
-/* intel_csr.c */
-void intel_csr_ucode_init(struct drm_i915_private *);
-void intel_csr_load_program(struct drm_i915_private *);
-void intel_csr_ucode_fini(struct drm_i915_private *);
-void intel_csr_ucode_suspend(struct drm_i915_private *);
-void intel_csr_ucode_resume(struct drm_i915_private *);
-
-/* intel_dp.c */
-bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t dp_reg, enum port port,
- enum pipe *pipe);
-bool intel_dp_init(struct drm_i915_private *dev_priv, i915_reg_t output_reg,
- enum port port);
-bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-void intel_dp_set_link_params(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count,
- bool link_mst);
-int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
- int link_rate, u8 lane_count);
+/* intel_dp_link_training.c */
void intel_dp_start_link_train(struct intel_dp *intel_dp);
void intel_dp_stop_link_train(struct intel_dp *intel_dp);
-int intel_dp_retrain_link(struct intel_encoder *encoder,
- struct drm_modeset_acquire_ctx *ctx);
-void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
-void intel_dp_encoder_reset(struct drm_encoder *encoder);
-void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
-void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
-int intel_dp_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_dp_is_edp(struct intel_dp *intel_dp);
-bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
-enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
- bool long_hpd);
-void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_edp_backlight_off(const struct drm_connector_state *conn_state);
-void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
-void intel_edp_panel_on(struct intel_dp *intel_dp);
-void intel_edp_panel_off(struct intel_dp *intel_dp);
-void intel_dp_mst_suspend(struct drm_i915_private *dev_priv);
-void intel_dp_mst_resume(struct drm_i915_private *dev_priv);
-int intel_dp_max_link_rate(struct intel_dp *intel_dp);
-int intel_dp_max_lane_count(struct intel_dp *intel_dp);
-int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
-void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
-u32 intel_dp_pack_aux(const u8 *src, int src_bytes);
-void intel_plane_destroy(struct drm_plane *plane);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-
-void
-intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
- u8 dp_train_pat);
-void
-intel_dp_set_signal_levels(struct intel_dp *intel_dp);
-void intel_dp_set_idle_link_train(struct intel_dp *intel_dp);
-u8
-intel_dp_voltage_max(struct intel_dp *intel_dp);
-u8
-intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing);
-void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
- u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
-bool
-intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE]);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
- int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
- int mode_hdisplay);
/* intel_vdsc.c */
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
@@ -1888,18 +1826,6 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
enum intel_display_power_domain
intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
-static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
-{
- return ~((1 << lane_count) - 1) & 0xf;
-}
-
-bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
-int intel_dp_link_required(int pixel_clock, int bpp);
-int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
-bool intel_digital_port_connected(struct intel_encoder *encoder);
-void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dig_port);
-
/* intel_dp_aux_backlight.c */
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
@@ -1915,100 +1841,11 @@ void icl_dsi_init(struct drm_i915_private *dev_priv);
/* intel_dsi_dcs_backlight.c */
int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
-/* intel_dvo.c */
-void intel_dvo_init(struct drm_i915_private *dev_priv);
/* intel_hotplug.c */
void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
bool intel_encoder_hotplug(struct intel_encoder *encoder,
struct intel_connector *connector);
-/* legacy fbdev emulation in intel_fbdev.c */
-#ifdef CONFIG_DRM_FBDEV_EMULATION
-extern int intel_fbdev_init(struct drm_device *dev);
-extern void intel_fbdev_initial_config_async(struct drm_device *dev);
-extern void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_fini(struct drm_i915_private *dev_priv);
-extern void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
-extern void intel_fbdev_output_poll_changed(struct drm_device *dev);
-extern void intel_fbdev_restore_mode(struct drm_device *dev);
-#else
-static inline int intel_fbdev_init(struct drm_device *dev)
-{
- return 0;
-}
-
-static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
-{
-}
-
-static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
-{
-}
-
-static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
-{
-}
-
-static inline void intel_fbdev_restore_mode(struct drm_device *dev)
-{
-}
-#endif
-
-/* intel_fbc.c */
-void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
- struct intel_atomic_state *state);
-bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-void intel_fbc_pre_update(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
-void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct intel_plane_state *plane_state);
-void intel_fbc_disable(struct intel_crtc *crtc);
-void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
-void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_fbc_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits, enum fb_op_origin origin);
-void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
-void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
-int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
-
-/* intel_hdmi.c */
-void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
- enum port port);
-void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
- struct intel_connector *intel_connector);
-struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
-int intel_hdmi_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state);
-bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
- struct drm_connector *connector,
- bool high_tmds_clock_ratio,
- bool scrambling);
-void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
-void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
-
-/* intel_lvds.c */
-bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t lvds_reg, enum pipe *pipe);
-void intel_lvds_init(struct drm_i915_private *dev_priv);
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
-bool intel_is_dual_link_lvds(struct drm_device *dev);
-
/* intel_overlay.c */
void intel_overlay_setup(struct drm_i915_private *dev_priv);
void intel_overlay_cleanup(struct drm_i915_private *dev_priv);
@@ -2019,86 +1856,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct drm_i915_private *dev_priv);
-
-/* intel_panel.c */
-int intel_panel_init(struct intel_panel *panel,
- struct drm_display_mode *fixed_mode,
- struct drm_display_mode *downclock_mode);
-void intel_panel_fini(struct intel_panel *panel);
-void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode);
-void intel_pch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_gmch_panel_fitting(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- int fitting_mode);
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector,
- enum pipe pipe);
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
-extern struct drm_display_mode *intel_find_panel_downclock(
- struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector);
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-int intel_backlight_device_register(struct intel_connector *connector);
-void intel_backlight_device_unregister(struct intel_connector *connector);
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static inline int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static inline void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-/* intel_hdcp.c */
-void intel_hdcp_atomic_check(struct drm_connector *connector,
- struct drm_connector_state *old_state,
- struct drm_connector_state *new_state);
-int intel_hdcp_init(struct intel_connector *connector,
- const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_connector *connector);
-int intel_hdcp_disable(struct intel_connector *connector);
-int intel_hdcp_check_link(struct intel_connector *connector);
-bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
-bool intel_hdcp_capable(struct intel_connector *connector);
-
-/* intel_psr.c */
-#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
-void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_psr_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state);
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
- unsigned frontbuffer_bits,
- enum fb_op_origin origin);
-void intel_psr_init(struct drm_i915_private *dev_priv);
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
-void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
-void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
-void intel_psr_short_pulse(struct intel_dp *intel_dp);
-int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
- u32 *out_value);
-bool intel_psr_enabled(struct intel_dp *intel_dp);
-
/* intel_quirks.c */
void intel_init_quirks(struct drm_i915_private *dev_priv);
@@ -2153,20 +1910,26 @@ void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices);
static inline void
-assert_rpm_device_not_suspended(struct drm_i915_private *i915)
+assert_rpm_device_not_suspended(struct i915_runtime_pm *rpm)
{
- WARN_ONCE(i915->runtime_pm.suspended,
+ WARN_ONCE(rpm->suspended,
"Device suspended during HW access\n");
}
static inline void
-assert_rpm_wakelock_held(struct drm_i915_private *i915)
+__assert_rpm_wakelock_held(struct i915_runtime_pm *rpm)
{
- assert_rpm_device_not_suspended(i915);
- WARN_ONCE(!atomic_read(&i915->runtime_pm.wakeref_count),
+ assert_rpm_device_not_suspended(rpm);
+ WARN_ONCE(!atomic_read(&rpm->wakeref_count),
"RPM wakelock ref not held during HW access");
}
+static inline void
+assert_rpm_wakelock_held(struct drm_i915_private *i915)
+{
+ __assert_rpm_wakelock_held(&i915->runtime_pm);
+}
+
/**
* disable_rpm_wakeref_asserts - disable the RPM assert checks
* @i915: i915 device instance
@@ -2242,101 +2005,6 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override);
-
-/* intel_pm.c */
-void intel_init_clock_gating(struct drm_i915_private *dev_priv);
-void intel_suspend_hw(struct drm_i915_private *dev_priv);
-int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct intel_crtc *crtc);
-void intel_init_pm(struct drm_i915_private *dev_priv);
-void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
-void intel_pm_setup(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
-void intel_gpu_ips_teardown(void);
-void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
-void gen6_rps_busy(struct drm_i915_private *dev_priv);
-void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
-void gen6_rps_idle(struct drm_i915_private *dev_priv);
-void gen6_rps_boost(struct i915_request *rq, struct intel_rps_client *rps);
-void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
-void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
- struct skl_ddb_entry *ddb_y,
- struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
-void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
- struct skl_pipe_wm *out);
-void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
-void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
-bool intel_can_enable_sagv(struct drm_atomic_state *state);
-int intel_enable_sagv(struct drm_i915_private *dev_priv);
-int intel_disable_sagv(struct drm_i915_private *dev_priv);
-bool skl_wm_level_equals(const struct skl_wm_level *l1,
- const struct skl_wm_level *l2);
-bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
- int num_entries, int ignore_idx);
-void skl_write_plane_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-void skl_write_cursor_wm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
-bool ilk_disable_lp_wm(struct drm_device *dev);
-int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
- struct intel_crtc_state *cstate);
-void intel_init_ipc(struct drm_i915_private *dev_priv);
-void intel_enable_ipc(struct drm_i915_private *dev_priv);
-
-/* intel_sdvo.c */
-bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
- i915_reg_t sdvo_reg, enum pipe *pipe);
-bool intel_sdvo_init(struct drm_i915_private *dev_priv,
- i915_reg_t reg, enum port port);
-
-
-/* intel_sprite.c */
-int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
- int usecs);
-struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, int plane);
-int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
-void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
-int intel_plane_check_stride(const struct intel_plane_state *plane_state);
-int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
-int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
-struct intel_plane *
-skl_universal_plane_create(struct drm_i915_private *dev_priv,
- enum pipe pipe, enum plane_id plane_id);
-
-static inline bool icl_is_nv12_y_plane(enum plane_id id)
-{
- /* Don't need to do a gen check, these planes are only available on gen11 */
- if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
- return true;
-
- return false;
-}
-
-static inline bool icl_is_hdr_plane(struct intel_plane *plane)
-{
- if (INTEL_GEN(to_i915(plane->base.dev)) < 11)
- return false;
-
- return plane->id < PLANE_SPRITE2;
-}
-
-/* intel_tv.c */
-void intel_tv_init(struct drm_i915_private *dev_priv);
-
/* intel_atomic.c */
int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
@@ -2373,64 +2041,4 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-/* intel_atomic_plane.c */
-struct intel_plane *intel_plane_alloc(void);
-void intel_plane_free(struct intel_plane *plane);
-struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane);
-void intel_plane_destroy_state(struct drm_plane *plane,
- struct drm_plane_state *state);
-extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
-void skl_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-void i9xx_update_planes_on_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
-int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *intel_state);
-
-/* intel_color.c */
-void intel_color_init(struct intel_crtc *crtc);
-int intel_color_check(struct intel_crtc_state *crtc_state);
-void intel_color_commit(const struct intel_crtc_state *crtc_state);
-void intel_color_load_luts(const struct intel_crtc_state *crtc_state);
-
-/* intel_lspcon.c */
-bool lspcon_init(struct intel_digital_port *intel_dig_port);
-void lspcon_resume(struct intel_lspcon *lspcon);
-void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
-void lspcon_write_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- unsigned int type,
- const void *buf, ssize_t len);
-void lspcon_set_infoframes(struct intel_encoder *encoder,
- bool enable,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config);
-void lspcon_ycbcr420_config(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state);
-
-/* intel_pipe_crc.c */
-#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
-int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt);
-const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count);
-void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
-void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
-#else
-#define intel_crtc_set_crc_source NULL
-#define intel_crtc_verify_crc_source NULL
-#define intel_crtc_get_crc_sources NULL
-static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-
-static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
-{
-}
-#endif
#endif /* __INTEL_DRV_H__ */
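One detail worth noting from the hunks above: assert_rpm_wakelock_held() is now a thin wrapper around __assert_rpm_wakelock_held(), which takes the i915_runtime_pm struct directly. That lets code holding only the runtime-pm pointer, rather than the full drm_i915_private, keep its assertions. A small sketch of such a caller, with a hypothetical function name:

static void example_hw_access(struct i915_runtime_pm *rpm)
{
	/* Only the runtime-pm state is needed for the assertion. */
	__assert_rpm_wakelock_held(rpm);

	/* ... register access would go here ... */
}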
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index a9a19778dc7f..705a609050c0 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -189,7 +189,6 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec);
diff --git a/drivers/gpu/drm/i915/intel_dsi_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 06a11c35a784..3074448446bc 100644
--- a/drivers/gpu/drm/i915/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -194,7 +194,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
- if (!IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) < 11)
vlv_dsi_wait_for_fifo_empty(intel_dsi, port);
out:
@@ -365,7 +365,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
/* pull up/down */
value = *data++ & 1;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_exec_gpio(dev_priv, gpio_source, gpio_index, value);
else if (IS_VALLEYVIEW(dev_priv))
vlv_exec_gpio(dev_priv, gpio_source, gpio_number, value);
@@ -532,24 +532,6 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
msleep(msec);
}
-int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
-{
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (!mode)
- return 0;
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- drm_mode_probed_add(&connector->base, mode);
-
- return 1;
-}
-
#define ICL_PREPARE_CNT_MAX 0x7
#define ICL_CLK_ZERO_CNT_MAX 0xf
#define ICL_TRAIL_CNT_MAX 0x7
@@ -890,7 +872,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->burst_mode_ratio = burst_mode_ratio;
- if (IS_ICELAKE(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 11)
icl_dphy_param_init(intel_dsi);
else
vlv_dphy_param_init(intel_dsi);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a6c82482a841..adef81c8cccb 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -24,14 +24,20 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
+
#include <linux/i2c.h>
#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
-#include "i915_drv.h"
+
#include "dvo.h"
+#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_dvo.h"
+#include "intel_panel.h"
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
diff --git a/drivers/gpu/drm/i915/intel_dvo.h b/drivers/gpu/drm/i915/intel_dvo.h
new file mode 100644
index 000000000000..3ed0fdf8efff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dvo.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DVO_H__
+#define __INTEL_DVO_H__
+
+struct drm_i915_private;
+
+void intel_dvo_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_DVO_H__ */
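The new intel_dvo.h follows the per-output header split applied elsewhere in this series: a forward declaration plus the single entry point, so users stop relying on the monolithic intel_drv.h for it. A minimal caller sketch, assuming the caller already holds a drm_i915_private pointer (the call site is illustrative, not part of this patch):

	#include "intel_dvo.h"

	/* during display output setup, on platforms with DVO ports */
	intel_dvo_init(dev_priv);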
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 49fa43ff02ba..eea9bec04f1b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -84,7 +84,6 @@ static const struct engine_class_info intel_engine_classes[] = {
#define MAX_MMIO_BASES 3
struct engine_info {
unsigned int hw_id;
- unsigned int uabi_id;
u8 class;
u8 instance;
/* mmio bases table *must* be sorted in reverse gen order */
@@ -95,27 +94,24 @@ struct engine_info {
};
static const struct engine_info intel_engines[] = {
- [RCS] = {
- .hw_id = RCS_HW,
- .uabi_id = I915_EXEC_RENDER,
+ [RCS0] = {
+ .hw_id = RCS0_HW,
.class = RENDER_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 1, .base = RENDER_RING_BASE }
},
},
- [BCS] = {
- .hw_id = BCS_HW,
- .uabi_id = I915_EXEC_BLT,
+ [BCS0] = {
+ .hw_id = BCS0_HW,
.class = COPY_ENGINE_CLASS,
.instance = 0,
.mmio_bases = {
{ .gen = 6, .base = BLT_RING_BASE }
},
},
- [VCS] = {
- .hw_id = VCS_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS0] = {
+ .hw_id = VCS0_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 0,
.mmio_bases = {
@@ -124,9 +120,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 4, .base = BSD_RING_BASE }
},
},
- [VCS2] = {
- .hw_id = VCS2_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS1] = {
+ .hw_id = VCS1_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 1,
.mmio_bases = {
@@ -134,27 +129,24 @@ static const struct engine_info intel_engines[] = {
{ .gen = 8, .base = GEN8_BSD2_RING_BASE }
},
},
- [VCS3] = {
- .hw_id = VCS3_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS2] = {
+ .hw_id = VCS2_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 2,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD3_RING_BASE }
},
},
- [VCS4] = {
- .hw_id = VCS4_HW,
- .uabi_id = I915_EXEC_BSD,
+ [VCS3] = {
+ .hw_id = VCS3_HW,
.class = VIDEO_DECODE_CLASS,
.instance = 3,
.mmio_bases = {
{ .gen = 11, .base = GEN11_BSD4_RING_BASE }
},
},
- [VECS] = {
- .hw_id = VECS_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS0] = {
+ .hw_id = VECS0_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 0,
.mmio_bases = {
@@ -162,9 +154,8 @@ static const struct engine_info intel_engines[] = {
{ .gen = 7, .base = VEBOX_RING_BASE }
},
},
- [VECS2] = {
- .hw_id = VECS2_HW,
- .uabi_id = I915_EXEC_VEBOX,
+ [VECS1] = {
+ .hw_id = VECS1_HW,
.class = VIDEO_ENHANCEMENT_CLASS,
.instance = 1,
.mmio_bases = {
@@ -264,21 +255,17 @@ static void __sprint_engine_name(char *name, const struct engine_info *info)
void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask)
{
- struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t hwstam;
-
/*
* Though they added more rings on g4x/ilk, they did not add
* per-engine HWSTAM until gen6.
*/
- if (INTEL_GEN(dev_priv) < 6 && engine->class != RENDER_CLASS)
+ if (INTEL_GEN(engine->i915) < 6 && engine->class != RENDER_CLASS)
return;
- hwstam = RING_HWSTAM(engine->mmio_base);
- if (INTEL_GEN(dev_priv) >= 3)
- I915_WRITE(hwstam, mask);
+ if (INTEL_GEN(engine->i915) >= 3)
+ ENGINE_WRITE(engine, RING_HWSTAM, mask);
else
- I915_WRITE16(hwstam, mask);
+ ENGINE_WRITE16(engine, RING_HWSTAM, mask);
}
static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
@@ -313,15 +300,18 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
if (!engine)
return -ENOMEM;
+ BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+
engine->id = id;
+ engine->mask = BIT(id);
engine->i915 = dev_priv;
+ engine->uncore = &dev_priv->uncore;
__sprint_engine_name(engine->name, info);
engine->hw_id = engine->guc_id = info->hw_id;
engine->mmio_base = __engine_mmio_base(dev_priv, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
- engine->uabi_id = info->uabi_id;
engine->uabi_class = intel_engine_classes[info->class].uabi_class;
engine->context_size = __intel_engine_context_size(dev_priv,
@@ -355,15 +345,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
- const unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
+ const unsigned int engine_mask = INTEL_INFO(dev_priv)->engine_mask;
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int mask = 0;
unsigned int i;
int err;
- WARN_ON(ring_mask == 0);
- WARN_ON(ring_mask &
+ WARN_ON(engine_mask == 0);
+ WARN_ON(engine_mask &
GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_load_failure())
@@ -377,7 +367,7 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
if (err)
goto cleanup;
- mask |= ENGINE_MASK(i);
+ mask |= BIT(i);
}
/*
@@ -385,16 +375,16 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != ring_mask))
- device_info->ring_mask = mask;
+ if (WARN_ON(mask != engine_mask))
+ device_info->engine_mask = mask;
/* We always presume we have at least RCS available for later probing */
- if (WARN_ON(!HAS_ENGINE(dev_priv, RCS))) {
+ if (WARN_ON(!HAS_ENGINE(dev_priv, RCS0))) {
err = -ENODEV;
goto cleanup;
}
- RUNTIME_INFO(dev_priv)->num_rings = hweight32(mask);
+ RUNTIME_INFO(dev_priv)->num_engines = hweight32(mask);
i915_check_and_clear_faults(dev_priv);
@@ -455,12 +445,6 @@ cleanup:
return err;
}
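
For reference, a stand-alone sketch of the engine-mask bookkeeping above: one bit per successfully probed engine, a population count for num_engines, and a bit test standing in for HAS_ENGINE(). The enum values here are illustrative only, not the driver's definitions.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    enum { RCS0 = 0, BCS0, VCS0, VCS1, NUM_ENGINES };

    int main(void)
    {
        uint32_t mask = 0;

        /* pretend only RCS0 and VCS0 probed successfully */
        mask |= BIT(RCS0);
        mask |= BIT(VCS0);

        printf("num_engines = %d\n", __builtin_popcount(mask)); /* hweight32() */
        printf("has RCS0    = %d\n", !!(mask & BIT(RCS0)));     /* HAS_ENGINE() */
        return 0;
    }
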
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno)
-{
- intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- GEM_BUG_ON(intel_engine_get_seqno(engine) != seqno);
-}
-
static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
{
i915_gem_batch_pool_init(&engine->batch_pool, engine);
@@ -541,9 +525,7 @@ static int init_status_page(struct intel_engine_cs *engine)
return PTR_ERR(obj);
}
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- if (ret)
- goto err;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -594,7 +576,6 @@ int intel_engine_setup_common(struct intel_engine_cs *engine)
err = i915_timeline_init(engine->i915,
&engine->timeline,
- engine->name,
engine->status_page.vma);
if (err)
goto err_hwsp;
@@ -614,10 +595,44 @@ err_hwsp:
return err;
}
-static void __intel_context_unpin(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine)
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915)
{
- intel_context_unpin(to_intel_context(ctx, engine));
+ static const struct {
+ u8 engine;
+ u8 sched;
+ } map[] = {
+#define MAP(x, y) { ilog2(I915_ENGINE_HAS_##x), ilog2(I915_SCHEDULER_CAP_##y) }
+ MAP(PREEMPTION, PREEMPTION),
+ MAP(SEMAPHORES, SEMAPHORES),
+#undef MAP
+ };
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 enabled, disabled;
+
+ enabled = 0;
+ disabled = 0;
+ for_each_engine(engine, i915, id) { /* all engines must agree! */
+ int i;
+
+ if (engine->schedule)
+ enabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+ else
+ disabled |= (I915_SCHEDULER_CAP_ENABLED |
+ I915_SCHEDULER_CAP_PRIORITY);
+
+ for (i = 0; i < ARRAY_SIZE(map); i++) {
+ if (engine->flags & BIT(map[i].engine))
+ enabled |= BIT(map[i].sched);
+ else
+ disabled |= BIT(map[i].sched);
+ }
+ }
+
+ i915->caps.scheduler = enabled & ~disabled;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_ENABLED))
+ i915->caps.scheduler = 0;
}
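
For illustration, a stand-alone sketch of the capability folding in intel_engines_set_scheduler_caps(): a scheduler capability is advertised only when every engine sets the matching flag. The flag and capability bit positions below are placeholders, not the real i915 uAPI values.

    #include <stdint.h>
    #include <stdio.h>

    #define ENGINE_HAS_PREEMPTION (1u << 2)   /* placeholder flag bits */
    #define ENGINE_HAS_SEMAPHORES (1u << 3)

    static const struct { unsigned int engine_bit, sched_bit; } map[] = {
        { 2, 0 }, /* HAS_PREEMPTION -> scheduler cap bit 0 */
        { 3, 1 }, /* HAS_SEMAPHORES -> scheduler cap bit 1 */
    };

    static uint32_t fold_caps(const uint32_t *engine_flags, int nengines)
    {
        uint32_t enabled = 0, disabled = 0;
        unsigned int i;
        int e;

        for (e = 0; e < nengines; e++) {
            for (i = 0; i < sizeof(map) / sizeof(map[0]); i++) {
                if (engine_flags[e] & (1u << map[i].engine_bit))
                    enabled |= 1u << map[i].sched_bit;
                else
                    disabled |= 1u << map[i].sched_bit;
            }
        }
        /* a capability survives only if no engine voted against it */
        return enabled & ~disabled;
    }

    int main(void)
    {
        uint32_t flags[] = {
            ENGINE_HAS_PREEMPTION | ENGINE_HAS_SEMAPHORES,
            ENGINE_HAS_SEMAPHORES,
        };

        /* only the semaphore capability is common to both engines */
        printf("caps = %#x\n", fold_caps(flags, 2));
        return 0;
    }
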
struct measure_breadcrumb {
@@ -639,7 +654,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
return -ENOMEM;
if (i915_timeline_init(engine->i915,
- &frame->timeline, "measure",
+ &frame->timeline,
engine->status_page.vma))
goto out_frame;
@@ -670,6 +685,20 @@ out_frame:
return dw;
}
+static int pin_context(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ struct intel_context **out)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_pin(ctx, engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ *out = ce;
+ return 0;
+}
+
/**

* intel_engine_init_common - initialize engine state which might require hw access
* @engine: Engine to initialize.
@@ -684,11 +713,8 @@ out_frame:
int intel_engine_init_common(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
- struct intel_context *ce;
int ret;
- engine->set_default_submission(engine);
-
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -696,39 +722,61 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ce = intel_context_pin(i915->kernel_context, engine);
- if (IS_ERR(ce))
- return PTR_ERR(ce);
+ ret = pin_context(i915->kernel_context, engine,
+ &engine->kernel_context);
+ if (ret)
+ return ret;
/*
* Similarly the preempt context must always be available so that
- * we can interrupt the engine at any time.
+ * we can interrupt the engine at any time. However, as preemption
+ * is optional, we allow it to fail.
*/
- if (i915->preempt_context) {
- ce = intel_context_pin(i915->preempt_context, engine);
- if (IS_ERR(ce)) {
- ret = PTR_ERR(ce);
- goto err_unpin_kernel;
- }
- }
+ if (i915->preempt_context)
+ pin_context(i915->preempt_context, engine,
+ &engine->preempt_context);
ret = measure_breadcrumb_dw(engine);
if (ret < 0)
- goto err_unpin_preempt;
+ goto err_unpin;
engine->emit_fini_breadcrumb_dw = ret;
- return 0;
+ engine->set_default_submission(engine);
-err_unpin_preempt:
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
+ return 0;
-err_unpin_kernel:
- __intel_context_unpin(i915->kernel_context, engine);
+err_unpin:
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
return ret;
}
+void intel_gt_resume(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * After resume, we may need to poke into the pinned kernel
+ * contexts to paper over any damage caused by the sudden suspend.
+ * Only the kernel contexts should remain pinned over suspend,
+ * allowing us to fixup the user contexts on their first pin.
+ */
+ for_each_engine(engine, i915, id) {
+ struct intel_context *ce;
+
+ ce = engine->kernel_context;
+ if (ce)
+ ce->ops->reset(ce);
+
+ ce = engine->preempt_context;
+ if (ce)
+ ce->ops->reset(ce);
+ }
+}
+
/**
* intel_engine_cleanup_common - cleans up the engine state created by
* the common initializers.
@@ -738,8 +786,6 @@ err_unpin_kernel:
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
- struct drm_i915_private *i915 = engine->i915;
-
cleanup_status_page(engine);
intel_engine_fini_breadcrumbs(engine);
@@ -749,9 +795,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
if (engine->default_state)
i915_gem_object_put(engine->default_state);
- if (i915->preempt_context)
- __intel_context_unpin(i915->preempt_context, engine);
- __intel_context_unpin(i915->kernel_context, engine);
+ if (engine->preempt_context)
+ intel_context_unpin(engine->preempt_context);
+ intel_context_unpin(engine->kernel_context);
i915_timeline_fini(&engine->timeline);
@@ -762,50 +808,48 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct drm_i915_private *i915 = engine->i915;
+
u64 acthd;
- if (INTEL_GEN(dev_priv) >= 8)
- acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
- RING_ACTHD_UDW(engine->mmio_base));
- else if (INTEL_GEN(dev_priv) >= 4)
- acthd = I915_READ(RING_ACTHD(engine->mmio_base));
+ if (INTEL_GEN(i915) >= 8)
+ acthd = ENGINE_READ64(engine, RING_ACTHD, RING_ACTHD_UDW);
+ else if (INTEL_GEN(i915) >= 4)
+ acthd = ENGINE_READ(engine, RING_ACTHD);
else
- acthd = I915_READ(ACTHD);
+ acthd = ENGINE_READ(engine, ACTHD);
return acthd;
}
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
u64 bbaddr;
- if (INTEL_GEN(dev_priv) >= 8)
- bbaddr = I915_READ64_2x32(RING_BBADDR(engine->mmio_base),
- RING_BBADDR_UDW(engine->mmio_base));
+ if (INTEL_GEN(engine->i915) >= 8)
+ bbaddr = ENGINE_READ64(engine, RING_BBADDR, RING_BBADDR_UDW);
else
- bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
+ bbaddr = ENGINE_READ(engine, RING_BBADDR);
return bbaddr;
}
int intel_engine_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
const i915_reg_t mode = RING_MI_MODE(base);
int err;
- if (INTEL_GEN(dev_priv) < 3)
+ if (INTEL_GEN(engine->i915) < 3)
return -ENODEV;
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
err = 0;
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
mode, MODE_IDLE, MODE_IDLE,
1000, 0,
NULL)) {
@@ -814,19 +858,16 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine)
}
/* A final mmio read to let GPU writes be hopefully flushed to memory */
- POSTING_READ_FW(mode);
+ intel_uncore_posting_read_fw(uncore, mode);
return err;
}
void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
GEM_TRACE("%s\n", engine->name);
- I915_WRITE_FW(RING_MI_MODE(engine->mmio_base),
- _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
}
const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
@@ -863,6 +904,7 @@ static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select;
@@ -884,33 +926,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg,
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ);
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
- mcr = I915_READ_FW(GEN8_MCR_SELECTOR);
+ mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- ret = I915_READ_FW(reg);
+ ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select;
- I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr);
+ intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
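
The read_subslice_reg() conversion above keeps the same steering dance: save the MCR selector, steer it at the desired slice/subslice, read the target register, then restore the default steering, all under forcewake and the uncore spinlock. A stand-alone sketch of just the select/read/restore sequence, with registers modelled as plain memory words and arbitrary mask values:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t steered_read(volatile uint32_t *mcr_selector,
                                 volatile uint32_t *target,
                                 uint32_t ss_mask, uint32_t ss_select,
                                 uint32_t ss_default)
    {
        uint32_t mcr = *mcr_selector;
        uint32_t val;

        *mcr_selector = (mcr & ~ss_mask) | ss_select;  /* steer at slice/subslice */
        val = *target;                                 /* read the steered register */
        *mcr_selector = (mcr & ~ss_mask) | ss_default; /* restore default steering */

        return val;
    }

    int main(void)
    {
        uint32_t mcr = 0, reg = 0xdeadbeef;

        printf("val = %#x\n",
               steered_read(&mcr, &reg, 0x7u << 24, 0x2u << 24, 0));
        return 0;
    }
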
@@ -920,6 +962,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone)
{
struct drm_i915_private *dev_priv = engine->i915;
+ struct intel_uncore *uncore = engine->uncore;
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
@@ -928,12 +971,14 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
switch (INTEL_GEN(dev_priv)) {
default:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
for_each_instdone_slice_subslice(dev_priv, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(dev_priv, slice, subslice,
@@ -944,28 +989,33 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
}
break;
case 7:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
- if (engine->id != RCS)
+ if (engine->id != RCS0)
break;
- instdone->slice_common = I915_READ(GEN7_SC_INSTDONE);
- instdone->sampler[0][0] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone->row[0][0] = I915_READ(GEN7_ROW_INSTDONE);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ instdone->sampler[0][0] =
+ intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
+ instdone->row[0][0] =
+ intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
break;
case 6:
case 5:
case 4:
- instdone->instdone = I915_READ(RING_INSTDONE(mmio_base));
-
- if (engine->id == RCS)
+ instdone->instdone =
+ intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
+ if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
- instdone->slice_common = I915_READ(GEN4_INSTDONE1);
+ instdone->slice_common =
+ intel_uncore_read(uncore, GEN4_INSTDONE1);
break;
case 3:
case 2:
- instdone->instdone = I915_READ(GEN2_INSTDONE);
+ instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
break;
}
}
@@ -985,12 +1035,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return true;
/* First check that no commands are left in the ring */
- if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
- (I915_READ_TAIL(engine) & TAIL_ADDR))
+ if ((ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) !=
+ (ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR))
idle = false;
/* No bit for gen2, so assume the CS parser is idle */
- if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ if (INTEL_GEN(dev_priv) > 2 &&
+ !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE))
idle = false;
intel_runtime_pm_put(dev_priv, wakeref);
@@ -1007,16 +1058,10 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
/* More white lies, if wedged, hw state is inconsistent */
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(engine->i915))
return true;
- /* Any inflight/incomplete requests? */
- if (!intel_engine_signaled(engine, intel_engine_last_submit(engine)))
- return false;
-
/* Waiting to drain ELSP? */
if (READ_ONCE(engine->execlists.active)) {
struct tasklet_struct *t = &engine->execlists.tasklet;
@@ -1045,7 +1090,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
return ring_is_idle(engine);
}
-bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+bool intel_engines_are_idle(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -1054,10 +1099,14 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
* If the driver is wedged, HW state may be very inconsistent and
* report that it is still busy, even though we have stopped using it.
*/
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_reset_failed(i915))
return true;
- for_each_engine(engine, dev_priv, id) {
+ /* Already parked (and passed an idleness test); must still be idle */
+ if (!READ_ONCE(i915->gt.awake))
+ return true;
+
+ for_each_engine(engine, i915, id) {
if (!intel_engine_is_idle(engine))
return false;
}
@@ -1065,34 +1114,6 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
return true;
}
-/**
- * intel_engine_has_kernel_context:
- * @engine: the engine
- *
- * Returns true if the last context to be executed on this engine, or has been
- * executed if the engine is already idle, is the kernel context
- * (#i915.kernel_context).
- */
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
-{
- const struct intel_context *kernel_context =
- to_intel_context(engine->i915->kernel_context, engine);
- struct i915_request *rq;
-
- lockdep_assert_held(&engine->i915->drm.struct_mutex);
-
- /*
- * Check the last context seen by the engine. If active, it will be
- * the last request that remains in the timeline. When idle, it is
- * the last executed context as tracked by retirement.
- */
- rq = __i915_active_request_peek(&engine->timeline.last_request);
- if (rq)
- return rq->hw_context == kernel_context;
- else
- return engine->last_retired_context == kernel_context;
-}
-
void intel_engines_reset_default_submission(struct drm_i915_private *i915)
{
struct intel_engine_cs *engine;
@@ -1180,6 +1201,8 @@ void intel_engines_park(struct drm_i915_private *i915)
i915_gem_batch_pool_fini(&engine->batch_pool);
engine->execlists.no_priolist = false;
}
+
+ i915->gt.active_engines = 0;
}
/**
@@ -1283,15 +1306,14 @@ static void print_request(struct drm_printer *m,
x = print_sched_attr(rq->i915, &rq->sched.attr, buf, x, sizeof(buf));
- drm_printf(m, "%s%x%s%s [%llx:%llx]%s @ %dms: %s\n",
+ drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n",
prefix,
- rq->global_seqno,
+ rq->fence.context, rq->fence.seqno,
i915_request_completed(rq) ? "!" :
i915_request_started(rq) ? "*" :
"",
test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&rq->fence.flags) ? "+" : "",
- rq->fence.context, rq->fence.seqno,
buf,
jiffies_to_msecs(jiffies - rq->emitted_jiffies),
name);
@@ -1334,25 +1356,26 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
&engine->execlists;
u64 addr;
- if (engine->id == RCS && IS_GEN_RANGE(dev_priv, 4, 7))
- drm_printf(m, "\tCCID: 0x%08x\n", I915_READ(CCID));
+ if (engine->id == RCS0 && IS_GEN_RANGE(dev_priv, 4, 7))
+ drm_printf(m, "\tCCID: 0x%08x\n", ENGINE_READ(engine, CCID));
drm_printf(m, "\tRING_START: 0x%08x\n",
- I915_READ(RING_START(engine->mmio_base)));
+ ENGINE_READ(engine, RING_START));
drm_printf(m, "\tRING_HEAD: 0x%08x\n",
- I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR);
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR);
drm_printf(m, "\tRING_TAIL: 0x%08x\n",
- I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR);
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR);
drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
- I915_READ(RING_CTL(engine->mmio_base)),
- I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
if (INTEL_GEN(engine->i915) > 2) {
drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
- I915_READ(RING_MI_MODE(engine->mmio_base)),
- I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+ ENGINE_READ(engine, RING_MI_MODE),
+ ENGINE_READ(engine, RING_MI_MODE) & (MODE_IDLE) ? " [idle]" : "");
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine));
+ drm_printf(m, "\tRING_IMR: %08x\n",
+ ENGINE_READ(engine, RING_IMR));
}
addr = intel_engine_get_active_head(engine);
@@ -1362,57 +1385,53 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
drm_printf(m, "\tBBADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 8)
- addr = I915_READ64_2x32(RING_DMA_FADD(engine->mmio_base),
- RING_DMA_FADD_UDW(engine->mmio_base));
+ addr = ENGINE_READ64(engine, RING_DMA_FADD, RING_DMA_FADD_UDW);
else if (INTEL_GEN(dev_priv) >= 4)
- addr = I915_READ(RING_DMA_FADD(engine->mmio_base));
+ addr = ENGINE_READ(engine, RING_DMA_FADD);
else
- addr = I915_READ(DMA_FADD_I8XX);
+ addr = ENGINE_READ(engine, DMA_FADD_I8XX);
drm_printf(m, "\tDMA_FADDR: 0x%08x_%08x\n",
upper_32_bits(addr), lower_32_bits(addr));
if (INTEL_GEN(dev_priv) >= 4) {
drm_printf(m, "\tIPEIR: 0x%08x\n",
- I915_READ(RING_IPEIR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEIR));
drm_printf(m, "\tIPEHR: 0x%08x\n",
- I915_READ(RING_IPEHR(engine->mmio_base)));
+ ENGINE_READ(engine, RING_IPEHR));
} else {
- drm_printf(m, "\tIPEIR: 0x%08x\n", I915_READ(IPEIR));
- drm_printf(m, "\tIPEHR: 0x%08x\n", I915_READ(IPEHR));
+ drm_printf(m, "\tIPEIR: 0x%08x\n", ENGINE_READ(engine, IPEIR));
+ drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR));
}
if (HAS_EXECLISTS(dev_priv)) {
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
+ const u8 num_entries = execlists->csb_size;
unsigned int idx;
u8 read, write;
- drm_printf(m, "\tExeclist status: 0x%08x %08x\n",
- I915_READ(RING_EXECLIST_STATUS_LO(engine)),
- I915_READ(RING_EXECLIST_STATUS_HI(engine)));
+ drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n",
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_LO),
+ ENGINE_READ(engine, RING_EXECLIST_STATUS_HI),
+ num_entries);
read = execlists->csb_head;
write = READ_ONCE(*execlists->csb_write);
- drm_printf(m, "\tExeclist CSB read %d, write %d [mmio:%d], tasklet queued? %s (%s)\n",
+ drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? %s (%s)\n",
read, write,
- GEN8_CSB_WRITE_PTR(I915_READ(RING_CONTEXT_STATUS_PTR(engine))),
yesno(test_bit(TASKLET_STATE_SCHED,
&engine->execlists.tasklet.state)),
enableddisabled(!atomic_read(&engine->execlists.tasklet.count)));
- if (read >= GEN8_CSB_ENTRIES)
+ if (read >= num_entries)
read = 0;
- if (write >= GEN8_CSB_ENTRIES)
+ if (write >= num_entries)
write = 0;
if (read > write)
- write += GEN8_CSB_ENTRIES;
+ write += num_entries;
while (read < write) {
- idx = ++read % GEN8_CSB_ENTRIES;
- drm_printf(m, "\tExeclist CSB[%d]: 0x%08x [mmio:0x%08x], context: %d [mmio:%d]\n",
- idx,
- hws[idx * 2],
- I915_READ(RING_CONTEXT_STATUS_BUF_LO(engine, idx)),
- hws[idx * 2 + 1],
- I915_READ(RING_CONTEXT_STATUS_BUF_HI(engine, idx)));
+ idx = ++read % num_entries;
+ drm_printf(m, "\tExeclist CSB[%d]: 0x%08x, context: %d\n",
+ idx, hws[idx * 2], hws[idx * 2 + 1]);
}
rcu_read_lock();
@@ -1425,10 +1444,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
char hdr[80];
snprintf(hdr, sizeof(hdr),
- "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x}, rq: ",
+ "\t\tELSP[%d] count=%d, ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ",
idx, count,
i915_ggtt_offset(rq->ring->vma),
- rq->timeline->hwsp_offset);
+ rq->timeline->hwsp_offset,
+ hwsp_seqno(rq));
print_request(m, rq, hdr);
} else {
drm_printf(m, "\t\tELSP[%d] idle\n", idx);
@@ -1438,11 +1458,11 @@ static void intel_engine_print_registers(const struct intel_engine_cs *engine,
rcu_read_unlock();
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE));
drm_printf(m, "\tPP_DIR_BASE_READ: 0x%08x\n",
- I915_READ(RING_PP_DIR_BASE_READ(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_BASE_READ));
drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n",
- I915_READ(RING_PP_DIR_DCLV(engine)));
+ ENGINE_READ(engine, RING_PP_DIR_DCLV));
}
}
@@ -1495,13 +1515,12 @@ void intel_engine_dump(struct intel_engine_cs *engine,
va_end(ap);
}
- if (i915_terminally_wedged(&engine->i915->gpu_error))
+ if (i915_reset_failed(engine->i915))
drm_printf(m, "*** WEDGED ***\n");
- drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
- intel_engine_get_seqno(engine),
- intel_engine_last_submit(engine),
- engine->hangcheck.seqno,
+ drm_printf(m, "\tHangcheck %x:%x [%d ms]\n",
+ engine->hangcheck.last_seqno,
+ engine->hangcheck.next_seqno,
jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
drm_printf(m, "\tReset count: %d (global %d)\n",
i915_reset_engine_count(error, engine),
@@ -1521,7 +1540,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
if (&rq->link != &engine->timeline.requests)
print_request(m, rq, "\t\tlast ");
- rq = i915_gem_find_active_request(engine);
+ rq = intel_engine_find_active_request(engine);
if (rq) {
print_request(m, rq, "\t\tactive ");
@@ -1688,6 +1707,50 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine)
write_sequnlock_irqrestore(&engine->stats.lock, flags);
}
+static bool match_ring(struct i915_request *rq)
+{
+ u32 ring = ENGINE_READ(rq->engine, RING_START);
+
+ return ring == i915_ggtt_offset(rq->ring->vma);
+}
+
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *request, *active = NULL;
+ unsigned long flags;
+
+ /*
+ * We are called by the error capture, by reset and when dumping engine
+ * state at random points in time. In particular, note that none of these
+ * is crucially ordered with an interrupt. After a hang, the GPU is dead
+ * and we assume that no more writes can happen (we waited long enough
+ * for all writes that were in transaction to be flushed) - adding an
+ * extra delay for a recent interrupt is pointless. Hence, we do
+ * not need an engine->irq_seqno_barrier() before the seqno reads.
+ * At all other times, we must assume the GPU is still running, but
+ * we only care about the snapshot of this moment.
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+ list_for_each_entry(request, &engine->timeline.requests, link) {
+ if (i915_request_completed(request))
+ continue;
+
+ if (!i915_request_started(request))
+ break;
+
+ /* More than one preemptible request may match! */
+ if (!match_ring(request))
+ break;
+
+ active = request;
+ break;
+ }
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+
+ return active;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_engine.c"
#include "selftests/intel_engine_cs.c"
diff --git a/drivers/gpu/drm/i915/intel_engine_types.h b/drivers/gpu/drm/i915/intel_engine_types.h
new file mode 100644
index 000000000000..1f970c76b6a6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_engine_types.h
@@ -0,0 +1,546 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_ENGINE_TYPES_H__
+#define __INTEL_ENGINE_TYPES_H__
+
+#include <linux/hashtable.h>
+#include <linux/irq_work.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+#include "i915_gem.h"
+#include "i915_priolist_types.h"
+#include "i915_selftest.h"
+#include "i915_timeline_types.h"
+#include "intel_workarounds_types.h"
+
+#include "i915_gem_batch_pool.h"
+#include "i915_pmu.h"
+
+#define I915_MAX_SLICES 3
+#define I915_MAX_SUBSLICES 8
+
+#define I915_CMD_HASH_ORDER 9
+
+struct dma_fence;
+struct drm_i915_reg_table;
+struct i915_gem_context;
+struct i915_request;
+struct i915_sched_attr;
+struct intel_uncore;
+
+typedef u8 intel_engine_mask_t;
+#define ALL_ENGINES ((intel_engine_mask_t)~0ul)
+
+struct intel_hw_status_page {
+ struct i915_vma *vma;
+ u32 *addr;
+};
+
+struct intel_instdone {
+ u32 instdone;
+ /* The following exist only in the RCS engine */
+ u32 slice_common;
+ u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+};
+
+struct intel_engine_hangcheck {
+ u64 acthd;
+ u32 last_seqno;
+ u32 next_seqno;
+ unsigned long action_timestamp;
+ struct intel_instdone instdone;
+};
+
+struct intel_ring {
+ struct kref ref;
+ struct i915_vma *vma;
+ void *vaddr;
+
+ struct i915_timeline *timeline;
+ struct list_head request_list;
+ struct list_head active_link;
+
+ u32 head;
+ u32 tail;
+ u32 emit;
+
+ u32 space;
+ u32 size;
+ u32 effective_size;
+};
+
+/*
+ * We use a single page to load ctx workarounds, so all of these
+ * values are expressed in dwords.
+ *
+ * struct i915_wa_ctx_bb:
+ * offset: specifies the batch starting position; also helpful in case
+ * we want to have multiple batches at different offsets based on
+ * some criteria. It is not a requirement at the moment but provides
+ * an option for future use.
+ * size: size of the batch in DWORDS
+ */
+struct i915_ctx_workarounds {
+ struct i915_wa_ctx_bb {
+ u32 offset;
+ u32 size;
+ } indirect_ctx, per_ctx;
+ struct i915_vma *vma;
+};
+
+#define I915_MAX_VCS 4
+#define I915_MAX_VECS 2
+
+/*
+ * Engine IDs definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+ RCS0 = 0,
+ BCS0,
+ VCS0,
+ VCS1,
+ VCS2,
+ VCS3,
+#define _VCS(n) (VCS0 + (n))
+ VECS0,
+ VECS1,
+#define _VECS(n) (VECS0 + (n))
+ I915_NUM_ENGINES
+};
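
A stand-alone illustration of the dense engine numbering and the _VCS()/_VECS() helpers defined above; engine_mask_t here mirrors the u8 intel_engine_mask_t, so the whole enum must fit in eight bits.

    #include <assert.h>
    #include <stdint.h>

    typedef uint8_t engine_mask_t;        /* mirrors intel_engine_mask_t */
    #define BIT(n) (1u << (n))

    enum engine_id {
        RCS0 = 0, BCS0,
        VCS0, VCS1, VCS2, VCS3,
    #define _VCS(n) (VCS0 + (n))
        VECS0, VECS1,
    #define _VECS(n) (VECS0 + (n))
        NUM_ENGINES
    };

    int main(void)
    {
        engine_mask_t vcs2 = BIT(_VCS(2)); /* third video decode engine */

        assert(_VCS(2) == VCS2);
        assert(_VECS(1) == VECS1);
        assert(NUM_ENGINES <= 8 * sizeof(engine_mask_t));
        return vcs2 ? 0 : 1;
    }
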
+
+struct st_preempt_hang {
+ struct completion completion;
+ unsigned int count;
+ bool inject_hang;
+};
+
+/**
+ * struct intel_engine_execlists - execlist submission queue and port state
+ *
+ * The struct intel_engine_execlists represents the combined logical state of
+ * driver and the hardware state for execlist mode of submission.
+ */
+struct intel_engine_execlists {
+ /**
+ * @tasklet: softirq tasklet for bottom handler
+ */
+ struct tasklet_struct tasklet;
+
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
+ /**
+ * @submit_reg: gen-specific execlist submission register
+ * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
+ * the ExecList Submission Queue Contents register array for Gen11+
+ */
+ u32 __iomem *submit_reg;
+
+ /**
+ * @ctrl_reg: the enhanced execlists control register, used to load the
+ * submit queue on the HW and to request preemptions to idle
+ */
+ u32 __iomem *ctrl_reg;
+
+ /**
+ * @port: execlist port states
+ *
+ * For each hardware ELSP (ExecList Submission Port) we keep
+ * track of the last request and the number of times we submitted
+ * that port to hw. We then count the number of times the hw reports
+ * a context completion or preemption. As only one context can
+ * be active on hw, we limit resubmission of a context to port[0]. This
+ * is called a Lite Restore of the context.
+ */
+ struct execlist_port {
+ /**
+ * @request_count: combined request and submission count
+ */
+ struct i915_request *request_count;
+#define EXECLIST_COUNT_BITS 2
+#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
+#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
+#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
+#define port_set(p, packed) ((p)->request_count = (packed))
+#define port_isset(p) ((p)->request_count)
+#define port_index(p, execlists) ((p) - (execlists)->port)
+
+ /**
+ * @context_id: context ID for port
+ */
+ GEM_DEBUG_DECL(u32 context_id);
+
+#define EXECLIST_MAX_PORTS 2
+ } port[EXECLIST_MAX_PORTS];
+
+ /**
+ * @active: is the HW active? We consider the HW as active after
+ * submitting any context for execution and until we have seen the
+ * last context completion event. After that, we do not expect any
+ * more events until we submit, and so can park the HW.
+ *
+ * As we have a small number of different sources from which we feed
+ * the HW, we track the state of each inside a single bitfield.
+ */
+ unsigned int active;
+#define EXECLISTS_ACTIVE_USER 0
+#define EXECLISTS_ACTIVE_PREEMPT 1
+#define EXECLISTS_ACTIVE_HWACK 2
+
+ /**
+ * @port_mask: number of execlist ports - 1
+ */
+ unsigned int port_mask;
+
+ /**
+ * @queue_priority_hint: Highest pending priority.
+ *
+ * When we add requests into the queue, or adjust the priority of
+ * executing requests, we compute the maximum priority of those
+ * pending requests. We can then use this value to determine if
+ * we need to preempt the executing requests to service the queue.
+ * However, since we may have recorded the priority of an inflight
+ * request we wanted to preempt but which has since completed, at the
+ * time of dequeuing the priority hint may no longer match the highest
+ * available request priority.
+ */
+ int queue_priority_hint;
+
+ /**
+ * @queue: queue of requests, in priority lists
+ */
+ struct rb_root_cached queue;
+
+ /**
+ * @csb_write: control register for Context Switch buffer
+ *
+ * Note this register may be either mmio or HWSP shadow.
+ */
+ u32 *csb_write;
+
+ /**
+ * @csb_status: status array for Context Switch buffer
+ *
+ * Note these registers may be either mmio or HWSP shadow.
+ */
+ u32 *csb_status;
+
+ /**
+ * @preempt_complete_status: expected CSB upon completing preemption
+ */
+ u32 preempt_complete_status;
+
+ /**
+ * @csb_size: context status buffer FIFO size
+ */
+ u8 csb_size;
+
+ /**
+ * @csb_head: context status buffer head
+ */
+ u8 csb_head;
+
+ I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
+};
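
The port_pack()/port_request()/port_count() macros above keep a small submission count in the low (alignment) bits of the request pointer. A minimal stand-alone version of the same trick, using uintptr_t arithmetic in place of the kernel's ptr_pack_bits() helpers:

    #include <assert.h>
    #include <stdint.h>

    #define COUNT_BITS 2
    #define COUNT_MASK ((1ul << COUNT_BITS) - 1)

    /* pack a small count into the low bits of a sufficiently aligned pointer */
    static void *port_pack(void *rq, unsigned long count)
    {
        assert(((uintptr_t)rq & COUNT_MASK) == 0);
        return (void *)((uintptr_t)rq | (count & COUNT_MASK));
    }

    static void *port_request(void *packed)
    {
        return (void *)((uintptr_t)packed & ~COUNT_MASK);
    }

    static unsigned long port_count(void *packed)
    {
        return (uintptr_t)packed & COUNT_MASK;
    }

    int main(void)
    {
        static int rq;                  /* stands in for a struct i915_request */
        void *slot = port_pack(&rq, 1); /* submitted once */

        assert(port_request(slot) == &rq);
        assert(port_count(slot) == 1);
        return 0;
    }
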
+
+#define INTEL_ENGINE_CS_MAX_NAME 8
+
+struct intel_engine_cs {
+ struct drm_i915_private *i915;
+ struct intel_uncore *uncore;
+ char name[INTEL_ENGINE_CS_MAX_NAME];
+
+ enum intel_engine_id id;
+ unsigned int hw_id;
+ unsigned int guc_id;
+ intel_engine_mask_t mask;
+
+ u8 uabi_class;
+
+ u8 class;
+ u8 instance;
+ u32 context_size;
+ u32 mmio_base;
+
+ struct intel_ring *buffer;
+
+ struct i915_timeline timeline;
+
+ struct intel_context *kernel_context; /* pinned */
+ struct intel_context *preempt_context; /* pinned; optional */
+
+ struct drm_i915_gem_object *default_state;
+ void *pinned_default_state;
+
+ /* Rather than have every client wait upon all user interrupts,
+ * with the herd waking after every interrupt and each doing the
+ * heavyweight seqno dance, we delegate the task (of being the
+ * bottom-half of the user interrupt) to the first client. After
+ * every interrupt, we wake up one client, who does the heavyweight
+ * coherent seqno read and either goes back to sleep (if incomplete),
+ * or wakes up all the completed clients in parallel, before then
+ * transferring the bottom-half status to the next client in the queue.
+ *
+ * Compared to walking the entire list of waiters in a single dedicated
+ * bottom-half, we reduce the latency of the first waiter by avoiding
+ * a context switch, but incur additional coherent seqno reads when
+ * following the chain of request breadcrumbs. Since it is most likely
+ * that we have a single client waiting on each seqno, reducing
+ * the overhead of waking that client is much preferred.
+ */
+ struct intel_breadcrumbs {
+ spinlock_t irq_lock;
+ struct list_head signalers;
+
+ struct irq_work irq_work; /* for use from inside irq_lock */
+
+ unsigned int irq_enabled;
+
+ bool irq_armed;
+ } breadcrumbs;
+
+ struct intel_engine_pmu {
+ /**
+ * @enable: Bitmask of enable sample events on this engine.
+ *
+ * Bits correspond to sample event types, for instance
+ * I915_SAMPLE_QUEUED is bit 0 etc.
+ */
+ u32 enable;
+ /**
+ * @enable_count: Reference count for the enabled samplers.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
+ /**
+ * @sample: Counter values for sampling events.
+ *
+ * Our internal timer stores the current counters in this field.
+ *
+ * Index number corresponds to @enum drm_i915_pmu_engine_sample.
+ */
+ struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
+ } pmu;
+
+ /*
+ * A pool of objects to use as shadow copies of client batch buffers
+ * when the command parser is enabled. Prevents the client from
+ * modifying the batch contents after software parsing.
+ */
+ struct i915_gem_batch_pool batch_pool;
+
+ struct intel_hw_status_page status_page;
+ struct i915_ctx_workarounds wa_ctx;
+ struct i915_wa_list ctx_wa_list;
+ struct i915_wa_list wa_list;
+ struct i915_wa_list whitelist;
+
+ u32 irq_keep_mask; /* always keep these interrupts */
+ u32 irq_enable_mask; /* bitmask to enable ring interrupt */
+ void (*irq_enable)(struct intel_engine_cs *engine);
+ void (*irq_disable)(struct intel_engine_cs *engine);
+
+ int (*init_hw)(struct intel_engine_cs *engine);
+
+ struct {
+ void (*prepare)(struct intel_engine_cs *engine);
+ void (*reset)(struct intel_engine_cs *engine, bool stalled);
+ void (*finish)(struct intel_engine_cs *engine);
+ } reset;
+
+ void (*park)(struct intel_engine_cs *engine);
+ void (*unpark)(struct intel_engine_cs *engine);
+
+ void (*set_default_submission)(struct intel_engine_cs *engine);
+
+ const struct intel_context_ops *cops;
+
+ int (*request_alloc)(struct i915_request *rq);
+ int (*init_context)(struct i915_request *rq);
+
+ int (*emit_flush)(struct i915_request *request, u32 mode);
+#define EMIT_INVALIDATE BIT(0)
+#define EMIT_FLUSH BIT(1)
+#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
+ int (*emit_bb_start)(struct i915_request *rq,
+ u64 offset, u32 length,
+ unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+ int (*emit_init_breadcrumb)(struct i915_request *rq);
+ u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
+ u32 *cs);
+ unsigned int emit_fini_breadcrumb_dw;
+
+ /* Pass the request to the hardware queue (e.g. directly into
+ * the legacy ringbuffer or to the end of an execlist).
+ *
+ * This is called from an atomic context with irqs disabled; must
+ * be irq safe.
+ */
+ void (*submit_request)(struct i915_request *rq);
+
+ /*
+ * Call when the priority on a request has changed and it and its
+ * dependencies may need rescheduling. Note the request itself may
+ * not be ready to run!
+ */
+ void (*schedule)(struct i915_request *request,
+ const struct i915_sched_attr *attr);
+
+ /*
+ * Cancel all requests on the hardware, or queued for execution.
+ * This should only cancel the ready requests that have been
+ * submitted to the engine (via the engine->submit_request callback).
+ * This is called when marking the device as wedged.
+ */
+ void (*cancel_requests)(struct intel_engine_cs *engine);
+
+ void (*cleanup)(struct intel_engine_cs *engine);
+
+ struct intel_engine_execlists execlists;
+
+ /* Contexts are pinned whilst they are active on the GPU. The last
+ * context executed remains active whilst the GPU is idle - the
+ * switch away and write to the context object only occurs on the
+ * next execution. Contexts are only unpinned on retirement of the
+ * following request ensuring that we can always write to the object
+ * on the context switch even after idling. Across suspend, we switch
+ * to the kernel context and trash it as the save may not happen
+ * before the hardware is powered down.
+ */
+ struct intel_context *last_retired_context;
+
+ /* status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
+ struct intel_engine_hangcheck hangcheck;
+
+#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
+#define I915_ENGINE_SUPPORTS_STATS BIT(1)
+#define I915_ENGINE_HAS_PREEMPTION BIT(2)
+#define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+ unsigned int flags;
+
+ /*
+ * Table of commands the command parser needs to know about
+ * for this engine.
+ */
+ DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
+
+ /*
+ * Table of registers allowed in commands that read/write registers.
+ */
+ const struct drm_i915_reg_table *reg_tables;
+ int reg_table_count;
+
+ /*
+ * Returns the bitmask for the length field of the specified command.
+ * Return 0 for an unrecognized/invalid command.
+ *
+ * If the command parser finds an entry for a command in the engine's
+ * cmd_tables, it gets the command's length based on the table entry.
+ * If not, it calls this function to determine the per-engine length
+ * field encoding for the command (i.e. different opcode ranges use
+ * certain bits to encode the command length in the header).
+ */
+ u32 (*get_cmd_length_mask)(u32 cmd_header);
+
+ struct {
+ /**
+ * @lock: Lock protecting the below fields.
+ */
+ seqlock_t lock;
+ /**
+ * @enabled: Reference count indicating number of listeners.
+ */
+ unsigned int enabled;
+ /**
+ * @active: Number of contexts currently scheduled in.
+ */
+ unsigned int active;
+ /**
+ * @enabled_at: Timestamp when busy stats were enabled.
+ */
+ ktime_t enabled_at;
+ /**
+ * @start: Timestamp of the last idle to active transition.
+ *
+ * Idle is defined as active == 0, active is active > 0.
+ */
+ ktime_t start;
+ /**
+ * @total: Total time this engine was busy.
+ *
+ * Accumulated time not counting the most recent block in cases
+ * where engine is currently busy (active > 0).
+ */
+ ktime_t total;
+ } stats;
+};
+
+static inline bool
+intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
+}
+
+static inline bool
+intel_engine_supports_stats(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_SUPPORTS_STATS;
+}
+
+static inline bool
+intel_engine_has_preemption(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_PREEMPTION;
+}
+
+static inline bool
+intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+}
+
+static inline bool
+intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
+{
+ return engine->flags & I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
+}
+
+#define instdone_slice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
+
+#define instdone_subslice_mask(dev_priv__) \
+ (IS_GEN(dev_priv__, 7) ? \
+ 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
+
+#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
+ for ((slice__) = 0, (subslice__) = 0; \
+ (slice__) < I915_MAX_SLICES; \
+ (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
+ (slice__) += ((subslice__) == 0)) \
+ for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
+ (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
+
+#endif /* __INTEL_ENGINE_TYPES_H__ */
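
for_each_instdone_slice_subslice() above visits every (slice, subslice) pair and skips entries absent from the hardware masks. A flattened plain-C equivalent of that iteration, with made-up masks:

    #include <stdio.h>

    #define MAX_SLICES    3
    #define MAX_SUBSLICES 8

    int main(void)
    {
        unsigned int slice_mask = 0x1;    /* e.g. a single slice present */
        unsigned int subslice_mask = 0x3; /* e.g. two subslices per slice */
        int s, ss;

        for (s = 0; s < MAX_SLICES; s++) {
            if (!(slice_mask & (1u << s)))
                continue;
            for (ss = 0; ss < MAX_SUBSLICES; ss++) {
                if (!(subslice_mask & (1u << ss)))
                    continue;
                printf("read INSTDONE for slice %d subslice %d\n", s, ss);
            }
        }
        return 0;
    }
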
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 656e684e7c9a..c805a0966395 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -40,8 +40,10 @@
#include <drm/drm_fourcc.h>
-#include "intel_drv.h"
#include "i915_drv.h"
+#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_frontbuffer.h"
static inline bool fbc_supported(struct drm_i915_private *dev_priv)
{
@@ -108,7 +110,7 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
I915_WRITE(FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
FBC_STATUS, FBC_STAT_COMPRESSING, 0,
10)) {
DRM_DEBUG_KMS("FBC idle timed out\n");
diff --git a/drivers/gpu/drm/i915/intel_fbc.h b/drivers/gpu/drm/i915/intel_fbc.h
new file mode 100644
index 000000000000..50272eda8d43
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbc.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBC_H__
+#define __INTEL_FBC_H__
+
+#include <linux/types.h>
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
+ struct intel_atomic_state *state);
+bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
+void intel_fbc_pre_update(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_post_update(struct intel_crtc *crtc);
+void intel_fbc_init(struct drm_i915_private *dev_priv);
+void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
+void intel_fbc_enable(struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state);
+void intel_fbc_disable(struct intel_crtc *crtc);
+void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
+void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_fbc_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits, enum fb_op_origin origin);
+void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv);
+void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv);
+int intel_fbc_reset_underrun(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_FBC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 376ffe842e26..89db71996148 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -25,26 +25,27 @@
*/
#include <linux/async.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
#include <linux/console.h>
+#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
-#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/string.h>
#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
+#include <linux/tty.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
+#include <drm/i915_drm.h>
+#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
{
@@ -235,12 +236,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unpin;
}
- info->par = helper;
-
ifbdev->helper.fb = fb;
- strcpy(info->fix.id, "inteldrmfb");
-
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -259,11 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->screen_base = vaddr;
info->screen_size = vma->node.size;
- /* This driver doesn't need a VT switch to restore the mode on resume */
- info->skip_vt_switch = true;
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
@@ -292,223 +285,7 @@ out_unlock:
return ret;
}
-static struct drm_fb_helper_crtc *
-intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
-{
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++)
- if (fb_helper->crtc_info[i].mode_set.crtc == crtc)
- return &fb_helper->crtc_info[i];
-
- return NULL;
-}
-
-/*
- * Try to read the BIOS display configuration and use it for the initial
- * fb configuration.
- *
- * The BIOS or boot loader will generally create an initial display
- * configuration for us that includes some set of active pipes and displays.
- * This routine tries to figure out which pipes and connectors are active
- * and stuffs them into the crtcs and modes array given to us by the
- * drm_fb_helper code.
- *
- * The overall sequence is:
- * intel_fbdev_init - from driver load
- * intel_fbdev_init_bios - initialize the intel_fbdev using BIOS data
- * drm_fb_helper_init - build fb helper structs
- * drm_fb_helper_single_add_all_connectors - more fb helper structs
- * intel_fbdev_initial_config - apply the config
- * drm_fb_helper_initial_config - call ->probe then register_framebuffer()
- * drm_setup_crtcs - build crtc config for fbdev
- * intel_fb_initial_config - find active connectors etc
- * drm_fb_helper_single_fb_probe - set up fbdev
- * intelfb_create - re-use or alloc fb, build out fbdev structs
- *
- * Note that we don't make special consideration whether we could actually
- * switch to the selected modes without a full modeset. E.g. when the display
- * is in VGA mode we need to recalculate watermarks and set a new high-res
- * framebuffer anyway.
- */
-static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_crtc **crtcs,
- struct drm_display_mode **modes,
- struct drm_fb_offset *offsets,
- bool *enabled, int width, int height)
-{
- struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
- unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
- int i, j;
- bool *save_enabled;
- bool fallback = true, ret = true;
- int num_connectors_enabled = 0;
- int num_connectors_detected = 0;
- struct drm_modeset_acquire_ctx ctx;
-
- save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
- if (!save_enabled)
- return false;
-
- drm_modeset_acquire_init(&ctx, 0);
-
- while (drm_modeset_lock_all_ctx(fb_helper->dev, &ctx) != 0)
- drm_modeset_backoff(&ctx);
-
- memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
- conn_configured = 0;
-retry:
- conn_seq = conn_configured;
- for (i = 0; i < count; i++) {
- struct drm_fb_helper_connector *fb_conn;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_fb_helper_crtc *new_crtc;
-
- fb_conn = fb_helper->connector_info[i];
- connector = fb_conn->connector;
-
- if (conn_configured & BIT(i))
- continue;
-
- if (conn_seq == 0 && !connector->has_tile)
- continue;
-
- if (connector->status == connector_status_connected)
- num_connectors_detected++;
-
- if (!enabled[i]) {
- DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
- connector->name);
- conn_configured |= BIT(i);
- continue;
- }
-
- if (connector->force == DRM_FORCE_OFF) {
- DRM_DEBUG_KMS("connector %s is disabled by user, skipping\n",
- connector->name);
- enabled[i] = false;
- continue;
- }
-
- encoder = connector->state->best_encoder;
- if (!encoder || WARN_ON(!connector->state->crtc)) {
- if (connector->force > DRM_FORCE_OFF)
- goto bail;
-
- DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
- connector->name);
- enabled[i] = false;
- conn_configured |= BIT(i);
- continue;
- }
-
- num_connectors_enabled++;
-
- new_crtc = intel_fb_helper_crtc(fb_helper,
- connector->state->crtc);
-
- /*
- * Make sure we're not trying to drive multiple connectors
- * with a single CRTC, since our cloning support may not
- * match the BIOS.
- */
- for (j = 0; j < count; j++) {
- if (crtcs[j] == new_crtc) {
- DRM_DEBUG_KMS("fallback: cloned configuration\n");
- goto bail;
- }
- }
-
- DRM_DEBUG_KMS("looking for cmdline mode on connector %s\n",
- connector->name);
-
- /* go for command line mode first */
- modes[i] = drm_pick_cmdline_mode(fb_conn);
-
- /* try for preferred next */
- if (!modes[i]) {
- DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
- connector->name, connector->has_tile);
- modes[i] = drm_has_preferred_mode(fb_conn, width,
- height);
- }
-
- /* No preferred mode marked by the EDID? Are there any modes? */
- if (!modes[i] && !list_empty(&connector->modes)) {
- DRM_DEBUG_KMS("using first mode listed on connector %s\n",
- connector->name);
- modes[i] = list_first_entry(&connector->modes,
- struct drm_display_mode,
- head);
- }
-
- /* last resort: use current mode */
- if (!modes[i]) {
- /*
- * IMPORTANT: We want to use the adjusted mode (i.e.
- * after the panel fitter upscaling) as the initial
- * config, not the input mode, which is what crtc->mode
- * usually contains. But since our current
- * code puts a mode derived from the post-pfit timings
- * into crtc->mode this works out correctly.
- *
- * This is crtc->mode and not crtc->state->mode for the
- * fastboot check to work correctly. crtc_state->mode has
- * I915_MODE_FLAG_INHERITED, which we clear to force check
- * state.
- */
- DRM_DEBUG_KMS("looking for current mode on connector %s\n",
- connector->name);
- modes[i] = &connector->state->crtc->mode;
- }
- crtcs[i] = new_crtc;
-
- DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
- connector->name,
- connector->state->crtc->base.id,
- connector->state->crtc->name,
- modes[i]->hdisplay, modes[i]->vdisplay,
- modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
-
- fallback = false;
- conn_configured |= BIT(i);
- }
-
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
- goto retry;
-
- /*
- * If the BIOS didn't enable everything it could, fall back to have the
- * same user experiencing of lighting up as much as possible like the
- * fbdev helper library.
- */
- if (num_connectors_enabled != num_connectors_detected &&
- num_connectors_enabled < INTEL_INFO(dev_priv)->num_pipes) {
- DRM_DEBUG_KMS("fallback: Not all outputs enabled\n");
- DRM_DEBUG_KMS("Enabled: %i, detected: %i\n", num_connectors_enabled,
- num_connectors_detected);
- fallback = true;
- }
-
- if (fallback) {
-bail:
- DRM_DEBUG_KMS("Not using firmware configuration\n");
- memcpy(enabled, save_enabled, count);
- ret = false;
- }
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- kfree(save_enabled);
- return ret;
-}
-
static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
- .initial_config = intel_fb_initial_config,
.fb_probe = intelfb_create,
};
diff --git a/drivers/gpu/drm/i915/intel_fbdev.h b/drivers/gpu/drm/i915/intel_fbdev.h
new file mode 100644
index 000000000000..de7c84250eb5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fbdev.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_H__
+#define __INTEL_FBDEV_H__
+
+#include <linux/types.h>
+
+struct drm_device;
+struct drm_i915_private;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+int intel_fbdev_init(struct drm_device *dev);
+void intel_fbdev_initial_config_async(struct drm_device *dev);
+void intel_fbdev_unregister(struct drm_i915_private *dev_priv);
+void intel_fbdev_fini(struct drm_i915_private *dev_priv);
+void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous);
+void intel_fbdev_output_poll_changed(struct drm_device *dev);
+void intel_fbdev_restore_mode(struct drm_device *dev);
+#else
+static inline int intel_fbdev_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void intel_fbdev_initial_config_async(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_fini(struct drm_i915_private *dev_priv)
+{
+}
+
+static inline void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
+{
+}
+
+static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
+{
+}
+
+static inline void intel_fbdev_restore_mode(struct drm_device *dev)
+{
+}
+#endif
+
+#endif /* __INTEL_FBDEV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index f33de4be4b89..74c8b0528294 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
/**
* DOC: fifo underrun handling
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 16f253deaf8d..aa34e33b6087 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -61,9 +61,12 @@
*/
+#include "i915_drv.h"
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-#include "i915_drv.h"
+#include "intel_psr.h"
void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 63cd9a753a72..d5894666f658 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -24,9 +24,19 @@
#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__
+#include "i915_gem_object.h"
+
struct drm_i915_private;
struct drm_i915_gem_object;
+enum fb_op_origin {
+ ORIGIN_GTT,
+ ORIGIN_CPU,
+ ORIGIN_CS,
+ ORIGIN_FLIP,
+ ORIGIN_DIRTYFB,
+};
+
void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_gpu_commands.h b/drivers/gpu/drm/i915/intel_gpu_commands.h
index b96a31bc1080..a34ece53a771 100644
--- a/drivers/gpu/drm/i915/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/intel_gpu_commands.h
@@ -105,8 +105,13 @@
#define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */
#define MI_SEMAPHORE_TARGET(engine) ((engine)<<15)
#define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */
-#define MI_SEMAPHORE_POLL (1<<15)
-#define MI_SEMAPHORE_SAD_GTE_SDD (1<<12)
+#define MI_SEMAPHORE_POLL (1 << 15)
+#define MI_SEMAPHORE_SAD_GT_SDD (0 << 12)
+#define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
+#define MI_SEMAPHORE_SAD_LT_SDD (2 << 12)
+#define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12)
+#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
+#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
#define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1)
#define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2)
#define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */
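
The new MI_SEMAPHORE_SAD_* values select which comparison a polling MI_SEMAPHORE_WAIT applies between the semaphore dword and the supplied data. A small stand-alone sketch composing the flag bits, reusing the definitions above; the MI_INSTR opcode portion of the command is omitted here.

    #include <stdint.h>
    #include <stdio.h>

    #define MI_SEMAPHORE_POLL        (1 << 15)
    #define MI_SEMAPHORE_SAD_GT_SDD  (0 << 12)
    #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12)
    #define MI_SEMAPHORE_SAD_EQ_SDD  (4 << 12)

    int main(void)
    {
        /* poll until the semaphore dword is >= the supplied data */
        uint32_t flags = MI_SEMAPHORE_POLL | MI_SEMAPHORE_SAD_GTE_SDD;

        printf("wait flags = %#x\n", flags);
        return 0;
    }
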
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index 8660af3fd755..3aabfa2d9198 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) {
- fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE);
}
@@ -203,11 +203,19 @@ int intel_guc_init(struct intel_guc *guc)
goto err_log;
GEM_BUG_ON(!guc->ads_vma);
+ if (HAS_GUC_CT(dev_priv)) {
+ ret = intel_guc_ct_init(&guc->ct);
+ if (ret)
+ goto err_ads;
+ }
+
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(dev_priv);
return 0;
+err_ads:
+ intel_guc_ads_destroy(guc);
err_log:
intel_guc_log_destroy(&guc->log);
err_shared:
@@ -222,6 +230,10 @@ void intel_guc_fini(struct intel_guc *guc)
struct drm_i915_private *dev_priv = guc_to_i915(guc);
i915_ggtt_disable_guc(dev_priv);
+
+ if (HAS_GUC_CT(dev_priv))
+ intel_guc_ct_fini(&guc->ct);
+
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
guc_shared_data_destroy(guc);
@@ -357,14 +369,14 @@ void intel_guc_init_params(struct intel_guc *guc)
* they are power context saved so it's ok to release forcewake
* when we are done here and take it again at xfer time.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_BLITTER);
I915_WRITE(SOFT_SCRATCH(0), 0);
for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
I915_WRITE(SOFT_SCRATCH(1 + i), params[i]);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_BLITTER);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_BLITTER);
}
int intel_guc_send_nop(struct intel_guc *guc, const u32 *action, u32 len,
@@ -386,6 +398,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
u32 *response_buf, u32 response_buf_size)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
u32 status;
int i;
int ret;
@@ -402,12 +415,12 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
*action != INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER);
mutex_lock(&guc->send_mutex);
- intel_uncore_forcewake_get(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_get(uncore, guc->send_regs.fw_domains);
for (i = 0; i < len; i++)
- I915_WRITE(guc_send_reg(guc, i), action[i]);
+ intel_uncore_write(uncore, guc_send_reg(guc, i), action[i]);
- POSTING_READ(guc_send_reg(guc, i - 1));
+ intel_uncore_posting_read(uncore, guc_send_reg(guc, i - 1));
intel_guc_notify(guc);
@@ -415,7 +428,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
* No GuC command should ever take longer than 10ms.
* Fast commands should still complete in 10us.
*/
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
guc_send_reg(guc, 0),
INTEL_GUC_MSG_TYPE_MASK,
INTEL_GUC_MSG_TYPE_RESPONSE <<
@@ -442,7 +455,7 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
ret = INTEL_GUC_MSG_TO_DATA(status);
out:
- intel_uncore_forcewake_put(dev_priv, guc->send_regs.fw_domains);
+ intel_uncore_forcewake_put(uncore, guc->send_regs.fw_domains);
mutex_unlock(&guc->send_mutex);
return ret;
@@ -472,17 +485,25 @@ void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc)
spin_unlock(&guc->irq_lock);
enable_rpm_wakeref_asserts(dev_priv);
- intel_guc_to_host_process_recv_msg(guc, msg);
+ intel_guc_to_host_process_recv_msg(guc, &msg, 1);
}
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg)
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len)
{
+ u32 msg;
+
+ if (unlikely(!len))
+ return -EPROTO;
+
/* Make sure to handle only enabled messages */
- msg &= guc->msg_enabled_mask;
+ msg = payload[0] & guc->msg_enabled_mask;
if (msg & (INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER |
INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED))
intel_guc_log_handle_flush_event(&guc->log);
+
+ return 0;
}
int intel_guc_sample_forcewake(struct intel_guc *guc)
@@ -544,7 +565,7 @@ static int guc_sleep_state_action(struct intel_guc *guc,
if (ret)
return ret;
- ret = __intel_wait_for_register(dev_priv, SOFT_SCRATCH(14),
+ ret = __intel_wait_for_register(&dev_priv->uncore, SOFT_SCRATCH(14),
INTEL_GUC_SLEEP_STATE_INVALID_MASK,
0, 0, 10, &status);
if (ret)
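The reworked GuC-to-host receive path above hands the handler a payload array plus its length, rejects empty payloads with -EPROTO, and masks the first dword against the currently enabled message bits. A minimal self-contained sketch of that validate-then-mask shape (the bit assignments and mask below are invented for illustration):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MSG_FLUSH_LOG_BUFFER   (1u << 1)   /* illustrative bit assignments only */
    #define MSG_CRASH_DUMP_POSTED  (1u << 3)

    static uint32_t enabled_mask = MSG_FLUSH_LOG_BUFFER | MSG_CRASH_DUMP_POSTED;

    /* Mirrors the shape of the new handler: reject empty payloads, then act
     * only on message bits that are currently enabled. */
    static int process_recv_msg(const uint32_t *payload, uint32_t len)
    {
        uint32_t msg;

        if (!len)
            return -EPROTO;

        msg = payload[0] & enabled_mask;
        if (msg & (MSG_FLUSH_LOG_BUFFER | MSG_CRASH_DUMP_POSTED))
            printf("flush the GuC log buffer\n");

        return 0;
    }

    int main(void)
    {
        const uint32_t payload[] = { MSG_CRASH_DUMP_POSTED };

        return process_recv_msg(payload, 1) ? 1 : 0;
    }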
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 744220296653..2c59ff8d9f39 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -32,6 +32,7 @@
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_uc_fw.h"
+#include "i915_utils.h"
#include "i915_vma.h"
struct guc_preempt_work {
@@ -164,7 +165,8 @@ int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
void intel_guc_to_host_event_handler(struct intel_guc *guc);
void intel_guc_to_host_event_handler_nop(struct intel_guc *guc);
void intel_guc_to_host_event_handler_mmio(struct intel_guc *guc);
-void intel_guc_to_host_process_recv_msg(struct intel_guc *guc, u32 msg);
+int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
+ const u32 *payload, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index f0db62887f50..bec62f34b15a 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,8 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
* to find it. Note that we have to skip our header (1 page),
* because our GuC shared data is there.
*/
- kernel_ctx_vma = to_intel_context(dev_priv->kernel_context,
- dev_priv->engine[RCS])->state;
+ kernel_ctx_vma = dev_priv->engine[RCS0]->kernel_context->state;
blob->ads.golden_context_lrca =
intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.c b/drivers/gpu/drm/i915/intel_guc_ct.c
index a52883e9146f..dde1dc0d6e69 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/intel_guc_ct.c
@@ -140,11 +140,6 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
return err;
}
-static bool ctch_is_open(struct intel_guc_ct_channel *ctch)
-{
- return ctch->vma != NULL;
-}
-
static int ctch_init(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
@@ -214,25 +209,21 @@ err_out:
static void ctch_fini(struct intel_guc *guc,
struct intel_guc_ct_channel *ctch)
{
+ GEM_BUG_ON(ctch->enabled);
+
i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
-static int ctch_open(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static int ctch_enable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
u32 base;
int err;
int i;
- CT_DEBUG_DRIVER("CT: channel %d reopen=%s\n",
- ctch->owner, yesno(ctch_is_open(ctch)));
+ GEM_BUG_ON(!ctch->vma);
- if (!ctch->vma) {
- err = ctch_init(guc, ctch);
- if (unlikely(err))
- goto err_out;
- GEM_BUG_ON(!ctch->vma);
- }
+ GEM_BUG_ON(ctch->enabled);
/* vma should be already allocated and map'ed */
base = intel_guc_ggtt_offset(guc, ctch->vma);
@@ -255,7 +246,7 @@ static int ctch_open(struct intel_guc *guc,
base + PAGE_SIZE/4 * CTB_RECV,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
- goto err_fini;
+ goto err_out;
err = guc_action_register_ct_buffer(guc,
base + PAGE_SIZE/4 * CTB_SEND,
@@ -263,23 +254,25 @@ static int ctch_open(struct intel_guc *guc,
if (unlikely(err))
goto err_deregister;
+ ctch->enabled = true;
+
return 0;
err_deregister:
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
-err_fini:
- ctch_fini(guc, ctch);
err_out:
DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
return err;
}
-static void ctch_close(struct intel_guc *guc,
- struct intel_guc_ct_channel *ctch)
+static void ctch_disable(struct intel_guc *guc,
+ struct intel_guc_ct_channel *ctch)
{
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
+
+ ctch->enabled = false;
guc_action_deregister_ct_buffer(guc,
ctch->owner,
@@ -287,7 +280,6 @@ static void ctch_close(struct intel_guc *guc,
guc_action_deregister_ct_buffer(guc,
ctch->owner,
INTEL_GUC_CT_BUFFER_TYPE_RECV);
- ctch_fini(guc, ctch);
}
static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
@@ -481,7 +473,7 @@ static int ctch_send(struct intel_guc_ct *ct,
u32 fence;
int err;
- GEM_BUG_ON(!ctch_is_open(ctch));
+ GEM_BUG_ON(!ctch->enabled);
GEM_BUG_ON(!len);
GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
GEM_BUG_ON(!response_buf && response_buf_size);
@@ -709,14 +701,15 @@ static void ct_process_request(struct intel_guc_ct *ct,
u32 action, u32 len, const u32 *payload)
{
struct intel_guc *guc = ct_to_guc(ct);
+ int ret;
CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
- if (unlikely(len < 1))
+ ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
+ if (unlikely(ret))
goto fail_unexpected;
- intel_guc_to_host_process_recv_msg(guc, *payload);
break;
default:
@@ -817,7 +810,7 @@ static void ct_process_host_channel(struct intel_guc_ct *ct)
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
do {
@@ -849,6 +842,51 @@ static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
}
/**
+ * intel_guc_ct_init - Init CT communication
+ * @ct: pointer to CT struct
+ *
+ * Allocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ *
+ * Return: 0 on success, a negative errno code on failure.
+ */
+int intel_guc_ct_init(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+ int err;
+
+ err = ctch_init(guc, ctch);
+ if (unlikely(err)) {
+ DRM_ERROR("CT: can't open channel %d; err=%d\n",
+ ctch->owner, err);
+ return err;
+ }
+
+ GEM_BUG_ON(!ctch->vma);
+ return 0;
+}
+
+/**
+ * intel_guc_ct_fini - Fini CT communication
+ * @ct: pointer to CT struct
+ *
+ * Deallocate memory required for communication via
+ * the CT channel.
+ *
+ * Shall only be called for platforms with HAS_GUC_CT.
+ */
+void intel_guc_ct_fini(struct intel_guc_ct *ct)
+{
+ struct intel_guc *guc = ct_to_guc(ct);
+ struct intel_guc_ct_channel *ctch = &ct->host_channel;
+
+ ctch_fini(guc, ctch);
+}
+
+/**
* intel_guc_ct_enable - Enable buffer based command transport.
* @ct: pointer to CT struct
*
@@ -865,7 +903,10 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- err = ctch_open(guc, ctch);
+ if (ctch->enabled)
+ return 0;
+
+ err = ctch_enable(guc, ctch);
if (unlikely(err))
return err;
@@ -890,10 +931,10 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
GEM_BUG_ON(!HAS_GUC_CT(i915));
- if (!ctch_is_open(ctch))
+ if (!ctch->enabled)
return;
- ctch_close(guc, ctch);
+ ctch_disable(guc, ctch);
/* Disable send */
guc->send = intel_guc_send_nop;
diff --git a/drivers/gpu/drm/i915/intel_guc_ct.h b/drivers/gpu/drm/i915/intel_guc_ct.h
index d774895ab143..f5e7f0663304 100644
--- a/drivers/gpu/drm/i915/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/intel_guc_ct.h
@@ -66,6 +66,7 @@ struct intel_guc_ct_channel {
struct intel_guc_ct_buffer ctbs[2];
u32 owner;
u32 next_fence;
+ bool enabled;
};
/** Holds all command transport channels.
@@ -90,6 +91,8 @@ struct intel_guc_ct {
};
void intel_guc_ct_init_early(struct intel_guc_ct *ct);
+int intel_guc_ct_init(struct intel_guc_ct *ct);
+void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
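With the new intel_guc_ct_init/fini entry points, the host CT channel gains a four-stage lifecycle: init allocates the backing vma once at driver load, enable/disable register and deregister the buffers around runtime transitions, and fini frees the allocation at unload. A rough sketch of that ordering, using stand-in state rather than the real structures:

    #include <stdbool.h>
    #include <stdlib.h>

    /* Stand-in for intel_guc_ct_channel: only the state the lifecycle touches. */
    struct ct_channel {
        void *vma;      /* allocated by init, released by fini */
        bool  enabled;  /* set by enable, cleared by disable   */
    };

    static int ct_init(struct ct_channel *ch)
    {
        ch->vma = malloc(4096);
        return ch->vma ? 0 : -1;
    }

    static int ct_enable(struct ct_channel *ch)
    {
        if (ch->enabled)        /* enabling twice is now a harmless no-op */
            return 0;
        ch->enabled = true;     /* the real code registers both CT buffers here */
        return 0;
    }

    static void ct_disable(struct ct_channel *ch) { ch->enabled = false; }
    static void ct_fini(struct ct_channel *ch)    { free(ch->vma); ch->vma = NULL; }

    int main(void)
    {
        struct ct_channel ch = { 0 };

        if (ct_init(&ch))       /* once, at load   */
            return 1;
        ct_enable(&ch);         /* per enable/disable cycle */
        ct_disable(&ch);
        ct_fini(&ch);           /* once, at unload */
        return 0;
    }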
diff --git a/drivers/gpu/drm/i915/intel_guc_fw.c b/drivers/gpu/drm/i915/intel_guc_fw.c
index 13ff7003c6be..792a551450c7 100644
--- a/drivers/gpu/drm/i915/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/intel_guc_fw.c
@@ -241,7 +241,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
GEM_BUG_ON(guc_fw->type != INTEL_UC_FW_TYPE_GUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
guc_prepare_xfer(guc);
@@ -254,7 +254,7 @@ static int guc_fw_xfer(struct intel_uc_fw *guc_fw, struct i915_vma *vma)
ret = guc_xfer_ucode(guc, vma);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_guc_log.c b/drivers/gpu/drm/i915/intel_guc_log.c
index 806fdfd7c78a..7146524264dd 100644
--- a/drivers/gpu/drm/i915/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/intel_guc_log.c
@@ -620,7 +620,12 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
void intel_guc_log_relay_close(struct intel_guc_log *log)
{
+ struct intel_guc *guc = log_to_guc(log);
+ struct drm_i915_private *i915 = guc_to_i915(guc);
+
guc_log_disable_flush_events(log);
+ synchronize_irq(i915->drm.irq);
+
flush_work(&log->relay.flush_work);
mutex_lock(&log->relay.lock);
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8bc8aa54aa35..37f60cb8e9e1 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -382,7 +382,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
desc->db_id = client->doorbell_id;
for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
- struct intel_context *ce = to_intel_context(ctx, engine);
+ struct intel_context *ce = intel_context_lookup(ctx, engine);
u32 guc_engine_id = engine->guc_id;
struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
@@ -393,7 +393,7 @@ static void guc_stage_desc_init(struct intel_guc_client *client)
* for now who owns a GuC client. But for future owner of GuC
* client, need to make sure lrc is pinned prior to enter here.
*/
- if (!ce->state)
+ if (!ce || !ce->state)
break; /* XXX: continue? */
/*
@@ -535,7 +535,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
spin_lock(&client->wq_lock);
guc_wq_item_append(client, engine->guc_id, ctx_desc,
- ring_tail, rq->global_seqno);
+ ring_tail, rq->fence.seqno);
guc_ring_doorbell(client);
client->submissions[engine->id] += 1;
@@ -567,7 +567,7 @@ static void inject_preempt_context(struct work_struct *work)
preempt_work[engine->id]);
struct intel_guc_client *client = guc->preempt_client;
struct guc_stage_desc *stage_desc = __get_stage_desc(client);
- struct intel_context *ce = to_intel_context(client->owner, engine);
+ struct intel_context *ce = engine->preempt_context;
u32 data[7];
if (!ce->ring->emit) { /* recreate upon load/resume */
@@ -575,7 +575,7 @@ static void inject_preempt_context(struct work_struct *work)
u32 *cs;
cs = ce->ring->vaddr;
- if (engine->id == RCS) {
+ if (engine->class == RENDER_CLASS) {
cs = gen8_emit_ggtt_write_rcs(cs,
GUC_PREEMPT_FINISHED,
addr,
@@ -583,7 +583,8 @@ static void inject_preempt_context(struct work_struct *work)
} else {
cs = gen8_emit_ggtt_write(cs,
GUC_PREEMPT_FINISHED,
- addr);
+ addr,
+ 0);
*cs++ = MI_NOOP;
*cs++ = MI_NOOP;
}
@@ -649,9 +650,10 @@ static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
struct guc_ctx_report *report =
&data->preempt_ctx_report[engine->guc_id];
- WARN_ON(wait_for_atomic(report->report_return_status ==
- INTEL_GUC_REPORT_STATUS_COMPLETE,
- GUC_PREEMPT_POSTPROCESS_DELAY_MS));
+ if (wait_for_atomic(report->report_return_status ==
+ INTEL_GUC_REPORT_STATUS_COMPLETE,
+ GUC_PREEMPT_POSTPROCESS_DELAY_MS))
+ DRM_ERROR("Timed out waiting for GuC preemption report\n");
/*
* GuC is expecting that we're also going to clear the affected context
* counter, let's also reset the return status to not depend on GuC
@@ -720,7 +722,7 @@ static inline int rq_prio(const struct i915_request *rq)
static inline int port_prio(const struct execlist_port *port)
{
- return rq_prio(port_request(port));
+ return rq_prio(port_request(port)) | __NO_PREEMPTION;
}
static bool __guc_dequeue(struct intel_engine_cs *engine)
@@ -781,8 +783,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
execlists->queue_priority_hint =
@@ -871,6 +872,104 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
flush_workqueue(engine->i915->guc.preempt_wq);
}
+static void guc_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ execlists_cancel_port_requests(execlists);
+
+ /* Push back any incomplete requests for replay after the reset. */
+ rq = execlists_unwind_incomplete_requests(execlists);
+ if (!rq)
+ goto out_unlock;
+
+ if (!i915_request_started(rq))
+ stalled = false;
+
+ i915_reset_request(rq, stalled);
+ intel_lr_context_reset(engine, rq->hw_context, rq->head, stalled);
+
+out_unlock:
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Cancel the requests on the HW and clear the ELSP tracker. */
+ execlists_cancel_port_requests(execlists);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void guc_reset_finish(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
+
+ GEM_TRACE("%s: depth->%d\n", engine->name,
+ atomic_read(&execlists->tasklet.count));
+}
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -1031,7 +1130,7 @@ static int guc_clients_create(struct intel_guc *guc)
GEM_BUG_ON(guc->preempt_client);
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_NORMAL,
dev_priv->kernel_context);
if (IS_ERR(client)) {
@@ -1042,7 +1141,7 @@ static int guc_clients_create(struct intel_guc *guc)
if (dev_priv->preempt_context) {
client = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
GUC_CLIENT_PRIORITY_KMD_HIGH,
dev_priv->preempt_context);
if (IS_ERR(client)) {
@@ -1262,10 +1361,12 @@ static void guc_interrupts_release(struct drm_i915_private *dev_priv)
static void guc_submission_park(struct intel_engine_cs *engine)
{
intel_engine_unpin_breadcrumbs_irq(engine);
+ engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
static void guc_submission_unpark(struct intel_engine_cs *engine)
{
+ engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
intel_engine_pin_breadcrumbs_irq(engine);
}
@@ -1290,6 +1391,10 @@ static void guc_set_default_submission(struct intel_engine_cs *engine)
engine->unpark = guc_submission_unpark;
engine->reset.prepare = guc_reset_prepare;
+ engine->reset.reset = guc_reset;
+ engine->reset.finish = guc_reset_finish;
+
+ engine->cancel_requests = guc_cancel_requests;
engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
}
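guc_reset_finish() above re-enables the submission tasklet and, if that dropped the last disable reference, kicks it so a submission that arrived while the reset was in flight is not missed. A toy version of that disable-count pattern in plain C (no real tasklets or atomics, only the shape of the logic):

    #include <stdbool.h>
    #include <stdio.h>

    static int  disable_count;  /* stand-in for the tasklet's count */
    static bool scheduled;

    static void tasklet_disable_local(void) { disable_count++; }

    /* Returns true only when the last disable reference is dropped. */
    static bool tasklet_enable_local(void) { return --disable_count == 0; }

    static void tasklet_kick(void) { scheduled = true; }

    int main(void)
    {
        tasklet_disable_local();    /* reset prepare: block submission       */
        /* ... the engine reset itself would happen here ...                 */
        if (tasklet_enable_local()) /* reset finish: unblock submission ...  */
            tasklet_kick();         /* ... and kick in case we missed work   */

        printf("scheduled=%d depth=%d\n", scheduled, disable_count);
        return 0;
    }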
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.h b/drivers/gpu/drm/i915/intel_guc_submission.h
index 169c54568340..aa5e6749c925 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/intel_guc_submission.h
@@ -29,6 +29,7 @@
#include "i915_gem.h"
#include "i915_selftest.h"
+#include "intel_engine_types.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index a219c796e56d..3d51ed1428d4 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -56,7 +56,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
int slice;
int subslice;
- if (engine->id != RCS)
+ if (engine->id != RCS0)
return true;
intel_engine_get_instdone(engine, &instdone);
@@ -118,11 +118,11 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- tmp = I915_READ_CTL(engine);
+ tmp = ENGINE_READ(engine, RING_CTL);
if (tmp & RING_WAIT) {
- i915_handle_error(dev_priv, BIT(engine->id), 0,
+ i915_handle_error(dev_priv, engine->mask, 0,
"stuck wait on %s", engine->name);
- I915_WRITE_CTL(engine, tmp);
+ ENGINE_WRITE(engine, RING_CTL, tmp);
return ENGINE_WAIT_KICK;
}
@@ -133,21 +133,21 @@ static void hangcheck_load_sample(struct intel_engine_cs *engine,
struct hangcheck *hc)
{
hc->acthd = intel_engine_get_active_head(engine);
- hc->seqno = intel_engine_get_seqno(engine);
+ hc->seqno = intel_engine_get_hangcheck_seqno(engine);
}
static void hangcheck_store_sample(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
engine->hangcheck.acthd = hc->acthd;
- engine->hangcheck.seqno = hc->seqno;
+ engine->hangcheck.last_seqno = hc->seqno;
}
static enum intel_engine_hangcheck_action
hangcheck_get_action(struct intel_engine_cs *engine,
const struct hangcheck *hc)
{
- if (engine->hangcheck.seqno != hc->seqno)
+ if (engine->hangcheck.last_seqno != hc->seqno)
return ENGINE_ACTIVE_SEQNO;
if (intel_engine_is_idle(engine))
@@ -221,8 +221,8 @@ static void hangcheck_declare_hang(struct drm_i915_private *i915,
unsigned int stuck)
{
struct intel_engine_cs *engine;
+ intel_engine_mask_t tmp;
char msg[80];
- unsigned int tmp;
int len;
/* If some rings hung but others were still busy, only
@@ -263,14 +263,14 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
if (!READ_ONCE(dev_priv->gt.awake))
return;
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return;
/* As enabling the GPU requires fairly extensive mmio access,
* periodically arm the mmio checker to see if we are triggering
* any invalid access.
*/
- intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+ intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
for_each_engine(engine, dev_priv, id) {
struct hangcheck hc;
@@ -282,13 +282,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
hangcheck_store_sample(engine, &hc);
if (hc.stalled) {
- hung |= intel_engine_flag(engine);
+ hung |= engine->mask;
if (hc.action != ENGINE_DEAD)
- stuck |= intel_engine_flag(engine);
+ stuck |= engine->mask;
}
if (hc.wedged)
- wedged |= intel_engine_flag(engine);
+ wedged |= engine->mask;
}
if (GEM_SHOW_DEBUG() && (hung | stuck)) {
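The hangcheck changes above replace BIT(engine->id) arithmetic with each engine's precomputed engine->mask, accumulated into intel_engine_mask_t sets of hung, stuck and wedged engines. A trivial sketch of that accumulation (the engine names and mask bits here are invented):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t engine_mask_t;

    struct engine {
        const char   *name;
        engine_mask_t mask;     /* each engine owns one bit */
        int           stalled;
    };

    int main(void)
    {
        struct engine engines[] = {
            { "rcs0", 1u << 0, 1 },
            { "vcs0", 1u << 1, 0 },
        };
        engine_mask_t hung = 0;

        /* Callers simply OR the per-engine masks together. */
        for (unsigned int i = 0; i < sizeof(engines) / sizeof(engines[0]); i++)
            if (engines[i].stalled)
                hung |= engines[i].mask;

        printf("hung engines: 0x%x\n", hung);
        return 0;
    }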
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
index ce7ba3a9c000..99b007169c49 100644
--- a/drivers/gpu/drm/i915/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -6,15 +6,20 @@
* Sean Paul <seanpaul@chromium.org>
*/
-#include <drm/drm_hdcp.h>
+#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/random.h>
-#include "intel_drv.h"
+#include <drm/drm_hdcp.h>
+#include <drm/i915_component.h>
+
#include "i915_reg.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
#define KEY_LOAD_TRIES 5
#define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50
+#define HDCP2_LC_RETRY_CNT 3
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
@@ -72,6 +77,52 @@ bool intel_hdcp_capable(struct intel_connector *connector)
return capable;
}
+/* Are both the platform and the sink HDCP2.2 capable? */
+static bool intel_hdcp2_capable(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool capable = false;
+
+ /* I915 support for HDCP2.2 */
+ if (!hdcp->hdcp2_supported)
+ return false;
+
+	/* Is the MEI interface bound and usable? */
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added || !dev_priv->hdcp_master) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return false;
+ }
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ /* Sink's capability for HDCP2.2 */
+ hdcp->shim->hdcp_2_2_capable(intel_dig_port, &capable);
+
+ return capable;
+}
+
+static inline bool intel_hdcp_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(PORT_HDCP_STATUS(port));
+ return reg & HDCP_STATUS_ENC;
+}
+
+static inline bool intel_hdcp2_in_use(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum port port = connector->encoder->port;
+ u32 reg;
+
+ reg = I915_READ(HDCP2_STATUS_DDI(port));
+ return reg & LINK_ENCRYPTION_STATUS;
+}
+
static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim)
{
@@ -176,7 +227,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
}
/* Wait for the keys to load (500us) */
- ret = __intel_wait_for_register(dev_priv, HDCP_KEY_STATUS,
+ ret = __intel_wait_for_register(&dev_priv->uncore, HDCP_KEY_STATUS,
HDCP_KEY_LOAD_DONE, HDCP_KEY_LOAD_DONE,
10, 1, &val);
if (ret)
@@ -194,7 +245,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
I915_WRITE(HDCP_SHA_TEXT, sha_text);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_READY, HDCP_SHA1_READY, 1)) {
DRM_ERROR("Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
@@ -425,7 +476,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port,
/* Tell the HW we're done with the hash and wait for it to ACK */
I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
- if (intel_wait_for_register(dev_priv, HDCP_REP_CTL,
+ if (intel_wait_for_register(&dev_priv->uncore, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE,
HDCP_SHA1_COMPLETE, 1)) {
DRM_ERROR("Timed out waiting for SHA1 complete\n");
@@ -555,7 +606,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_AN_READY,
HDCP_STATUS_AN_READY, 1)) {
DRM_ERROR("Timed out waiting for An\n");
@@ -636,7 +687,7 @@ static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
}
/* Wait for encryption confirmation */
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port),
+ if (intel_wait_for_register(&dev_priv->uncore, PORT_HDCP_STATUS(port),
HDCP_STATUS_ENC, HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Timed out waiting for encryption\n");
@@ -666,8 +717,10 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
connector->base.name, connector->base.base.id);
+ hdcp->hdcp_encrypted = false;
I915_WRITE(PORT_HDCP_CONF(port), 0);
- if (intel_wait_for_register(dev_priv, PORT_HDCP_STATUS(port), ~0, 0,
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PORT_HDCP_STATUS(port), ~0, 0,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
@@ -711,8 +764,10 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
/* Incase of authentication failures, HDCP spec expects reauth. */
for (i = 0; i < tries; i++) {
ret = intel_hdcp_auth(conn_to_dig_port(connector), hdcp->shim);
- if (!ret)
+ if (!ret) {
+ hdcp->hdcp_encrypted = true;
return 0;
+ }
DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
@@ -730,16 +785,64 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
return container_of(hdcp, struct intel_connector, hdcp);
}
-static void intel_hdcp_check_work(struct work_struct *work)
+/* Implements Part 3 of the HDCP authorization procedure */
+static int intel_hdcp_check_link(struct intel_connector *connector)
{
- struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
- struct intel_hdcp,
- check_work);
- struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ enum port port = intel_dig_port->base.port;
+ int ret = 0;
- if (!intel_hdcp_check_link(connector))
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
+ mutex_lock(&hdcp->mutex);
+
+ /* Check_link valid only when HDCP1.4 is enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp_in_use(connector))) {
+ DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ I915_READ(PORT_HDCP_STATUS(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ if (hdcp->shim->check_link(intel_dig_port)) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = _intel_hdcp_disable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp_enable(connector);
+ if (ret) {
+ DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
}
static void intel_hdcp_prop_work(struct work_struct *work)
@@ -773,14 +876,929 @@ bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
return INTEL_GEN(dev_priv) >= 9 && port < PORT_E;
}
+static int
+hdcp2_prepare_ake_init(struct intel_connector *connector,
+ struct hdcp2_ake_init *ake_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
+ if (ret)
+ DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
+ struct hdcp2_ake_send_cert *rx_cert,
+ bool *paired,
+ struct hdcp2_ake_no_stored_km *ek_pub_km,
+ size_t *msg_sz)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_receiver_cert_prepare_km(comp->mei_dev, data,
+ rx_cert, paired,
+ ek_pub_km, msg_sz);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_verify_hprime(struct intel_connector *connector,
+ struct hdcp2_ake_send_hprime *rx_hprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_store_pairing_info(struct intel_connector *connector,
+ struct hdcp2_ake_send_pairing_info *pairing_info)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_prepare_lc_init(struct intel_connector *connector,
+ struct hdcp2_lc_init *lc_init)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_lprime(struct intel_connector *connector,
+ struct hdcp2_lc_send_lprime *rx_lprime)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_prepare_skey(struct intel_connector *connector,
+ struct hdcp2_ske_send_eks *ske_data)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
+ struct hdcp2_rep_send_receiverid_list
+ *rep_topology,
+ struct hdcp2_rep_send_ack *rep_send_ack)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->repeater_check_flow_prepare_ack(comp->mei_dev, data,
+ rep_topology,
+ rep_send_ack);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int
+hdcp2_verify_mprime(struct intel_connector *connector,
+ struct hdcp2_rep_stream_ready *stream_ready)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_authenticate_port(struct intel_connector *connector)
+{
+ struct hdcp_port_data *data = &connector->hdcp.port_data;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
+ if (ret < 0)
+ DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_close_mei_session(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct i915_hdcp_comp_master *comp;
+ int ret;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ comp = dev_priv->hdcp_master;
+
+ if (!comp || !comp->ops) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return -EINVAL;
+ }
+
+ ret = comp->ops->close_hdcp_session(comp->mei_dev,
+ &connector->hdcp.port_data);
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return ret;
+}
+
+static int hdcp2_deauthenticate_port(struct intel_connector *connector)
+{
+ return hdcp2_close_mei_session(connector);
+}
+
+/* Authentication flow starts from here */
+static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_ake_init ake_init;
+ struct hdcp2_ake_send_cert send_cert;
+ struct hdcp2_ake_no_stored_km no_stored_km;
+ struct hdcp2_ake_send_hprime send_hprime;
+ struct hdcp2_ake_send_pairing_info pairing_info;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ size_t size;
+ int ret;
+
+ /* Init for seq_num */
+ hdcp->seq_num_v = 0;
+ hdcp->seq_num_m = 0;
+
+ ret = hdcp2_prepare_ake_init(connector, &msgs.ake_init);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.ake_init,
+ sizeof(msgs.ake_init));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_CERT,
+ &msgs.send_cert, sizeof(msgs.send_cert));
+ if (ret < 0)
+ return ret;
+
+ if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL)
+ return -EINVAL;
+
+ hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
+
+ /*
+	 * msgs.no_stored_km is reused as the buffer here: it is large enough to
+	 * hold either the "no stored km" or the "stored km" message.
+ */
+ ret = hdcp2_verify_rx_cert_prepare_km(connector, &msgs.send_cert,
+ &hdcp->is_paired,
+ &msgs.no_stored_km, &size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.no_stored_km, size);
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_AKE_SEND_HPRIME,
+ &msgs.send_hprime, sizeof(msgs.send_hprime));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_verify_hprime(connector, &msgs.send_hprime);
+ if (ret < 0)
+ return ret;
+
+ if (!hdcp->is_paired) {
+ /* Pairing is required */
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_AKE_SEND_PAIRING_INFO,
+ &msgs.pairing_info,
+ sizeof(msgs.pairing_info));
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp2_store_pairing_info(connector, &msgs.pairing_info);
+ if (ret < 0)
+ return ret;
+ hdcp->is_paired = true;
+ }
+
+ return 0;
+}
+
+static int hdcp2_locality_check(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_lc_init lc_init;
+ struct hdcp2_lc_send_lprime send_lprime;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int tries = HDCP2_LC_RETRY_CNT, ret, i;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_prepare_lc_init(connector, &msgs.lc_init);
+ if (ret < 0)
+ continue;
+
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.lc_init,
+ sizeof(msgs.lc_init));
+ if (ret < 0)
+ continue;
+
+ ret = shim->read_2_2_msg(intel_dig_port,
+ HDCP_2_2_LC_SEND_LPRIME,
+ &msgs.send_lprime,
+ sizeof(msgs.send_lprime));
+ if (ret < 0)
+ continue;
+
+ ret = hdcp2_verify_lprime(connector, &msgs.send_lprime);
+ if (!ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int hdcp2_session_key_exchange(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp2_ske_send_eks send_eks;
+ int ret;
+
+ ret = hdcp2_prepare_skey(connector, &send_eks);
+ if (ret < 0)
+ return ret;
+
+ ret = hdcp->shim->write_2_2_msg(intel_dig_port, &send_eks,
+ sizeof(send_eks));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static
+int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_stream_manage stream_manage;
+ struct hdcp2_rep_stream_ready stream_ready;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ /* Prepare RepeaterAuth_Stream_Manage msg */
+ msgs.stream_manage.msg_id = HDCP_2_2_REP_STREAM_MANAGE;
+ drm_hdcp2_u32_to_seq_num(msgs.stream_manage.seq_num_m, hdcp->seq_num_m);
+
+	/* K, the number of streams, is fixed at 1 and stored as big-endian. */
+ msgs.stream_manage.k = cpu_to_be16(1);
+
+ /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
+ msgs.stream_manage.streams[0].stream_id = 0;
+ msgs.stream_manage.streams[0].stream_type = hdcp->content_type;
+
+ /* Send it to Repeater */
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.stream_manage,
+ sizeof(msgs.stream_manage));
+ if (ret < 0)
+ return ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_STREAM_READY,
+ &msgs.stream_ready, sizeof(msgs.stream_ready));
+ if (ret < 0)
+ return ret;
+
+ hdcp->port_data.seq_num_m = hdcp->seq_num_m;
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+
+ ret = hdcp2_verify_mprime(connector, &msgs.stream_ready);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_m++;
+
+ if (hdcp->seq_num_m > HDCP_2_2_SEQ_NUM_MAX) {
+ DRM_DEBUG_KMS("seq_num_m roll over.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static
+int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ union {
+ struct hdcp2_rep_send_receiverid_list recvid_list;
+ struct hdcp2_rep_send_ack rep_ack;
+ } msgs;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ u8 *rx_info;
+ u32 seq_num_v;
+ int ret;
+
+ ret = shim->read_2_2_msg(intel_dig_port, HDCP_2_2_REP_SEND_RECVID_LIST,
+ &msgs.recvid_list, sizeof(msgs.recvid_list));
+ if (ret < 0)
+ return ret;
+
+ rx_info = msgs.recvid_list.rx_info;
+
+ if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
+ HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
+ DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ return -EINVAL;
+ }
+
+	/* Convert the big-endian seq_num_v into a local u32 */
+ seq_num_v = drm_hdcp2_seq_num_to_u32(msgs.recvid_list.seq_num_v);
+
+ if (seq_num_v < hdcp->seq_num_v) {
+ /* Roll over of the seq_num_v from repeater. Reauthenticate. */
+ DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ return -EINVAL;
+ }
+
+ ret = hdcp2_verify_rep_topology_prepare_ack(connector,
+ &msgs.recvid_list,
+ &msgs.rep_ack);
+ if (ret < 0)
+ return ret;
+
+ hdcp->seq_num_v = seq_num_v;
+ ret = shim->write_2_2_msg(intel_dig_port, &msgs.rep_ack,
+ sizeof(msgs.rep_ack));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hdcp2_authenticate_repeater(struct intel_connector *connector)
+{
+ int ret;
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (ret < 0)
+ return ret;
+
+ return hdcp2_propagate_stream_management_info(connector);
+}
+
+static int hdcp2_authenticate_sink(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ const struct intel_hdcp_shim *shim = hdcp->shim;
+ int ret;
+
+ ret = hdcp2_authentication_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_locality_check(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ ret = hdcp2_session_key_exchange(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ return ret;
+ }
+
+ if (shim->config_stream_type) {
+ ret = shim->config_stream_type(intel_dig_port,
+ hdcp->is_repeater,
+ hdcp->content_type);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (hdcp->is_repeater) {
+ ret = hdcp2_authenticate_repeater(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ return ret;
+ }
+ }
+
+ hdcp->port_data.streams[0].stream_type = hdcp->content_type;
+ ret = hdcp2_authenticate_port(connector);
+ if (ret < 0)
+ return ret;
+
+ return ret;
+}
+
+static int hdcp2_enable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS);
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
+ if (ret) {
+ DRM_ERROR("Failed to enable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) {
+ /* Link is Authenticated. Now set for Encryption */
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) |
+ CTL_LINK_ENCRYPTION_REQ);
+ }
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS,
+ LINK_ENCRYPTION_STATUS,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+
+ return ret;
+}
+
+static int hdcp2_disable_encryption(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret;
+
+ WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS));
+
+ I915_WRITE(HDCP2_CTL_DDI(port),
+ I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ);
+
+ ret = intel_wait_for_register(&dev_priv->uncore, HDCP2_STATUS_DDI(port),
+ LINK_ENCRYPTION_STATUS, 0x0,
+ ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
+ if (ret == -ETIMEDOUT)
+		DRM_DEBUG_KMS("Disable encryption timed out\n");
+
+ if (hdcp->shim->toggle_signalling) {
+ ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
+ if (ret) {
+ DRM_ERROR("Failed to disable HDCP signalling. %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
+{
+ int ret, i, tries = 3;
+
+ for (i = 0; i < tries; i++) {
+ ret = hdcp2_authenticate_sink(connector);
+ if (!ret)
+ break;
+
+ /* Clearing the mei hdcp session */
+ DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+
+ if (i != tries) {
+ /*
+ * Ensuring the required 200mSec min time interval between
+ * Session Key Exchange and encryption.
+ */
+ msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
+ ret = hdcp2_enable_encryption(connector);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+ }
+ }
+
+ return ret;
+}
+
+static int _intel_hdcp2_enable(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ ret = hdcp2_authenticate_and_encrypt(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
+ return ret;
+ }
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
+
+ hdcp->hdcp2_encrypted = true;
+ return 0;
+}
+
+static int _intel_hdcp2_disable(struct intel_connector *connector)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
+
+ ret = hdcp2_disable_encryption(connector);
+
+ if (hdcp2_deauthenticate_port(connector) < 0)
+ DRM_DEBUG_KMS("Port deauth failed.\n");
+
+ connector->hdcp.hdcp2_encrypted = false;
+
+ return ret;
+}
+
+/* Implements the Link Integrity Check for HDCP2.2 */
+static int intel_hdcp2_check_link(struct intel_connector *connector)
+{
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ enum port port = connector->encoder->port;
+ int ret = 0;
+
+ mutex_lock(&hdcp->mutex);
+
+ /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED ||
+ !hdcp->hdcp2_encrypted) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (WARN_ON(!intel_hdcp2_in_use(connector))) {
+ DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
+ I915_READ(HDCP2_STATUS_DDI(port)));
+ ret = -ENXIO;
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = hdcp->shim->check_2_2_link(intel_dig_port);
+ if (ret == HDCP_LINK_PROTECTED) {
+ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
+ goto out;
+ }
+
+ if (ret == HDCP_TOPOLOGY_CHANGE) {
+ if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+ DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ } else {
+ DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
+ }
+
+ ret = _intel_hdcp2_disable(connector);
+ if (ret) {
+ DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+ ret = _intel_hdcp2_enable(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ goto out;
+ }
+
+out:
+ mutex_unlock(&hdcp->mutex);
+ return ret;
+}
+
+static void intel_hdcp_check_work(struct work_struct *work)
+{
+ struct intel_hdcp *hdcp = container_of(to_delayed_work(work),
+ struct intel_hdcp,
+ check_work);
+ struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
+
+ if (!intel_hdcp2_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP2_CHECK_PERIOD_MS);
+ else if (!intel_hdcp_check_link(connector))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static int i915_hdcp_component_bind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp bind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
+ dev_priv->hdcp_master->mei_dev = mei_kdev;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ return 0;
+}
+
+static void i915_hdcp_component_unbind(struct device *i915_kdev,
+ struct device *mei_kdev, void *data)
+{
+ struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
+
+ DRM_DEBUG("I915 HDCP comp unbind\n");
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_master = NULL;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+}
+
+static const struct component_ops i915_hdcp_component_ops = {
+ .bind = i915_hdcp_component_bind,
+ .unbind = i915_hdcp_component_unbind,
+};
+
+static inline int initialize_hdcp_port_data(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ struct hdcp_port_data *data = &hdcp->port_data;
+
+ data->port = connector->encoder->port;
+ data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED;
+ data->protocol = (u8)hdcp->shim->protocol;
+
+ data->k = 1;
+ if (!data->streams)
+ data->streams = kcalloc(data->k,
+ sizeof(struct hdcp2_streamid_type),
+ GFP_KERNEL);
+ if (!data->streams) {
+ DRM_ERROR("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ data->streams[0].stream_id = 0;
+ data->streams[0].stream_type = hdcp->content_type;
+
+ return 0;
+}
+
+static bool is_hdcp2_supported(struct drm_i915_private *dev_priv)
+{
+ if (!IS_ENABLED(CONFIG_INTEL_MEI_HDCP))
+ return false;
+
+ return (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv));
+}
+
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
+{
+ int ret;
+
+ if (!is_hdcp2_supported(dev_priv))
+ return;
+
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ WARN_ON(dev_priv->hdcp_comp_added);
+
+ dev_priv->hdcp_comp_added = true;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
+ I915_COMPONENT_HDCP);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+}
+
+static void intel_hdcp2_init(struct intel_connector *connector)
+{
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ ret = initialize_hdcp_port_data(connector);
+ if (ret) {
+ DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ return;
+ }
+
+ hdcp->hdcp2_supported = true;
+}
+
int intel_hdcp_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- ret = drm_connector_attach_content_protection_property(
- &connector->base);
+ if (!shim)
+ return -EINVAL;
+
+ ret = drm_connector_attach_content_protection_property(&connector->base);
if (ret)
return ret;
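Both seq_num_v and seq_num_m above are kept as u32 counters in the driver but travel as 3-byte big-endian fields in the HDCP 2.2 messages, converted via the drm_hdcp2_* helpers. A standalone sketch of that conversion, using locally named functions rather than the DRM helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* HDCP 2.2 sequence numbers are 24-bit and big-endian on the wire. */
    static void u32_to_seq_num(uint8_t seq[3], uint32_t val)
    {
        seq[0] = (val >> 16) & 0xff;
        seq[1] = (val >> 8) & 0xff;
        seq[2] = val & 0xff;
    }

    static uint32_t seq_num_to_u32(const uint8_t seq[3])
    {
        return ((uint32_t)seq[0] << 16) | ((uint32_t)seq[1] << 8) | seq[2];
    }

    int main(void)
    {
        uint8_t wire[3];

        u32_to_seq_num(wire, 0x012345);
        printf("round-trip: 0x%06x\n", seq_num_to_u32(wire));
        return 0;
    }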
@@ -788,28 +1806,47 @@ int intel_hdcp_init(struct intel_connector *connector,
mutex_init(&hdcp->mutex);
INIT_DELAYED_WORK(&hdcp->check_work, intel_hdcp_check_work);
INIT_WORK(&hdcp->prop_work, intel_hdcp_prop_work);
+
+ if (is_hdcp2_supported(dev_priv))
+ intel_hdcp2_init(connector);
+ init_waitqueue_head(&hdcp->cp_irq_queue);
+
return 0;
}
int intel_hdcp_enable(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- int ret;
+ unsigned long check_link_interval = DRM_HDCP_CHECK_PERIOD_MS;
+ int ret = -EINVAL;
if (!hdcp->shim)
return -ENOENT;
mutex_lock(&hdcp->mutex);
+ WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
- ret = _intel_hdcp_enable(connector);
- if (ret)
- goto out;
+ /*
+	 * Since HDCP2.2 is more secure than HDCP1.4, prefer HDCP2.2 whenever
+	 * the setup is capable of it.
+ */
+ if (intel_hdcp2_capable(connector)) {
+ ret = _intel_hdcp2_enable(connector);
+ if (!ret)
+ check_link_interval = DRM_HDCP2_CHECK_PERIOD_MS;
+ }
+
+ /* When HDCP2.2 fails, HDCP1.4 will be attempted */
+ if (ret && intel_hdcp_capable(connector))
+ ret = _intel_hdcp_enable(connector);
+
+ if (!ret) {
+ schedule_delayed_work(&hdcp->check_work, check_link_interval);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&hdcp->prop_work);
+ }
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- schedule_delayed_work(&hdcp->check_work,
- DRM_HDCP_CHECK_PERIOD_MS);
-out:
mutex_unlock(&hdcp->mutex);
return ret;
}
@@ -826,7 +1863,10 @@ int intel_hdcp_disable(struct intel_connector *connector)
if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
- ret = _intel_hdcp_disable(connector);
+ if (hdcp->hdcp2_encrypted)
+ ret = _intel_hdcp2_disable(connector);
+ else if (hdcp->hdcp_encrypted)
+ ret = _intel_hdcp_disable(connector);
}
mutex_unlock(&hdcp->mutex);
@@ -834,6 +1874,30 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->hdcp_comp_mutex);
+ if (!dev_priv->hdcp_comp_added) {
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+ return;
+ }
+
+ dev_priv->hdcp_comp_added = false;
+ mutex_unlock(&dev_priv->hdcp_comp_mutex);
+
+ component_del(dev_priv->drm.dev, &i915_hdcp_component_ops);
+}
+
+void intel_hdcp_cleanup(struct intel_connector *connector)
+{
+ if (!connector->hdcp.shim)
+ return;
+
+ mutex_lock(&connector->hdcp.mutex);
+ kfree(connector->hdcp.port_data.streams);
+ mutex_unlock(&connector->hdcp.mutex);
+}
+
void intel_hdcp_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state)
@@ -867,61 +1931,16 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
crtc_state->mode_changed = true;
}
-/* Implements Part 3 of the HDCP authorization procedure */
-int intel_hdcp_check_link(struct intel_connector *connector)
+/* Handles the CP_IRQ raised from the DP HDCP sink */
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector)
{
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- enum port port = intel_dig_port->base.port;
- int ret = 0;
if (!hdcp->shim)
- return -ENOENT;
-
- mutex_lock(&hdcp->mutex);
-
- if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
- goto out;
-
- if (!(I915_READ(PORT_HDCP_STATUS(port)) & HDCP_STATUS_ENC)) {
- DRM_ERROR("%s:%d HDCP check failed: link is not encrypted,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(PORT_HDCP_STATUS(port)));
- ret = -ENXIO;
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
-
- if (hdcp->shim->check_link(intel_dig_port)) {
- if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
- schedule_work(&hdcp->prop_work);
- }
- goto out;
- }
-
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
-
- ret = _intel_hdcp_disable(connector);
- if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ return;
- ret = _intel_hdcp_enable(connector);
- if (ret) {
- DRM_DEBUG_KMS("Failed to enable hdcp (%d)\n", ret);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- goto out;
- }
+ atomic_inc(&connector->hdcp.cp_irq_count);
+ wake_up_all(&connector->hdcp.cp_irq_queue);
-out:
- mutex_unlock(&hdcp->mutex);
- return ret;
+ schedule_delayed_work(&hdcp->check_work, 0);
}
diff --git a/drivers/gpu/drm/i915/intel_hdcp.h b/drivers/gpu/drm/i915/intel_hdcp.h
new file mode 100644
index 000000000000..a75f25f09d39
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDCP_H__
+#define __INTEL_HDCP_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_hdcp_shim;
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+ struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
+int intel_hdcp_init(struct intel_connector *connector,
+ const struct intel_hdcp_shim *hdcp_shim);
+int intel_hdcp_enable(struct intel_connector *connector);
+int intel_hdcp_disable(struct intel_connector *connector);
+bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
+bool intel_hdcp_capable(struct intel_connector *connector);
+void intel_hdcp_component_init(struct drm_i915_private *dev_priv);
+void intel_hdcp_component_fini(struct drm_i915_private *dev_priv);
+void intel_hdcp_cleanup(struct intel_connector *connector);
+void intel_hdcp_handle_cp_irq(struct intel_connector *connector);
+
+#endif /* __INTEL_HDCP_H__ */
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a46bffe2b288..34be2cfd0ec8 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -26,19 +26,30 @@
* Jesse Barnes <jesse.barnes@intel.com>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdmi.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
+
#include "i915_drv.h"
+#include "intel_audio.h"
+#include "intel_connector.h"
+#include "intel_ddi.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_lspcon.h"
+#include "intel_sdvo.h"
+#include "intel_panel.h"
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
@@ -82,6 +93,8 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
static u32 g4x_infoframe_index(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_SELECT_GAMUT;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -97,6 +110,12 @@ static u32 g4x_infoframe_index(unsigned int type)
static u32 g4x_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GAMUT;
+ case DP_SDP_VSC:
+ return 0;
case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
case HDMI_INFOFRAME_TYPE_SPD:
@@ -112,6 +131,10 @@ static u32 g4x_infoframe_enable(unsigned int type)
static u32 hsw_infoframe_enable(unsigned int type)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+ return VIDEO_DIP_ENABLE_GCP_HSW;
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return VIDEO_DIP_ENABLE_GMP_HSW;
case DP_SDP_VSC:
return VIDEO_DIP_ENABLE_VSC_HSW;
case DP_SDP_PPS:
@@ -135,6 +158,8 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv,
int i)
{
switch (type) {
+ case HDMI_PACKET_TYPE_GAMUT_METADATA:
+ return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
case DP_SDP_VSC:
return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
case DP_SDP_PPS:
@@ -198,17 +223,37 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(VIDEO_DIP_CTL);
}
-static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
+static void g4x_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VIDEO_DIP_CTL);
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VIDEO_DIP_CTL, val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VIDEO_DIP_DATA);
+}
+
+static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val = I915_READ(VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
@@ -251,7 +296,28 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
+static void ibx_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -260,10 +326,10 @@ static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -310,7 +376,28 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
+static void cpt_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -318,7 +405,7 @@ static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -362,7 +449,28 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(reg);
}
-static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
+static void vlv_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+
+ val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+ val |= g4x_infoframe_index(type);
+
+ I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+}
+
+static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -370,10 +478,10 @@ static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
- return false;
+ return 0;
if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
- return false;
+ return 0;
return val & (VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@ -413,7 +521,24 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
POSTING_READ(ctl_reg);
}
-static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
+static void hsw_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val, *data = frame;
+ int i;
+
+ val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+
+ for (i = 0; i < len; i += 4)
+ *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+ type, i >> 2));
+}
+
+static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -424,6 +549,53 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
}
+static const u8 infoframe_type_to_idx[] = {
+ HDMI_PACKET_TYPE_GENERAL_CONTROL,
+ HDMI_PACKET_TYPE_GAMUT_METADATA,
+ DP_SDP_VSC,
+ HDMI_INFOFRAME_TYPE_AVI,
+ HDMI_INFOFRAME_TYPE_SPD,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+};
+
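+/*
+ * Map an infoframe/packet type to its bit in crtc_state->infoframes.enable;
+ * returns 0 for types that are not tracked.
+ */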
+u32 intel_hdmi_infoframe_enable(unsigned int type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ if (infoframe_type_to_idx[i] == type)
+ return BIT(i);
+ }
+
+ return 0;
+}
+
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ u32 val, ret = 0;
+ int i;
+
+ val = dig_port->infoframes_enabled(encoder, crtc_state);
+
+ /* map from hardware bits to dip idx */
+ for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+ unsigned int type = infoframe_type_to_idx[i];
+
+ if (HAS_DDI(dev_priv)) {
+ if (val & hsw_infoframe_enable(type))
+ ret |= BIT(i);
+ } else {
+ if (val & g4x_infoframe_enable(type))
+ ret |= BIT(i);
+ }
+ }
+
+ return ret;
+}
+
/*
* The data we write to the DIP data buffer registers is 1 byte bigger than the
* HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -443,15 +615,23 @@ static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
*/
static void intel_write_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- union hdmi_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const union hdmi_infoframe *frame)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
u8 buffer[VIDEO_DIP_DATA_SIZE];
ssize_t len;
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ if (WARN_ON(frame->any.type != type))
+ return;
+
/* see comment above for the reason for this offset */
- len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
- if (len < 0)
+ len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+ if (WARN_ON(len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -459,84 +639,143 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
buffer[3] = 0;
len++;
- intel_dig_port->write_infoframe(encoder,
- crtc_state,
- frame->any.type, buffer, len);
+ intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
}
-static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame)
+{
+ struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+ u8 buffer[VIDEO_DIP_DATA_SIZE];
+ int ret;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(type)) == 0)
+ return;
+
+ intel_dig_port->read_infoframe(encoder, crtc_state,
+ type, buffer, sizeof(buffer));
+
+ /* Fill the 'hole' (see big comment above) at position 3 */
+ memmove(&buffer[1], &buffer[0], 3);
+
+ /* see comment above for the reason for this offset */
+ ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+ return;
+ }
+
+ if (frame->any.type != type)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, type);
+}
+
+static bool
+intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
- union hdmi_infoframe frame;
+ struct drm_connector *connector = conn_state->connector;
int ret;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- conn_state->connector,
+ if (!crtc_state->has_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
- return;
- }
+ if (ret)
+ return false;
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+ frame->colorspace = HDMI_COLORSPACE_YUV420;
else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ frame->colorspace = HDMI_COLORSPACE_YUV444;
else
- frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+ frame->colorspace = HDMI_COLORSPACE_RGB;
+
+ drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
- conn_state->connector,
+ drm_hdmi_avi_infoframe_quant_range(frame, connector,
adjusted_mode,
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- drm_hdmi_avi_infoframe_content_type(&frame.avi,
- conn_state);
+ drm_hdmi_avi_infoframe_content_type(frame, conn_state);
/* TODO: handle pixel repetition for YCBCR420 outputs */
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static bool
+intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
- union hdmi_infoframe frame;
+ struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
int ret;
- ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
- if (ret < 0) {
- DRM_ERROR("couldn't fill SPD infoframe\n");
- return;
- }
+ if (!crtc_state->has_infoframe)
+ return true;
- frame.spd.sdi = HDMI_SPD_SDI_PC;
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+ if (WARN_ON(ret))
+ return false;
+
+ frame->sdi = HDMI_SPD_SDI_PC;
+
+ ret = hdmi_spd_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
-static void
-intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- union hdmi_infoframe frame;
+static bool
+intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_vendor_infoframe *frame =
+ &crtc_state->infoframes.hdmi.vendor.hdmi;
+ const struct drm_display_info *info =
+ &conn_state->connector->display_info;
int ret;
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->base.adjusted_mode);
- if (ret < 0)
- return;
+ if (WARN_ON(ret))
+ return false;
- intel_write_infoframe(encoder, crtc_state,
- &frame);
+ ret = hdmi_vendor_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
}
static void g4x_set_infoframes(struct intel_encoder *encoder,
@@ -596,9 +835,15 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@ -664,7 +909,10 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
i915_reg_t reg;
- u32 val = 0;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return false;
if (HAS_DDI(dev_priv))
reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
@@ -675,18 +923,54 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
+ I915_WRITE(reg, crtc_state->infoframes.gcp);
+
+ return true;
+}
+
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ i915_reg_t reg;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+ return;
+
+ if (HAS_DDI(dev_priv))
+ reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+ else if (HAS_PCH_SPLIT(dev_priv))
+ reg = TVIDEO_DIP_GCP(crtc->pipe);
+ else
+ return;
+
+ crtc_state->infoframes.gcp = I915_READ(reg);
+}
+
+static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+ return;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+
/* Indicate color depth whenever the sink supports deep color */
if (hdmi_sink_is_deep_color(conn_state))
- val |= GCP_COLOR_INDICATION;
+ crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
/* Enable default_phase whenever the display mode is suitably aligned */
if (gcp_default_phase_possible(crtc_state->pipe_bpp,
&crtc_state->base.adjusted_mode))
- val |= GCP_DEFAULT_PHASE_ENABLE;
-
- I915_WRITE(reg, val);
-
- return val != 0;
+ crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
}
static void ibx_set_infoframes(struct intel_encoder *encoder,
@@ -737,9 +1021,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void cpt_set_infoframes(struct intel_encoder *encoder,
@@ -780,9 +1070,15 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void vlv_set_infoframes(struct intel_encoder *encoder,
@@ -832,9 +1128,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
static void hsw_set_infoframes(struct intel_encoder *encoder,
@@ -865,9 +1167,15 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
I915_WRITE(reg, val);
POSTING_READ(reg);
- intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
- intel_hdmi_set_spd_infoframe(encoder, crtc_state);
- intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &crtc_state->infoframes.avi);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &crtc_state->infoframes.spd);
+ intel_write_infoframe(encoder, crtc_state,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &crtc_state->infoframes.hdmi);
}
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@ -1073,10 +1381,44 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
return ret;
}
+static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct drm_crtc *crtc = connector->base.state->crtc;
+ struct intel_crtc *intel_crtc = container_of(crtc,
+ struct intel_crtc, base);
+ u32 scanline;
+ int ret;
+
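+ /*
+ * Wait for the pipe to reach a scanline inside the window of
+ * opportunity (lines 100-200 here) before re-toggling the HDCP
+ * signalling, per the Kabylake enc_en workaround below.
+ */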
+ for (;;) {
+ scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ if (scanline > 100 && scanline < 200)
+ break;
+ usleep_range(25, 50);
+ }
+
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+ if (ret) {
+ DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+ ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+ if (ret) {
+ DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
static
int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
bool enable)
{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_connector *connector = hdmi->attached_connector;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int ret;
if (!enable)
@@ -1088,6 +1430,14 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
enable ? "Enable" : "Disable", ret);
return ret;
}
+
+ /*
+ * WA: To fix the incorrect positioning of the window of
+ * opportunity and the enc_en signalling in KABYLAKE.
+ */
+ if (IS_KABYLAKE(dev_priv) && enable)
+ return kbl_repositioning_enc_en_signal(connector);
+
return 0;
}
@@ -1119,6 +1469,190 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
return true;
}
+static struct hdcp2_hdmi_msg_data {
+ u8 msg_id;
+ u32 timeout;
+ u32 timeout2;
+ } hdcp2_msg_data[] = {
+ {HDCP_2_2_AKE_INIT, 0, 0},
+ {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+ {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_STORED_KM, 0, 0},
+ {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+ HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+ {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS,
+ 0},
+ {HDCP_2_2_LC_INIT, 0, 0},
+ {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
+ {HDCP_2_2_SKE_SEND_EKS, 0, 0},
+ {HDCP_2_2_REP_SEND_RECVID_LIST,
+ HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+ {HDCP_2_2_REP_SEND_ACK, 0, 0},
+ {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
+ {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS,
+ 0},
+ };
+
+static
+int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+ u8 *rx_status)
+{
+ return intel_hdmi_hdcp_read(intel_dig_port,
+ HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+ rx_status,
+ HDCP_2_2_HDMI_RXSTATUS_LEN);
+}
+
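+/*
+ * Sink response timeout for a given HDCP2.2 message. AKE_SEND_HPRIME uses
+ * timeout when the receiver is already paired and timeout2 otherwise; all
+ * other messages only use timeout. Returns -EINVAL for unknown messages.
+ */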
+static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+ if (hdcp2_msg_data[i].msg_id == msg_id &&
+ (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
+ return hdcp2_msg_data[i].timeout;
+ else if (hdcp2_msg_data[i].msg_id == msg_id)
+ return hdcp2_msg_data[i].timeout2;
+
+ return -EINVAL;
+}
+
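+/*
+ * Read the HDMI HDCP2.2 RxStatus bytes once and report whether a message is
+ * pending and how large it is; REP_SEND_RECVID_LIST additionally requires
+ * the READY bit to be set.
+ */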
+static inline
+int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+ u8 msg_id, bool *msg_ready,
+ ssize_t *msg_sz)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+ return ret;
+ }
+
+ *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+ rx_status[0]);
+
+ if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+ *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+ *msg_sz);
+ else
+ *msg_ready = *msg_sz;
+
+ return 0;
+}
+
+static ssize_t
+intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, bool paired)
+{
+ bool msg_ready = false;
+ int timeout, ret;
+ ssize_t msg_sz = 0;
+
+ timeout = get_hdcp2_msg_timeout(msg_id, paired);
+ if (timeout < 0)
+ return timeout;
+
+ ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+ msg_id, &msg_ready,
+ &msg_sz),
+ !ret && msg_ready && msg_sz, timeout * 1000,
+ 1000, 5 * 1000);
+ if (ret)
+ DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
+ msg_id, ret, timeout);
+
+ return ret ? ret : msg_sz;
+}
+
+static
+int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+ void *buf, size_t size)
+{
+ unsigned int offset;
+
+ offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+ return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+}
+
+static
+int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+ u8 msg_id, void *buf, size_t size)
+{
+ struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+ struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+ unsigned int offset;
+ ssize_t ret;
+
+ ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+ hdcp->is_paired);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The available message size should be equal to or less than the
+ * available buffer size.
+ */
+ if (ret > size) {
+ DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
+ ret, size);
+ return -EINVAL;
+ }
+
+ offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+ if (ret)
+ DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+{
+ u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+ int ret;
+
+ ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+ if (ret)
+ return ret;
+
+ /*
+ * Re-auth requests and link integrity failures are reported through
+ * the same bit, i.e. reauth_req.
+ */
+ if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+ ret = HDCP_REAUTH_REQUEST;
+ else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+ ret = HDCP_TOPOLOGY_CHANGE;
+
+ return ret;
+}
+
+static
+int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+ bool *capable)
+{
+ u8 hdcp2_version;
+ int ret;
+
+ *capable = false;
+ ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+ &hdcp2_version, sizeof(hdcp2_version));
+ if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+ *capable = true;
+
+ return ret;
+}
+
+static inline
+enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
+{
+ return HDCP_PROTOCOL_HDMI;
+}
+
static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
.read_bksv = intel_hdmi_hdcp_read_bksv,
@@ -1130,6 +1664,11 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
.read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
.toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
.check_link = intel_hdmi_hdcp_check_link,
+ .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+ .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+ .check_2_2_link = intel_hdmi_hdcp2_check_link,
+ .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+ .protocol = HDCP_PROTOCOL_HDMI,
};
static void intel_hdmi_prepare(struct intel_encoder *encoder,
@@ -1195,7 +1734,6 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp, flags = 0;
@@ -1218,7 +1756,10 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
if (tmp & HDMI_MODE_SELECT_HDMI)
pipe_config->has_hdmi_sink = true;
- if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+ pipe_config->infoframes.enable |=
+ intel_hdmi_infoframes_enabled(encoder, pipe_config);
+
+ if (pipe_config->infoframes.enable)
pipe_config->has_infoframe = true;
if (tmp & SDVO_AUDIO_ENABLE)
@@ -1241,6 +1782,18 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
pipe_config->lane_count = 4;
+
+ intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_AVI,
+ &pipe_config->infoframes.avi);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_SPD,
+ &pipe_config->infoframes.spd);
+ intel_read_infoframe(encoder, pipe_config,
+ HDMI_INFOFRAME_TYPE_VENDOR,
+ &pipe_config->infoframes.hdmi);
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
@@ -1654,7 +2207,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
/* Display Wa_1405510057:icl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && IS_ICELAKE(dev_priv) &&
+ bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -1812,6 +2365,23 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
}
}
+ intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+
+ if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad SPD infoframe\n");
+ return -EINVAL;
+ }
+
+ if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad HDMI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1931,7 +2501,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (IS_ICELAKE(dev_priv) &&
+ if (INTEL_GEN(dev_priv) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2133,10 +2703,21 @@ static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_digital_port *intel_dig_port =
+ hdmi_to_dig_port(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_attach_aspect_ratio_property(connector);
+
+ /*
+ * Attach the Colorspace property for non-LSPCON based devices.
+ * TODO: This needs to be extended to LSPCON implementations as
+ * well; that will be done separately.
+ */
+ if (!intel_dig_port->lspcon.active)
+ intel_attach_colorspace_property(connector);
+
drm_connector_attach_content_type_property(connector);
connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
@@ -2321,14 +2902,14 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
return info->alternate_ddc_pin;
}
- if (IS_CHERRYVIEW(dev_priv))
- ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
- else if (IS_GEN9_LP(dev_priv))
- ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ if (HAS_PCH_ICP(dev_priv))
+ ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
else if (HAS_PCH_CNP(dev_priv))
ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
- else if (HAS_PCH_ICP(dev_priv))
- ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+ else if (IS_GEN9_LP(dev_priv))
+ ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+ else if (IS_CHERRYVIEW(dev_priv))
+ ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
else
ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
@@ -2345,33 +2926,36 @@ void intel_infoframe_init(struct intel_digital_port *intel_dig_port)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_dig_port->write_infoframe = vlv_write_infoframe;
+ intel_dig_port->read_infoframe = vlv_read_infoframe;
intel_dig_port->set_infoframes = vlv_set_infoframes;
- intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
} else if (IS_G4X(dev_priv)) {
intel_dig_port->write_infoframe = g4x_write_infoframe;
+ intel_dig_port->read_infoframe = g4x_read_infoframe;
intel_dig_port->set_infoframes = g4x_set_infoframes;
- intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
} else if (HAS_DDI(dev_priv)) {
if (intel_dig_port->lspcon.active) {
- intel_dig_port->write_infoframe =
- lspcon_write_infoframe;
+ intel_dig_port->write_infoframe = lspcon_write_infoframe;
+ intel_dig_port->read_infoframe = lspcon_read_infoframe;
intel_dig_port->set_infoframes = lspcon_set_infoframes;
- intel_dig_port->infoframe_enabled =
- lspcon_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
} else {
- intel_dig_port->set_infoframes = hsw_set_infoframes;
- intel_dig_port->infoframe_enabled =
- hsw_infoframe_enabled;
intel_dig_port->write_infoframe = hsw_write_infoframe;
+ intel_dig_port->read_infoframe = hsw_read_infoframe;
+ intel_dig_port->set_infoframes = hsw_set_infoframes;
+ intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
}
} else if (HAS_PCH_IBX(dev_priv)) {
intel_dig_port->write_infoframe = ibx_write_infoframe;
+ intel_dig_port->read_infoframe = ibx_read_infoframe;
intel_dig_port->set_infoframes = ibx_set_infoframes;
- intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
} else {
intel_dig_port->write_infoframe = cpt_write_infoframe;
+ intel_dig_port->read_infoframe = cpt_read_infoframe;
intel_dig_port->set_infoframes = cpt_set_infoframes;
- intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+ intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
}
}
@@ -2417,6 +3001,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
intel_hdmi_add_properties(intel_hdmi, connector);
+ intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_hdmi->attached_connector = intel_connector;
+
if (is_hdcp_supported(dev_priv, port)) {
int ret = intel_hdcp_init(intel_connector,
&intel_hdmi_hdcp_shim);
@@ -2424,9 +3011,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
}
- intel_connector_attach_encoder(intel_connector, intel_encoder);
- intel_hdmi->attached_connector = intel_connector;
-
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
* 0xd. Failure to do so will result in spurious interrupts being
* generated on the port when a cable is not attached.
diff --git a/drivers/gpu/drm/i915/intel_hdmi.h b/drivers/gpu/drm/i915/intel_hdmi.h
new file mode 100644
index 000000000000..106c2e0bc3c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdmi.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_HDMI_H__
+#define __INTEL_HDMI_H__
+
+#include <linux/hdmi.h>
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_connector;
+struct drm_encoder;
+struct drm_i915_private;
+struct intel_connector;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_crtc_state;
+struct intel_hdmi;
+struct drm_connector_state;
+union hdmi_infoframe;
+
+void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg,
+ enum port port);
+void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
+ struct intel_connector *intel_connector);
+struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
+int intel_hdmi_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state);
+bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
+ struct drm_connector *connector,
+ bool high_tmds_clock_ratio,
+ bool scrambling);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
+void intel_infoframe_init(struct intel_digital_port *intel_dig_port);
+u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+u32 intel_hdmi_infoframe_enable(unsigned int type);
+void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state);
+void intel_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ enum hdmi_infoframe_type type,
+ union hdmi_infoframe *frame);
+
+#endif /* __INTEL_HDMI_H__ */
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index 9bd1c9002c2a..94c04f16a2ad 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -79,7 +79,7 @@ int intel_huc_auth(struct intel_huc *huc)
}
/* Check authentication status, it should be done by now */
- ret = __intel_wait_for_register(i915,
+ ret = __intel_wait_for_register(&i915->uncore,
HUC_STATUS2,
HUC_FW_VERIFIED,
HUC_FW_VERIFIED,
diff --git a/drivers/gpu/drm/i915/intel_huc_fw.c b/drivers/gpu/drm/i915/intel_huc_fw.c
index 7d7bfc7f7ca7..68d47c105939 100644
--- a/drivers/gpu/drm/i915/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/intel_huc_fw.c
@@ -106,41 +106,46 @@ static int huc_fw_xfer(struct intel_uc_fw *huc_fw, struct i915_vma *vma)
{
struct intel_huc *huc = container_of(huc_fw, struct intel_huc, fw);
struct drm_i915_private *dev_priv = huc_to_i915(huc);
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long offset = 0;
u32 size;
int ret;
GEM_BUG_ON(huc_fw->type != INTEL_UC_FW_TYPE_HUC);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Set the source address for the uCode */
offset = intel_guc_ggtt_offset(&dev_priv->guc, vma) +
huc_fw->header_offset;
- I915_WRITE(DMA_ADDR_0_LOW, lower_32_bits(offset));
- I915_WRITE(DMA_ADDR_0_HIGH, upper_32_bits(offset) & 0xFFFF);
+ intel_uncore_write(uncore, DMA_ADDR_0_LOW,
+ lower_32_bits(offset));
+ intel_uncore_write(uncore, DMA_ADDR_0_HIGH,
+ upper_32_bits(offset) & 0xFFFF);
- /* Hardware doesn't look at destination address for HuC. Set it to 0,
+ /*
+ * Hardware doesn't look at destination address for HuC. Set it to 0,
* but still program the correct address space.
*/
- I915_WRITE(DMA_ADDR_1_LOW, 0);
- I915_WRITE(DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
+ intel_uncore_write(uncore, DMA_ADDR_1_LOW, 0);
+ intel_uncore_write(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
size = huc_fw->header_size + huc_fw->ucode_size;
- I915_WRITE(DMA_COPY_SIZE, size);
+ intel_uncore_write(uncore, DMA_COPY_SIZE, size);
/* Start the DMA */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
+ intel_uncore_write(uncore, DMA_CTRL,
+ _MASKED_BIT_ENABLE(HUC_UKERNEL | START_DMA));
/* Wait for DMA to finish */
- ret = intel_wait_for_register_fw(dev_priv, DMA_CTRL, START_DMA, 0, 100);
+ ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
DRM_DEBUG_DRIVER("HuC DMA transfer wait over with ret %d\n", ret);
/* Disable the bits once DMA is over */
- I915_WRITE(DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
+ intel_uncore_write(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(HUC_UKERNEL));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 5a733e711355..422685d120e9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -348,7 +348,7 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
I915_WRITE_FW(GMBUS4, irq_enable);
- ret = intel_wait_for_register_fw(dev_priv,
+ ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5e98fd79bd9d..4e0a351bfbca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,20 +164,15 @@
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce);
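+/*
+ * Priority bump given to a previously active request when it is unwound and
+ * resubmitted, so it is not gazumped a second time by another new client.
+ */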
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT | I915_PRIORITY_NOSEMAPHORE)
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring);
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
static inline struct i915_priolist *to_priolist(struct rb_node *rb)
{
return rb_entry(rb, struct i915_priolist, node);
@@ -188,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static int effective_prio(const struct i915_request *rq)
+{
+ int prio = rq_prio(rq);
+
+ /*
+ * On unwinding the active request, we give it a priority bump
+ * equivalent to a freshly submitted request. This protects it from
+ * being gazumped again, but it would be preferable if we didn't
+ * let it be gazumped in the first place!
+ *
+ * See __unwind_incomplete_requests()
+ */
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(rq)) {
+ /*
+ * After preemption, we insert the active request at the
+ * end of the new priority level. This means that we will be
+ * _lower_ priority than the preemptee all things equal (and
+ * so the preemption is valid), so adjust our comparison
+ * accordingly.
+ */
+ prio |= ACTIVE_PRIORITY;
+ prio--;
+ }
+
+ /* Restrict mere WAIT boosts from triggering preemption */
+ return prio | __NO_PREEMPTION;
+}
+
static int queue_prio(const struct intel_engine_execlists *execlists)
{
struct i915_priolist *p;
@@ -208,9 +231,9 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
static inline bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
- const int last_prio = rq_prio(rq);
+ int last_prio;
- if (!intel_engine_has_preemption(engine))
+ if (!engine->preempt_context)
return false;
if (i915_request_completed(rq))
@@ -228,6 +251,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* preempt. If that hint is stale or we may be trying to preempt
* ourselves, ignore the request.
*/
+ last_prio = effective_prio(rq);
if (!__execlists_need_preempt(engine->execlists.queue_priority_hint,
last_prio))
return false;
@@ -254,12 +278,11 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
}
__maybe_unused static inline bool
-assert_priority_queue(const struct intel_engine_execlists *execlists,
- const struct i915_request *prev,
+assert_priority_queue(const struct i915_request *prev,
const struct i915_request *next)
{
- if (!prev)
- return true;
+ const struct intel_engine_execlists *execlists =
+ &prev->engine->execlists;
/*
* Without preemption, the prev may refer to the still active element
@@ -300,11 +323,10 @@ assert_priority_queue(const struct intel_engine_execlists *execlists,
* engine info, SW context ID and SW counter need to form a unique number
* (Context ID) per lrc.
*/
-static void
-intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static u64
+lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine)
{
+ struct i915_gem_context *ctx = ce->gem_context;
u64 desc;
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -322,7 +344,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
* Consider updating oa_get_render_ctx_id in i915_perf.c when changing
* anything below.
*/
- if (INTEL_GEN(ctx->i915) >= 11) {
+ if (INTEL_GEN(engine->i915) >= 11) {
GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH));
desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT;
/* bits 37-47 */
@@ -339,7 +361,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
}
- ce->lrc_desc = desc;
+ return desc;
}
static void unwind_wa_tail(struct i915_request *rq)
@@ -353,7 +375,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
{
struct i915_request *rq, *rn, *active = NULL;
struct list_head *uninitialized_var(pl);
- int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+ int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
lockdep_assert_held(&engine->timeline.lock);
@@ -384,9 +406,21 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
* The active request is now effectively the start of a new client
* stream, so give it the equivalent small priority bump to prevent
* it being gazumped a second time by another peer.
+ *
+ * Note we have to be careful not to apply a priority boost to a request
+ * still spinning on its semaphores. If the request hasn't started, that
+ * means it is still waiting for its dependencies to be signaled, and
+ * if we apply a priority boost to this request, we will boost it past
+ * its signalers and so break PI.
+ *
+ * One consequence of this preemption boost is that we may jump
+ * over lesser priorities (such as I915_PRIORITY_WAIT), effectively
+ * making those priorities non-preemptible. They will be moved forward
+ * in the priority queue, but they will not gain immediate access to
+ * the GPU.
*/
- if (!(prio & I915_PRIORITY_NEWCLIENT)) {
- prio |= I915_PRIORITY_NEWCLIENT;
+ if (~prio & ACTIVE_PRIORITY && __i915_request_has_started(active)) {
+ prio |= ACTIVE_PRIORITY;
active->sched.attr.priority = prio;
list_move_tail(&active->sched.link,
i915_sched_lookup_priolist(engine, prio));
@@ -395,13 +429,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
return active;
}
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
{
struct intel_engine_cs *engine =
container_of(execlists, typeof(*engine), execlists);
- __unwind_incomplete_requests(engine);
+ return __unwind_incomplete_requests(engine);
}
static inline void
@@ -523,13 +557,11 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
desc = execlists_update_context(rq);
GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
- GEM_TRACE("%s in[%d]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s in[%d]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name, n,
port[n].context_id, count,
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
hwsp_seqno(rq),
- intel_engine_get_seqno(engine),
rq_prio(rq));
} else {
GEM_BUG_ON(!n);
@@ -564,6 +596,17 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static bool can_merge_rq(const struct i915_request *prev,
+ const struct i915_request *next)
+{
+ GEM_BUG_ON(!assert_priority_queue(prev, next));
+
+ if (!can_merge_ctx(prev->hw_context, next->hw_context))
+ return false;
+
+ return true;
+}
+
static void port_assign(struct execlist_port *port, struct i915_request *rq)
{
GEM_BUG_ON(rq == port_request(port));
@@ -577,8 +620,7 @@ static void port_assign(struct execlist_port *port, struct i915_request *rq)
static void inject_preempt_context(struct intel_engine_cs *engine)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- struct intel_context *ce =
- to_intel_context(engine->i915->preempt_context, engine);
+ struct intel_context *ce = engine->preempt_context;
unsigned int n;
GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -716,8 +758,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
int i;
priolist_for_each_request_consume(rq, rn, p, i) {
- GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
-
/*
* Can we combine this request with the current port?
* It has to be the same context/ringbuffer and not
@@ -729,8 +769,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* second request, and so we never need to tell the
* hardware about the first.
*/
- if (last &&
- !can_merge_ctx(rq->hw_context, last->hw_context)) {
+ if (last && !can_merge_rq(last, rq)) {
/*
* If we are on the second port and cannot
* combine this request with the last, then we
@@ -740,6 +779,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
goto done;
+ /*
+ * We must not populate both ELSP[] ports with the same LRCA, i.e.
+ * we must submit two different contexts if we submit two ELSP
+ * entries.
+ */
+ if (last->hw_context == rq->hw_context)
+ goto done;
+
/*
* If GVT overrides us we only ever submit
* port[0], leaving port[1] empty. Note that we
* also have to be careful that we don't queue
@@ -750,7 +797,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
ctx_single_port_submission(rq->hw_context))
goto done;
- GEM_BUG_ON(last->hw_context == rq->hw_context);
if (submit)
port_assign(port, last);
@@ -769,8 +815,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
+ i915_priolist_free(p);
}
done:
@@ -790,8 +835,7 @@ done:
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
- execlists->queue_priority_hint =
- port != execlists->port ? rq_prio(last) : INT_MIN;
+ execlists->queue_priority_hint = queue_prio(execlists);
if (submit) {
port_assign(port, last);
@@ -821,13 +865,11 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
while (num_ports-- && port_isset(port)) {
struct i915_request *rq = port_request(port);
- GEM_TRACE("%s:port%u global=%d (fence %llx:%lld), (current %d:%d)\n",
+ GEM_TRACE("%s:port%u fence %llx:%lld, (current %d)\n",
rq->engine->name,
(unsigned int)(port - execlists->port),
- rq->global_seqno,
rq->fence.context, rq->fence.seqno,
- hwsp_seqno(rq),
- intel_engine_get_seqno(rq->engine));
+ hwsp_seqno(rq));
GEM_BUG_ON(!execlists->active);
execlists_context_schedule_out(rq,
@@ -851,104 +893,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static void reset_csb_pointers(struct intel_engine_execlists *execlists)
-{
- const unsigned int reset_value = GEN8_CSB_ENTRIES - 1;
-
- /*
- * After a reset, the HW starts writing into CSB entry [0]. We
- * therefore have to set our HEAD pointer back one entry so that
- * the *first* entry we check is entry 0. To complicate this further,
- * as we don't wait for the first interrupt after reset, we have to
- * fake the HW write to point back to the last entry so that our
- * inline comparison of our cached head position against the last HW
- * write works even before the first interrupt.
- */
- execlists->csb_head = reset_value;
- WRITE_ONCE(*execlists->csb_write, reset_value);
-
- invalidate_csb_entries(&execlists->csb_status[0],
- &execlists->csb_status[GEN8_CSB_ENTRIES - 1]);
-}
-
-static void nop_submission_tasklet(unsigned long data)
-{
- /* The driver is wedged; don't process any more events. */
-}
-
-static void execlists_cancel_requests(struct intel_engine_cs *engine)
-{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_request *rq, *rn;
- struct rb_node *rb;
- unsigned long flags;
-
- GEM_TRACE("%s current %d\n",
- engine->name, intel_engine_get_seqno(engine));
-
- /*
- * Before we call engine->cancel_requests(), we should have exclusive
- * access to the submission state. This is arranged for us by the
- * caller disabling the interrupt generation, the tasklet and other
- * threads that may then access the same state, giving us a free hand
- * to reset state. However, we still need to let lockdep be aware that
- * we know this state may be accessed in hardirq context, so we
- * disable the irq around this manipulation and we want to keep
- * the spinlock focused on its duties and not accidentally conflate
- * coverage to the submission's irq state. (Similarly, although we
- * shouldn't need to disable irq around the manipulation of the
- * submission's irq state, we also wish to remind ourselves that
- * it is irq state.)
- */
- spin_lock_irqsave(&engine->timeline.lock, flags);
-
- /* Cancel the requests on the HW and clear the ELSP tracker. */
- execlists_cancel_port_requests(execlists);
- execlists_user_end(execlists);
-
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->timeline.requests, link) {
- GEM_BUG_ON(!rq->global_seqno);
-
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
- i915_request_mark_complete(rq);
- }
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
- int i;
-
- priolist_for_each_request_consume(rq, rn, p, i) {
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- if (p->priority != I915_PRIORITY_NORMAL)
- kmem_cache_free(engine->i915->priorities, p);
- }
-
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
- execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(port_isset(execlists->port));
-
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.func = nop_submission_tasklet;
-
- spin_unlock_irqrestore(&engine->timeline.lock, flags);
-}
-
static inline bool
reset_in_progress(const struct intel_engine_execlists *execlists)
{
@@ -960,6 +904,7 @@ static void process_csb(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct execlist_port *port = execlists->port;
const u32 * const buf = execlists->csb_status;
+ const u8 num_entries = execlists->csb_size;
u8 head, tail;
lockdep_assert_held(&engine->timeline.lock);
@@ -995,7 +940,7 @@ static void process_csb(struct intel_engine_cs *engine)
unsigned int status;
unsigned int count;
- if (++head == GEN8_CSB_ENTRIES)
+ if (++head == num_entries)
head = 0;
/*
@@ -1052,14 +997,12 @@ static void process_csb(struct intel_engine_cs *engine)
EXECLISTS_ACTIVE_USER));
rq = port_unpack(port, &count);
- GEM_TRACE("%s out[0]: ctx=%d.%d, global=%d (fence %llx:%lld) (current %d:%d), prio=%d\n",
+ GEM_TRACE("%s out[0]: ctx=%d.%d, fence %llx:%lld (current %d), prio=%d\n",
engine->name,
port->context_id, count,
- rq ? rq->global_seqno : 0,
rq ? rq->fence.context : 0,
rq ? rq->fence.seqno : 0,
rq ? hwsp_seqno(rq) : 0,
- intel_engine_get_seqno(engine),
rq ? rq_prio(rq) : 0);
/* Check the context/desc id for this event matches */
@@ -1119,7 +1062,7 @@ static void process_csb(struct intel_engine_cs *engine)
* the wash as hardware, working or not, will need to do the
* invalidation before.
*/
- invalidate_csb_entries(&buf[0], &buf[GEN8_CSB_ENTRIES - 1]);
+ invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
}
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
@@ -1196,19 +1139,50 @@ static void execlists_submit_request(struct i915_request *request)
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_context_destroy(struct intel_context *ce)
+static void __execlists_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
- intel_ring_free(ce->ring);
+ intel_ring_put(ce->ring);
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void execlists_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __execlists_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
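With the destroy hook now keyed to a struct kref, the final reference drop is what tears the context down. A minimal sketch of how that release is expected to be reached; the real put helper lives outside this file, so the call below is an assumption rather than something this patch adds:

static inline void sketch_intel_context_put(struct intel_context *ce)
{
	/* the last kref_put() lands in execlists_context_destroy() above */
	kref_put(&ce->ref, ce->ops->destroy);
}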
+static int __context_pin(struct i915_vma *vma)
+{
+ unsigned int flags;
+ int err;
+
+ flags = PIN_GLOBAL | PIN_HIGH;
+ flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+
+ err = i915_vma_pin(vma, 0, 0, flags);
+ if (err)
+ return err;
+
+ vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
+
+ return 0;
+}
+
+static void __context_unpin(struct i915_vma *vma)
+{
+ vma->obj->pin_global--;
+ __i915_vma_unpin(vma);
+}
+
static void execlists_context_unpin(struct intel_context *ce)
{
struct intel_engine_cs *engine;
@@ -1237,41 +1211,19 @@ static void execlists_context_unpin(struct intel_context *ce)
intel_ring_unpin(ce->ring);
- ce->state->obj->pin_global--;
i915_gem_object_unpin_map(ce->state->obj);
- i915_vma_unpin(ce->state);
-
- i915_gem_context_put(ce->gem_context);
-}
-
-static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
-{
- unsigned int flags;
- int err;
-
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_wc_domain(vma->obj, true);
- if (err)
- return err;
- }
-
- flags = PIN_GLOBAL | PIN_HIGH;
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
-
- return i915_vma_pin(vma, 0, 0, flags);
+ __context_unpin(ce->state);
}
static void
-__execlists_update_reg_state(struct intel_engine_cs *engine,
- struct intel_context *ce)
+__execlists_update_reg_state(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
- u32 *regs = ce->lrc_reg_state;
struct intel_ring *ring = ce->ring;
+ u32 *regs = ce->lrc_reg_state;
+
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma);
regs[CTX_RING_HEAD + 1] = ring->head;
@@ -1279,29 +1231,30 @@ __execlists_update_reg_state(struct intel_engine_cs *engine,
/* RPCS */
if (engine->class == RENDER_CLASS)
- regs[CTX_R_PWR_CLK_STATE + 1] = gen8_make_rpcs(engine->i915,
- &ce->sseu);
+ regs[CTX_R_PWR_CLK_STATE + 1] =
+ gen8_make_rpcs(engine->i915, &ce->sseu);
}
-static struct intel_context *
-__execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int
+__execlists_context_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
void *vaddr;
int ret;
- ret = execlists_context_deferred_alloc(ctx, engine, ce);
+ GEM_BUG_ON(!ce->gem_context->ppgtt);
+
+ ret = execlists_context_deferred_alloc(ce, engine);
if (ret)
goto err;
GEM_BUG_ON(!ce->state);
- ret = __context_pin(ctx, ce->state);
+ ret = __context_pin(ce->state);
if (ret)
goto err;
vaddr = i915_gem_object_pin_map(ce->state->obj,
- i915_coherent_map_type(ctx->i915) |
+ i915_coherent_map_type(engine->i915) |
I915_MAP_OVERRIDE);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
@@ -1312,55 +1265,60 @@ __execlists_context_pin(struct intel_engine_cs *engine,
if (ret)
goto unpin_map;
- ret = i915_gem_context_pin_hw_id(ctx);
+ ret = i915_gem_context_pin_hw_id(ce->gem_context);
if (ret)
goto unpin_ring;
- intel_lr_context_descriptor_update(ctx, engine, ce);
-
- GEM_BUG_ON(!intel_ring_offset_valid(ce->ring, ce->ring->head));
-
+ ce->lrc_desc = lrc_descriptor(ce, engine);
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
+ __execlists_update_reg_state(ce, engine);
- __execlists_update_reg_state(engine, ce);
-
- ce->state->obj->pin_global++;
- i915_gem_context_get(ctx);
- return ce;
+ return 0;
unpin_ring:
intel_ring_unpin(ce->ring);
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
unpin_vma:
- __i915_vma_unpin(ce->state);
+ __context_unpin(ce->state);
err:
- ce->pin_count = 0;
- return ERR_PTR(ret);
+ return ret;
}
-static const struct intel_context_ops execlists_context_ops = {
- .unpin = execlists_context_unpin,
- .destroy = execlists_context_destroy,
-};
-
-static struct intel_context *
-execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int execlists_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- GEM_BUG_ON(!ctx->ppgtt);
+ return __execlists_context_pin(ce, ce->engine);
+}
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+static void execlists_context_reset(struct intel_context *ce)
+{
+ /*
+ * Because we emit WA_TAIL_DWORDS there may be a disparity
+ * between our bookkeeping in ce->ring->head and ce->ring->tail and
+ * that stored in context. As we only write new commands from
+ * ce->ring->tail onwards, everything before that is junk. If the GPU
+ * starts reading from its RING_HEAD from the context, it may try to
+ * execute that junk and die.
+ *
+ * The contexts that are still pinned on resume belong to the
+ * kernel, and are local to each engine. All other contexts will
+ * have their head/tail sanitized upon pinning before use, so they
+ * will never see garbage.
+ *
+ * So to avoid that we reset the context images upon resume. For
+ * simplicity, we just zero everything out.
+ */
+ intel_ring_reset(ce->ring, 0);
+ __execlists_update_reg_state(ce, ce->engine);
+}
- ce->ops = &execlists_context_ops;
+static const struct intel_context_ops execlists_context_ops = {
+ .pin = execlists_context_pin,
+ .unpin = execlists_context_unpin,
- return __execlists_context_pin(engine, ctx, ce);
-}
+ .reset = execlists_context_reset,
+ .destroy = execlists_context_destroy,
+};
static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
@@ -1387,6 +1345,10 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
*cs++ = rq->fence.seqno - 1;
intel_ring_advance(rq, cs);
+
+ /* Record the updated position of the request's payload */
+ rq->infix = intel_ring_offset(rq, cs);
+
return 0;
}
@@ -1424,10 +1386,11 @@ static int emit_pdps(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
for (i = GEN8_3LVL_PDPES; i--; ) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+ u32 base = engine->mmio_base;
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
*cs++ = upper_32_bits(pd_daddr);
- *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
*cs++ = lower_32_bits(pd_daddr);
}
*cs++ = MI_NOOP;
@@ -1447,7 +1410,7 @@ static int execlists_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
/*
* Flush enough space to reduce the likelihood of waiting after
@@ -1465,7 +1428,7 @@ static int execlists_request_alloc(struct i915_request *request)
*/
/* Unconditionally invalidate GPU caches and TLBs. */
- if (i915_vm_is_48bit(&request->gem_context->ppgtt->vm))
+ if (i915_vm_is_4lvl(&request->gem_context->ppgtt->vm))
ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
else
ret = emit_pdps(request);
@@ -1732,7 +1695,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
unsigned int i;
int ret;
- if (GEM_DEBUG_WARN_ON(engine->id != RCS))
+ if (GEM_DEBUG_WARN_ON(engine->id != RCS0))
return -EINVAL;
switch (INTEL_GEN(engine->i915)) {
@@ -1796,17 +1759,9 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
- /*
- * Make sure we're not enabling the new 12-deep CSB
- * FIFO as that requires a slightly updated handling
- * in the ctx switch irq. Since we're currently only
- * using only 2 elements of the enhanced execlists the
- * deeper FIFO it's not needed and it's not worth adding
- * more statements to the irq handler to support it.
- */
if (INTEL_GEN(dev_priv) >= 11)
I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
else
I915_WRITE(RING_MODE_GEN7(engine),
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
@@ -1872,20 +1827,72 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
__tasklet_disable_sync_once(&execlists->tasklet);
GEM_BUG_ON(!reset_in_progress(execlists));
+ intel_engine_stop_cs(engine);
+
/* And flush any current direct submission. */
spin_lock_irqsave(&engine->timeline.lock, flags);
- process_csb(engine); /* drain preemption events */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
-static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static bool lrc_regs_ok(const struct i915_request *rq)
+{
+ const struct intel_ring *ring = rq->ring;
+ const u32 *regs = rq->hw_context->lrc_reg_state;
+
+ /* Quick spot check for the common signs of context corruption */
+
+ if (regs[CTX_RING_BUFFER_CONTROL + 1] !=
+ (RING_CTL_SIZE(ring->size) | RING_VALID))
+ return false;
+
+ if (regs[CTX_RING_BUFFER_START + 1] != i915_ggtt_offset(ring->vma))
+ return false;
+
+ return true;
+}
+
+static void reset_csb_pointers(struct intel_engine_execlists *execlists)
+{
+ const unsigned int reset_value = execlists->csb_size - 1;
+
+ /*
+ * After a reset, the HW starts writing into CSB entry [0]. We
+ * therefore have to set our HEAD pointer back one entry so that
+ * the *first* entry we check is entry 0. To complicate this further,
+ * as we don't wait for the first interrupt after reset, we have to
+ * fake the HW write to point back to the last entry so that our
+ * inline comparison of our cached head position against the last HW
+ * write works even before the first interrupt.
+ */
+ execlists->csb_head = reset_value;
+ WRITE_ONCE(*execlists->csb_write, reset_value);
+ wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+ invalidate_csb_entries(&execlists->csb_status[0],
+ &execlists->csb_status[reset_value]);
+}
+
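The priming above is easiest to read next to the consumer loop it feeds. A minimal sketch mirroring the wrap logic in process_csb() (consume_event() is a hypothetical stand-in): with head and the HW write pointer both parked on the last slot, the first post-reset event, written to slot 0, is picked up by the very first wrap-around increment.

static void sketch_drain_csb(const u32 *csb, u8 *head, u8 tail, u8 num_entries)
{
	while (*head != tail) {
		if (++*head == num_entries)	/* same wrap as process_csb() */
			*head = 0;
		consume_event(csb[*head]);	/* hypothetical helper */
	}
}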
+static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct intel_context *ce;
struct i915_request *rq;
- unsigned long flags;
u32 *regs;
- spin_lock_irqsave(&engine->timeline.lock, flags);
+ process_csb(engine); /* drain preemption events */
+
+ /* Following the reset, we need to reload the CSB read/write pointers */
+ reset_csb_pointers(&engine->execlists);
+
+ /*
+ * Save the currently executing context, even if we completed
+ * its request, it was still running at the time of the
+ * reset and will have been clobbered.
+ */
+ if (!port_isset(execlists->port))
+ goto out_clear;
+
+ ce = port_request(execlists->port)->hw_context;
/*
* Catch up with any missed context-switch interrupts.
@@ -1900,17 +1907,28 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
/* Push back any incomplete requests for replay after the reset. */
rq = __unwind_incomplete_requests(engine);
+ if (!rq)
+ goto out_replay;
- /* Following the reset, we need to reload the CSB read/write pointers */
- reset_csb_pointers(&engine->execlists);
+ if (rq->hw_context != ce) { /* caught just before a CS event */
+ rq = NULL;
+ goto out_replay;
+ }
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
- if (!rq)
- goto out_unlock;
+ /*
+ * If this request hasn't started yet, e.g. it is waiting on a
+ * semaphore, we need to avoid skipping the request or else we
+ * break the signaling chain. However, if the context is corrupt
+ * the request will not restart and we will be stuck with a wedged
+ * device. It is quite often the case that if we issue a reset
+ * while the GPU is loading the context image, the image becomes
+ * corrupt.
+ *
+ * Otherwise, if the request has not yet started, it should replay
+ * perfectly and we do not need to flag the result as erroneous.
+ */
+ if (!i915_request_started(rq) && lrc_regs_ok(rq))
+ goto out_replay;
/*
* If the request was innocent, we leave the request in the ELSP
@@ -1924,8 +1942,8 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* image back to the expected values to skip over the guilty request.
*/
i915_reset_request(rq, stalled);
- if (!stalled)
- goto out_unlock;
+ if (!stalled && lrc_regs_ok(rq))
+ goto out_replay;
/*
* We want a simple context + ring to execute the breadcrumb update.
@@ -1935,21 +1953,103 @@ static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
* future request will be after userspace has had the opportunity
* to recreate its own state.
*/
- regs = rq->hw_context->lrc_reg_state;
+ regs = ce->lrc_reg_state;
if (engine->pinned_default_state) {
memcpy(regs, /* skip restoring the vanilla PPHWSP */
engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
engine->context_size - PAGE_SIZE);
}
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
- /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
- rq->ring->head = intel_ring_wrap(rq->ring, rq->postfix);
- intel_ring_update_space(rq->ring);
+ /* Rerun the request; its payload has been neutered (if guilty). */
+out_replay:
+ ce->ring->head =
+ rq ? intel_ring_wrap(ce->ring, rq->head) : ce->ring->tail;
+ intel_ring_update_space(ce->ring);
+ __execlists_update_reg_state(ce, engine);
+
+out_clear:
+ execlists_clear_all_active(execlists);
+}
+
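Read together, the early-outs above boil down to one question: does the context image need scrubbing before replay? A condensed, purely illustrative restatement of those branches (not a helper this patch introduces):

static bool sketch_need_scrub(const struct i915_request *rq, bool stalled)
{
	if (!rq)
		return false;	/* nothing inflight; just resample the ring tail */
	if (!i915_request_started(rq) && lrc_regs_ok(rq))
		return false;	/* e.g. waiting on a semaphore; replay as-is */
	if (!stalled && lrc_regs_ok(rq))
		return false;	/* innocent request; replay as-is */
	return true;		/* guilty or corrupt; rebuild the image */
}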
+static void execlists_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
- execlists_init_reg_state(regs, rq->gem_context, engine, rq->ring);
- __execlists_update_reg_state(engine, rq->hw_context);
+ __execlists_reset(engine, stalled);
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
+static void nop_submission_tasklet(unsigned long data)
+{
+ /* The driver is wedged; don't process any more events. */
+}
+
+static void execlists_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+ unsigned long flags;
+
+ GEM_TRACE("%s\n", engine->name);
+
+ /*
+ * Before we call engine->cancel_requests(), we should have exclusive
+ * access to the submission state. This is arranged for us by the
+ * caller disabling the interrupt generation, the tasklet and other
+ * threads that may then access the same state, giving us a free hand
+ * to reset state. However, we still need to let lockdep be aware that
+ * we know this state may be accessed in hardirq context, so we
+ * disable the irq around this manipulation and we want to keep
+ * the spinlock focused on its duties and not accidentally conflate
+ * coverage to the submission's irq state. (Similarly, although we
+ * shouldn't need to disable irq around the manipulation of the
+ * submission's irq state, we also wish to remind ourselves that
+ * it is irq state.)
+ */
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ __execlists_reset(engine, true);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &engine->timeline.requests, link) {
+ if (!i915_request_signaled(rq))
+ dma_fence_set_error(&rq->fence, -EIO);
+
+ i915_request_mark_complete(rq);
+ }
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&execlists->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+ int i;
+
+ priolist_for_each_request_consume(rq, rn, p, i) {
+ list_del_init(&rq->sched.link);
+ __i915_request_submit(rq);
+ dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_mark_complete(rq);
+ }
+
+ rb_erase_cached(&p->node, &execlists->queue);
+ i915_priolist_free(p);
+ }
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+
+ execlists->queue_priority_hint = INT_MIN;
+ execlists->queue = RB_ROOT_CACHED;
+ GEM_BUG_ON(port_isset(execlists->port));
+
+ GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
+ execlists->tasklet.func = nop_submission_tasklet;
-out_unlock:
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
@@ -1961,13 +2061,14 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
* to sleep before we restart and reload a context.
- *
*/
GEM_BUG_ON(!reset_in_progress(execlists));
if (!RB_EMPTY_ROOT(&execlists->queue.rb_root))
execlists->tasklet.func(execlists->tasklet.data);
- tasklet_enable(&execlists->tasklet);
+ if (__tasklet_enable(&execlists->tasklet))
+ /* And kick in case we missed a new request submission. */
+ tasklet_hi_schedule(&execlists->tasklet);
GEM_TRACE("%s: depth->%d\n", engine->name,
atomic_read(&execlists->tasklet.count));
}
@@ -1978,7 +2079,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
{
u32 *cs;
- cs = intel_ring_begin(rq, 6);
+ cs = intel_ring_begin(rq, 4);
if (IS_ERR(cs))
return PTR_ERR(cs);
@@ -1989,19 +2090,37 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* particular all the gen that do not need the w/a at all!), if we
* took care to make sure that on every switch into this context
* (both ordinary and for preemption) that arbitration was enabled
- * we would be fine. However, there doesn't seem to be a downside to
- * being paranoid and making sure it is set before each batch and
- * every context-switch.
- *
- * Note that if we fail to enable arbitration before the request
- * is complete, then we do not see the context-switch interrupt and
- * the engine hangs (with RING_HEAD == RING_TAIL).
- *
- * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
+ * we would be fine. However, for gen8 there is another w/a that
+ * requires us to not preempt inside GPGPU execution, so we keep
+ * arbitration disabled for gen8 batches. Arbitration will be
+ * re-enabled before we close the request
+ * (engine->emit_fini_breadcrumb).
*/
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* FIXME(BDW+): Address space and security selectors. */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
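Per the comment above and the fini breadcrumb later in this patch, the gen8 command stream keeps arbitration off for the whole batch; a rough timeline, for orientation only:

/*
 *   gen8_emit_bb_start():        MI_ARB_ON_OFF | MI_ARB_DISABLE,
 *                                MI_BATCH_BUFFER_START_GEN8 ...
 *   (batch executes with preemption arbitration disabled)
 *   gen8_emit_fini_breadcrumb(): breadcrumb writes, MI_USER_INTERRUPT,
 *                                MI_ARB_ON_OFF | MI_ARB_ENABLE
 */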
+static int gen9_emit_bb_start(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
- /* FIXME(BDW): Address space and security selectors. */
*cs++ = MI_BATCH_BUFFER_START_GEN8 |
(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
*cs++ = lower_32_bits(offset);
@@ -2017,16 +2136,14 @@ static int gen8_emit_bb_start(struct i915_request *rq,
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask | engine->irq_keep_mask));
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
+ ENGINE_POSTING_READ(engine, RING_IMR);
}
static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
}
static int gen8_emit_flush(struct i915_request *request, u32 mode)
@@ -2148,16 +2265,16 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs)
{
- /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
- BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
-
cs = gen8_emit_ggtt_write(cs,
request->fence.seqno,
- request->timeline->hwsp_offset);
+ request->timeline->hwsp_offset,
+ 0);
cs = gen8_emit_ggtt_write(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine));
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ MI_FLUSH_DW_STORE_INDEX);
+
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2180,9 +2297,9 @@ static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
PIPE_CONTROL_CS_STALL);
cs = gen8_emit_ggtt_write_rcs(cs,
- request->global_seqno,
- intel_hws_seqno_address(request->engine),
- PIPE_CONTROL_CS_STALL);
+ intel_engine_next_hangcheck_seqno(request->engine),
+ I915_GEM_HWS_HANGCHECK_ADDR,
+ PIPE_CONTROL_STORE_DATA_INDEX);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2231,7 +2348,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
dev_priv = engine->i915;
if (engine->buffer) {
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
}
if (engine->cleanup)
@@ -2254,19 +2371,18 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
engine->execlists.tasklet.func = execlists_submission_tasklet;
engine->reset.prepare = execlists_reset_prepare;
+ engine->reset.reset = execlists_reset;
+ engine->reset.finish = execlists_reset_finish;
engine->park = NULL;
engine->unpark = NULL;
engine->flags |= I915_ENGINE_SUPPORTS_STATS;
- if (engine->i915->preempt_context)
+ if (!intel_vgpu_active(engine->i915))
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+ if (engine->preempt_context &&
+ HAS_LOGICAL_RING_PREEMPTION(engine->i915))
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
-
- engine->i915->caps.scheduler =
- I915_SCHEDULER_CAP_ENABLED |
- I915_SCHEDULER_CAP_PRIORITY;
- if (intel_engine_has_preemption(engine))
- engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION;
}
static void
@@ -2279,7 +2395,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->reset.reset = execlists_reset;
engine->reset.finish = execlists_reset_finish;
- engine->context_pin = execlists_context_pin;
+ engine->cops = &execlists_context_ops;
engine->request_alloc = execlists_request_alloc;
engine->emit_flush = gen8_emit_flush;
@@ -2299,7 +2415,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
* until a more refined solution exists.
*/
}
- engine->emit_bb_start = gen8_emit_bb_start;
+ if (IS_GEN(engine->i915, 8))
+ engine->emit_bb_start = gen8_emit_bb_start;
+ else
+ engine->emit_bb_start = gen9_emit_bb_start;
}
static inline void
@@ -2309,11 +2428,11 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
if (INTEL_GEN(engine->i915) < 11) {
const u8 irq_shifts[] = {
- [RCS] = GEN8_RCS_IRQ_SHIFT,
- [BCS] = GEN8_BCS_IRQ_SHIFT,
- [VCS] = GEN8_VCS1_IRQ_SHIFT,
- [VCS2] = GEN8_VCS2_IRQ_SHIFT,
- [VECS] = GEN8_VECS_IRQ_SHIFT,
+ [RCS0] = GEN8_RCS_IRQ_SHIFT,
+ [BCS0] = GEN8_BCS_IRQ_SHIFT,
+ [VCS0] = GEN8_VCS0_IRQ_SHIFT,
+ [VCS1] = GEN8_VCS1_IRQ_SHIFT,
+ [VECS0] = GEN8_VECS_IRQ_SHIFT,
};
shift = irq_shifts[engine->id];
@@ -2348,6 +2467,7 @@ static int logical_ring_init(struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
struct intel_engine_execlists * const execlists = &engine->execlists;
+ u32 base = engine->mmio_base;
int ret;
ret = intel_engine_init_common(engine);
@@ -2357,23 +2477,19 @@ static int logical_ring_init(struct intel_engine_cs *engine)
intel_engine_init_workarounds(engine);
if (HAS_LOGICAL_RING_ELSQ(i915)) {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(engine));
- execlists->ctrl_reg = i915->regs +
- i915_mmio_reg_offset(RING_EXECLIST_CONTROL(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base));
+ execlists->ctrl_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base));
} else {
- execlists->submit_reg = i915->regs +
- i915_mmio_reg_offset(RING_ELSP(engine));
+ execlists->submit_reg = i915->uncore.regs +
+ i915_mmio_reg_offset(RING_ELSP(base));
}
execlists->preempt_complete_status = ~0u;
- if (i915->preempt_context) {
- struct intel_context *ce =
- to_intel_context(i915->preempt_context, engine);
-
+ if (engine->preempt_context)
execlists->preempt_complete_status =
- upper_32_bits(ce->lrc_desc);
- }
+ upper_32_bits(engine->preempt_context->lrc_desc);
execlists->csb_status =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -2381,6 +2497,11 @@ static int logical_ring_init(struct intel_engine_cs *engine)
execlists->csb_write =
&engine->status_page.addr[intel_hws_csb_write_index(i915)];
+ if (INTEL_GEN(engine->i915) < 11)
+ execlists->csb_size = GEN8_CSB_ENTRIES;
+ else
+ execlists->csb_size = GEN11_CSB_ENTRIES;
+
reset_csb_pointers(execlists);
return 0;
@@ -2592,13 +2713,13 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
}
static void execlists_init_reg_state(u32 *regs,
- struct i915_gem_context *ctx,
+ struct intel_context *ce,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
- struct drm_i915_private *dev_priv = engine->i915;
- u32 base = engine->mmio_base;
+ struct i915_hw_ppgtt *ppgtt = ce->gem_context->ppgtt;
bool rcs = engine->class == RENDER_CLASS;
+ u32 base = engine->mmio_base;
/* A context is actually a big batch buffer with several
* MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2610,10 +2731,10 @@ static void execlists_init_reg_state(u32 *regs,
regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
MI_LRI_FORCE_POSTED;
- CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+ CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base),
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
_MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH));
- if (INTEL_GEN(dev_priv) < 11) {
+ if (INTEL_GEN(engine->i915) < 11) {
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
CTX_CTRL_RS_CTX_ENABLE);
@@ -2659,42 +2780,42 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
/* PDP values will be assigned later if needed */
- CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
- CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
- CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
- CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
- CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
-
- if (i915_vm_is_48bit(&ctx->ppgtt->vm)) {
+ CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0);
+ CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0);
+ CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0);
+ CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0);
+ CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0);
+
+ if (i915_vm_is_4lvl(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ctx->ppgtt, regs);
+ ASSIGN_CTX_PML4(ppgtt, regs);
} else {
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 3);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 2);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 1);
- ASSIGN_CTX_PDP(ctx->ppgtt, regs, 0);
+ ASSIGN_CTX_PDP(ppgtt, regs, 3);
+ ASSIGN_CTX_PDP(ppgtt, regs, 2);
+ ASSIGN_CTX_PDP(ppgtt, regs, 1);
+ ASSIGN_CTX_PDP(ppgtt, regs, 0);
}
if (rcs) {
regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0);
- i915_oa_init_reg_state(engine, ctx, regs);
+ i915_oa_init_reg_state(engine, ce, regs);
}
regs[CTX_END] = MI_BATCH_BUFFER_END;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(engine->i915) >= 10)
regs[CTX_END] |= BIT(0);
}
static int
-populate_lr_context(struct i915_gem_context *ctx,
+populate_lr_context(struct intel_context *ce,
struct drm_i915_gem_object *ctx_obj,
struct intel_engine_cs *engine,
struct intel_ring *ring)
@@ -2703,19 +2824,12 @@ populate_lr_context(struct i915_gem_context *ctx,
u32 *regs;
int ret;
- ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
- if (ret) {
- DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
- return ret;
- }
-
vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
if (IS_ERR(vaddr)) {
ret = PTR_ERR(vaddr);
DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
return ret;
}
- ctx_obj->mm.dirty = true;
if (engine->default_state) {
/*
@@ -2740,23 +2854,35 @@ populate_lr_context(struct i915_gem_context *ctx,
/* The second page of the context object contains some fields which must
* be set up prior to the first execution. */
regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
- execlists_init_reg_state(regs, ctx, engine, ring);
+ execlists_init_reg_state(regs, ce, engine, ring);
if (!engine->default_state)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
- if (ctx == ctx->i915->preempt_context && INTEL_GEN(engine->i915) < 11)
+ if (ce->gem_context == engine->i915->preempt_context &&
+ INTEL_GEN(engine->i915) < 11)
regs[CTX_CONTEXT_CONTROL + 1] |=
_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+ ret = 0;
err_unpin_ctx:
+ __i915_gem_object_flush_map(ctx_obj,
+ LRC_HEADER_PAGES * PAGE_SIZE,
+ engine->context_size);
i915_gem_object_unpin_map(ctx_obj);
return ret;
}
-static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
- struct intel_engine_cs *engine,
- struct intel_context *ce)
+static struct i915_timeline *get_timeline(struct i915_gem_context *ctx)
+{
+ if (ctx->timeline)
+ return i915_timeline_get(ctx->timeline);
+ else
+ return i915_timeline_create(ctx->i915, NULL);
+}
+
+static int execlists_context_deferred_alloc(struct intel_context *ce,
+ struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *ctx_obj;
struct i915_vma *vma;
@@ -2776,30 +2902,32 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
*/
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
- ctx_obj = i915_gem_object_create(ctx->i915, context_size);
+ ctx_obj = i915_gem_object_create(engine->i915, context_size);
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
}
- timeline = i915_timeline_create(ctx->i915, ctx->name, NULL);
+ timeline = get_timeline(ce->gem_context);
if (IS_ERR(timeline)) {
ret = PTR_ERR(timeline);
goto error_deref_obj;
}
- ring = intel_engine_create_ring(engine, timeline, ctx->ring_size);
+ ring = intel_engine_create_ring(engine,
+ timeline,
+ ce->gem_context->ring_size);
i915_timeline_put(timeline);
if (IS_ERR(ring)) {
ret = PTR_ERR(ring);
goto error_deref_obj;
}
- ret = populate_lr_context(ctx, ctx_obj, engine, ring);
+ ret = populate_lr_context(ce, ctx_obj, engine, ring);
if (ret) {
DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
goto error_ring_free;
@@ -2811,45 +2939,12 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
return 0;
error_ring_free:
- intel_ring_free(ring);
+ intel_ring_put(ring);
error_deref_obj:
i915_gem_object_put(ctx_obj);
return ret;
}
-void intel_lr_context_resume(struct drm_i915_private *i915)
-{
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- enum intel_engine_id id;
-
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
- list_for_each_entry(ctx, &i915->contexts.list, link) {
- for_each_engine(engine, i915, id) {
- struct intel_context *ce =
- to_intel_context(ctx, engine);
-
- if (!ce->state)
- continue;
-
- intel_ring_reset(ce->ring, 0);
-
- if (ce->pin_count) /* otherwise done in context_pin */
- __execlists_update_reg_state(engine, ce);
- }
- }
-}
-
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
@@ -2910,6 +3005,37 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
spin_unlock_irqrestore(&engine->timeline.lock, flags);
}
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub)
+{
+ /*
+ * We want a simple context + ring to execute the breadcrumb update.
+ * We cannot rely on the context being intact across the GPU hang,
+ * so clear it and rebuild just what we need for the breadcrumb.
+ * All pending requests for this context will be zapped, and any
+ * future request will be after userspace has had the opportunity
+ * to recreate its own state.
+ */
+ if (scrub) {
+ u32 *regs = ce->lrc_reg_state;
+
+ if (engine->pinned_default_state) {
+ memcpy(regs, /* skip restoring the vanilla PPHWSP */
+ engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+ engine->context_size - PAGE_SIZE);
+ }
+ execlists_init_reg_state(regs, ce, engine, ce->ring);
+ }
+
+ /* Rerun the request; its payload has been neutered (if guilty). */
+ ce->ring->head = head;
+ intel_ring_update_space(ce->ring);
+
+ __execlists_update_reg_state(ce, engine);
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_lrc.c"
#endif
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index f1aec8a6986f..84aa230ea27b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -28,20 +28,18 @@
#include "i915_gem_context.h"
/* Execlists regs */
-#define RING_ELSP(engine) _MMIO((engine)->mmio_base + 0x230)
-#define RING_EXECLIST_STATUS_LO(engine) _MMIO((engine)->mmio_base + 0x234)
-#define RING_EXECLIST_STATUS_HI(engine) _MMIO((engine)->mmio_base + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(engine) _MMIO((engine)->mmio_base + 0x244)
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3)
#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0)
-#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
+#define CTX_CTRL_RS_CTX_ENABLE (1 << 1)
#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2)
-#define RING_CONTEXT_STATUS_BUF_BASE(engine) _MMIO((engine)->mmio_base + 0x370)
-#define RING_CONTEXT_STATUS_BUF_LO(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8)
-#define RING_CONTEXT_STATUS_BUF_HI(engine, i) _MMIO((engine)->mmio_base + 0x370 + (i) * 8 + 4)
-#define RING_CONTEXT_STATUS_PTR(engine) _MMIO((engine)->mmio_base + 0x3a0)
-#define RING_EXECLIST_SQ_CONTENTS(engine) _MMIO((engine)->mmio_base + 0x510)
-#define RING_EXECLIST_CONTROL(engine) _MMIO((engine)->mmio_base + 0x550)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+
#define EL_CTRL_LOAD (1 << 0)
/* The docs specify that the write pointer wraps around after 5h, "After status
@@ -55,10 +53,11 @@
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-#define GEN8_CSB_WRITE_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
-#define GEN8_CSB_READ_PTR(csb_status) \
- (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
+
+#define GEN11_CSB_ENTRIES 12
+#define GEN11_CSB_PTR_MASK 0xf
+#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8)
+#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0)
enum {
INTEL_CONTEXT_SCHEDULE_IN = 0,
@@ -102,9 +101,13 @@ struct drm_printer;
struct drm_i915_private;
struct i915_gem_context;
-void intel_lr_context_resume(struct drm_i915_private *dev_priv);
void intel_execlists_set_default_submission(struct intel_engine_cs *engine);
+void intel_lr_context_reset(struct intel_engine_cs *engine,
+ struct intel_context *ce,
+ u32 head,
+ bool scrub);
+
void intel_execlists_show_requests(struct intel_engine_cs *engine,
struct drm_printer *m,
void (*show_request)(struct drm_printer *m,
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index 322bdddda164..7028d0cf3bb1 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -22,10 +22,14 @@
*
*
*/
-#include <drm/drm_edid.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drm_edid.h>
+
+#include "intel_dp.h"
#include "intel_drv.h"
+#include "intel_lspcon.h"
/* LSPCON OUI Vendor ID(signatures) */
#define LSPCON_VENDOR_PARADE_OUI 0x001CF8
@@ -452,6 +456,14 @@ void lspcon_write_infoframe(struct intel_encoder *encoder,
DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n");
}
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len)
+{
+ /* FIXME implement this */
+}
+
void lspcon_set_infoframes(struct intel_encoder *encoder,
bool enable,
const struct intel_crtc_state *crtc_state,
@@ -470,6 +482,8 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
return;
}
+ /* FIXME precompute infoframes */
+
ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
conn_state->connector,
adjusted_mode);
@@ -504,9 +518,10 @@ void lspcon_set_infoframes(struct intel_encoder *encoder,
buf, ret);
}
-bool lspcon_infoframe_enabled(struct intel_encoder *encoder,
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ /* FIXME actually read this from the hw */
return enc_to_intel_lspcon(&encoder->base)->active;
}
diff --git a/drivers/gpu/drm/i915/intel_lspcon.h b/drivers/gpu/drm/i915/intel_lspcon.h
new file mode 100644
index 000000000000..37cfddf8a9c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lspcon.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LSPCON_H__
+#define __INTEL_LSPCON_H__
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct drm_connector_state;
+struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_encoder;
+struct intel_lspcon;
+
+bool lspcon_init(struct intel_digital_port *intel_dig_port);
+void lspcon_resume(struct intel_lspcon *lspcon);
+void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
+void lspcon_write_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ const void *buf, ssize_t len);
+void lspcon_read_infoframe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ unsigned int type,
+ void *frame, ssize_t len);
+void lspcon_set_infoframes(struct intel_encoder *encoder,
+ bool enable,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+u32 lspcon_infoframes_enabled(struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config);
+void lspcon_ycbcr420_config(struct drm_connector *connector,
+ struct intel_crtc_state *crtc_state);
+
+#endif /* __INTEL_LSPCON_H__ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b4aa49768e90..51d1d59c1619 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -28,17 +28,22 @@
*/
#include <acpi/button.h>
+#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/vga_switcheroo.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <linux/acpi.h>
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_lvds.h"
+#include "intel_panel.h"
/* Private structure for the integrated LVDS support */
struct intel_lvds_pps {
@@ -152,24 +157,17 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
val = I915_READ(PP_ON_DELAYS(0));
- pps->port = (val & PANEL_PORT_SELECT_MASK) >>
- PANEL_PORT_SELECT_SHIFT;
- pps->t1_t2 = (val & PANEL_POWER_UP_DELAY_MASK) >>
- PANEL_POWER_UP_DELAY_SHIFT;
- pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >>
- PANEL_LIGHT_ON_DELAY_SHIFT;
+ pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
+ pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
+ pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
val = I915_READ(PP_OFF_DELAYS(0));
- pps->t3 = (val & PANEL_POWER_DOWN_DELAY_MASK) >>
- PANEL_POWER_DOWN_DELAY_SHIFT;
- pps->tx = (val & PANEL_LIGHT_OFF_DELAY_MASK) >>
- PANEL_LIGHT_OFF_DELAY_SHIFT;
+ pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
+ pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
val = I915_READ(PP_DIVISOR(0));
- pps->divider = (val & PP_REFERENCE_DIVIDER_MASK) >>
- PP_REFERENCE_DIVIDER_SHIFT;
- val = (val & PANEL_POWER_CYCLE_DELAY_MASK) >>
- PANEL_POWER_CYCLE_DELAY_SHIFT;
+ pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
+ val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
* Remove the BSpec specified +1 (100ms) offset that accounts for a
* too short power-cycle delay due to the asynchronous programming of
@@ -209,16 +207,19 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
val |= PANEL_POWER_RESET;
I915_WRITE(PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0), (pps->port << PANEL_PORT_SELECT_SHIFT) |
- (pps->t1_t2 << PANEL_POWER_UP_DELAY_SHIFT) |
- (pps->t5 << PANEL_LIGHT_ON_DELAY_SHIFT));
- I915_WRITE(PP_OFF_DELAYS(0), (pps->t3 << PANEL_POWER_DOWN_DELAY_SHIFT) |
- (pps->tx << PANEL_LIGHT_OFF_DELAY_SHIFT));
+ I915_WRITE(PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
+ REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
+ REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+
+ I915_WRITE(PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
+ REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- val = pps->divider << PP_REFERENCE_DIVIDER_SHIFT;
- val |= (DIV_ROUND_UP(pps->t4, 1000) + 1) <<
- PANEL_POWER_CYCLE_DELAY_SHIFT;
- I915_WRITE(PP_DIVISOR(0), val);
+ I915_WRITE(PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
+ DIV_ROUND_UP(pps->t4, 1000) + 1));
}
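For readers new to the helpers this hunk switches to: REG_FIELD_GET()/REG_FIELD_PREP() appear to be thin wrappers over FIELD_GET()/FIELD_PREP() from <linux/bitfield.h>, deriving the shift from the mask, so the removed open-coded pairs read roughly as:

	/* before: explicit mask plus shift */
	pps->t5 = (val & PANEL_LIGHT_ON_DELAY_MASK) >> PANEL_LIGHT_ON_DELAY_SHIFT;
	/* after: the mask alone identifies the field */
	pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);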
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -315,7 +316,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
POSTING_READ(lvds_encoder->reg);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, PP_ON, 5000))
DRM_ERROR("timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
@@ -329,7 +331,8 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
- if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, 0, 1000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ PP_STATUS(0), PP_ON, 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
@@ -746,20 +749,21 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
{ } /* terminating entry */
};
-struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *intel_encoder;
+ struct intel_encoder *encoder;
- for_each_intel_encoder(dev, intel_encoder)
- if (intel_encoder->type == INTEL_OUTPUT_LVDS)
- return intel_encoder;
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ return encoder;
+ }
return NULL;
}
-bool intel_is_dual_link_lvds(struct drm_device *dev)
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv)
{
- struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
+ struct intel_encoder *encoder = intel_get_lvds_encoder(dev_priv);
return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
}
@@ -813,7 +817,6 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct drm_display_mode *downclock_mode = NULL;
struct edid *edid;
@@ -952,30 +955,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_connector->edid = edid;
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if (scan->type & DRM_MODE_TYPE_PREFERRED) {
- DRM_DEBUG_KMS("using preferred mode from EDID: ");
- drm_mode_debug_printmodeline(scan);
-
- fixed_mode = drm_mode_duplicate(dev, scan);
- if (fixed_mode)
- goto out;
- }
- }
+ fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/* Failed to get EDID, what about VBT? */
- if (dev_priv->vbt.lfp_lvds_vbt_mode) {
- DRM_DEBUG_KMS("using mode from VBT: ");
- drm_mode_debug_printmodeline(dev_priv->vbt.lfp_lvds_vbt_mode);
-
- fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
- if (fixed_mode) {
- fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
- goto out;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
+ if (fixed_mode)
+ goto out;
/*
* If we didn't get EDID, try checking if the panel is already turned
diff --git a/drivers/gpu/drm/i915/intel_lvds.h b/drivers/gpu/drm/i915/intel_lvds.h
new file mode 100644
index 000000000000..bc9c8b84ba2f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_lvds.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_LVDS_H__
+#define __INTEL_LVDS_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+enum pipe;
+struct drm_i915_private;
+
+bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t lvds_reg, enum pipe *pipe);
+void intel_lvds_init(struct drm_i915_private *dev_priv);
+struct intel_encoder *intel_get_lvds_encoder(struct drm_i915_private *dev_priv);
+bool intel_is_dual_link_lvds(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_LVDS_H__ */
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 331e7a678fb7..274ba78500c0 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -252,7 +252,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_ICELAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 11) {
table->size = ARRAY_SIZE(icelake_mocs_table);
table->table = icelake_mocs_table;
table->n_entries = GEN11_NUM_MOCS_ENTRIES;
@@ -288,17 +288,17 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index)
{
switch (engine_id) {
- case RCS:
+ case RCS0:
return GEN9_GFX_MOCS(index);
- case VCS:
+ case VCS0:
return GEN9_MFX0_MOCS(index);
- case BCS:
+ case BCS0:
return GEN9_BLT_MOCS(index);
- case VECS:
+ case VECS0:
return GEN9_VEBOX_MOCS(index);
- case VCS2:
+ case VCS1:
return GEN9_MFX1_MOCS(index);
- case VCS3:
+ case VCS2:
return GEN11_MFX2_MOCS(index);
default:
MISSING_CASE(engine_id);
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 5e00ee9270b5..8fa1159d097f 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -32,9 +32,10 @@
#include <drm/i915_drm.h>
-#include "intel_opregion.h"
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_opregion.h"
+#include "intel_panel.h"
#define OPREGION_HEADER_OFFSET 0
#define OPREGION_ACPI_OFFSET 0x100
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c0df1dbb0069..eb317759b5d3 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -236,7 +236,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
static struct i915_request *alloc_request(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_engine_cs *engine = dev_priv->engine[RCS0];
return i915_request_alloc(engine, dev_priv->kernel_context);
}
@@ -446,7 +446,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
+ if (I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct i915_request *rq;
@@ -1430,7 +1430,7 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(ISR);
+ error->isr = I915_READ(GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index beca98d2b035..4ab4ce6569e7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -33,7 +33,10 @@
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/pwm.h>
+
+#include "intel_connector.h"
#include "intel_drv.h"
+#include "intel_panel.h"
#define CRC_PMIC_PWM_PERIOD_NS 21333
@@ -46,27 +49,26 @@ intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
drm_mode_set_crtcinfo(adjusted_mode, 0);
}
-/**
- * intel_find_panel_downclock - find the reduced downclock for LVDS in EDID
- * @dev_priv: i915 device instance
- * @fixed_mode : panel native mode
- * @connector: LVDS/eDP connector
- *
- * Return downclock_avail
- * Find the reduced downclock for LVDS/eDP in EDID.
- */
-struct drm_display_mode *
-intel_find_panel_downclock(struct drm_i915_private *dev_priv,
- struct drm_display_mode *fixed_mode,
- struct drm_connector *connector)
+static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
+ const struct drm_display_mode *fixed_mode)
{
- struct drm_display_mode *scan, *tmp_mode;
- int temp_downclock;
+ return drm_mode_match(downclock_mode, fixed_mode,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ downclock_mode->clock < fixed_mode->clock;
+}
- temp_downclock = fixed_mode->clock;
- tmp_mode = NULL;
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan, *best_mode = NULL;
+ struct drm_display_mode *downclock_mode;
+ int best_clock = fixed_mode->clock;
- list_for_each_entry(scan, &connector->probed_modes, head) {
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
/*
* If one mode has the same resolution with the fixed_panel
* mode while they have the different refresh rate, it means
@@ -74,29 +76,98 @@ intel_find_panel_downclock(struct drm_i915_private *dev_priv,
* case we can set the different FPx0/1 to dynamically select
* between low and high frequency.
*/
- if (scan->hdisplay == fixed_mode->hdisplay &&
- scan->hsync_start == fixed_mode->hsync_start &&
- scan->hsync_end == fixed_mode->hsync_end &&
- scan->htotal == fixed_mode->htotal &&
- scan->vdisplay == fixed_mode->vdisplay &&
- scan->vsync_start == fixed_mode->vsync_start &&
- scan->vsync_end == fixed_mode->vsync_end &&
- scan->vtotal == fixed_mode->vtotal) {
- if (scan->clock < temp_downclock) {
- /*
- * The downclock is already found. But we
- * expect to find the lower downclock.
- */
- temp_downclock = scan->clock;
- tmp_mode = scan;
- }
+ if (is_downclock_mode(scan, fixed_mode) &&
+ scan->clock < best_clock) {
+ /*
+ * The downclock is already found. But we
+ * expect to find the lower downclock.
+ */
+ best_clock = scan->clock;
+ best_mode = scan;
}
}
- if (temp_downclock < fixed_mode->clock)
- return drm_mode_duplicate(&dev_priv->drm, tmp_mode);
- else
+ if (!best_mode)
+ return NULL;
+
+ downclock_mode = drm_mode_duplicate(&dev_priv->drm, best_mode);
+ if (!downclock_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(downclock_mode);
+
+ return downclock_mode;
+}
+
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *scan;
+ struct drm_display_mode *fixed_mode;
+
+ if (list_empty(&connector->base.probed_modes))
+ return NULL;
+
+ /* prefer fixed mode from EDID if available */
+ list_for_each_entry(scan, &connector->base.probed_modes, head) {
+ if ((scan->type & DRM_MODE_TYPE_PREFERRED) == 0)
+ continue;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
+ return NULL;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+ }
+
+ scan = list_first_entry(&connector->base.probed_modes,
+ typeof(*scan), head);
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm, scan);
+ if (!fixed_mode)
return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ return fixed_mode;
+}
+
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_info *info = &connector->base.display_info;
+ struct drm_display_mode *fixed_mode;
+
+ if (!dev_priv->vbt.lfp_lvds_vbt_mode)
+ return NULL;
+
+ fixed_mode = drm_mode_duplicate(&dev_priv->drm,
+ dev_priv->vbt.lfp_lvds_vbt_mode);
+ if (!fixed_mode)
+ return NULL;
+
+ fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
+ drm_mode_debug_printmodeline(fixed_mode);
+
+ info->width_mm = fixed_mode->width_mm;
+ info->height_mm = fixed_mode->height_mm;
+
+ return fixed_mode;
}
/* adjusted_mode has been preset to be the panel's fixed mode */
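The panel-mode refactor above replaces the open-coded timing comparison with drm_mode_match() and splits fixed-mode selection into helpers with a clear precedence: the EDID preferred mode, else the first probed EDID mode, else the VBT LFP mode. The downclock helper then looks for a probed mode with identical timings but a lower dotclock. A self-contained sketch of that selection logic (simplified struct and fields, not the driver's types) could look like:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for drm_display_mode; only the fields used here. */
struct mode {
	int clock;			/* dotclock in kHz */
	int hdisplay, vdisplay;
	bool preferred;
};

/* Same idea as is_downclock_mode(): identical timings, lower dotclock. */
static bool is_downclock(const struct mode *cand, const struct mode *fixed)
{
	return cand->hdisplay == fixed->hdisplay &&
	       cand->vdisplay == fixed->vdisplay &&
	       cand->clock < fixed->clock;
}

/* Precedence: preferred EDID mode, else first EDID mode, else VBT mode. */
static const struct mode *pick_fixed_mode(const struct mode *edid, size_t n,
					  const struct mode *vbt)
{
	for (size_t i = 0; i < n; i++)
		if (edid[i].preferred)
			return &edid[i];
	if (n)
		return &edid[0];
	return vbt;	/* may be NULL when the VBT has no LFP mode either */
}

int main(void)
{
	struct mode edid[] = {
		{ .clock = 110000, .hdisplay = 1920, .vdisplay = 1080, .preferred = true },
		{ .clock =  95000, .hdisplay = 1920, .vdisplay = 1080 },
	};
	const struct mode *fixed = pick_fixed_mode(edid, 2, NULL);

	printf("fixed mode: %d kHz; second probed mode is %sa downclock mode\n",
	       fixed->clock, is_downclock(&edid[1], fixed) ? "" : "not ");
	return 0;
}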
@@ -1894,15 +1965,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
- } else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_ICP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
panel->backlight.setup = cnp_setup_backlight;
panel->backlight.enable = cnp_enable_backlight;
panel->backlight.disable = cnp_disable_backlight;
panel->backlight.set = bxt_set_backlight;
panel->backlight.get = bxt_get_backlight;
panel->backlight.hz_to_pwm = cnp_hz_to_pwm;
- } else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
- HAS_PCH_KBP(dev_priv)) {
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
panel->backlight.setup = lpt_setup_backlight;
panel->backlight.enable = lpt_enable_backlight;
panel->backlight.disable = lpt_disable_backlight;
diff --git a/drivers/gpu/drm/i915/intel_panel.h b/drivers/gpu/drm/i915/intel_panel.h
new file mode 100644
index 000000000000..cedeea443336
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_panel.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PANEL_H__
+#define __INTEL_PANEL_H__
+
+#include <linux/types.h>
+
+#include "intel_display.h"
+
+struct drm_connector;
+struct drm_connector_state;
+struct drm_display_mode;
+struct intel_connector;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+
+int intel_panel_init(struct intel_panel *panel,
+ struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *downclock_mode);
+void intel_panel_fini(struct intel_panel *panel);
+void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+ struct drm_display_mode *adjusted_mode);
+void intel_pch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_gmch_panel_fitting(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ int fitting_mode);
+void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+int intel_panel_setup_backlight(struct drm_connector *connector,
+ enum pipe pipe);
+void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_update_backlight(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
+struct drm_display_mode *
+intel_panel_edid_downclock_mode(struct intel_connector *connector,
+ const struct drm_display_mode *fixed_mode);
+struct drm_display_mode *
+intel_panel_edid_fixed_mode(struct intel_connector *connector);
+struct drm_display_mode *
+intel_panel_vbt_fixed_mode(struct intel_connector *connector);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_PANEL_H__ */
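The new intel_panel.h uses the usual kernel idiom of compiling the backlight-class entry points down to static inline no-ops when CONFIG_BACKLIGHT_CLASS_DEVICE is off, so callers never need an #ifdef of their own. A stripped-down illustration of the idiom (hypothetical function name, not the i915 API):

#include <stdio.h>

/* Hypothetical stand-in for the CONFIG_BACKLIGHT_CLASS_DEVICE switch. */
#ifdef HAVE_BACKLIGHT
int backlight_register(const char *name)
{
	printf("registering backlight device %s\n", name);
	return 0;
}
#else
/* Stub: callers compile and link unchanged, the call folds away. */
static inline int backlight_register(const char *name)
{
	(void)name;
	return 0;
}
#endif

int main(void)
{
	/* No #ifdef at the call site in either configuration. */
	return backlight_register("panel0");
}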
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index a8554dc4f196..e94b5b1bc1b7 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -24,23 +24,29 @@
*
*/
-#include <linux/seq_file.h>
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
#include "intel_drv.h"
+#include "intel_pipe_crc.h"
static const char * const pipe_crc_sources[] = {
- "none",
- "plane1",
- "plane2",
- "pf",
- "pipe",
- "TV",
- "DP-B",
- "DP-C",
- "DP-D",
- "auto",
+ [INTEL_PIPE_CRC_SOURCE_NONE] = "none",
+ [INTEL_PIPE_CRC_SOURCE_PLANE1] = "plane1",
+ [INTEL_PIPE_CRC_SOURCE_PLANE2] = "plane2",
+ [INTEL_PIPE_CRC_SOURCE_PLANE3] = "plane3",
+ [INTEL_PIPE_CRC_SOURCE_PLANE4] = "plane4",
+ [INTEL_PIPE_CRC_SOURCE_PLANE5] = "plane5",
+ [INTEL_PIPE_CRC_SOURCE_PLANE6] = "plane6",
+ [INTEL_PIPE_CRC_SOURCE_PLANE7] = "plane7",
+ [INTEL_PIPE_CRC_SOURCE_PIPE] = "pipe",
+ [INTEL_PIPE_CRC_SOURCE_TV] = "TV",
+ [INTEL_PIPE_CRC_SOURCE_DP_B] = "DP-B",
+ [INTEL_PIPE_CRC_SOURCE_DP_C] = "DP-C",
+ [INTEL_PIPE_CRC_SOURCE_DP_D] = "DP-D",
+ [INTEL_PIPE_CRC_SOURCE_AUTO] = "auto",
};
static int i8xx_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -192,8 +198,6 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum intel_pipe_crc_source *source,
u32 *val)
{
- bool need_stable_symbols = false;
-
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
int ret = i9xx_pipe_crc_auto_source(dev_priv, pipe, source);
if (ret)
@@ -209,56 +213,23 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return -EINVAL;
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_TV_PRE;
break;
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_B_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_C_G4X;
- need_stable_symbols = true;
- break;
- case INTEL_PIPE_CRC_SOURCE_DP_D:
- if (!IS_G4X(dev_priv))
- return -EINVAL;
- *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DP_D_G4X;
- need_stable_symbols = true;
- break;
case INTEL_PIPE_CRC_SOURCE_NONE:
*val = 0;
break;
default:
+ /*
+ * The DP CRC source doesn't work on g4x.
+ * It can be made to work to some degree by selecting
+ * the correct CRC source before the port is enabled,
+ * and not touching the CRC source bits again until
+ * the port is disabled. But even then the bits
+ * eventually get stuck and a reboot is needed to get
+ * working CRCs on the pipe again. Let's simply
+ * refuse to use DP CRCs on g4x.
+ */
return -EINVAL;
}
- /*
- * When the pipe CRC tap point is after the transcoders we need
- * to tweak symbol-level features to produce a deterministic series of
- * symbols for a given frame. We need to reset those features only once
- * a frame (instead of every nth symbol):
- * - DC-balance: used to ensure a better clock recovery from the data
- * link (SDVO)
- * - DisplayPort scrambling: used for EMI reduction
- */
- if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- WARN_ON(!IS_G4X(dev_priv));
-
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) | DC_BALANCE_RESET);
-
- if (pipe == PIPE_A)
- tmp |= PIPE_A_SCRAMBLE_RESET;
- else
- tmp |= PIPE_B_SCRAMBLE_RESET;
-
- I915_WRITE(PORT_DFT2_G4X, tmp);
- }
-
return 0;
}
@@ -283,24 +254,6 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
I915_WRITE(PORT_DFT2_G4X, tmp);
-
-}
-
-static void g4x_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
-
- if (pipe == PIPE_A)
- tmp &= ~PIPE_A_SCRAMBLE_RESET;
- else
- tmp &= ~PIPE_B_SCRAMBLE_RESET;
- I915_WRITE(PORT_DFT2_G4X, tmp);
-
- if (!(tmp & PIPE_SCRAMBLE_RESET_MASK)) {
- I915_WRITE(PORT_DFT_I9XX,
- I915_READ(PORT_DFT_I9XX) & ~DC_BALANCE_RESET);
- }
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -329,19 +282,18 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
return 0;
}
-static void hsw_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
- bool enable)
+static void
+intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable)
{
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *pipe_config;
struct drm_atomic_state *state;
struct drm_modeset_acquire_ctx ctx;
- int ret = 0;
+ int ret;
drm_modeset_acquire_init(&ctx, 0);
- state = drm_atomic_state_alloc(dev);
+ state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
ret = -ENOMEM;
goto unlock;
@@ -356,17 +308,10 @@ retry:
goto put_state;
}
- if (HAS_IPS(dev_priv)) {
- /*
- * When IPS gets enabled, the pipe CRC changes. Since IPS gets
- * enabled and disabled dynamically based on package C states,
- * user space can't make reliable use of the CRCs, so let's just
- * completely disable it.
- */
- pipe_config->ips_force_disable = enable;
- }
+ pipe_config->base.mode_changed = pipe_config->has_psr;
+ pipe_config->crc_enabled = enable;
- if (IS_HASWELL(dev_priv)) {
+ if (IS_HASWELL(dev_priv) && crtc->pipe == PIPE_A) {
pipe_config->pch_pfit.force_thru = enable;
if (pipe_config->cpu_transcoder == TRANSCODER_EDP &&
pipe_config->pch_pfit.enabled != enable)
@@ -392,11 +337,10 @@ unlock:
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
enum intel_pipe_crc_source *source,
- u32 *val,
- bool set_wa)
+ u32 *val)
{
if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
- *source = INTEL_PIPE_CRC_SOURCE_PF;
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
switch (*source) {
case INTEL_PIPE_CRC_SOURCE_PLANE1:
@@ -405,11 +349,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PLANE2:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_SPRITE_IVB;
break;
- case INTEL_PIPE_CRC_SOURCE_PF:
- if (set_wa && (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && pipe == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, true);
-
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
*val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PF_IVB;
break;
case INTEL_PIPE_CRC_SOURCE_NONE:
@@ -422,10 +362,52 @@ static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
return 0;
}
+static int skl_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
+ enum pipe pipe,
+ enum intel_pipe_crc_source *source,
+ u32 *val)
+{
+ if (*source == INTEL_PIPE_CRC_SOURCE_AUTO)
+ *source = INTEL_PIPE_CRC_SOURCE_PIPE;
+
+ switch (*source) {
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_1_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_2_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_3_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_4_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_5_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_6_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_PLANE_7_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ *val = PIPE_CRC_ENABLE | PIPE_CRC_SOURCE_DMUX_SKL;
+ break;
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ *val = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
enum pipe pipe,
- enum intel_pipe_crc_source *source, u32 *val,
- bool set_wa)
+ enum intel_pipe_crc_source *source, u32 *val)
{
if (IS_GEN(dev_priv, 2))
return i8xx_pipe_crc_ctl_reg(source, val);
@@ -435,8 +417,10 @@ static int get_new_crc_ctl_reg(struct drm_i915_private *dev_priv,
return vlv_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_pipe_crc_ctl_reg(source, val);
+ else if (INTEL_GEN(dev_priv) < 9)
+ return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
else
- return ivb_pipe_crc_ctl_reg(dev_priv, pipe, source, val, set_wa);
+ return skl_pipe_crc_ctl_reg(dev_priv, pipe, source, val);
}
static int
@@ -486,9 +470,6 @@ static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
switch (source) {
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_TV:
- case INTEL_PIPE_CRC_SOURCE_DP_B:
- case INTEL_PIPE_CRC_SOURCE_DP_C:
- case INTEL_PIPE_CRC_SOURCE_DP_D:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -532,7 +513,25 @@ static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
case INTEL_PIPE_CRC_SOURCE_PIPE:
case INTEL_PIPE_CRC_SOURCE_PLANE1:
case INTEL_PIPE_CRC_SOURCE_PLANE2:
- case INTEL_PIPE_CRC_SOURCE_PF:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int skl_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PLANE3:
+ case INTEL_PIPE_CRC_SOURCE_PLANE4:
+ case INTEL_PIPE_CRC_SOURCE_PLANE5:
+ case INTEL_PIPE_CRC_SOURCE_PLANE6:
+ case INTEL_PIPE_CRC_SOURCE_PLANE7:
case INTEL_PIPE_CRC_SOURCE_NONE:
return 0;
default:
@@ -552,8 +551,10 @@ intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
return vlv_crc_source_valid(dev_priv, source);
else if (IS_GEN_RANGE(dev_priv, 5, 6))
return ilk_crc_source_valid(dev_priv, source);
- else
+ else if (INTEL_GEN(dev_priv) < 9)
return ivb_crc_source_valid(dev_priv, source);
+ else
+ return skl_crc_source_valid(dev_priv, source);
}
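With the gen9+ path split out, CRC source handling stays a two-step affair: the requested name is parsed against the single pipe_crc_sources[] table, validated against what the platform can actually tap (plane1/2 and pipe before gen9, plane1-7 and the pipe DMUX tap from gen9 onwards), and only then turned into a PIPE_CRC_CTL value. A self-contained sketch of the parse-then-validate step (simplified enum and masks, not the driver's tables):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

enum crc_source { SRC_NONE, SRC_PLANE1, SRC_PLANE2, SRC_PLANE3, SRC_PIPE, SRC_NUM };

static const char * const source_names[SRC_NUM] = {
	[SRC_NONE]   = "none",
	[SRC_PLANE1] = "plane1",
	[SRC_PLANE2] = "plane2",
	[SRC_PLANE3] = "plane3",
	[SRC_PIPE]   = "pipe",
};

/* Per-platform mask of sources the hardware can tap (illustrative values). */
static const unsigned int ivb_sources = 1 << SRC_NONE | 1 << SRC_PLANE1 |
					1 << SRC_PLANE2 | 1 << SRC_PIPE;
static const unsigned int skl_sources = 1 << SRC_NONE | 1 << SRC_PLANE1 |
					1 << SRC_PLANE2 | 1 << SRC_PLANE3 |
					1 << SRC_PIPE;

static int parse_source(const char *name, enum crc_source *src)
{
	for (int i = 0; i < SRC_NUM; i++) {
		if (!strcmp(source_names[i], name)) {
			*src = i;
			return 0;
		}
	}
	return -1;
}

static bool source_valid(unsigned int mask, enum crc_source src)
{
	return mask & (1u << src);
}

int main(void)
{
	enum crc_source src;

	if (parse_source("plane3", &src) == 0)
		printf("plane3 valid: ivb %d, skl %d\n",
		       source_valid(ivb_sources, src),
		       source_valid(skl_sources, src));
	return 0;
}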
const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
@@ -592,6 +593,7 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
intel_wakeref_t wakeref;
u32 val = 0; /* shut up gcc */
int ret = 0;
+ bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
@@ -605,7 +607,11 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
return -EIO;
}
- ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val, true);
+ enable = source != INTEL_PIPE_CRC_SOURCE_NONE;
+ if (enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), true);
+
+ ret = get_new_crc_ctl_reg(dev_priv, crtc->index, &source, &val);
if (ret != 0)
goto out;
@@ -614,18 +620,16 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
POSTING_READ(PIPE_CRC_CTL(crtc->index));
if (!source) {
- if (IS_G4X(dev_priv))
- g4x_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_undo_pipe_scramble_reset(dev_priv, crtc->index);
- else if ((IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) && crtc->index == PIPE_A)
- hsw_pipe_A_crc_wa(dev_priv, false);
}
pipe_crc->skipped = 0;
out:
+ if (!enable)
+ intel_crtc_crc_setup_workarounds(to_intel_crtc(crtc), false);
+
intel_display_power_put(dev_priv, power_domain, wakeref);
return ret;
@@ -641,7 +645,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
if (!crtc->crc.opened)
return;
- if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val, false) < 0)
+ if (get_new_crc_ctl_reg(dev_priv, crtc->index, &pipe_crc->source, &val) < 0)
return;
/* Don't need pipe_crc->lock here, IRQs are not generated. */
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.h b/drivers/gpu/drm/i915/intel_pipe_crc.h
new file mode 100644
index 000000000000..81eaf1854788
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PIPE_CRC_H__
+#define __INTEL_PIPE_CRC_H__
+
+#include <linux/types.h>
+
+struct drm_crtc;
+struct intel_crtc;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
+void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
+#else
+#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
+static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+
+static inline void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc)
+{
+}
+#endif
+
+#endif /* __INTEL_PIPE_CRC_H__ */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54307f1df6cf..44be676fabd6 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -35,6 +35,9 @@
#include "i915_drv.h"
#include "intel_drv.h"
+#include "intel_fbc.h"
+#include "intel_pm.h"
+#include "intel_sprite.h"
#include "../../../platform/x86/intel_ips.h"
/**
@@ -338,12 +341,12 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (enable)
val |= DSP_MAXFIFO_PM5_ENABLE;
else
val &= ~DSP_MAXFIFO_PM5_ENABLE;
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, val);
mutex_unlock(&dev_priv->pcu_lock);
}
@@ -850,7 +853,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc)
u32 reg;
unsigned int wm;
- latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ latency = intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq);
@@ -3624,7 +3627,12 @@ static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) < 11)
return enabled_slices;
- if (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
+ /*
+ * FIXME: for now we'll only ever use 1 slice; pretend that we have
+ * only that 1 slice enabled until we have a proper way for on-demand
+ * toggling of the second slice.
+ */
+ if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
enabled_slices++;
return enabled_slices;
@@ -3919,12 +3927,43 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
}
-static unsigned int skl_cursor_allocation(int num_active)
+static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane);
+static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
+ int level,
+ const struct skl_wm_params *wp,
+ const struct skl_wm_level *result_prev,
+ struct skl_wm_level *result /* out */);
+
+static unsigned int
+skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
+ int num_active)
{
- if (num_active == 1)
- return 32;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int level, max_level = ilk_wm_max_level(dev_priv);
+ struct skl_wm_level wm = {};
+ int ret, min_ddb_alloc = 0;
+ struct skl_wm_params wp;
+
+ ret = skl_compute_wm_params(crtc_state, 256,
+ drm_format_info(DRM_FORMAT_ARGB8888),
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_MODE_ROTATE_0,
+ crtc_state->pixel_rate, &wp, 0);
+ WARN_ON(ret);
- return 8;
+ for (level = 0; level <= max_level; level++) {
+ skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
+ if (wm.min_ddb_alloc == U16_MAX)
+ break;
+
+ min_ddb_alloc = wm.min_ddb_alloc;
+ }
+
+ return max(num_active == 1 ? 32 : 8, min_ddb_alloc);
}
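skl_cursor_allocation() above no longer hands the cursor a fixed 32 (single pipe) or 8 blocks: it asks skl_compute_wm_params()/skl_compute_plane_wm() what a worst-case 256-wide linear ARGB8888 cursor needs at each watermark level and returns whichever is larger, the legacy fixed size or the highest usable level's min_ddb_alloc (a level reporting U16_MAX is unusable and ends the scan). The shape of that calculation as a stand-alone sketch with invented per-level values:

#include <stdint.h>
#include <stdio.h>

#define LEVEL_UNUSABLE UINT16_MAX	/* level cannot be supported at all */

static unsigned int cursor_allocation(const uint16_t *min_ddb_alloc,
				      int num_levels, int num_active_pipes)
{
	unsigned int fixed = num_active_pipes == 1 ? 32 : 8;
	unsigned int best = 0;

	for (int level = 0; level < num_levels; level++) {
		if (min_ddb_alloc[level] == LEVEL_UNUSABLE)
			break;		/* higher levels only need more */
		best = min_ddb_alloc[level];
	}

	return best > fixed ? best : fixed;
}

int main(void)
{
	/* Hypothetical per-level minimum DDB needs for the cursor plane. */
	const uint16_t levels[] = { 12, 20, 28, LEVEL_UNUSABLE };

	printf("1 pipe : %u blocks\n", cursor_allocation(levels, 4, 1));
	printf("3 pipes: %u blocks\n", cursor_allocation(levels, 4, 3));
	return 0;
}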
static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv,
@@ -3970,7 +4009,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv,
val = I915_READ(PLANE_BUF_CFG(pipe, plane_id));
val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id));
- if (fourcc == DRM_FORMAT_NV12)
+ if (is_planar_yuv_format(fourcc))
swap(val, val2);
skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val);
@@ -4180,7 +4219,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (intel_plane->id == PLANE_CURSOR)
return 0;
- if (plane == 1 && format != DRM_FORMAT_NV12)
+ if (plane == 1 && !is_planar_yuv_format(format))
return 0;
/*
@@ -4192,7 +4231,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
height = drm_rect_height(&intel_pstate->base.src) >> 16;
/* UV plane does 1/2 pixel sub-sampling */
- if (plane == 1 && format == DRM_FORMAT_NV12) {
+ if (plane == 1 && is_planar_yuv_format(format)) {
width /= 2;
height /= 2;
}
@@ -4308,7 +4347,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct skl_ddb_entry *alloc = &cstate->wm.skl.ddb;
- struct skl_plane_wm *wm;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
u16 uv_total[I915_MAX_PLANES] = {};
@@ -4349,7 +4387,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
return 0;
/* Allocate fixed number of blocks for cursor. */
- total[PLANE_CURSOR] = skl_cursor_allocation(num_active);
+ total[PLANE_CURSOR] = skl_cursor_allocation(cstate, num_active);
alloc_size -= total[PLANE_CURSOR];
cstate->wm.skl.plane_ddb_y[PLANE_CURSOR].start =
alloc->end - total[PLANE_CURSOR];
@@ -4365,15 +4403,23 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ if (plane_id == PLANE_CURSOR) {
+ if (WARN_ON(wm->wm[level].min_ddb_alloc >
+ total[PLANE_CURSOR])) {
+ blocks = U32_MAX;
+ break;
+ }
continue;
+ }
- wm = &cstate->wm.skl.optimal.planes[plane_id];
blocks += wm->wm[level].min_ddb_alloc;
blocks += wm->uv_wm[level].min_ddb_alloc;
}
- if (blocks < alloc_size) {
+ if (blocks <= alloc_size) {
alloc_size -= blocks;
break;
}
@@ -4392,6 +4438,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* proportional to its relative data rate.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ const struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
u64 rate;
u16 extra;
@@ -4405,8 +4453,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
if (total_data_rate == 0)
break;
- wm = &cstate->wm.skl.optimal.planes[plane_id];
-
rate = plane_data_rate[plane_id];
extra = min_t(u16, alloc_size,
DIV64_U64_ROUND_UP(alloc_size * rate,
@@ -4431,14 +4477,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- struct skl_ddb_entry *plane_alloc, *uv_plane_alloc;
+ struct skl_ddb_entry *plane_alloc =
+ &cstate->wm.skl.plane_ddb_y[plane_id];
+ struct skl_ddb_entry *uv_plane_alloc =
+ &cstate->wm.skl.plane_ddb_uv[plane_id];
if (plane_id == PLANE_CURSOR)
continue;
- plane_alloc = &cstate->wm.skl.plane_ddb_y[plane_id];
- uv_plane_alloc = &cstate->wm.skl.plane_ddb_uv[plane_id];
-
/* Gen11+ uses a separate plane for UV watermarks */
WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
@@ -4464,8 +4510,35 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
- memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
+ /*
+ * We only disable the watermarks for each plane if
+ * they exceed the ddb allocation of said plane. This
+ * is done so that we don't end up touching cursor
+ * watermarks needlessly when some other plane reduces
+ * our max possible watermark level.
+ *
+ * Bspec has this to say about the PLANE_WM enable bit:
+ * "All the watermarks at this level for all enabled
+ * planes must be enabled before the level will be used."
+ * So this is actually safe to do.
+ */
+ if (wm->wm[level].min_ddb_alloc > total[plane_id] ||
+ wm->uv_wm[level].min_ddb_alloc > uv_total[plane_id])
+ memset(&wm->wm[level], 0, sizeof(wm->wm[level]));
+
+ /*
+ * Wa_1408961008:icl, ehl
+ * Underruns with WM1+ disabled
+ */
+ if (IS_GEN(dev_priv, 11) &&
+ level == 1 && wm->wm[0].plane_en) {
+ wm->wm[level].plane_res_b = wm->wm[0].plane_res_b;
+ wm->wm[level].plane_res_l = wm->wm[0].plane_res_l;
+ wm->wm[level].ignore_lines = wm->wm[0].ignore_lines;
+ }
}
}
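The trimming loop above now clears a watermark level per plane only when that plane's own DDB share cannot cover the level's min_ddb_alloc, rather than wiping the level for every plane once the pipe as a whole ran out of blocks; per the Bspec quote, the hardware only uses a level once all enabled planes have it enabled, so leaving e.g. the cursor's levels intact is safe. On gen11, Wa_1408961008 additionally copies WM0 into WM1 to avoid underruns with WM1+ disabled. A toy version of the per-plane trim (invented numbers):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct wm_level {
	bool enabled;
	uint16_t blocks;	/* plane_res_b */
	uint16_t min_ddb;	/* min_ddb_alloc */
};

/*
 * A level stays enabled only while this plane's DDB share covers what the
 * level needs; otherwise the level is cleared for this plane alone, so a
 * hungry neighbour doesn't needlessly drop e.g. cursor watermarks.
 */
static void trim_levels(struct wm_level *wm, int num_levels, uint16_t total)
{
	for (int level = 0; level < num_levels; level++)
		if (wm[level].min_ddb > total)
			memset(&wm[level], 0, sizeof(wm[level]));
}

int main(void)
{
	struct wm_level wm[3] = {
		{ .enabled = true, .blocks = 10, .min_ddb = 12 },
		{ .enabled = true, .blocks = 18, .min_ddb = 20 },
		{ .enabled = true, .blocks = 40, .min_ddb = 44 },
	};

	trim_levels(wm, 3, 32);	/* the plane was given 32 blocks */

	for (int level = 0; level < 3; level++)
		printf("WM%d: %s\n", level, wm[level].enabled ? "enabled" : "disabled");
	return 0;
}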
@@ -4474,7 +4547,9 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
* don't have enough DDB blocks for it.
*/
for_each_plane_id_on_crtc(intel_crtc, plane_id) {
- wm = &cstate->wm.skl.optimal.planes[plane_id];
+ struct skl_plane_wm *wm =
+ &cstate->wm.skl.optimal.planes[plane_id];
+
if (wm->trans_wm.plane_res_b >= total[plane_id])
memset(&wm->trans_wm, 0, sizeof(wm->trans_wm));
}
@@ -4568,57 +4643,45 @@ skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
}
static int
-skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
- struct skl_wm_params *wp, int color_plane)
+skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
+ int width, const struct drm_format_info *format,
+ u64 modifier, unsigned int rotation,
+ u32 plane_pixel_rate, struct skl_wm_params *wp,
+ int color_plane)
{
- struct intel_plane *plane = to_intel_plane(intel_pstate->base.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct drm_plane_state *pstate = &intel_pstate->base;
- const struct drm_framebuffer *fb = pstate->fb;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 interm_pbpl;
- /* only NV12 format has two planes */
- if (color_plane == 1 && fb->format->format != DRM_FORMAT_NV12) {
- DRM_DEBUG_KMS("Non NV12 format have single plane\n");
+ /* only planar format has two planes */
+ if (color_plane == 1 && !is_planar_yuv_format(format->format)) {
+ DRM_DEBUG_KMS("Non planar format have single plane\n");
return -EINVAL;
}
- wp->y_tiled = fb->modifier == I915_FORMAT_MOD_Y_TILED ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED ||
- fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->x_tiled = fb->modifier == I915_FORMAT_MOD_X_TILED;
- wp->rc_surface = fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
- fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
- wp->is_planar = fb->format->format == DRM_FORMAT_NV12;
-
- if (plane->id == PLANE_CURSOR) {
- wp->width = intel_pstate->base.crtc_w;
- } else {
- /*
- * Src coordinates are already rotated by 270 degrees for
- * the 90/270 degree plane rotation cases (to match the
- * GTT mapping), hence no need to account for rotation here.
- */
- wp->width = drm_rect_width(&intel_pstate->base.src) >> 16;
- }
+ wp->y_tiled = modifier == I915_FORMAT_MOD_Y_TILED ||
+ modifier == I915_FORMAT_MOD_Yf_TILED ||
+ modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED;
+ wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
+ wp->is_planar = is_planar_yuv_format(format->format);
+ wp->width = width;
if (color_plane == 1 && wp->is_planar)
wp->width /= 2;
- wp->cpp = fb->format->cpp[color_plane];
- wp->plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate,
- intel_pstate);
+ wp->cpp = format->cpp[color_plane];
+ wp->plane_pixel_rate = plane_pixel_rate;
if (INTEL_GEN(dev_priv) >= 11 &&
- fb->modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
+ modifier == I915_FORMAT_MOD_Yf_TILED && wp->cpp == 1)
wp->dbuf_block_size = 256;
else
wp->dbuf_block_size = 512;
- if (drm_rotation_90_or_270(pstate->rotation)) {
-
+ if (drm_rotation_90_or_270(rotation)) {
switch (wp->cpp) {
case 1:
wp->y_min_scanlines = 16;
@@ -4663,12 +4726,40 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *cstate,
wp->y_tile_minimum = mul_u32_fixed16(wp->y_min_scanlines,
wp->plane_blocks_per_line);
+
wp->linetime_us = fixed16_to_u32_round_up(
- intel_get_linetime_us(cstate));
+ intel_get_linetime_us(crtc_state));
return 0;
}
+static int
+skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ struct skl_wm_params *wp, int color_plane)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ const struct drm_framebuffer *fb = plane_state->base.fb;
+ int width;
+
+ if (plane->id == PLANE_CURSOR) {
+ width = plane_state->base.crtc_w;
+ } else {
+ /*
+ * Src coordinates are already rotated by 270 degrees for
+ * the 90/270 degree plane rotation cases (to match the
+ * GTT mapping), hence no need to account for rotation here.
+ */
+ width = drm_rect_width(&plane_state->base.src) >> 16;
+ }
+
+ return skl_compute_wm_params(crtc_state, width,
+ fb->format, fb->modifier,
+ plane_state->base.rotation,
+ skl_adjusted_plane_pixel_rate(crtc_state, plane_state),
+ wp, color_plane);
+}
+
static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
{
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -4679,14 +4770,12 @@ static bool skl_wm_has_lines(struct drm_i915_private *dev_priv, int level)
}
static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
int level,
const struct skl_wm_params *wp,
const struct skl_wm_level *result_prev,
struct skl_wm_level *result /* out */)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
u32 latency = dev_priv->wm.skl_latency[level];
uint_fixed_16_16_t method1, method2;
uint_fixed_16_16_t selected_result;
@@ -4805,19 +4894,17 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *cstate,
static void
skl_compute_wm_levels(const struct intel_crtc_state *cstate,
- const struct intel_plane_state *intel_pstate,
const struct skl_wm_params *wm_params,
struct skl_wm_level *levels)
{
- struct drm_i915_private *dev_priv =
- to_i915(intel_pstate->base.plane->dev);
+ struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
int level, max_level = ilk_wm_max_level(dev_priv);
struct skl_wm_level *result_prev = &levels[0];
for (level = 0; level <= max_level; level++) {
struct skl_wm_level *result = &levels[level];
- skl_compute_plane_wm(cstate, intel_pstate, level, wm_params,
+ skl_compute_plane_wm(cstate, level, wm_params,
result_prev, result);
result_prev = result;
@@ -4914,7 +5001,7 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->wm);
skl_compute_transition_wm(crtc_state, &wm_params, wm);
return 0;
@@ -4936,13 +5023,12 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- skl_compute_wm_levels(crtc_state, plane_state, &wm_params, wm->uv_wm);
+ skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm);
return 0;
}
-static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int skl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
@@ -4968,8 +5054,7 @@ static int skl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
- struct intel_crtc_state *crtc_state,
+static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
enum plane_id plane_id = to_intel_plane(plane_state->base.plane)->id;
@@ -5006,10 +5091,10 @@ static int icl_build_plane_wm(struct skl_pipe_wm *pipe_wm,
return 0;
}
-static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
- struct skl_pipe_wm *pipe_wm)
+static int skl_build_pipe_wm(struct intel_crtc_state *cstate)
{
struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev);
+ struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
struct drm_crtc_state *crtc_state = &cstate->base;
struct drm_plane *plane;
const struct drm_plane_state *pstate;
@@ -5026,11 +5111,9 @@ static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
to_intel_plane_state(pstate);
if (INTEL_GEN(dev_priv) >= 11)
- ret = icl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = icl_build_plane_wm(cstate, intel_pstate);
else
- ret = skl_build_plane_wm(pipe_wm,
- cstate, intel_pstate);
+ ret = skl_build_plane_wm(cstate, intel_pstate);
if (ret)
return ret;
}
@@ -5056,11 +5139,12 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
{
u32 val = 0;
- if (level->plane_en) {
+ if (level->plane_en)
val |= PLANE_WM_EN;
- val |= level->plane_res_b;
- val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- }
+ if (level->ignore_lines)
+ val |= PLANE_WM_IGNORE_LINES;
+ val |= level->plane_res_b;
+ val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
I915_WRITE_FW(reg, val);
}
@@ -5126,6 +5210,7 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
const struct skl_wm_level *l2)
{
return l1->plane_en == l2->plane_en &&
+ l1->ignore_lines == l2->ignore_lines &&
l1->plane_res_l == l2->plane_res_l &&
l1->plane_res_b == l2->plane_res_b;
}
@@ -5169,7 +5254,7 @@ static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
}
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
- const struct skl_ddb_entry entries[],
+ const struct skl_ddb_entry *entries,
int num_entries, int ignore_idx)
{
int i;
@@ -5183,23 +5268,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
return false;
}
-static int skl_update_pipe_wm(struct intel_crtc_state *cstate,
- const struct skl_pipe_wm *old_pipe_wm,
- struct skl_pipe_wm *pipe_wm, /* out */
- bool *changed /* out */)
-{
- struct intel_crtc *crtc = to_intel_crtc(cstate->base.crtc);
- int ret;
-
- ret = skl_build_pipe_wm(cstate, pipe_wm);
- if (ret)
- return ret;
-
- *changed = !skl_pipe_wm_equals(crtc, old_pipe_wm, pipe_wm);
-
- return 0;
-}
-
static u32
pipes_modified(struct intel_atomic_state *state)
{
@@ -5269,6 +5337,11 @@ skl_compute_ddb(struct intel_atomic_state *state)
return 0;
}
+static char enast(bool enable)
+{
+ return enable ? '*' : ' ';
+}
+
static void
skl_print_wm_changes(struct intel_atomic_state *state)
{
@@ -5279,8 +5352,16 @@ skl_print_wm_changes(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
+ if ((drm_debug & DRM_UT_KMS) == 0)
+ return;
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
+ const struct skl_pipe_wm *old_pipe_wm, *new_pipe_wm;
+
+ old_pipe_wm = &old_crtc_state->wm.skl.optimal;
+ new_pipe_wm = &new_crtc_state->wm.skl.optimal;
+
for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
enum plane_id plane_id = plane->id;
const struct skl_ddb_entry *old, *new;
@@ -5291,10 +5372,86 @@ skl_print_wm_changes(struct intel_atomic_state *state)
if (skl_ddb_entry_equal(old, new))
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%d - %d) -> (%d - %d)\n",
+ DRM_DEBUG_KMS("[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n",
+ plane->base.base.id, plane->base.name,
+ old->start, old->end, new->start, new->end,
+ skl_ddb_entry_size(old), skl_ddb_entry_size(new));
+ }
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ enum plane_id plane_id = plane->id;
+ const struct skl_plane_wm *old_wm, *new_wm;
+
+ old_wm = &old_pipe_wm->planes[plane_id];
+ new_wm = &new_pipe_wm->planes[plane_id];
+
+ if (skl_plane_wm_equals(dev_priv, old_wm, new_wm))
+ continue;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm"
+ " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm\n",
+ plane->base.base.id, plane->base.name,
+ enast(old_wm->wm[0].plane_en), enast(old_wm->wm[1].plane_en),
+ enast(old_wm->wm[2].plane_en), enast(old_wm->wm[3].plane_en),
+ enast(old_wm->wm[4].plane_en), enast(old_wm->wm[5].plane_en),
+ enast(old_wm->wm[6].plane_en), enast(old_wm->wm[7].plane_en),
+ enast(old_wm->trans_wm.plane_en),
+ enast(new_wm->wm[0].plane_en), enast(new_wm->wm[1].plane_en),
+ enast(new_wm->wm[2].plane_en), enast(new_wm->wm[3].plane_en),
+ enast(new_wm->wm[4].plane_en), enast(new_wm->wm[5].plane_en),
+ enast(new_wm->wm[6].plane_en), enast(new_wm->wm[7].plane_en),
+ enast(new_wm->trans_wm.plane_en));
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d"
+ " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d\n",
plane->base.base.id, plane->base.name,
- old->start, old->end,
- new->start, new->end);
+ enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].plane_res_l,
+ enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].plane_res_l,
+ enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].plane_res_l,
+ enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].plane_res_l,
+ enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].plane_res_l,
+ enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].plane_res_l,
+ enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].plane_res_l,
+ enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].plane_res_l,
+ enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.plane_res_l,
+
+ enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].plane_res_l,
+ enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].plane_res_l,
+ enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].plane_res_l,
+ enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].plane_res_l,
+ enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].plane_res_l,
+ enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].plane_res_l,
+ enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].plane_res_l,
+ enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].plane_res_l,
+ enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.plane_res_l);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].plane_res_b, old_wm->wm[1].plane_res_b,
+ old_wm->wm[2].plane_res_b, old_wm->wm[3].plane_res_b,
+ old_wm->wm[4].plane_res_b, old_wm->wm[5].plane_res_b,
+ old_wm->wm[6].plane_res_b, old_wm->wm[7].plane_res_b,
+ old_wm->trans_wm.plane_res_b,
+ new_wm->wm[0].plane_res_b, new_wm->wm[1].plane_res_b,
+ new_wm->wm[2].plane_res_b, new_wm->wm[3].plane_res_b,
+ new_wm->wm[4].plane_res_b, new_wm->wm[5].plane_res_b,
+ new_wm->wm[6].plane_res_b, new_wm->wm[7].plane_res_b,
+ new_wm->trans_wm.plane_res_b);
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d"
+ " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d\n",
+ plane->base.base.id, plane->base.name,
+ old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc,
+ old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc,
+ old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc,
+ old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc,
+ old_wm->trans_wm.min_ddb_alloc,
+ new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc,
+ new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc,
+ new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc,
+ new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc,
+ new_wm->trans_wm.min_ddb_alloc);
}
}
}
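skl_print_wm_changes() now dumps the per-level enable bits, lines, blocks and min_ddb values for every plane whose watermarks changed, and skips the whole block unless KMS debug output is enabled, since the formatting is not free; enast() keeps the lines readable by collapsing each bool into '*' or ' '. A minimal example of that formatting convention (values invented):

#include <stdbool.h>
#include <stdio.h>

static char enast(bool enable)
{
	return enable ? '*' : ' ';
}

int main(void)
{
	bool wm_en[4] = { true, true, false, false };

	/* Prints "level *wm0,*wm1, wm2, wm3" for the values above. */
	printf("level %cwm0,%cwm1,%cwm2,%cwm3\n",
	       enast(wm_en[0]), enast(wm_en[1]), enast(wm_en[2]), enast(wm_en[3]));
	return 0;
}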
@@ -5449,10 +5606,9 @@ static int
skl_compute_wm(struct intel_atomic_state *state)
{
struct intel_crtc *crtc;
- struct intel_crtc_state *cstate;
+ struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
struct skl_ddb_values *results = &state->wm_results;
- struct skl_pipe_wm *pipe_wm;
bool changed = false;
int ret, i;
@@ -5470,12 +5626,8 @@ skl_compute_wm(struct intel_atomic_state *state)
* pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- cstate, i) {
- const struct skl_pipe_wm *old_pipe_wm =
- &old_crtc_state->wm.skl.optimal;
-
- pipe_wm = &cstate->wm.skl.optimal;
- ret = skl_update_pipe_wm(cstate, old_pipe_wm, pipe_wm, &changed);
+ new_crtc_state, i) {
+ ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
@@ -5483,7 +5635,9 @@ skl_compute_wm(struct intel_atomic_state *state)
if (ret)
return ret;
- if (changed)
+ if (!skl_pipe_wm_equals(crtc,
+ &old_crtc_state->wm.skl.optimal,
+ &new_crtc_state->wm.skl.optimal))
results->dirty_pipes |= drm_crtc_mask(&crtc->base);
}
@@ -5609,6 +5763,7 @@ static inline void skl_wm_level_from_reg_val(u32 val,
struct skl_wm_level *level)
{
level->plane_en = val & PLANE_WM_EN;
+ level->ignore_lines = val & PLANE_WM_IGNORE_LINES;
level->plane_res_b = val & PLANE_WM_BLOCKS_MASK;
level->plane_res_l = (val >> PLANE_WM_LINES_SHIFT) &
PLANE_WM_LINES_MASK;
@@ -5986,7 +6141,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv)
if (IS_CHERRYVIEW(dev_priv)) {
mutex_lock(&dev_priv->pcu_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
@@ -6451,7 +6606,7 @@ static void rps_set_power(struct drm_i915_private *dev_priv, int new_power)
ei_down * threshold_down / 100));
I915_WRITE(GEN6_RP_CONTROL,
- GEN6_RP_MEDIA_TURBO |
+ (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) |
GEN6_RP_MEDIA_HW_NORMAL_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
@@ -6629,9 +6784,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* punit into committing the voltage change) as that takes a lot less
* power than the render powerwell.
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA);
err = valleyview_set_rps(dev_priv, val);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA);
if (err)
DRM_ERROR("Failed to set RPS for idle\n");
@@ -6691,8 +6846,7 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->pcu_lock);
}
-void gen6_rps_boost(struct i915_request *rq,
- struct intel_rps_client *rps_client)
+void gen6_rps_boost(struct i915_request *rq)
{
struct intel_rps *rps = &rq->i915->gt_pm.rps;
unsigned long flags;
@@ -6721,7 +6875,7 @@ void gen6_rps_boost(struct i915_request *rq,
if (READ_ONCE(rps->cur_freq) < rps->boost_freq)
schedule_work(&rps->work);
- atomic_inc(rps_client ? &rps_client->boosts : &rps->boosts);
+ atomic_inc(&rps->boosts);
}
int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
@@ -6782,11 +6936,11 @@ static void valleyview_disable_rc6(struct drm_i915_private *dev_priv)
{
/* We're doing forcewake before Disabling RC6,
* This what the BIOS expects when going into suspend */
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RC_CONTROL, 0);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
@@ -6945,7 +7099,7 @@ static void reset_rps(struct drm_i915_private *dev_priv,
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Program defaults and thresholds for RPS */
if (IS_GEN(dev_priv, 9))
@@ -6963,7 +7117,79 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
* RP_INTERRUPT_LIMITS & RPNSWREQ registers */
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+}
+
+static void gen11_enable_rc6(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /* 1a: Software RC state - RC0 */
+ I915_WRITE(GEN6_RC_STATE, 0);
+
+ /*
+ * 1b: Get forcewake during program sequence. Although the driver
+ * hasn't enabled a state yet where we need forcewake, BIOS may have.
+ */
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+
+ /* 2a: Disable RC states. */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ /* 2b: Program RC6 thresholds.*/
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85);
+ I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150);
+
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
+
+ if (HAS_GUC(dev_priv))
+ I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
+
+ /*
+ * 2c: Program Coarse Power Gating Policies.
+ *
+ * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we
+ * use instead is a more conservative estimate for the maximum time
+ * it takes us to service a CS interrupt and submit a new ELSP - that
+ * is the time which the GPU is idle waiting for the CPU to select the
+ * next request to execute. If the idle hysteresis is less than that
+ * interrupt service latency, the hardware will automatically gate
+ * the power well and we will then incur the wake up cost on top of
+ * the service latency. A similar guide from intel_pstate is that we
+ * do not want the enable hysteresis to less than the wakeup latency.
+ *
+ * igt/gem_exec_nop/sequential provides a rough estimate for the
+ * service latency, and puts it around 10us for Broadwell (and other
+ * big core) and around 40us for Broxton (and other low power cores).
+ * [Note that for legacy ringbuffer submission, this is less than 1us!]
+ * However, the wakeup latency on Broxton is closer to 100us. To be
+ * conservative, we have to factor in a context switch on top (due
+ * to ksoftirqd).
+ */
+ I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250);
+ I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250);
+
+ /* 3a: Enable RC6 */
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1));
+
+ /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */
+ I915_WRITE(GEN9_PG_ENABLE,
+ GEN9_RENDER_PG_ENABLE |
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE);
+
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
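The RC6 and power-gating hysteresis registers written in gen11_enable_rc6() count in 1280 ns ticks, as on gen9: the idle hysteresis of 25 is roughly 32 us, and the 250 written to the render/media PG idle hysteresis registers is roughly 320 us, comfortably above the 10-100 us interrupt-service latencies the comment cites. A trivial conversion helper makes the units explicit (illustrative only):

#include <stdio.h>

/* RC6/power-gating hysteresis registers count in 1280 ns ticks. */
static unsigned long ticks_to_ns(unsigned long ticks)
{
	return ticks * 1280;
}

int main(void)
{
	/* Values programmed above: idle hysteresis 25, PG idle hysteresis 250. */
	printf("25 ticks  = %lu ns (~32 us)\n", ticks_to_ns(25));
	printf("250 ticks = %lu ns (~320 us)\n", ticks_to_ns(250));
	return 0;
}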
static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
@@ -6977,7 +7203,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7054,7 +7280,7 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN9_PG_ENABLE,
GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7067,7 +7293,7 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
/* 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 2a: Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7088,14 +7314,14 @@ static void gen8_enable_rc6(struct drm_i915_private *dev_priv)
GEN7_RC_CTL_TO_MODE |
GEN6_RC_CTL_RC6_ENABLE);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen8_enable_rps(struct drm_i915_private *dev_priv)
{
struct intel_rps *rps = &dev_priv->gt_pm.rps;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1 Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RPNSWREQ,
@@ -7128,7 +7354,7 @@ static void gen8_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7148,7 +7374,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* disable the counters and set deterministic thresholds */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7196,7 +7422,7 @@ static void gen6_enable_rc6(struct drm_i915_private *dev_priv)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_enable_rps(struct drm_i915_private *dev_priv)
@@ -7207,7 +7433,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
* Perhaps there might be some value in exposing these to
* userspace...
*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Power down if completely idle for over 50ms */
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
@@ -7215,7 +7441,7 @@ static void gen6_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, gen6_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
@@ -7638,7 +7864,7 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
/* 1a & 1b: Get forcewake during program sequence. Although the driver
* hasn't enabled a state yet where we need forcewake, BIOS may have.*/
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7670,14 +7896,14 @@ static void cherryview_enable_rc6(struct drm_i915_private *dev_priv)
rc6_mode = GEN7_RC_CTL_TO_MODE;
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* 1: Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
@@ -7712,7 +7938,7 @@ static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
@@ -7730,7 +7956,7 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
/* Disable RC states. */
I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -7755,14 +7981,14 @@ static void valleyview_enable_rc6(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC_CONTROL,
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
{
u32 val;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
@@ -7796,7 +8022,7 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
reset_rps(dev_priv, valleyview_set_rps);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
static unsigned long intel_pxfreq(u32 vidfreq)
@@ -8037,14 +8263,14 @@ unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
return val;
}
-static struct drm_i915_private *i915_mch_dev;
+static struct drm_i915_private __rcu *i915_mch_dev;
static struct drm_i915_private *mchdev_get(void)
{
struct drm_i915_private *i915;
rcu_read_lock();
- i915 = i915_mch_dev;
+ i915 = rcu_dereference(i915_mch_dev);
if (!kref_get_unless_zero(&i915->drm.ref))
i915 = NULL;
rcu_read_unlock();
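mchdev_get() now pairs an __rcu-annotated pointer with kref_get_unless_zero(): readers dereference under rcu_read_lock() and only keep the device if they can still raise a non-zero refcount, so a device whose last reference is already gone is never resurrected. The refcount half of that pattern, sketched with C11 atomics rather than the kernel's kref API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object is not already on its way out. */
static bool get_unless_zero(atomic_uint *refcount)
{
	unsigned int old = atomic_load(refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
		/* old was reloaded by the failed CAS; retry. */
	}
	return false;	/* refcount already hit zero, object is dying */
}

int main(void)
{
	atomic_uint live = 1, dying = 0;

	printf("live object : %s\n", get_unless_zero(&live) ? "got ref" : "missed");
	printf("dying object: %s\n", get_unless_zero(&dying) ? "got ref" : "missed");
	return 0;
}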
@@ -8343,22 +8569,6 @@ void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
pm_runtime_put(&dev_priv->drm.pdev->dev);
}
-/**
- * intel_suspend_gt_powersave - suspend PM work and helper threads
- * @dev_priv: i915 device
- *
- * We don't want to disable RC6 or other features here, we just want
- * to make sure any work we've queued has finished and won't bother
- * us while we're suspended.
- */
-void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 6)
- return;
-
- /* gen6_rps_idle() will be called later to disable interrupts */
-}
-
void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv)
{
dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */
@@ -8458,6 +8668,8 @@ static void intel_enable_rc6(struct drm_i915_private *dev_priv)
cherryview_enable_rc6(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
valleyview_enable_rc6(dev_priv);
+ else if (INTEL_GEN(dev_priv) >= 11)
+ gen11_enable_rc6(dev_priv);
else if (INTEL_GEN(dev_priv) >= 9)
gen9_enable_rc6(dev_priv);
else if (IS_BROADWELL(dev_priv))
@@ -9361,7 +9573,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
*/
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ICELAKE(dev_priv))
+ if (IS_GEN(dev_priv, 11))
dev_priv->display.init_clock_gating = icl_init_clock_gating;
else if (IS_CANNONLAKE(dev_priv))
dev_priv->display.init_clock_gating = cnl_init_clock_gating;
@@ -9454,7 +9666,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->display.initial_watermarks = g4x_initial_watermarks;
dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
} else if (IS_PINEVIEW(dev_priv)) {
- if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
+ if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
dev_priv->fsb_freq,
dev_priv->mem_freq)) {
@@ -9552,7 +9764,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
500, 0, NULL)) {
DRM_ERROR("timeout waiting for pcode read (from mbox %x) to finish for %ps\n",
@@ -9600,7 +9812,7 @@ int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(&dev_priv->uncore,
GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
fast_timeout_us, slow_timeout_ms,
NULL)) {
@@ -9824,6 +10036,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg)
{
+ struct intel_uncore *uncore = &dev_priv->uncore;
u64 time_hw, prev_hw, overflow_hw;
unsigned int fw_domains;
unsigned long flags;
@@ -9845,10 +10058,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
return 0;
- fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw_domains);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
@@ -9867,7 +10080,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
}
overflow_hw = BIT_ULL(32);
- time_hw = I915_READ_FW(reg);
+ time_hw = intel_uncore_read_fw(uncore, reg);
}
/*
@@ -9889,8 +10102,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
- intel_uncore_forcewake_put__locked(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div);
}
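The accesses above all go through the intel_uncore pointer now; the sequence in intel_rc6_residency_ns() is the general pattern for raw register reads under an explicit forcewake grab. A minimal sketch of that pattern, using only calls that appear in the hunk above (the function name and the single-register return are illustrative, not from this patch):

static u32 example_read_fw(struct intel_uncore *uncore, i915_reg_t reg)
{
	unsigned int fw_domains;
	unsigned long flags;
	u32 val;

	/* Work out which forcewake domains a read of this register needs. */
	fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw_domains);

	/* The _fw accessors assume forcewake is already held. */
	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, fw_domains);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return val;
}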
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
new file mode 100644
index 000000000000..674a3f0f16a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PM_H__
+#define __INTEL_PM_H__
+
+#include <linux/types.h>
+
+struct drm_atomic_state;
+struct drm_device;
+struct drm_i915_private;
+struct i915_request;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_plane;
+struct skl_ddb_allocation;
+struct skl_ddb_entry;
+struct skl_pipe_wm;
+struct skl_wm_level;
+
+void intel_init_clock_gating(struct drm_i915_private *dev_priv);
+void intel_suspend_hw(struct drm_i915_private *dev_priv);
+int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
+void intel_update_watermarks(struct intel_crtc *crtc);
+void intel_init_pm(struct drm_i915_private *dev_priv);
+void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
+void intel_pm_setup(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+void intel_gpu_ips_teardown(void);
+void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
+void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
+void gen6_rps_busy(struct drm_i915_private *dev_priv);
+void gen6_rps_idle(struct drm_i915_private *dev_priv);
+void gen6_rps_boost(struct i915_request *rq);
+void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
+ struct skl_ddb_entry *ddb_y,
+ struct skl_ddb_entry *ddb_uv);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
+ struct skl_ddb_allocation *ddb /* out */);
+void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
+ struct skl_pipe_wm *out);
+void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
+void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+bool intel_can_enable_sagv(struct drm_atomic_state *state);
+int intel_enable_sagv(struct drm_i915_private *dev_priv);
+int intel_disable_sagv(struct drm_i915_private *dev_priv);
+bool skl_wm_level_equals(const struct skl_wm_level *l1,
+ const struct skl_wm_level *l2);
+bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb,
+ const struct skl_ddb_entry *entries,
+ int num_entries, int ignore_idx);
+void skl_write_plane_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+void skl_write_cursor_wm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
+bool ilk_disable_lp_wm(struct drm_device *dev);
+int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *cstate);
+void intel_init_ipc(struct drm_i915_private *dev_priv);
+void intel_enable_ipc(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_PM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 84a0fb981561..963663ba0edf 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -21,6 +21,14 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <drm/drm_atomic_helper.h>
+
+#include "i915_drv.h"
+#include "intel_dp.h"
+#include "intel_drv.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
/**
* DOC: Panel Self Refresh (PSR/SRD)
*
@@ -51,10 +59,6 @@
* must be correctly synchronized/cancelled when shutting down the pipe."
*/
-
-#include "intel_drv.h"
-#include "i915_drv.h"
-
static bool psr_global_enabled(u32 debug)
{
switch (debug & I915_PSR_DEBUG_MODE_MASK) {
@@ -78,9 +82,6 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
- case I915_PSR_DEBUG_DEFAULT:
- if (i915_modparams.enable_psr <= 0)
- return false;
default:
return crtc_state->has_psr2;
}
@@ -435,32 +436,16 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ u32 val = 0;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
-
- val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
- if (IS_HASWELL(dev_priv))
- val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
-
- if (dev_priv->psr.link_standby)
- val |= EDP_PSR_LINK_STANDBY;
+ if (INTEL_GEN(dev_priv) >= 11)
+ val |= EDP_PSR_TP4_TIME_0US;
if (dev_priv->vbt.psr.tp1_wakeup_time_us == 0)
- val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 100)
val |= EDP_PSR_TP1_TIME_100us;
else if (dev_priv->vbt.psr.tp1_wakeup_time_us <= 500)
@@ -469,7 +454,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= EDP_PSR_TP1_TIME_2500us;
if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us == 0)
- val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR_TP2_TP3_TIME_100us;
else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
@@ -483,6 +468,35 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
else
val |= EDP_PSR_TP1_TP2_SEL;
+ return val;
+}
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ /* Let's use 6 as the minimum to cover all known cases including the
+ * off-by-one issue that HW has in some cases.
+ */
+ int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
+
+ /* sink_sync_latency of 8 means source has to wait for more than 8
+ * frames, we'll go with 9 frames for now
+ */
+ idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ if (IS_HASWELL(dev_priv))
+ val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
+
+ if (dev_priv->psr.link_standby)
+ val |= EDP_PSR_LINK_STANDBY;
+
+ val |= intel_psr1_get_tp_time(intel_dp);
+
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
@@ -509,16 +523,22 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_FRAME_BEFORE_SU(dev_priv->psr.sink_sync_latency + 1);
- if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us >= 0 &&
- dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 50)
+ if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
+ dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 50)
val |= EDP_PSR2_TP2_TIME_50us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 100)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 100)
val |= EDP_PSR2_TP2_TIME_100us;
- else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time_us <= 500)
+ else if (dev_priv->vbt.psr.psr2_tp2_tp3_wakeup_time_us <= 500)
val |= EDP_PSR2_TP2_TIME_500us;
else
val |= EDP_PSR2_TP2_TIME_2500us;
+ /*
+ * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
+ * recommends keeping this bit unset while PSR2 is enabled.
+ */
+ I915_WRITE(EDP_PSR_CTL, 0);
+
I915_WRITE(EDP_PSR2_CTL, val);
}
@@ -530,11 +550,6 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
int crtc_vdisplay = crtc_state->base.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0;
- /*
- * FIXME psr2_support is messed up. It's both computed
- * dynamically during PSR enable, and extracted from sink
- * caps during eDP detection.
- */
if (!dev_priv->psr.sink_psr2_support)
return false;
@@ -575,6 +590,11 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
+ if (crtc_state->crc_enabled) {
+ DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ return false;
+ }
+
return true;
}
@@ -610,9 +630,8 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- if (IS_HASWELL(dev_priv) &&
- adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -718,8 +737,11 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
{
struct intel_dp *intel_dp = dev_priv->psr.dp;
- if (dev_priv->psr.enabled)
- return;
+ WARN_ON(dev_priv->psr.enabled);
+
+ dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
+ dev_priv->psr.busy_frontbuffer_bits = 0;
+ dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
DRM_DEBUG_KMS("Enabling PSR%s\n",
dev_priv->psr.psr2_enabled ? "2" : "1");
@@ -752,20 +774,13 @@ void intel_psr_enable(struct intel_dp *intel_dp,
WARN_ON(dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (dev_priv->psr.prepared) {
- DRM_DEBUG_KMS("PSR already in use\n");
+
+ if (!psr_global_enabled(dev_priv->psr.debug)) {
+ DRM_DEBUG_KMS("PSR disabled by flag\n");
goto unlock;
}
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
- dev_priv->psr.busy_frontbuffer_bits = 0;
- dev_priv->psr.prepared = true;
- dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe;
-
- if (psr_global_enabled(dev_priv->psr.debug))
- intel_psr_enable_locked(dev_priv, crtc_state);
- else
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ intel_psr_enable_locked(dev_priv, crtc_state);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -819,8 +834,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
}
/* Wait till PSR is idle */
- if (intel_wait_for_register(dev_priv, psr_status, psr_status_mask, 0,
- 2000))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ psr_status, psr_status_mask, 0, 2000))
DRM_ERROR("Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
@@ -848,18 +863,69 @@ void intel_psr_disable(struct intel_dp *intel_dp,
return;
mutex_lock(&dev_priv->psr.lock);
- if (!dev_priv->psr.prepared) {
- mutex_unlock(&dev_priv->psr.lock);
- return;
- }
intel_psr_disable_locked(intel_dp);
- dev_priv->psr.prepared = false;
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
}
+static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
+{
+ /*
+ * Display WA #0884: all
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * The workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense to write to the currently
+ * active pipe.
+ */
+ I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+}
+
+/**
+ * intel_psr_update - Update PSR state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function updates the PSR state, disabling, enabling or switching the
+ * PSR version when executing fastsets. For full modesets, intel_psr_disable()
+ * and intel_psr_enable() should be called instead.
+ */
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct i915_psr *psr = &dev_priv->psr;
+ bool enable, psr2_enable;
+
+ if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
+ return;
+
+ mutex_lock(&dev_priv->psr.lock);
+
+ enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
+
+ if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
+ /* Force a PSR exit when enabling CRC to avoid CRC timeouts */
+ if (crtc_state->crc_enabled && psr->enabled)
+ psr_force_hw_tracking_exit(dev_priv);
+
+ goto unlock;
+ }
+
+ if (psr->enabled)
+ intel_psr_disable_locked(intel_dp);
+
+ if (enable)
+ intel_psr_enable_locked(dev_priv, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->psr.lock);
+}
+
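A caller on the fastset path would be expected to use this roughly as sketched below; the wrapper name and the surrounding hook are hypothetical, not part of this patch:

/* Hypothetical fastset-path hook (illustrative only): */
static void example_pipe_update_psr(struct intel_dp *intel_dp,
				    const struct intel_crtc_state *new_crtc_state)
{
	/*
	 * The pipe stays running across a fastset, so PSR is re-evaluated
	 * in place: it may be enabled, disabled, or switched between PSR1
	 * and PSR2 depending on the new state and the debug mask. A full
	 * modeset goes through intel_psr_disable()/intel_psr_enable().
	 */
	intel_psr_update(intel_dp, new_crtc_state);
}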
/**
* intel_psr_wait_for_idle - wait for PSR1 to idle
* @new_crtc_state: new CRTC state
@@ -890,7 +956,7 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
* defensive enough to cover everything.
*/
- return __intel_wait_for_register(dev_priv, EDP_PSR_STATUS,
+ return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS,
EDP_PSR_STATUS_STATE_MASK,
EDP_PSR_STATUS_STATE_IDLE, 2, 50,
out_value);
@@ -915,7 +981,7 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
mutex_unlock(&dev_priv->psr.lock);
- err = intel_wait_for_register(dev_priv, reg, mask, 0, 50);
+ err = intel_wait_for_register(&dev_priv->uncore, reg, mask, 0, 50);
if (err)
DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
@@ -924,36 +990,63 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
return err == 0 && dev_priv->psr.enabled;
}
-static bool switching_psr(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state,
- u32 mode)
+static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
{
- /* Can't switch psr state anyway if PSR2 is not supported. */
- if (!crtc_state || !crtc_state->has_psr2)
- return false;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ int err;
- if (dev_priv->psr.psr2_enabled && mode == I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
- if (!dev_priv->psr.psr2_enabled && mode != I915_PSR_DEBUG_FORCE_PSR1)
- return true;
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+ state->acquire_ctx = &ctx;
+
+retry:
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_crtc_state *crtc_state;
+ struct intel_crtc_state *intel_crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ err = PTR_ERR(crtc_state);
+ goto error;
+ }
+
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
- return false;
+ if (crtc_state->active && intel_crtc_state->has_psr) {
+ /* Mark mode as changed to trigger a pipe->update() */
+ crtc_state->mode_changed = true;
+ break;
+ }
+ }
+
+ err = drm_atomic_commit(state);
+
+error:
+ if (err == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ err = drm_modeset_backoff(&ctx);
+ if (!err)
+ goto retry;
+ }
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+ drm_atomic_state_put(state);
+
+ return err;
}
-int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
- struct drm_modeset_acquire_ctx *ctx,
- u64 val)
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
{
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector_state *conn_state;
- struct intel_crtc_state *crtc_state = NULL;
- struct drm_crtc_commit *commit;
- struct drm_crtc *crtc;
- struct intel_dp *dp;
+ const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
+ u32 old_mode;
int ret;
- bool enable;
- u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
@@ -961,49 +1054,19 @@ int intel_psr_set_debugfs_mode(struct drm_i915_private *dev_priv,
return -EINVAL;
}
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx);
- if (ret)
- return ret;
-
- /* dev_priv->psr.dp should be set once and then never touched again. */
- dp = READ_ONCE(dev_priv->psr.dp);
- conn_state = dp->attached_connector->base.state;
- crtc = conn_state->crtc;
- if (crtc) {
- ret = drm_modeset_lock(&crtc->mutex, ctx);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->state);
- commit = crtc_state->base.commit;
- } else {
- commit = conn_state->commit;
- }
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- return ret;
- }
-
ret = mutex_lock_interruptible(&dev_priv->psr.lock);
if (ret)
return ret;
- enable = psr_global_enabled(val);
-
- if (!enable || switching_psr(dev_priv, crtc_state, mode))
- intel_psr_disable_locked(dev_priv->psr.dp);
-
+ old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK;
dev_priv->psr.debug = val;
- if (crtc)
- dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
-
intel_psr_irq_control(dev_priv, dev_priv->psr.debug);
- if (dev_priv->psr.prepared && enable)
- intel_psr_enable_locked(dev_priv, crtc_state);
-
mutex_unlock(&dev_priv->psr.lock);
+
+ if (old_mode != mode)
+ ret = intel_psr_fastset_force(dev_priv);
+
return ret;
}
@@ -1121,18 +1184,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
/* By definition flush = invalidate + flush */
- if (frontbuffer_bits) {
- /*
- * Display WA #0884: all
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
- }
+ if (frontbuffer_bits)
+ psr_force_hw_tracking_exit(dev_priv);
if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
schedule_work(&dev_priv->psr.work);
@@ -1176,7 +1229,6 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
if (val) {
DRM_DEBUG_KMS("PSR interruption error set\n");
dev_priv->psr.sink_not_reliable = true;
- return;
}
/* Set link_standby x link_off defaults */
diff --git a/drivers/gpu/drm/i915/intel_psr.h b/drivers/gpu/drm/i915/intel_psr.h
new file mode 100644
index 000000000000..dc818826f36d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_PSR_H__
+#define __INTEL_PSR_H__
+
+#include "intel_frontbuffer.h"
+
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_dp;
+
+#define CAN_PSR(dev_priv) (HAS_PSR(dev_priv) && dev_priv->psr.sink_support)
+void intel_psr_init_dpcd(struct intel_dp *intel_dp);
+void intel_psr_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_psr_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state);
+void intel_psr_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 value);
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_flush(struct drm_i915_private *dev_priv,
+ unsigned frontbuffer_bits,
+ enum fb_op_origin origin);
+void intel_psr_init(struct drm_i915_private *dev_priv);
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug);
+void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir);
+void intel_psr_short_pulse(struct intel_dp *intel_dp);
+int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state,
+ u32 *out_value);
+bool intel_psr_enabled(struct intel_dp *intel_dp);
+
+#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7f841dba87b3..029fd8ec1857 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -43,12 +43,6 @@
*/
#define LEGACY_REQUEST_SIZE 200
-static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
-{
- return (i915_ggtt_offset(engine->status_page.vma) +
- I915_GEM_HWS_INDEX_ADDR);
-}
-
unsigned int intel_ring_update_space(struct intel_ring *ring)
{
unsigned int space;
@@ -317,9 +311,9 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = GFX_OP_PIPE_CONTROL(4);
- *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
- *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -424,10 +418,10 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = GFX_OP_PIPE_CONTROL(4);
*cs++ = (PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL);
- *cs++ = intel_hws_seqno_address(rq->engine);
- *cs++ = rq->global_seqno;
+ PIPE_CONTROL_STORE_DATA_INDEX |
+ PIPE_CONTROL_GLOBAL_GTT_IVB);
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -448,8 +442,8 @@ static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
*cs++ = MI_NOOP;
@@ -473,8 +467,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR | MI_FLUSH_DW_USE_GTT;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
for (i = 0; i < GEN7_XCS_WA; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
@@ -554,16 +548,17 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
*/
default:
GEM_BUG_ON(engine->id);
- case RCS:
+ /* fallthrough */
+ case RCS0:
hwsp = RENDER_HWS_PGA_GEN7;
break;
- case BCS:
+ case BCS0:
hwsp = BLT_HWS_PGA_GEN7;
break;
- case VCS:
+ case VCS0:
hwsp = BSD_HWS_PGA_GEN7;
break;
- case VECS:
+ case VECS0:
hwsp = VEBOX_HWS_PGA_GEN7;
break;
}
@@ -580,19 +575,19 @@ static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
static void flush_cs_tlb(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
if (!IS_GEN_RANGE(dev_priv, 6, 7))
return;
/* ring should be idle before issuing a sync flush*/
- WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
-
- I915_WRITE(instpm,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
- if (intel_wait_for_register(dev_priv,
- instpm, INSTPM_SYNC_FLUSH, 0,
+ WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+
+ ENGINE_WRITE(engine, RING_INSTPM,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (intel_wait_for_register(engine->uncore,
+ RING_INSTPM(engine->mmio_base),
+ INSTPM_SYNC_FLUSH, 0,
1000))
DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
engine->name);
@@ -611,32 +606,36 @@ static bool stop_ring(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
if (INTEL_GEN(dev_priv) > 2) {
- I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(dev_priv,
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
+ if (intel_wait_for_register(engine->uncore,
RING_MI_MODE(engine->mmio_base),
MODE_IDLE,
MODE_IDLE,
1000)) {
DRM_ERROR("%s : timed out trying to stop ring\n",
engine->name);
- /* Sometimes we observe that the idle flag is not
+
+ /*
+ * Sometimes we observe that the idle flag is not
* set even though the ring is empty. So double
* check before giving up.
*/
- if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
+ if (ENGINE_READ(engine, RING_HEAD) !=
+ ENGINE_READ(engine, RING_TAIL))
return false;
}
}
- I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
+ ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
- I915_WRITE_HEAD(engine, 0);
- I915_WRITE_TAIL(engine, 0);
+ ENGINE_WRITE(engine, RING_HEAD, 0);
+ ENGINE_WRITE(engine, RING_TAIL, 0);
/* The ring must be empty before it is disabled */
- I915_WRITE_CTL(engine, 0);
+ ENGINE_WRITE(engine, RING_CTL, 0);
- return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
+ return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
static int init_ring_common(struct intel_engine_cs *engine)
@@ -645,26 +644,26 @@ static int init_ring_common(struct intel_engine_cs *engine)
struct intel_ring *ring = engine->buffer;
int ret = 0;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
if (!stop_ring(engine)) {
/* G45 ring initialization often fails to reset head to zero */
DRM_DEBUG_DRIVER("%s head not reset to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
if (!stop_ring(engine)) {
DRM_ERROR("failed to set %s head to zero "
"ctl %08x head %08x tail %08x start %08x\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_HEAD(engine),
- I915_READ_TAIL(engine),
- I915_READ_START(engine));
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_HEAD),
+ ENGINE_READ(engine, RING_TAIL),
+ ENGINE_READ(engine, RING_START));
ret = -EIO;
goto out;
}
@@ -678,18 +677,18 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_engine_reset_breadcrumbs(engine);
/* Enforce ordering by reading HEAD register back */
- I915_READ_HEAD(engine);
+ ENGINE_READ(engine, RING_HEAD);
/* Initialize the ring. This must happen _after_ we've cleared the ring
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
+ ENGINE_WRITE(engine, RING_START, i915_ggtt_offset(ring->vma));
/* WaClearRingBufHeadRegAtInit:ctg,elk */
- if (I915_READ_HEAD(engine))
+ if (ENGINE_READ(engine, RING_HEAD))
DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
- engine->name, I915_READ_HEAD(engine));
+ engine->name, ENGINE_READ(engine, RING_HEAD));
/* Check that the ring offsets point within the ring! */
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
@@ -697,42 +696,44 @@ static int init_ring_common(struct intel_engine_cs *engine)
intel_ring_update_space(ring);
/* First wake the ring up to an empty/idle ring */
- I915_WRITE_HEAD(engine, ring->head);
- I915_WRITE_TAIL(engine, ring->head);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_HEAD, ring->head);
+ ENGINE_WRITE(engine, RING_TAIL, ring->head);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
- I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
+ ENGINE_WRITE(engine, RING_CTL, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+ if (intel_wait_for_register(engine->uncore,
+ RING_CTL(engine->mmio_base),
RING_VALID, RING_VALID,
50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
- I915_READ_CTL(engine),
- I915_READ_CTL(engine) & RING_VALID,
- I915_READ_HEAD(engine), ring->head,
- I915_READ_TAIL(engine), ring->tail,
- I915_READ_START(engine),
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_CTL) & RING_VALID,
+ ENGINE_READ(engine, RING_HEAD), ring->head,
+ ENGINE_READ(engine, RING_TAIL), ring->tail,
+ ENGINE_READ(engine, RING_START),
i915_ggtt_offset(ring->vma));
ret = -EIO;
goto out;
}
if (INTEL_GEN(dev_priv) > 2)
- I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE(engine,
+ RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
/* Now awake, let it get started */
if (ring->tail != ring->head) {
- I915_WRITE_TAIL(engine, ring->tail);
- (void)I915_READ_TAIL(engine);
+ ENGINE_WRITE(engine, RING_TAIL, ring->tail);
+ ENGINE_POSTING_READ(engine, RING_TAIL);
}
/* Papering over lost _interrupts_ immediately following the restart */
intel_engine_queue_breadcrumbs(engine);
out:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
return ret;
}
@@ -758,11 +759,6 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
}
}
- GEM_TRACE("%s seqno=%d, current=%d, stalled? %s\n",
- engine->name,
- rq ? rq->global_seqno : 0,
- intel_engine_get_seqno(engine),
- yesno(stalled));
/*
* The guilty request will get skipped on a hung engine.
*
@@ -878,7 +874,7 @@ static int init_render_ring(struct intel_engine_cs *engine)
I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
if (INTEL_GEN(dev_priv) >= 6)
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
return 0;
}
@@ -892,18 +888,12 @@ static void cancel_requests(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->timeline.requests, link) {
- GEM_BUG_ON(!request->global_seqno);
-
if (!i915_request_signaled(request))
dma_fence_set_error(&request->fence, -EIO);
i915_request_mark_complete(request);
}
- intel_write_status_page(engine,
- I915_GEM_HWS_INDEX,
- intel_engine_last_submit(engine));
-
/* Remaining _unready_ requests will be nop'ed when submitted */
spin_unlock_irqrestore(&engine->timeline.lock, flags);
@@ -911,12 +901,10 @@ static void cancel_requests(struct intel_engine_cs *engine)
static void i9xx_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
-
i915_request_submit(request);
- I915_WRITE_TAIL(request->engine,
- intel_ring_set_tail(request->ring, request->tail));
+ ENGINE_WRITE(request->engine, RING_TAIL,
+ intel_ring_set_tail(request->ring, request->tail));
}
static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
@@ -931,8 +919,8 @@ static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = rq->fence.seqno;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
*cs++ = MI_USER_INTERRUPT;
@@ -953,14 +941,14 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
*cs++ = MI_FLUSH;
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_SEQNO_ADDR;
- *cs++ = rq->fence.seqno;
+ *cs++ = I915_GEM_HWS_HANGCHECK_ADDR;
+ *cs++ = intel_engine_next_hangcheck_seqno(rq->engine);
BUILD_BUG_ON(GEN5_WA_STORES < 1);
for (i = 0; i < GEN5_WA_STORES; i++) {
*cs++ = MI_STORE_DWORD_INDEX;
- *cs++ = I915_GEM_HWS_INDEX_ADDR;
- *cs++ = rq->global_seqno;
+ *cs++ = I915_GEM_HWS_SEQNO_ADDR;
+ *cs++ = rq->fence.seqno;
}
*cs++ = MI_USER_INTERRUPT;
@@ -988,20 +976,16 @@ gen5_irq_disable(struct intel_engine_cs *engine)
static void
i9xx_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ engine->i915->irq_mask &= ~engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
+ intel_uncore_posting_read_fw(engine->uncore, GEN2_IMR);
}
static void
i9xx_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE(IMR, dev_priv->irq_mask);
+ engine->i915->irq_mask |= engine->irq_enable_mask;
+ intel_uncore_write(engine->uncore, GEN2_IMR, engine->i915->irq_mask);
}
static void
@@ -1010,7 +994,7 @@ i8xx_irq_enable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask &= ~engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
POSTING_READ16(RING_IMR(engine->mmio_base));
}
@@ -1020,7 +1004,7 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
dev_priv->irq_mask |= engine->irq_enable_mask;
- I915_WRITE16(IMR, dev_priv->irq_mask);
+ I915_WRITE16(GEN2_IMR, dev_priv->irq_mask);
}
static int
@@ -1041,47 +1025,38 @@ bsd_ring_flush(struct i915_request *rq, u32 mode)
static void
gen6_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine,
- ~(engine->irq_enable_mask |
- engine->irq_keep_mask));
+ ENGINE_WRITE(engine, RING_IMR,
+ ~(engine->irq_enable_mask | engine->irq_keep_mask));
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
+ gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
gen6_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
- gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+ gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_enable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~engine->irq_enable_mask);
/* Flush/delay to ensure the RING_IMR is active before the GT IMR */
- POSTING_READ_FW(RING_IMR(engine->mmio_base));
+ ENGINE_POSTING_READ(engine, RING_IMR);
- gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
+ gen6_unmask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static void
hsw_vebox_irq_disable(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
-
- I915_WRITE_IMR(engine, ~0);
- gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
+ ENGINE_WRITE(engine, RING_IMR, ~0);
+ gen6_mask_pm_irq(engine->i915, engine->irq_enable_mask);
}
static int
@@ -1211,15 +1186,6 @@ int intel_ring_pin(struct intel_ring *ring)
else
flags |= PIN_HIGH;
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
- ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- else
- ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
- if (unlikely(ret))
- goto unpin_timeline;
- }
-
ret = i915_vma_pin(vma, 0, 0, flags);
if (unlikely(ret))
goto unpin_timeline;
@@ -1323,6 +1289,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
if (!ring)
return ERR_PTR(-ENOMEM);
+ kref_init(&ring->ref);
INIT_LIST_HEAD(&ring->request_list);
ring->timeline = i915_timeline_get(timeline);
@@ -1347,9 +1314,9 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
return ring;
}
-void
-intel_ring_free(struct intel_ring *ring)
+void intel_ring_free(struct kref *ref)
{
+ struct intel_ring *ring = container_of(ref, typeof(*ring), ref);
struct drm_i915_gem_object *obj = ring->vma->obj;
i915_vma_close(ring->vma);
@@ -1359,17 +1326,24 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
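With the ring now reference counted (kref_init() at creation, intel_ring_free() as the release callback), the intel_ring_put() calls further down presumably map onto kref_put(). A sketch of the assumed helpers; the real definitions live in the header and are not visible in this hunk:

static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
{
	kref_get(&ring->ref);
	return ring;
}

static inline void intel_ring_put(struct intel_ring *ring)
{
	/* intel_ring_free() above is the kref release callback. */
	kref_put(&ring->ref, intel_ring_free);
}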
-static void intel_ring_context_destroy(struct intel_context *ce)
+static void __ring_context_fini(struct intel_context *ce)
{
- GEM_BUG_ON(ce->pin_count);
-
- if (!ce->state)
- return;
-
GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
i915_gem_object_put(ce->state->obj);
}
+static void ring_context_destroy(struct kref *ref)
+{
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+
+ if (ce->state)
+ __ring_context_fini(ce);
+
+ intel_context_free(ce);
+}
+
static int __context_pin_ppgtt(struct i915_gem_context *ctx)
{
struct i915_hw_ppgtt *ppgtt;
@@ -1400,17 +1374,6 @@ static int __context_pin(struct intel_context *ce)
if (!vma)
return 0;
- /*
- * Clear this page out of any CPU caches for coherent swap-in/out.
- * We only want to do this on the first bind so that we do not stall
- * on an active context (which by nature is already on the GPU).
- */
- if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
- err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
- if (err)
- return err;
- }
-
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
return err;
@@ -1420,6 +1383,7 @@ static int __context_pin(struct intel_context *ce)
* it cannot reclaim the object until we release it.
*/
vma->obj->pin_global++;
+ vma->obj->mm.dirty = true;
return 0;
}
@@ -1436,12 +1400,10 @@ static void __context_unpin(struct intel_context *ce)
i915_vma_unpin(vma);
}
-static void intel_ring_context_unpin(struct intel_context *ce)
+static void ring_context_unpin(struct intel_context *ce)
{
__context_unpin_ppgtt(ce->gem_context);
__context_unpin(ce);
-
- i915_gem_context_put(ce->gem_context);
}
static struct i915_vma *
@@ -1456,6 +1418,24 @@ alloc_context_vma(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return ERR_CAST(obj);
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915))
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);
+
if (engine->default_state) {
void *defaults, *vaddr;
@@ -1473,29 +1453,10 @@ alloc_context_vma(struct intel_engine_cs *engine)
}
memcpy(vaddr, defaults, engine->context_size);
-
i915_gem_object_unpin_map(engine->default_state);
- i915_gem_object_unpin_map(obj);
- }
- /*
- * Try to make the context utilize L3 as well as LLC.
- *
- * On VLV we don't have L3 controls in the PTEs so we
- * shouldn't touch the cache level, especially as that
- * would make the object snooped which might have a
- * negative performance impact.
- *
- * Snooping is required on non-llc platforms in execlist
- * mode, but since all GGTT accesses use PAT entry 0 we
- * get snooping anyway regardless of cache_level.
- *
- * This is only applicable for Ivy Bridge devices since
- * later platforms don't have L3 control bits in the PTE.
- */
- if (IS_IVYBRIDGE(i915)) {
- /* Ignore any error, regard it as a simple optimisation */
- i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
}
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
@@ -1513,69 +1474,52 @@ err_obj:
return ERR_PTR(err);
}
-static struct intel_context *
-__ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx,
- struct intel_context *ce)
+static int ring_context_pin(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
int err;
+ /* One ringbuffer to rule them all */
+ GEM_BUG_ON(!engine->buffer);
+ ce->ring = engine->buffer;
+
if (!ce->state && engine->context_size) {
struct i915_vma *vma;
vma = alloc_context_vma(engine);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err;
- }
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
ce->state = vma;
}
err = __context_pin(ce);
if (err)
- goto err;
+ return err;
err = __context_pin_ppgtt(ce->gem_context);
if (err)
goto err_unpin;
- i915_gem_context_get(ctx);
-
- /* One ringbuffer to rule them all */
- GEM_BUG_ON(!engine->buffer);
- ce->ring = engine->buffer;
-
- return ce;
+ return 0;
err_unpin:
__context_unpin(ce);
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
+ return err;
}
-static const struct intel_context_ops ring_context_ops = {
- .unpin = intel_ring_context_unpin,
- .destroy = intel_ring_context_destroy,
-};
-
-static struct intel_context *
-intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static void ring_context_reset(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
-
- lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
- if (likely(ce->pin_count++))
- return ce;
- GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+ intel_ring_reset(ce->ring, 0);
+}
- ce->ops = &ring_context_ops;
+static const struct intel_context_ops ring_context_ops = {
+ .pin = ring_context_pin,
+ .unpin = ring_context_unpin,
- return __ring_context_pin(engine, ctx, ce);
-}
+ .reset = ring_context_reset,
+ .destroy = ring_context_destroy,
+};
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
@@ -1587,9 +1531,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
if (err)
return err;
- timeline = i915_timeline_create(engine->i915,
- engine->name,
- engine->status_page.vma);
+ timeline = i915_timeline_create(engine->i915, engine->status_page.vma);
if (IS_ERR(timeline)) {
err = PTR_ERR(timeline);
goto err;
@@ -1621,7 +1563,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
err_unpin:
intel_ring_unpin(ring);
err_ring:
- intel_ring_free(ring);
+ intel_ring_put(ring);
err:
intel_engine_cleanup_common(engine);
return err;
@@ -1632,10 +1574,10 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_ring_unpin(engine->buffer);
- intel_ring_free(engine->buffer);
+ intel_ring_put(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
@@ -1646,16 +1588,6 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
kfree(engine);
}
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- /* Restart from the beginning of the rings for convenience */
- for_each_engine(engine, dev_priv, id)
- intel_ring_reset(engine->buffer, 0);
-}
-
static int load_pd_dir(struct i915_request *rq,
const struct i915_hw_ppgtt *ppgtt)
{
@@ -1667,11 +1599,11 @@ static int load_pd_dir(struct i915_request *rq,
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
*cs++ = PP_DIR_DCLV_2G;
*cs++ = MI_LOAD_REGISTER_IMM(1);
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = ppgtt->pd.base.ggtt_offset << 10;
intel_ring_advance(rq, cs);
@@ -1690,7 +1622,7 @@ static int flush_pd_dir(struct i915_request *rq)
/* Stall until the page table load is complete */
*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
- *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
*cs++ = i915_scratch_offset(rq->i915);
*cs++ = MI_NOOP;
@@ -1703,8 +1635,8 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
enum intel_engine_id id;
- const int num_rings =
- IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
+ const int num_engines =
+ IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
bool force_restore = false;
int len;
u32 *cs;
@@ -1718,7 +1650,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
len = 4;
if (IS_GEN(i915, 7))
- len += 2 + (num_rings ? 4*num_rings + 6 : 0);
+ len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
if (flags & MI_FORCE_RESTORE) {
GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
flags &= ~MI_FORCE_RESTORE;
@@ -1733,10 +1665,10 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (IS_GEN(i915, 7)) {
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1763,8 +1695,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
* placeholder we use to flush other contexts.
*/
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
- engine)->state) |
+ *cs++ = i915_ggtt_offset(engine->kernel_context->state) |
MI_MM_SPACE_GTT |
MI_RESTORE_INHIBIT;
}
@@ -1779,11 +1710,11 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
if (IS_GEN(i915, 7)) {
- if (num_rings) {
+ if (num_engines) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
+ *cs++ = MI_LOAD_REGISTER_IMM(num_engines);
for_each_engine(signaller, i915, id) {
if (signaller == engine)
continue;
@@ -1861,7 +1792,7 @@ static int switch_context(struct i915_request *rq)
* explanation.
*/
loops = 1;
- if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
+ if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915))
loops = 32;
do {
@@ -1870,15 +1801,15 @@ static int switch_context(struct i915_request *rq)
goto err;
} while (--loops);
- if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
- unwind_mm = intel_engine_flag(engine);
- ppgtt->pd_dirty_rings &= ~unwind_mm;
+ if (ppgtt->pd_dirty_engines & engine->mask) {
+ unwind_mm = engine->mask;
+ ppgtt->pd_dirty_engines &= ~unwind_mm;
hw_flags = MI_FORCE_RESTORE;
}
}
if (rq->hw_context->state) {
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
/*
* The kernel context(s) is treated as pure scratch and is not
@@ -1938,7 +1869,7 @@ static int switch_context(struct i915_request *rq)
err_mm:
if (unwind_mm)
- ppgtt->pd_dirty_rings |= unwind_mm;
+ ppgtt->pd_dirty_engines |= unwind_mm;
err:
return ret;
}
@@ -1947,7 +1878,7 @@ static int ring_request_alloc(struct i915_request *request)
{
int ret;
- GEM_BUG_ON(!request->hw_context->pin_count);
+ GEM_BUG_ON(!intel_context_is_pinned(request->hw_context));
GEM_BUG_ON(request->timeline->has_initial_breadcrumb);
/*
@@ -2108,23 +2039,23 @@ int intel_ring_cacheline_align(struct i915_request *rq)
static void gen6_bsd_submit_request(struct i915_request *request)
{
- struct drm_i915_private *dev_priv = request->i915;
+ struct intel_uncore *uncore = request->engine->uncore;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
/* Every tail move must follow the sequence below */
/* Disable notification that the ring is IDLE. The GT
* will then assume that it is busy and bring it out of rc6.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
- I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
+ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (__intel_wait_for_register_fw(dev_priv,
+ if (__intel_wait_for_register_fw(uncore,
GEN6_BSD_SLEEP_PSMI_CONTROL,
GEN6_BSD_SLEEP_INDICATOR,
0,
@@ -2137,10 +2068,10 @@ static void gen6_bsd_submit_request(struct i915_request *request)
/* Let the ring send IDLE messages to the GT again,
* and so let it sleep to conserve power when idle.
*/
- I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
- _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
+ intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL,
+ _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
static int mi_flush_dw(struct i915_request *rq, u32 flags)
@@ -2282,7 +2213,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->reset.reset = reset_ring;
engine->reset.finish = reset_finish;
- engine->context_pin = intel_ring_context_pin;
+ engine->cops = &ring_context_ops;
engine->request_alloc = ring_request_alloc;
/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 710ffb221775..72c7c337ace9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -6,22 +6,20 @@
#include <linux/hashtable.h>
#include <linux/irq_work.h>
+#include <linux/random.h>
#include <linux/seqlock.h>
#include "i915_gem_batch_pool.h"
-
-#include "i915_reg.h"
#include "i915_pmu.h"
+#include "i915_reg.h"
#include "i915_request.h"
#include "i915_selftest.h"
#include "i915_timeline.h"
+#include "intel_engine_types.h"
#include "intel_gpu_commands.h"
#include "intel_workarounds.h"
struct drm_printer;
-struct i915_sched_attr;
-
-#define I915_CMD_HASH_ORDER 9
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
* but keeps the logic simple. Indeed, the whole purpose of this macro is just
@@ -31,28 +29,44 @@ struct i915_sched_attr;
#define CACHELINE_BYTES 64
#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32))
-struct intel_hw_status_page {
- struct i915_vma *vma;
- u32 *addr;
-};
+/*
+ * The register defines to be used with the following macros need to accept a
+ * base param, e.g.:
+ *
+ * REG_FOO(base) _MMIO((base) + <relative offset>)
+ * ENGINE_READ(engine, REG_FOO);
+ *
+ * Register arrays are to be defined and accessed as follows:
+ *
+ * REG_BAR(base, i) _MMIO((base) + <relative offset> + (i) * <shift>)
+ * ENGINE_READ_IDX(engine, REG_BAR, i)
+ */
+
+#define __ENGINE_REG_OP(op__, engine__, ...) \
+ intel_uncore_##op__((engine__)->uncore, __VA_ARGS__)
-#define I915_READ_TAIL(engine) I915_READ(RING_TAIL((engine)->mmio_base))
-#define I915_WRITE_TAIL(engine, val) I915_WRITE(RING_TAIL((engine)->mmio_base), val)
+#define __ENGINE_READ_OP(op__, engine__, reg__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base))
-#define I915_READ_START(engine) I915_READ(RING_START((engine)->mmio_base))
-#define I915_WRITE_START(engine, val) I915_WRITE(RING_START((engine)->mmio_base), val)
+#define ENGINE_READ16(...) __ENGINE_READ_OP(read16, __VA_ARGS__)
+#define ENGINE_READ(...) __ENGINE_READ_OP(read, __VA_ARGS__)
+#define ENGINE_READ_FW(...) __ENGINE_READ_OP(read_fw, __VA_ARGS__)
+#define ENGINE_POSTING_READ(...) __ENGINE_READ_OP(posting_read, __VA_ARGS__)
-#define I915_READ_HEAD(engine) I915_READ(RING_HEAD((engine)->mmio_base))
-#define I915_WRITE_HEAD(engine, val) I915_WRITE(RING_HEAD((engine)->mmio_base), val)
+#define ENGINE_READ64(engine__, lower_reg__, upper_reg__) \
+ __ENGINE_REG_OP(read64_2x32, (engine__), \
+ lower_reg__((engine__)->mmio_base), \
+ upper_reg__((engine__)->mmio_base))
-#define I915_READ_CTL(engine) I915_READ(RING_CTL((engine)->mmio_base))
-#define I915_WRITE_CTL(engine, val) I915_WRITE(RING_CTL((engine)->mmio_base), val)
+#define ENGINE_READ_IDX(engine__, reg__, idx__) \
+ __ENGINE_REG_OP(read, (engine__), reg__((engine__)->mmio_base, (idx__)))
-#define I915_READ_IMR(engine) I915_READ(RING_IMR((engine)->mmio_base))
-#define I915_WRITE_IMR(engine, val) I915_WRITE(RING_IMR((engine)->mmio_base), val)
+#define __ENGINE_WRITE_OP(op__, engine__, reg__, val__) \
+ __ENGINE_REG_OP(op__, (engine__), reg__((engine__)->mmio_base), (val__))
-#define I915_READ_MODE(engine) I915_READ(RING_MI_MODE((engine)->mmio_base))
-#define I915_WRITE_MODE(engine, val) I915_WRITE(RING_MI_MODE((engine)->mmio_base), val)
+#define ENGINE_WRITE16(...) __ENGINE_WRITE_OP(write16, __VA_ARGS__)
+#define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__)
+#define ENGINE_WRITE_FW(...) __ENGINE_WRITE_OP(write_fw, __VA_ARGS__)
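Taken together, a register define that accepts a base parameter can then be used with these macros roughly as follows; REG_FOO/REG_BAR and their offsets are the placeholder names from the comment above, not real registers:

/* Placeholder defines, mirroring the comment above: */
#define REG_FOO(base)		_MMIO((base) + 0x30)
#define REG_BAR(base, i)	_MMIO((base) + 0x40 + (i) * 4)

static void example_engine_mmio(struct intel_engine_cs *engine)
{
	u32 foo, bar0;

	foo = ENGINE_READ(engine, REG_FOO);		/* full read via engine->uncore */
	bar0 = ENGINE_READ_IDX(engine, REG_BAR, 0);	/* indexed register array */

	ENGINE_WRITE(engine, REG_FOO, foo | BIT(0));
	ENGINE_POSTING_READ(engine, REG_FOO);		/* flush the write */
}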
/* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
* do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
@@ -90,506 +104,7 @@ hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
return "unknown";
}
-#define I915_MAX_SLICES 3
-#define I915_MAX_SUBSLICES 8
-
-#define instdone_slice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask)
-
-#define instdone_subslice_mask(dev_priv__) \
- (IS_GEN(dev_priv__, 7) ? \
- 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0])
-
-#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \
- for ((slice__) = 0, (subslice__) = 0; \
- (slice__) < I915_MAX_SLICES; \
- (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \
- (slice__) += ((subslice__) == 0)) \
- for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \
- (BIT(subslice__) & instdone_subslice_mask(dev_priv__)))
-
-struct intel_instdone {
- u32 instdone;
- /* The following exist only in the RCS engine */
- u32 slice_common;
- u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
- u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
-};
-
-struct intel_engine_hangcheck {
- u64 acthd;
- u32 seqno;
- unsigned long action_timestamp;
- struct intel_instdone instdone;
-};
-
-struct intel_ring {
- struct i915_vma *vma;
- void *vaddr;
-
- struct i915_timeline *timeline;
- struct list_head request_list;
- struct list_head active_link;
-
- u32 head;
- u32 tail;
- u32 emit;
-
- u32 space;
- u32 size;
- u32 effective_size;
-};
-
-struct i915_gem_context;
-struct drm_i915_reg_table;
-
-/*
- * we use a single page to load ctx workarounds so all of these
- * values are referred in terms of dwords
- *
- * struct i915_wa_ctx_bb:
- * offset: specifies batch starting position, also helpful in case
- * if we want to have multiple batches at different offsets based on
- * some criteria. It is not a requirement at the moment but provides
- * an option for future use.
- * size: size of the batch in DWORDS
- */
-struct i915_ctx_workarounds {
- struct i915_wa_ctx_bb {
- u32 offset;
- u32 size;
- } indirect_ctx, per_ctx;
- struct i915_vma *vma;
-};
-
-struct i915_request;
-
-#define I915_MAX_VCS 4
-#define I915_MAX_VECS 2
-
-/*
- * Engine IDs definitions.
- * Keep instances of the same type engine together.
- */
-enum intel_engine_id {
- RCS = 0,
- BCS,
- VCS,
- VCS2,
- VCS3,
- VCS4,
-#define _VCS(n) (VCS + (n))
- VECS,
- VECS2
-#define _VECS(n) (VECS + (n))
-};
-
-struct i915_priolist {
- struct list_head requests[I915_PRIORITY_COUNT];
- struct rb_node node;
- unsigned long used;
- int priority;
-};
-
-#define priolist_for_each_request(it, plist, idx) \
- for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
- list_for_each_entry(it, &(plist)->requests[idx], sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist, idx) \
- for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
- list_for_each_entry_safe(it, n, \
- &(plist)->requests[idx - 1], \
- sched.link)
-
-struct st_preempt_hang {
- struct completion completion;
- unsigned int count;
- bool inject_hang;
-};
-
-/**
- * struct intel_engine_execlists - execlist submission queue and port state
- *
- * The struct intel_engine_execlists represents the combined logical state of
- * driver and the hardware state for execlist mode of submission.
- */
-struct intel_engine_execlists {
- /**
- * @tasklet: softirq tasklet for bottom handler
- */
- struct tasklet_struct tasklet;
-
- /**
- * @default_priolist: priority list for I915_PRIORITY_NORMAL
- */
- struct i915_priolist default_priolist;
-
- /**
- * @no_priolist: priority lists disabled
- */
- bool no_priolist;
-
- /**
- * @submit_reg: gen-specific execlist submission register
- * set to the ExecList Submission Port (elsp) register pre-Gen11 and to
- * the ExecList Submission Queue Contents register array for Gen11+
- */
- u32 __iomem *submit_reg;
-
- /**
- * @ctrl_reg: the enhanced execlists control register, used to load the
- * submit queue on the HW and to request preemptions to idle
- */
- u32 __iomem *ctrl_reg;
-
- /**
- * @port: execlist port states
- *
- * For each hardware ELSP (ExecList Submission Port) we keep
- * track of the last request and the number of times we submitted
- * that port to hw. We then count the number of times the hw reports
- * a context completion or preemption. As only one context can
- * be active on hw, we limit resubmission of context to port[0]. This
- * is called Lite Restore, of the context.
- */
- struct execlist_port {
- /**
- * @request_count: combined request and submission count
- */
- struct i915_request *request_count;
-#define EXECLIST_COUNT_BITS 2
-#define port_request(p) ptr_mask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_count(p) ptr_unmask_bits((p)->request_count, EXECLIST_COUNT_BITS)
-#define port_pack(rq, count) ptr_pack_bits(rq, count, EXECLIST_COUNT_BITS)
-#define port_unpack(p, count) ptr_unpack_bits((p)->request_count, count, EXECLIST_COUNT_BITS)
-#define port_set(p, packed) ((p)->request_count = (packed))
-#define port_isset(p) ((p)->request_count)
-#define port_index(p, execlists) ((p) - (execlists)->port)
-
- /**
- * @context_id: context ID for port
- */
- GEM_DEBUG_DECL(u32 context_id);
-
-#define EXECLIST_MAX_PORTS 2
- } port[EXECLIST_MAX_PORTS];
-
- /**
- * @active: is the HW active? We consider the HW as active after
- * submitting any context for execution and until we have seen the
- * last context completion event. After that, we do not expect any
- * more events until we submit, and so can park the HW.
- *
- * As we have a small number of different sources from which we feed
- * the HW, we track the state of each inside a single bitfield.
- */
- unsigned int active;
-#define EXECLISTS_ACTIVE_USER 0
-#define EXECLISTS_ACTIVE_PREEMPT 1
-#define EXECLISTS_ACTIVE_HWACK 2
-
- /**
- * @port_mask: number of execlist ports - 1
- */
- unsigned int port_mask;
-
- /**
- * @queue_priority_hint: Highest pending priority.
- *
- * When we add requests into the queue, or adjust the priority of
- * executing requests, we compute the maximum priority of those
- * pending requests. We can then use this value to determine if
- * we need to preempt the executing requests to service the queue.
- * However, since the we may have recorded the priority of an inflight
- * request we wanted to preempt but since completed, at the time of
- * dequeuing the priority hint may no longer may match the highest
- * available request priority.
- */
- int queue_priority_hint;
-
- /**
- * @queue: queue of requests, in priority lists
- */
- struct rb_root_cached queue;
-
- /**
- * @csb_write: control register for Context Switch buffer
- *
- * Note this register may be either mmio or HWSP shadow.
- */
- u32 *csb_write;
-
- /**
- * @csb_status: status array for Context Switch buffer
- *
- * Note these register may be either mmio or HWSP shadow.
- */
- u32 *csb_status;
-
- /**
- * @preempt_complete_status: expected CSB upon completing preemption
- */
- u32 preempt_complete_status;
-
- /**
- * @csb_head: context status buffer head
- */
- u8 csb_head;
-
- I915_SELFTEST_DECLARE(struct st_preempt_hang preempt_hang;)
-};
-
-#define INTEL_ENGINE_CS_MAX_NAME 8
-
-struct intel_engine_cs {
- struct drm_i915_private *i915;
- char name[INTEL_ENGINE_CS_MAX_NAME];
-
- enum intel_engine_id id;
- unsigned int hw_id;
- unsigned int guc_id;
-
- u8 uabi_id;
- u8 uabi_class;
-
- u8 class;
- u8 instance;
- u32 context_size;
- u32 mmio_base;
-
- struct intel_ring *buffer;
-
- struct i915_timeline timeline;
-
- struct drm_i915_gem_object *default_state;
- void *pinned_default_state;
-
- /* Rather than have every client wait upon all user interrupts,
- * with the herd waking after every interrupt and each doing the
- * heavyweight seqno dance, we delegate the task (of being the
- * bottom-half of the user interrupt) to the first client. After
- * every interrupt, we wake up one client, who does the heavyweight
- * coherent seqno read and either goes back to sleep (if incomplete),
- * or wakes up all the completed clients in parallel, before then
- * transferring the bottom-half status to the next client in the queue.
- *
- * Compared to walking the entire list of waiters in a single dedicated
- * bottom-half, we reduce the latency of the first waiter by avoiding
- * a context switch, but incur additional coherent seqno reads when
- * following the chain of request breadcrumbs. Since it is most likely
- * that we have a single client waiting on each seqno, then reducing
- * the overhead of waking that client is much preferred.
- */
- struct intel_breadcrumbs {
- spinlock_t irq_lock;
- struct list_head signalers;
-
- struct irq_work irq_work; /* for use from inside irq_lock */
-
- unsigned int irq_enabled;
-
- bool irq_armed;
- } breadcrumbs;
-
- struct {
- /**
- * @enable: Bitmask of enable sample events on this engine.
- *
- * Bits correspond to sample event types, for instance
- * I915_SAMPLE_QUEUED is bit 0 etc.
- */
- u32 enable;
- /**
- * @enable_count: Reference count for the enabled samplers.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT];
- /**
- * @sample: Counter values for sampling events.
- *
- * Our internal timer stores the current counters in this field.
- *
- * Index number corresponds to @enum drm_i915_pmu_engine_sample.
- */
- struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT];
- } pmu;
-
- /*
- * A pool of objects to use as shadow copies of client batch buffers
- * when the command parser is enabled. Prevents the client from
- * modifying the batch contents after software parsing.
- */
- struct i915_gem_batch_pool batch_pool;
-
- struct intel_hw_status_page status_page;
- struct i915_ctx_workarounds wa_ctx;
- struct i915_wa_list ctx_wa_list;
- struct i915_wa_list wa_list;
- struct i915_wa_list whitelist;
-
- u32 irq_keep_mask; /* always keep these interrupts */
- u32 irq_enable_mask; /* bitmask to enable ring interrupt */
- void (*irq_enable)(struct intel_engine_cs *engine);
- void (*irq_disable)(struct intel_engine_cs *engine);
-
- int (*init_hw)(struct intel_engine_cs *engine);
-
- struct {
- void (*prepare)(struct intel_engine_cs *engine);
- void (*reset)(struct intel_engine_cs *engine, bool stalled);
- void (*finish)(struct intel_engine_cs *engine);
- } reset;
-
- void (*park)(struct intel_engine_cs *engine);
- void (*unpark)(struct intel_engine_cs *engine);
-
- void (*set_default_submission)(struct intel_engine_cs *engine);
-
- struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
-
- int (*request_alloc)(struct i915_request *rq);
- int (*init_context)(struct i915_request *rq);
-
- int (*emit_flush)(struct i915_request *request, u32 mode);
-#define EMIT_INVALIDATE BIT(0)
-#define EMIT_FLUSH BIT(1)
-#define EMIT_BARRIER (EMIT_INVALIDATE | EMIT_FLUSH)
- int (*emit_bb_start)(struct i915_request *rq,
- u64 offset, u32 length,
- unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE BIT(0)
-#define I915_DISPATCH_PINNED BIT(1)
- int (*emit_init_breadcrumb)(struct i915_request *rq);
- u32 *(*emit_fini_breadcrumb)(struct i915_request *rq,
- u32 *cs);
- unsigned int emit_fini_breadcrumb_dw;
-
- /* Pass the request to the hardware queue (e.g. directly into
- * the legacy ringbuffer or to the end of an execlist).
- *
- * This is called from an atomic context with irqs disabled; must
- * be irq safe.
- */
- void (*submit_request)(struct i915_request *rq);
-
- /*
- * Call when the priority on a request has changed and it and its
- * dependencies may need rescheduling. Note the request itself may
- * not be ready to run!
- */
- void (*schedule)(struct i915_request *request,
- const struct i915_sched_attr *attr);
-
- /*
- * Cancel all requests on the hardware, or queued for execution.
- * This should only cancel the ready requests that have been
- * submitted to the engine (via the engine->submit_request callback).
- * This is called when marking the device as wedged.
- */
- void (*cancel_requests)(struct intel_engine_cs *engine);
-
- void (*cleanup)(struct intel_engine_cs *engine);
-
- struct intel_engine_execlists execlists;
-
- /* Contexts are pinned whilst they are active on the GPU. The last
- * context executed remains active whilst the GPU is idle - the
- * switch away and write to the context object only occurs on the
- * next execution. Contexts are only unpinned on retirement of the
- * following request ensuring that we can always write to the object
- * on the context switch even after idling. Across suspend, we switch
- * to the kernel context and trash it as the save may not happen
- * before the hardware is powered down.
- */
- struct intel_context *last_retired_context;
-
- /* status_notifier: list of callbacks for context-switch changes */
- struct atomic_notifier_head context_status_notifier;
-
- struct intel_engine_hangcheck hangcheck;
-
-#define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
-#define I915_ENGINE_SUPPORTS_STATS BIT(1)
-#define I915_ENGINE_HAS_PREEMPTION BIT(2)
- unsigned int flags;
-
- /*
- * Table of commands the command parser needs to know about
- * for this engine.
- */
- DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
-
- /*
- * Table of registers allowed in commands that read/write registers.
- */
- const struct drm_i915_reg_table *reg_tables;
- int reg_table_count;
-
- /*
- * Returns the bitmask for the length field of the specified command.
- * Return 0 for an unrecognized/invalid command.
- *
- * If the command parser finds an entry for a command in the engine's
- * cmd_tables, it gets the command's length based on the table entry.
- * If not, it calls this function to determine the per-engine length
- * field encoding for the command (i.e. different opcode ranges use
- * certain bits to encode the command length in the header).
- */
- u32 (*get_cmd_length_mask)(u32 cmd_header);
-
- struct {
- /**
- * @lock: Lock protecting the below fields.
- */
- seqlock_t lock;
- /**
- * @enabled: Reference count indicating number of listeners.
- */
- unsigned int enabled;
- /**
- * @active: Number of contexts currently scheduled in.
- */
- unsigned int active;
- /**
- * @enabled_at: Timestamp when busy stats were enabled.
- */
- ktime_t enabled_at;
- /**
- * @start: Timestamp of the last idle to active transition.
- *
- * Idle is defined as active == 0, active is active > 0.
- */
- ktime_t start;
- /**
- * @total: Total time this engine was busy.
- *
- * Accumulated time not counting the most recent block in cases
- * where engine is currently busy (active > 0).
- */
- ktime_t total;
- } stats;
-};
-
-static inline bool
-intel_engine_needs_cmd_parser(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_NEEDS_CMD_PARSER;
-}
-
-static inline bool
-intel_engine_supports_stats(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_SUPPORTS_STATS;
-}
-
-static inline bool
-intel_engine_has_preemption(const struct intel_engine_cs *engine)
-{
- return engine->flags & I915_ENGINE_HAS_PREEMPTION;
-}
+void intel_engines_set_scheduler_caps(struct drm_i915_private *i915);
static inline bool __execlists_need_preempt(int prio, int last)
{
@@ -650,7 +165,7 @@ void execlists_user_end(struct intel_engine_execlists *execlists);
void
execlists_cancel_port_requests(struct intel_engine_execlists * const execlists);
-void
+struct i915_request *
execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
static inline unsigned int
@@ -674,12 +189,6 @@ execlists_port_complete(struct intel_engine_execlists * const execlists,
return port;
}
-static inline unsigned int
-intel_engine_flag(const struct intel_engine_cs *engine)
-{
- return BIT(engine->id);
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -722,10 +231,10 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
*
* The area from dword 0x30 to 0x3ff is available for driver usage.
*/
-#define I915_GEM_HWS_INDEX 0x30
-#define I915_GEM_HWS_INDEX_ADDR (I915_GEM_HWS_INDEX * sizeof(u32))
#define I915_GEM_HWS_PREEMPT 0x32
#define I915_GEM_HWS_PREEMPT_ADDR (I915_GEM_HWS_PREEMPT * sizeof(u32))
+#define I915_GEM_HWS_HANGCHECK 0x34
+#define I915_GEM_HWS_HANGCHECK_ADDR (I915_GEM_HWS_HANGCHECK * sizeof(u32))
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
@@ -743,13 +252,22 @@ int intel_ring_pin(struct intel_ring *ring);
void intel_ring_reset(struct intel_ring *ring, u32 tail);
unsigned int intel_ring_update_space(struct intel_ring *ring);
void intel_ring_unpin(struct intel_ring *ring);
-void intel_ring_free(struct intel_ring *ring);
+void intel_ring_free(struct kref *ref);
+
+static inline struct intel_ring *intel_ring_get(struct intel_ring *ring)
+{
+ kref_get(&ring->ref);
+ return ring;
+}
+
+static inline void intel_ring_put(struct intel_ring *ring)
+{
+ kref_put(&ring->ref, intel_ring_free);
+}
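A brief usage sketch of the new reference counting, assuming struct intel_ring now embeds the kref named ref that these helpers operate on; the example_* helper names below are illustrative only.

static struct intel_ring *example_hold_ring(struct intel_ring *ring)
{
	/* Pin our own pointer to the ring by taking a reference. */
	return intel_ring_get(ring);
}

static void example_release_ring(struct intel_ring *ring)
{
	/* Drop it again; the final put invokes intel_ring_free(). */
	intel_ring_put(ring);
}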
void intel_engine_stop(struct intel_engine_cs *engine);
void intel_engine_cleanup(struct intel_engine_cs *engine);
-void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-
int __must_check intel_ring_cacheline_align(struct i915_request *rq);
u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
@@ -844,8 +362,6 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-void intel_engine_write_global_seqno(struct intel_engine_cs *engine, u32 seqno);
-
int intel_engine_setup_common(struct intel_engine_cs *engine);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
@@ -863,44 +379,6 @@ void intel_engine_set_hwsp_writemask(struct intel_engine_cs *engine, u32 mask);
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine);
u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine);
-static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
-{
- /*
- * We are only peeking at the tail of the submit queue (and not the
- * queue itself) in order to gain a hint as to the current active
- * state of the engine. Callers are not expected to be taking
- * engine->timeline->lock, nor are they expected to be concerned
- * wtih serialising this hint with anything, so document it as
- * a hint and nothing more.
- */
- return READ_ONCE(engine->timeline.seqno);
-}
-
-static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
-{
- return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
-}
-
-static inline bool intel_engine_signaled(struct intel_engine_cs *engine,
- u32 seqno)
-{
- return i915_seqno_passed(intel_engine_get_seqno(engine), seqno);
-}
-
-static inline bool intel_engine_has_completed(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno);
-}
-
-static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
- u32 seqno)
-{
- GEM_BUG_ON(!seqno);
- return intel_engine_signaled(engine, seqno - 1);
-}
-
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -910,7 +388,7 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_pin_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_unpin_breadcrumbs_irq(struct intel_engine_cs *engine);
-bool intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
static inline void
@@ -919,7 +397,7 @@ intel_engine_queue_breadcrumbs(struct intel_engine_cs *engine)
irq_work_queue(&engine->breadcrumbs.irq_work);
}
-bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
+void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine);
void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
@@ -960,14 +438,14 @@ gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
}
static inline u32 *
-gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset)
+gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
GEM_BUG_ON(gtt_offset & (1 << 5));
/* Offset should be aligned to 8 bytes for both (QW/DW) write types */
GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8));
- *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW | flags;
*cs++ = gtt_offset | MI_FLUSH_DW_USE_GTT;
*cs++ = 0;
*cs++ = value;
@@ -983,11 +461,11 @@ static inline void intel_engine_reset(struct intel_engine_cs *engine,
}
void intel_engines_sanitize(struct drm_i915_private *i915, bool force);
+void intel_gt_resume(struct drm_i915_private *i915);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
-bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
void intel_engine_lost_context(struct intel_engine_cs *engine);
void intel_engines_park(struct drm_i915_private *i915);
@@ -1066,6 +544,9 @@ void intel_disable_engine_stats(struct intel_engine_cs *engine);
ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine);
+struct i915_request *
+intel_engine_find_active_request(struct intel_engine_cs *engine);
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
@@ -1086,4 +567,17 @@ static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
#endif
+static inline u32
+intel_engine_next_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return engine->hangcheck.next_seqno =
+ next_pseudo_random32(engine->hangcheck.next_seqno);
+}
+
+static inline u32
+intel_engine_get_hangcheck_seqno(struct intel_engine_cs *engine)
+{
+ return intel_read_status_page(engine, I915_GEM_HWS_HANGCHECK);
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
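A hedged sketch of how the new hangcheck seqno pair might be consumed: the engine is expected to write the pseudo-random value into the I915_GEM_HWS_HANGCHECK slot of its status page, so a caller can compare what it reads back against the value it sampled on the previous tick. The helper name and the progress criterion below are assumptions, not code from this patch.

static bool example_engine_made_progress(struct intel_engine_cs *engine,
					 u32 last_sampled)
{
	/* A changed HWS_HANGCHECK value means the breadcrumb advanced. */
	return intel_engine_get_hangcheck_seqno(engine) != last_sampled;
}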
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 20c4434474e3..6150e35bf7b5 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -32,6 +32,10 @@
#include <drm/drm_print.h>
#include "i915_drv.h"
+#include "intel_cdclk.h"
+#include "intel_crt.h"
+#include "intel_csr.h"
+#include "intel_dp.h"
#include "intel_drv.h"
/**
@@ -147,7 +151,7 @@ static void cancel_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -183,7 +187,7 @@ __print_intel_runtime_pm_wakeref(struct drm_printer *p,
unsigned long i;
char *buf;
- buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
if (!buf)
return;
@@ -267,7 +271,9 @@ void print_intel_runtime_pm_wakeref(struct drm_i915_private *i915,
if (dbg.count <= alloc)
break;
- s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+ s = krealloc(dbg.owners,
+ dbg.count * sizeof(*s),
+ GFP_NOWAIT | __GFP_NOWARN);
if (!s)
goto out;
@@ -554,7 +560,7 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
int pw_idx = power_well->desc->hsw.idx;
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
- WARN_ON(intel_wait_for_register(dev_priv,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore,
regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx),
HSW_PWR_WELL_CTL_STATE(pw_idx),
@@ -609,7 +615,7 @@ static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+ WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
SKL_FUSE_PG_DIST_STATUS(pg),
SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
@@ -1510,7 +1516,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
* The PHY may be busy with some initial calibration and whatnot,
* so the power state can take a while to actually change.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
phy_status_mask,
phy_status,
@@ -1545,7 +1551,7 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, true);
/* Poll for phypwrgood signal */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy),
PHY_POWERGOOD(phy),
@@ -1749,7 +1755,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
- state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+ state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
/*
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
@@ -1761,7 +1767,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* A transient state at this point would mean some unexpected party
* is poking at the power controls too.
*/
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
WARN_ON(ctrl << 16 != state);
mutex_unlock(&dev_priv->pcu_lock);
@@ -1782,20 +1788,20 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
mutex_lock(&dev_priv->pcu_lock);
#define COND \
- ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+ ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
if (COND)
goto out;
- ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
ctrl &= ~DP_SSC_MASK(pipe);
ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
DRM_ERROR("timeout setting power well state %08x (%08x)\n",
state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -3431,7 +3437,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
* The enabling order will be from lower to higher indexed wells,
* the disabling order is reversed.
*/
- if (IS_ICELAKE(dev_priv)) {
+ if (IS_GEN(dev_priv, 11)) {
err = set_power_wells(power_domains, icl_power_wells);
} else if (IS_CANNONLAKE(dev_priv)) {
err = set_power_wells(power_domains, cnl_power_wells);
@@ -3565,7 +3571,11 @@ static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
!(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power enable timeout\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
+ /*
+ * FIXME: for now pretend that we only have 1 slice, see
+ * intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
@@ -3580,7 +3590,11 @@ static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
DRM_ERROR("DBuf power disable timeout!\n");
else
- dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
+ /*
+ * FIXME: for now pretend that the first slice is always
+ * enabled, see intel_enabled_dbuf_slices_num().
+ */
+ dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@ -3641,7 +3655,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- skl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3658,7 +3672,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- skl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -3703,7 +3717,7 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- bxt_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -3720,7 +3734,7 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- bxt_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -3762,7 +3776,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- cnl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -3784,7 +3798,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- cnl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3826,7 +3840,7 @@ void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 5. Enable CDCLK. */
- icl_init_cdclk(dev_priv);
+ intel_cdclk_init(dev_priv);
/* 6. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -3851,7 +3865,7 @@ void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- icl_uninit_cdclk(dev_priv);
+ intel_cdclk_uninit(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -3982,6 +3996,36 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
cmn->desc->ops->disable(dev_priv, cmn);
}
+static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+{
+ bool ret;
+
+ mutex_lock(&dev_priv->pcu_lock);
+ ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+ mutex_unlock(&dev_priv->pcu_lock);
+
+ return ret;
+}
+
+static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+{
+ WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
+}
+
+static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+{
+ static const struct pci_device_id isp_ids[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+ {}
+ };
+
+ WARN(!pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
+}
+
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
/**
@@ -4006,7 +4050,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- if (IS_ICELAKE(i915)) {
+ if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
cnl_display_core_init(i915, resume);
@@ -4018,10 +4062,13 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
mutex_lock(&power_domains->lock);
chv_phy_control_init(i915);
mutex_unlock(&power_domains->lock);
+ assert_isp_power_gated(i915);
} else if (IS_VALLEYVIEW(i915)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(i915);
mutex_unlock(&power_domains->lock);
+ assert_ved_power_gated(i915);
+ assert_isp_power_gated(i915);
} else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
}
@@ -4151,7 +4198,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915,
intel_power_domains_verify_state(i915);
}
- if (IS_ICELAKE(i915))
+ if (INTEL_GEN(i915) >= 11)
icl_display_core_uninit(i915);
else if (IS_CANNONLAKE(i915))
cnl_display_core_uninit(i915);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index e7b0884ba5a5..0e3d91d9ef13 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -25,16 +25,23 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-#include <linux/i2c.h>
-#include <linux/slab.h>
+
#include <linux/delay.h>
#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_hdmi.h"
+#include "intel_panel.h"
+#include "intel_sdvo.h"
#include "intel_sdvo_regs.h"
#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
@@ -978,34 +985,109 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
-static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static ssize_t intel_sdvo_read_infoframe(struct intel_sdvo *intel_sdvo,
+ unsigned int if_index,
+ u8 *data, unsigned int length)
{
+ u8 set_buf_index[2] = { if_index, 0 };
+ u8 hbuf_size, tx_rate, av_split;
+ int i;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_AV_SPLIT,
+ &av_split, 1))
+ return -ENXIO;
+
+ if (av_split < if_index)
+ return 0;
+
+ if (!intel_sdvo_get_value(intel_sdvo,
+ SDVO_CMD_GET_HBUF_TXRATE,
+ &tx_rate, 1))
+ return -ENXIO;
+
+ if (tx_rate == SDVO_HBUF_TX_DISABLED)
+ return 0;
+
+ if (!intel_sdvo_set_value(intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return -ENXIO;
+
+ if (!intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HBUF_INFO,
+ &hbuf_size, 1))
+ return -ENXIO;
+
+ /* Buffer size is 0 based, hooray! */
+ hbuf_size++;
+
+ DRM_DEBUG_KMS("reading sdvo hbuf: %i, hbuf_size %i, hbuf_size: %i\n",
+ if_index, length, hbuf_size);
+
+ hbuf_size = min_t(unsigned int, length, hbuf_size);
+
+ for (i = 0; i < hbuf_size; i += 8) {
+ if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HBUF_DATA, NULL, 0))
+ return -ENXIO;
+ if (!intel_sdvo_read_response(intel_sdvo, &data[i],
+ min_t(unsigned int, 8, hbuf_size - i)))
+ return -ENXIO;
+ }
+
+ return hbuf_size;
+}
+
+static bool intel_sdvo_compute_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
const struct drm_display_mode *adjusted_mode =
- &pipe_config->base.adjusted_mode;
- u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
- union hdmi_infoframe frame;
+ &crtc_state->base.adjusted_mode;
int ret;
- ssize_t len;
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ if (!crtc_state->has_hdmi_sink)
+ return true;
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(frame,
conn_state->connector,
adjusted_mode);
- if (ret < 0) {
- DRM_ERROR("couldn't fill AVI infoframe\n");
+ if (ret)
return false;
- }
- drm_hdmi_avi_infoframe_quant_range(&frame.avi,
+ drm_hdmi_avi_infoframe_quant_range(frame,
conn_state->connector,
adjusted_mode,
- pipe_config->limited_color_range ?
+ crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
- len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
- if (len < 0)
+ ret = hdmi_avi_infoframe_check(frame);
+ if (WARN_ON(ret))
+ return false;
+
+ return true;
+}
+
+static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ const struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ const union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+
+ if ((crtc_state->infoframes.enable &
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI)) == 0)
+ return true;
+
+ if (WARN_ON(frame->any.type != HDMI_INFOFRAME_TYPE_AVI))
+ return false;
+
+ len = hdmi_infoframe_pack_only(frame, sdvo_data, sizeof(sdvo_data));
+ if (WARN_ON(len < 0))
return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
@@ -1013,6 +1095,40 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
sdvo_data, sizeof(sdvo_data));
}
+static void intel_sdvo_get_avi_infoframe(struct intel_sdvo *intel_sdvo,
+ struct intel_crtc_state *crtc_state)
+{
+ u8 sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ union hdmi_infoframe *frame = &crtc_state->infoframes.avi;
+ ssize_t len;
+ int ret;
+
+ if (!crtc_state->has_hdmi_sink)
+ return;
+
+ len = intel_sdvo_read_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
+ sdvo_data, sizeof(sdvo_data));
+ if (len < 0) {
+ DRM_DEBUG_KMS("failed to read AVI infoframe\n");
+ return;
+ } else if (len == 0) {
+ return;
+ }
+
+ crtc_state->infoframes.enable |=
+ intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+
+ ret = hdmi_infoframe_unpack(frame, sdvo_data, sizeof(sdvo_data));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to unpack AVI infoframe\n");
+ return;
+ }
+
+ if (frame->any.type != HDMI_INFOFRAME_TYPE_AVI)
+ DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+ frame->any.type, HDMI_INFOFRAME_TYPE_AVI);
+}
+
static bool intel_sdvo_set_tv_format(struct intel_sdvo *intel_sdvo,
const struct drm_connector_state *conn_state)
{
@@ -1193,6 +1309,12 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
if (intel_sdvo_connector->is_hdmi)
adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio;
+ if (!intel_sdvo_compute_avi_infoframe(intel_sdvo,
+ pipe_config, conn_state)) {
+ DRM_DEBUG_KMS("bad AVI infoframe\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1315,8 +1437,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
- intel_sdvo_set_avi_infoframe(intel_sdvo,
- crtc_state, conn_state);
+ intel_sdvo_set_avi_infoframe(intel_sdvo, crtc_state);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1507,6 +1628,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
+ WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1519,9 +1644,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->has_hdmi_sink = true;
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
}
static void intel_disable_sdvo(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_sdvo.h b/drivers/gpu/drm/i915/intel_sdvo.h
new file mode 100644
index 000000000000..c9e05bcdd141
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sdvo.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SDVO_H__
+#define __INTEL_SDVO_H__
+
+#include <linux/types.h>
+
+#include <drm/i915_drm.h>
+
+#include "i915_reg.h"
+
+struct drm_i915_private;
+enum pipe;
+
+bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
+ i915_reg_t sdvo_reg, enum pipe *pipe);
+bool intel_sdvo_init(struct drm_i915_private *dev_priv,
+ i915_reg_t reg, enum port port);
+
+#endif /* __INTEL_SDVO_H__ */
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 75c872bb8cc9..57de41b1f989 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
@@ -63,7 +63,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
5)) {
DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
@@ -208,7 +208,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -224,7 +224,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
@@ -248,7 +248,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT, SBI_BUSY, 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
@@ -264,7 +264,7 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
SBI_CTL_STAT,
SBI_BUSY,
0,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index b56a1a9ad01d..2913e89280d7 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -29,17 +29,36 @@
* registers; newer ones are much simpler and we can use the new DRM plane
* support.
*/
+
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
-#include <drm/drm_rect.h>
-#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
+#include <drm/drm_rect.h>
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
-#include <drm/drm_color_mgmt.h>
+#include "intel_atomic_plane.h"
+#include "intel_drv.h"
+#include "intel_frontbuffer.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sprite.h"
+
+bool is_planar_yuv_format(u32 pixelformat)
+{
+ switch (pixelformat) {
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
int usecs)
@@ -256,7 +275,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->base.fb;
struct drm_rect *src = &plane_state->base.src;
- u32 src_x, src_y, src_w, src_h;
+ u32 src_x, src_y, src_w, src_h, hsub, vsub;
+ bool rotated = drm_rotation_90_or_270(plane_state->base.rotation);
/*
* Hardware doesn't handle subpixel coordinates.
@@ -274,18 +294,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
src->y1 = src_y << 16;
src->y2 = (src_y + src_h) << 16;
- if (fb->format->is_yuv &&
- (src_x & 1 || src_w & 1)) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of 2 for YUV planes\n",
- src_x, src_w);
+ if (!fb->format->is_yuv)
+ return 0;
+
+ /* YUV specific checks */
+ if (!rotated) {
+ hsub = fb->format->hsub;
+ vsub = fb->format->vsub;
+ } else {
+ hsub = vsub = max(fb->format->hsub, fb->format->vsub);
+ }
+
+ if (src_x % hsub || src_w % hsub) {
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_x, src_w, hsub, rotated ? "rotated " : "");
return -EINVAL;
}
- if (fb->format->is_yuv &&
- fb->format->num_planes > 1 &&
- (src_y & 1 || src_h & 1)) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of 2 for planar YUV planes\n",
- src_y, src_h);
+ if (src_y % vsub || src_h % vsub) {
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
+ src_y, src_h, vsub, rotated ? "rotated " : "");
return -EINVAL;
}
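As a worked illustration of the relaxed check above (a sketch, not part of the patch): NV12 has hsub = vsub = 2, so a 90°/270° rotation forces all four of src x/y/w/h to be even, while packed YUYV (hsub = 2, vsub = 1) unrotated only constrains x and w.

static bool example_yuv_src_aligned(u32 src_x, u32 src_y, u32 src_w, u32 src_h,
				    u32 hsub, u32 vsub, bool rotated)
{
	/* Rotated planes use the worst-case subsampling in both directions. */
	if (rotated)
		hsub = vsub = max(hsub, vsub);

	return !(src_x % hsub || src_w % hsub ||
		 src_y % vsub || src_h % vsub);
}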
@@ -335,8 +363,8 @@ skl_program_scaler(struct intel_plane *plane,
0, INT_MAX);
/* TODO: handle sub-pixel coordinates */
- if (plane_state->base.fb->format->format == DRM_FORMAT_NV12 &&
- !icl_is_hdr_plane(plane)) {
+ if (is_planar_yuv_format(plane_state->base.fb->format->format) &&
+ !icl_is_hdr_plane(dev_priv, plane->id)) {
y_hphase = skl_scaler_calc_phase(1, hscale, false);
y_vphase = skl_scaler_calc_phase(1, vscale, false);
@@ -518,7 +546,7 @@ skl_program_plane(struct intel_plane *plane,
I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id),
(plane_state->color_plane[1].offset - surf_addr) | aux_stride);
- if (icl_is_hdr_plane(plane)) {
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
u32 cus_ctl = 0;
if (linked) {
@@ -542,7 +570,7 @@ skl_program_plane(struct intel_plane *plane,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
- if (fb->format->is_yuv && icl_is_hdr_plane(plane))
+ if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
@@ -609,6 +637,9 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (icl_is_hdr_plane(dev_priv, plane_id))
+ I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+
skl_write_plane_wm(plane, crtc_state);
I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
@@ -741,7 +772,12 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return SP_GAMMA_ENABLE;
+ u32 sprctl = 0;
+
+ if (crtc_state->gamma_enable)
+ sprctl |= SP_GAMMA_ENABLE;
+
+ return sprctl;
}
static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -916,12 +952,12 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
u32 sprctl = 0;
- sprctl |= SPRITE_GAMMA_ENABLE;
+ if (crtc_state->gamma_enable)
+ sprctl |= SPRITE_GAMMA_ENABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ if (crtc_state->csc_enable)
sprctl |= SPRITE_PIPE_CSC_ENABLE;
return sprctl;
@@ -1107,7 +1143,15 @@ g4x_sprite_max_stride(struct intel_plane *plane,
static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state)
{
- return DVS_GAMMA_ENABLE;
+ u32 dvscntr = 0;
+
+ if (crtc_state->gamma_enable)
+ dvscntr |= DVS_GAMMA_ENABLE;
+
+ if (crtc_state->csc_enable)
+ dvscntr |= DVS_PIPE_CSC_ENABLE;
+
+ return dvscntr;
}
static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state,
@@ -1482,8 +1526,6 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
/*
* 90/270 is not allowed with RGB64 16:16:16:16 and
* Indexed 8-bit. RGB 16-bit 5:6:5 is allowed gen11 onwards.
- * TBD: Add RGB64 case once its added in supported format
- * list.
*/
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
@@ -1491,6 +1533,15 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
break;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
drm_get_format_name(fb->format->format,
&format_name));
@@ -1551,10 +1602,10 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s
int src_w = drm_rect_width(&plane_state->base.src) >> 16;
/* Display WA #1106 */
- if (fb->format->format == DRM_FORMAT_NV12 && src_w & 3 &&
+ if (is_planar_yuv_format(fb->format->format) && src_w & 3 &&
(rotation == DRM_MODE_ROTATE_270 ||
rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) {
- DRM_DEBUG_KMS("src width must be multiple of 4 for rotated NV12\n");
+ DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n");
return -EINVAL;
}
@@ -1790,6 +1841,52 @@ static const u32 skl_plane_formats[] = {
DRM_FORMAT_VYUY,
};
+static const u32 icl_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_plane_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u32 skl_planar_formats[] = {
DRM_FORMAT_C8,
DRM_FORMAT_RGB565,
@@ -1806,6 +1903,79 @@ static const u32 skl_planar_formats[] = {
DRM_FORMAT_NV12,
};
+static const u32 glk_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+};
+
+static const u32 icl_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
+static const u32 icl_hdr_planar_formats[] = {
+ DRM_FORMAT_C8,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB2101010,
+ DRM_FORMAT_XBGR2101010,
+ DRM_FORMAT_XRGB16161616F,
+ DRM_FORMAT_XBGR16161616F,
+ DRM_FORMAT_ARGB16161616F,
+ DRM_FORMAT_ABGR16161616F,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P012,
+ DRM_FORMAT_P016,
+ DRM_FORMAT_Y210,
+ DRM_FORMAT_Y212,
+ DRM_FORMAT_Y216,
+ DRM_FORMAT_XVYU2101010,
+ DRM_FORMAT_XVYU12_16161616,
+ DRM_FORMAT_XVYU16161616,
+};
+
static const u64 skl_plane_format_modifiers_noccs[] = {
I915_FORMAT_MOD_Yf_TILED,
I915_FORMAT_MOD_Y_TILED,
@@ -1945,10 +2115,23 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane,
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ case DRM_FORMAT_XVYU2101010:
if (modifier == I915_FORMAT_MOD_Yf_TILED)
return true;
/* fall through */
case DRM_FORMAT_C8:
+ case DRM_FORMAT_XBGR16161616F:
+ case DRM_FORMAT_ABGR16161616F:
+ case DRM_FORMAT_XRGB16161616F:
+ case DRM_FORMAT_ARGB16161616F:
+ case DRM_FORMAT_Y210:
+ case DRM_FORMAT_Y212:
+ case DRM_FORMAT_Y216:
+ case DRM_FORMAT_XVYU12_16161616:
+ case DRM_FORMAT_XVYU16161616:
if (modifier == DRM_FORMAT_MOD_LINEAR ||
modifier == I915_FORMAT_MOD_X_TILED ||
modifier == I915_FORMAT_MOD_Y_TILED)
@@ -2085,8 +2268,25 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
plane->update_slave = icl_update_slave;
if (skl_plane_has_planar(dev_priv, pipe, plane_id)) {
- formats = skl_planar_formats;
- num_formats = ARRAY_SIZE(skl_planar_formats);
+ if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_planar_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_planar_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_planar_formats;
+ num_formats = ARRAY_SIZE(icl_planar_formats);
+ } else if (INTEL_GEN(dev_priv) == 10 || IS_GEMINILAKE(dev_priv)) {
+ formats = glk_planar_formats;
+ num_formats = ARRAY_SIZE(glk_planar_formats);
+ } else {
+ formats = skl_planar_formats;
+ num_formats = ARRAY_SIZE(skl_planar_formats);
+ }
+ } else if (icl_is_hdr_plane(dev_priv, plane_id)) {
+ formats = icl_hdr_plane_formats;
+ num_formats = ARRAY_SIZE(icl_hdr_plane_formats);
+ } else if (INTEL_GEN(dev_priv) >= 11) {
+ formats = icl_plane_formats;
+ num_formats = ARRAY_SIZE(icl_plane_formats);
} else {
formats = skl_plane_formats;
num_formats = ARRAY_SIZE(skl_plane_formats);
diff --git a/drivers/gpu/drm/i915/intel_sprite.h b/drivers/gpu/drm/i915/intel_sprite.h
new file mode 100644
index 000000000000..84be8686be16
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_SPRITE_H__
+#define __INTEL_SPRITE_H__
+
+#include <linux/types.h>
+
+#include "i915_drv.h"
+#include "intel_display.h"
+
+struct drm_device;
+struct drm_display_mode;
+struct drm_file;
+struct drm_i915_private;
+struct intel_crtc_state;
+struct intel_plane_state;
+
+bool is_planar_yuv_format(u32 pixelformat);
+int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
+ int usecs);
+struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, int plane);
+int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state);
+void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state);
+int intel_plane_check_stride(const struct intel_plane_state *plane_state);
+int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state);
+int chv_plane_check_rotation(const struct intel_plane_state *plane_state);
+struct intel_plane *
+skl_universal_plane_create(struct drm_i915_private *dev_priv,
+ enum pipe pipe, enum plane_id plane_id);
+
+static inline bool icl_is_nv12_y_plane(enum plane_id id)
+{
+ /* Don't need to do a gen check; these planes are only available on gen11 */
+ if (id == PLANE_SPRITE4 || id == PLANE_SPRITE5)
+ return true;
+
+ return false;
+}
+
+static inline bool icl_is_hdr_plane(struct drm_i915_private *dev_priv,
+ enum plane_id plane_id)
+{
+ if (INTEL_GEN(dev_priv) < 11)
+ return false;
+
+ return plane_id < PLANE_SPRITE2;
+}
+
+#endif /* __INTEL_SPRITE_H__ */
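A small hedged sketch of the two new predicates in use; the function and the chosen formats are illustrative only and not part of this header.

static void example_plane_format_checks(struct drm_i915_private *dev_priv)
{
	/* P010 carries a Y plane plus an interleaved CbCr plane: planar. */
	bool planar = is_planar_yuv_format(DRM_FORMAT_P010);
	/* YUYV is packed, so the helper reports false. */
	bool packed = is_planar_yuv_format(DRM_FORMAT_YUYV);
	/* On gen11, only planes below PLANE_SPRITE2 are HDR capable. */
	bool hdr = icl_is_hdr_plane(dev_priv, PLANE_PRIMARY);

	(void)planar;
	(void)packed;
	(void)hdr;
}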
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 3924c4944e1f..5dbba33f4202 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -33,9 +33,12 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include "intel_drv.h"
#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
+#include "intel_drv.h"
+#include "intel_tv.h"
enum tv_margin {
TV_MARGIN_LEFT, TV_MARGIN_TOP,
diff --git a/drivers/gpu/drm/i915/intel_tv.h b/drivers/gpu/drm/i915/intel_tv.h
new file mode 100644
index 000000000000..44518575ec5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_tv.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_TV_H__
+#define __INTEL_TV_H__
+
+struct drm_i915_private;
+
+void intel_tv_init(struct drm_i915_private *dev_priv);
+
+#endif /* __INTEL_TV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index e711eb3268bc..25b80ffe71ad 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -332,8 +332,6 @@ void intel_uc_sanitize(struct drm_i915_private *i915)
GEM_BUG_ON(!HAS_GUC(i915));
- guc_disable_communication(guc);
-
intel_huc_sanitize(huc);
intel_guc_sanitize(guc);
@@ -377,7 +375,7 @@ int intel_uc_init_hw(struct drm_i915_private *i915)
intel_guc_init_params(guc);
ret = intel_guc_fw_upload(guc);
- if (ret == 0 || ret != -ETIMEDOUT)
+ if (ret == 0)
break;
DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
@@ -451,6 +449,23 @@ void intel_uc_fini_hw(struct drm_i915_private *i915)
guc_disable_communication(guc);
}
+/**
+ * intel_uc_reset_prepare - Prepare for reset
+ * @i915: device private
+ *
+ * Prepare for a full GPU reset.
+ */
+void intel_uc_reset_prepare(struct drm_i915_private *i915)
+{
+ struct intel_guc *guc = &i915->guc;
+
+ if (!USES_GUC(i915))
+ return;
+
+ guc_disable_communication(guc);
+ intel_uc_sanitize(i915);
+}
+
int intel_uc_suspend(struct drm_i915_private *i915)
{
struct intel_guc *guc = &i915->guc;
@@ -468,7 +483,7 @@ int intel_uc_suspend(struct drm_i915_private *i915)
return err;
}
- gen9_disable_guc_interrupts(i915);
+ guc_disable_communication(guc);
return 0;
}
@@ -484,7 +499,7 @@ int intel_uc_resume(struct drm_i915_private *i915)
if (guc->fw.load_status != INTEL_UC_FIRMWARE_SUCCESS)
return 0;
- gen9_enable_guc_interrupts(i915);
+ guc_enable_communication(guc);
err = intel_guc_resume(guc);
if (err) {
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index 870faf9011b9..c14729786652 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -38,6 +38,7 @@ int intel_uc_init_hw(struct drm_i915_private *dev_priv);
void intel_uc_fini_hw(struct drm_i915_private *dev_priv);
int intel_uc_init(struct drm_i915_private *dev_priv);
void intel_uc_fini(struct drm_i915_private *dev_priv);
+void intel_uc_reset_prepare(struct drm_i915_private *i915);
int intel_uc_suspend(struct drm_i915_private *dev_priv);
int intel_uc_resume(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 75646a1e0051..d1d51e1121e2 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -21,17 +21,18 @@
* IN THE SOFTWARE.
*/
+#include <linux/pm_runtime.h>
+#include <asm/iosf_mbi.h>
+
#include "i915_drv.h"
-#include "intel_drv.h"
#include "i915_vgpu.h"
-
-#include <asm/iosf_mbi.h>
-#include <linux/pm_runtime.h>
+#include "intel_drv.h"
+#include "intel_pm.h"
#define FORCEWAKE_ACK_TIMEOUT_MS 50
#define GT_FIFO_TIMEOUT_MS 10
-#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32((dev_priv__), (reg__))
+#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
static const char * const forcewake_domain_names[] = {
"render",
@@ -58,16 +59,20 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
return "unknown";
}
+#define fw_ack(d) readl((d)->reg_ack)
+#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+
static inline void
-fw_domain_reset(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
{
/*
* We don't really know if the powerwell for the forcewake domain we are
* trying to reset here does exist at this point (engines could be fused
* off in ICL+), so no waiting for acks
*/
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_reset);
+ /* WaRsClearFWBitsAtReset:bdw,skl */
+ fw_clear(d, 0xffff);
}
static inline void
@@ -81,36 +86,32 @@ fw_domain_arm_timer(struct intel_uncore_forcewake_domain *d)
}
static inline int
-__wait_for_ack(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+__wait_for_ack(const struct intel_uncore_forcewake_domain *d,
const u32 ack,
const u32 value)
{
- return wait_for_atomic((__raw_i915_read32(i915, d->reg_ack) & ack) == value,
+ return wait_for_atomic((fw_ack(d) & ack) == value,
FORCEWAKE_ACK_TIMEOUT_MS);
}
static inline int
-wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_clear(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, 0);
+ return __wait_for_ack(d, ack, 0);
}
static inline int
-wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+wait_ack_set(const struct intel_uncore_forcewake_domain *d,
const u32 ack)
{
- return __wait_for_ack(i915, d, ack, ack);
+ return __wait_for_ack(d, ack, ack);
}
static inline void
-fw_domain_wait_ack_clear(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_clear(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_clear(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack to clear.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
@@ -121,8 +122,7 @@ enum ack_type {
};
static int
-fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d,
+fw_domain_wait_ack_with_fallback(const struct intel_uncore_forcewake_domain *d,
const enum ack_type type)
{
const u32 ack_bit = FORCEWAKE_KERNEL;
@@ -146,129 +146,122 @@ fw_domain_wait_ack_with_fallback(const struct drm_i915_private *i915,
pass = 1;
do {
- wait_ack_clear(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_clear(d, FORCEWAKE_KERNEL_FALLBACK);
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_set(d, FORCEWAKE_KERNEL_FALLBACK);
/* Give gt some time to relax before the polling frenzy */
udelay(10 * pass);
- wait_ack_set(i915, d, FORCEWAKE_KERNEL_FALLBACK);
+ wait_ack_set(d, FORCEWAKE_KERNEL_FALLBACK);
- ack_detected = (__raw_i915_read32(i915, d->reg_ack) & ack_bit) == value;
+ ack_detected = (fw_ack(d) & ack_bit) == value;
- __raw_i915_write32(i915, d->reg_set,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL_FALLBACK));
+ fw_clear(d, FORCEWAKE_KERNEL_FALLBACK);
} while (!ack_detected && pass++ < 10);
DRM_DEBUG_DRIVER("%s had to use fallback to %s ack, 0x%x (passes %u)\n",
intel_uncore_forcewake_domain_to_str(d->id),
type == ACK_SET ? "set" : "clear",
- __raw_i915_read32(i915, d->reg_ack),
+ fw_ack(d),
pass);
return ack_detected ? 0 : -ETIMEDOUT;
}
static inline void
-fw_domain_wait_ack_clear_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_clear_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_clear(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_clear(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_CLEAR))
- fw_domain_wait_ack_clear(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_CLEAR))
+ fw_domain_wait_ack_clear(d);
}
static inline void
-fw_domain_get(struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_get(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_set);
+ fw_set(d, FORCEWAKE_KERNEL);
}
static inline void
-fw_domain_wait_ack_set(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set(const struct intel_uncore_forcewake_domain *d)
{
- if (wait_ack_set(i915, d, FORCEWAKE_KERNEL))
+ if (wait_ack_set(d, FORCEWAKE_KERNEL))
DRM_ERROR("%s: timed out waiting for forcewake ack request.\n",
intel_uncore_forcewake_domain_to_str(d->id));
}
static inline void
-fw_domain_wait_ack_set_fallback(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_wait_ack_set_fallback(const struct intel_uncore_forcewake_domain *d)
{
- if (likely(!wait_ack_set(i915, d, FORCEWAKE_KERNEL)))
+ if (likely(!wait_ack_set(d, FORCEWAKE_KERNEL)))
return;
- if (fw_domain_wait_ack_with_fallback(i915, d, ACK_SET))
- fw_domain_wait_ack_set(i915, d);
+ if (fw_domain_wait_ack_with_fallback(d, ACK_SET))
+ fw_domain_wait_ack_set(d);
}
static inline void
-fw_domain_put(const struct drm_i915_private *i915,
- const struct intel_uncore_forcewake_domain *d)
+fw_domain_put(const struct intel_uncore_forcewake_domain *d)
{
- __raw_i915_write32(i915, d->reg_set, i915->uncore.fw_clear);
+ fw_clear(d, FORCEWAKE_KERNEL);
}
static void
-fw_domains_get(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_get_with_fallback(struct drm_i915_private *i915,
+fw_domains_get_with_fallback(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp) {
- fw_domain_wait_ack_clear_fallback(i915, d);
- fw_domain_get(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp) {
+ fw_domain_wait_ack_clear_fallback(d);
+ fw_domain_get(d);
}
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_wait_ack_set_fallback(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_wait_ack_set_fallback(d);
- i915->uncore.fw_domains_active |= fw_domains;
+ uncore->fw_domains_active |= fw_domains;
}
static void
-fw_domains_put(struct drm_i915_private *i915, enum forcewake_domains fw_domains)
+fw_domains_put(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_put(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_put(d);
- i915->uncore.fw_domains_active &= ~fw_domains;
+ uncore->fw_domains_active &= ~fw_domains;
}
static void
-fw_domains_reset(struct drm_i915_private *i915,
+fw_domains_reset(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
@@ -277,61 +270,61 @@ fw_domains_reset(struct drm_i915_private *i915,
if (!fw_domains)
return;
- GEM_BUG_ON(fw_domains & ~i915->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(d, fw_domains, i915, tmp)
- fw_domain_reset(i915, d);
+ for_each_fw_domain_masked(d, fw_domains, uncore, tmp)
+ fw_domain_reset(d);
}
-static inline u32 gt_thread_status(struct drm_i915_private *dev_priv)
+static inline u32 gt_thread_status(struct intel_uncore *uncore)
{
u32 val;
- val = __raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG);
+ val = __raw_uncore_read32(uncore, GEN6_GT_THREAD_STATUS_REG);
val &= GEN6_GT_THREAD_STATUS_CORE_MASK;
return val;
}
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
{
/*
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(dev_priv) == 0, 5000),
+ WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
"GT thread status wait timed out\n");
}
-static void fw_domains_get_with_thread_status(struct drm_i915_private *dev_priv,
+static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- fw_domains_get(dev_priv, fw_domains);
+ fw_domains_get(uncore, fw_domains);
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
+ __gen6_gt_wait_for_thread_c0(uncore);
}
-static inline u32 fifo_free_entries(struct drm_i915_private *dev_priv)
+static inline u32 fifo_free_entries(struct intel_uncore *uncore)
{
- u32 count = __raw_i915_read32(dev_priv, GTFIFOCTL);
+ u32 count = __raw_uncore_read32(uncore, GTFIFOCTL);
return count & GT_FIFO_FREE_ENTRIES_MASK;
}
-static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+static void __gen6_gt_wait_for_fifo(struct intel_uncore *uncore)
{
u32 n;
/* On VLV, FIFO will be shared by both SW and HW.
 * So, we need to read the FREE_ENTRIES every time */
- if (IS_VALLEYVIEW(dev_priv))
- n = fifo_free_entries(dev_priv);
+ if (IS_VALLEYVIEW(uncore_to_i915(uncore)))
+ n = fifo_free_entries(uncore);
else
- n = dev_priv->uncore.fifo_count;
+ n = uncore->fifo_count;
if (n <= GT_FIFO_NUM_RESERVED_ENTRIES) {
- if (wait_for_atomic((n = fifo_free_entries(dev_priv)) >
+ if (wait_for_atomic((n = fifo_free_entries(uncore)) >
GT_FIFO_NUM_RESERVED_ENTRIES,
GT_FIFO_TIMEOUT_MS)) {
DRM_DEBUG("GT_FIFO timeout, entries: %u\n", n);
@@ -339,7 +332,7 @@ static void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
}
}
- dev_priv->uncore.fifo_count = n - 1;
+ uncore->fifo_count = n - 1;
}
static enum hrtimer_restart
@@ -347,30 +340,29 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
{
struct intel_uncore_forcewake_domain *domain =
container_of(timer, struct intel_uncore_forcewake_domain, timer);
- struct drm_i915_private *dev_priv =
- container_of(domain, struct drm_i915_private, uncore.fw_domain[domain->id]);
+ struct intel_uncore *uncore = forcewake_domain_to_uncore(domain);
unsigned long irqflags;
- assert_rpm_device_not_suspended(dev_priv);
+ assert_rpm_device_not_suspended(uncore->rpm);
if (xchg(&domain->active, false))
return HRTIMER_RESTART;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
if (--domain->wake_count == 0)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
+ uncore->funcs.force_wake_put(uncore, domain->mask);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return HRTIMER_NORESTART;
}
/* Note callers must have acquired the PUNIT->PMIC bus, before calling this. */
static unsigned int
-intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
+intel_uncore_forcewake_reset(struct intel_uncore *uncore)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -388,7 +380,7 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
active_domains = 0;
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
smp_store_mb(domain->active, false);
if (hrtimer_cancel(&domain->timer) == 0)
continue;
@@ -396,9 +388,9 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
intel_uncore_fw_release_timer(&domain->timer);
}
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
- for_each_fw_domain(domain, dev_priv, tmp) {
+ for_each_fw_domain(domain, uncore, tmp) {
if (hrtimer_active(&domain->timer))
active_domains |= domain->mask;
}
@@ -411,185 +403,134 @@ intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv)
break;
}
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
cond_resched();
}
WARN_ON(active_domains);
- fw = dev_priv->uncore.fw_domains_active;
+ fw = uncore->fw_domains_active;
if (fw)
- dev_priv->uncore.funcs.force_wake_put(dev_priv, fw);
+ uncore->funcs.force_wake_put(uncore, fw);
- fw_domains_reset(dev_priv, dev_priv->uncore.fw_domains);
- assert_forcewakes_inactive(dev_priv);
+ fw_domains_reset(uncore, uncore->fw_domains);
+ assert_forcewakes_inactive(uncore);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
return fw; /* track the lost user forcewake domains */
}
-static u64 gen9_edram_size(struct drm_i915_private *dev_priv)
-{
- const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- const unsigned int sets[4] = { 1, 1, 2, 2 };
- const u32 cap = dev_priv->edram_cap;
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)] *
- 1024 * 1024;
-}
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv)
-{
- if (!HAS_EDRAM(dev_priv))
- return 0;
-
- /* The needed capability bits for size calculation
- * are not there with pre gen9 so return 128MB always.
- */
- if (INTEL_GEN(dev_priv) < 9)
- return 128 * 1024 * 1024;
-
- return gen9_edram_size(dev_priv);
-}
-
-static void intel_uncore_edram_detect(struct drm_i915_private *dev_priv)
-{
- if (IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9) {
- dev_priv->edram_cap = __raw_i915_read32(dev_priv,
- HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we do not have gt funcs
- * set up */
- } else {
- dev_priv->edram_cap = 0;
- }
-
- if (HAS_EDRAM(dev_priv))
- DRM_INFO("Found %lluMB of eDRAM\n",
- intel_uncore_edram_size(dev_priv) / (1024 * 1024));
-}
-
static bool
-fpga_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+fpga_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 dbg;
- dbg = __raw_i915_read32(dev_priv, FPGA_DBG);
+ dbg = __raw_uncore_read32(uncore, FPGA_DBG);
if (likely(!(dbg & FPGA_DBG_RM_NOCLAIM)))
return false;
- __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ __raw_uncore_write32(uncore, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
return true;
}
static bool
-vlv_check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+vlv_check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
u32 cer;
- cer = __raw_i915_read32(dev_priv, CLAIM_ER);
+ cer = __raw_uncore_read32(uncore, CLAIM_ER);
if (likely(!(cer & (CLAIM_ER_OVERFLOW | CLAIM_ER_CTR_MASK))))
return false;
- __raw_i915_write32(dev_priv, CLAIM_ER, CLAIM_ER_CLR);
+ __raw_uncore_write32(uncore, CLAIM_ER, CLAIM_ER_CLR);
return true;
}
static bool
-gen6_check_for_fifo_debug(struct drm_i915_private *dev_priv)
+gen6_check_for_fifo_debug(struct intel_uncore *uncore)
{
u32 fifodbg;
- fifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ fifodbg = __raw_uncore_read32(uncore, GTFIFODBG);
if (unlikely(fifodbg)) {
DRM_DEBUG_DRIVER("GTFIFODBG = 0x08%x\n", fifodbg);
- __raw_i915_write32(dev_priv, GTFIFODBG, fifodbg);
+ __raw_uncore_write32(uncore, GTFIFODBG, fifodbg);
}
return fifodbg;
}
static bool
-check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
+check_for_unclaimed_mmio(struct intel_uncore *uncore)
{
bool ret = false;
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv))
- ret |= fpga_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_fpga_dbg_unclaimed(uncore))
+ ret |= fpga_check_for_unclaimed_mmio(uncore);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret |= vlv_check_for_unclaimed_mmio(dev_priv);
+ if (intel_uncore_has_dbg_unclaimed(uncore))
+ ret |= vlv_check_for_unclaimed_mmio(uncore);
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- ret |= gen6_check_for_fifo_debug(dev_priv);
+ if (intel_uncore_has_fifo(uncore))
+ ret |= gen6_check_for_fifo_debug(uncore);
return ret;
}
-static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
+static void __intel_uncore_early_sanitize(struct intel_uncore *uncore,
unsigned int restore_forcewake)
{
/* clear out unclaimed reg detection bit */
- if (check_for_unclaimed_mmio(dev_priv))
+ if (check_for_unclaimed_mmio(uncore))
DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
/* WaDisableShadowRegForCpd:chv */
- if (IS_CHERRYVIEW(dev_priv)) {
- __raw_i915_write32(dev_priv, GTFIFOCTL,
- __raw_i915_read32(dev_priv, GTFIFOCTL) |
- GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
- GT_FIFO_CTL_RC6_POLICY_STALL);
+ if (IS_CHERRYVIEW(uncore_to_i915(uncore))) {
+ __raw_uncore_write32(uncore, GTFIFOCTL,
+ __raw_uncore_read32(uncore, GTFIFOCTL) |
+ GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+ GT_FIFO_CTL_RC6_POLICY_STALL);
}
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
- spin_lock_irq(&dev_priv->uncore.lock);
- dev_priv->uncore.funcs.force_wake_get(dev_priv,
- restore_forcewake);
-
- if (IS_GEN_RANGE(dev_priv, 6, 7))
- dev_priv->uncore.fifo_count =
- fifo_free_entries(dev_priv);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ uncore->funcs.force_wake_get(uncore, restore_forcewake);
+
+ if (intel_uncore_has_fifo(uncore))
+ uncore->fifo_count = fifo_free_entries(uncore);
+ spin_unlock_irq(&uncore->lock);
}
iosf_mbi_punit_release();
}
-void intel_uncore_suspend(struct drm_i915_private *dev_priv)
+void intel_uncore_suspend(struct intel_uncore *uncore)
{
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- dev_priv->uncore.fw_domains_saved =
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ uncore->fw_domains_saved = intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
}
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+void intel_uncore_resume_early(struct intel_uncore *uncore)
{
unsigned int restore_forcewake;
- restore_forcewake = fetch_and_zero(&dev_priv->uncore.fw_domains_saved);
- __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ restore_forcewake = fetch_and_zero(&uncore->fw_domains_saved);
+ __intel_uncore_early_sanitize(uncore, restore_forcewake);
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
- i915_check_and_clear_faults(dev_priv);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+void intel_uncore_runtime_resume(struct intel_uncore *uncore)
{
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
@@ -598,15 +539,15 @@ void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
intel_sanitize_gt_powersave(dev_priv);
}
-static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (domain->wake_count++) {
fw_domains &= ~domain->mask;
domain->active = true;
@@ -614,12 +555,12 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
}
if (fw_domains)
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
/**
* intel_uncore_forcewake_get - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
* This function can be used to get GT's forcewake domain references.
@@ -630,100 +571,100 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
* call to intel_uncore_forcewake_put(). Usually the caller wants all the domains
* to be kept awake so the @fw_domains would be then FORCEWAKE_ALL.
*/
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
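
A hedged sketch of the refactored get/put pairing around a raw register read; the wrapper function and the choice of GEN6_GT_THREAD_STATUS_REG are illustrative assumptions:

static u32 read_gt_thread_status(struct intel_uncore *uncore)
{
	u32 val;

	/* Illustrative only: keep the render well awake across the _fw read. */
	intel_uncore_forcewake_get(uncore, FORCEWAKE_RENDER);
	val = intel_uncore_read_fw(uncore, GEN6_GT_THREAD_STATUS_REG);
	intel_uncore_forcewake_put(uncore, FORCEWAKE_RENDER);

	return val;
}
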
/**
* intel_uncore_forcewake_user_get - claim forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function is a wrapper around intel_uncore_forcewake_get() to acquire
* the GT powerwell and in the process disable our debugging for the
* duration of userspace's bypass.
*/
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!dev_priv->uncore.user_forcewake.count++) {
- intel_uncore_forcewake_get__locked(dev_priv, FORCEWAKE_ALL);
+ spin_lock_irq(&uncore->lock);
+ if (!uncore->user_forcewake.count++) {
+ intel_uncore_forcewake_get__locked(uncore, FORCEWAKE_ALL);
/* Save and disable mmio debugging for the user bypass */
- dev_priv->uncore.user_forcewake.saved_mmio_check =
- dev_priv->uncore.unclaimed_mmio_check;
- dev_priv->uncore.user_forcewake.saved_mmio_debug =
+ uncore->user_forcewake.saved_mmio_check =
+ uncore->unclaimed_mmio_check;
+ uncore->user_forcewake.saved_mmio_debug =
i915_modparams.mmio_debug;
- dev_priv->uncore.unclaimed_mmio_check = 0;
+ uncore->unclaimed_mmio_check = 0;
i915_modparams.mmio_debug = 0;
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
/**
* intel_uncore_forcewake_user_put - release forcewake on behalf of userspace
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
*
* This function complements intel_uncore_forcewake_user_get() and releases
* the GT powerwell taken on behalf of the userspace bypass.
*/
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv)
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore)
{
- spin_lock_irq(&dev_priv->uncore.lock);
- if (!--dev_priv->uncore.user_forcewake.count) {
- if (intel_uncore_unclaimed_mmio(dev_priv))
- dev_info(dev_priv->drm.dev,
+ spin_lock_irq(&uncore->lock);
+ if (!--uncore->user_forcewake.count) {
+ if (intel_uncore_unclaimed_mmio(uncore))
+ dev_info(uncore_to_i915(uncore)->drm.dev,
"Invalid mmio detected during user access\n");
- dev_priv->uncore.unclaimed_mmio_check =
- dev_priv->uncore.user_forcewake.saved_mmio_check;
+ uncore->unclaimed_mmio_check =
+ uncore->user_forcewake.saved_mmio_check;
i915_modparams.mmio_debug =
- dev_priv->uncore.user_forcewake.saved_mmio_debug;
+ uncore->user_forcewake.saved_mmio_debug;
- intel_uncore_forcewake_put__locked(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put__locked(uncore, FORCEWAKE_ALL);
}
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
}
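
A sketch of the intended user_get/user_put pairing around a userspace register bypass; the open/release helpers below are assumptions for illustration:

static int forcewake_user_open_example(struct intel_uncore *uncore)
{
	/* Illustrative: hold all domains and mute mmio debug for the bypass. */
	intel_uncore_forcewake_user_get(uncore);
	return 0;
}

static int forcewake_user_release_example(struct intel_uncore *uncore)
{
	intel_uncore_forcewake_user_put(uncore);
	return 0;
}
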
/**
* intel_uncore_forcewake_get__locked - grab forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to get reference on
*
* See intel_uncore_forcewake_get(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- __intel_uncore_forcewake_get(dev_priv, fw_domains);
+ __intel_uncore_forcewake_get(uncore, fw_domains);
}
-static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- fw_domains &= dev_priv->uncore.fw_domains;
+ fw_domains &= uncore->fw_domains;
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp) {
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
if (WARN_ON(domain->wake_count == 0))
continue;
@@ -738,66 +679,66 @@ static void __intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_put - release a forcewake domain reference
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to put references
*
* This function drops the device-level forcewakes for specified
* domains obtained by intel_uncore_forcewake_get().
*/
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
unsigned long irqflags;
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
}
/**
* intel_uncore_forcewake_put__locked - release forcewake domain references
- * @dev_priv: i915 device instance
+ * @uncore: the intel_uncore structure
* @fw_domains: forcewake domains to put references on
*
* See intel_uncore_forcewake_put(). This variant places the onus
* on the caller to explicitly handle the dev_priv->uncore.lock spinlock.
*/
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- lockdep_assert_held(&dev_priv->uncore.lock);
+ lockdep_assert_held(&uncore->lock);
- if (!dev_priv->uncore.funcs.force_wake_put)
+ if (!uncore->funcs.force_wake_put)
return;
- __intel_uncore_forcewake_put(dev_priv, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains);
}
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv)
+void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- WARN(dev_priv->uncore.fw_domains_active,
+ WARN(uncore->fw_domains_active,
"Expected all fw_domains to be inactive, but %08x are still on\n",
- dev_priv->uncore.fw_domains_active);
+ uncore->fw_domains_active);
}
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- if (!dev_priv->uncore.funcs.force_wake_get)
+ if (!uncore->funcs.force_wake_get)
return;
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
- fw_domains &= dev_priv->uncore.fw_domains;
- WARN(fw_domains & ~dev_priv->uncore.fw_domains_active,
+ fw_domains &= uncore->fw_domains;
+ WARN(fw_domains & ~uncore->fw_domains_active,
"Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~dev_priv->uncore.fw_domains_active);
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
}
/* We give fast paths for the really cool registers */
@@ -806,7 +747,7 @@ void assert_forcewakes_active(struct drm_i915_private *dev_priv,
#define GEN11_NEEDS_FORCE_WAKE(reg) \
((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000))
-#define __gen6_reg_read_fw_domains(offset) \
+#define __gen6_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset)) \
@@ -846,13 +787,13 @@ static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
})
static enum forcewake_domains
-find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
+find_fw_domain(struct intel_uncore *uncore, u32 offset)
{
const struct intel_forcewake_range *entry;
entry = BSEARCH(offset,
- dev_priv->uncore.fw_domains_table,
- dev_priv->uncore.fw_domains_table_entries,
+ uncore->fw_domains_table,
+ uncore->fw_domains_table_entries,
fw_range_cmp);
if (!entry)
@@ -864,11 +805,11 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
* translate it here to the list of available domains.
*/
if (entry->domains == FORCEWAKE_ALL)
- return dev_priv->uncore.fw_domains;
+ return uncore->fw_domains;
- WARN(entry->domains & ~dev_priv->uncore.fw_domains,
+ WARN(entry->domains & ~uncore->fw_domains,
"Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~dev_priv->uncore.fw_domains, offset);
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -892,19 +833,19 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x3ffff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_read_fw_domains(offset) \
+#define __fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_read_fw_domains(offset) \
+#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset))) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -956,7 +897,7 @@ static bool is_gen##x##_shadowed(u32 offset) \
__is_genX_shadowed(8)
__is_genX_shadowed(11)
-#define __gen8_reg_write_fw_domains(offset) \
+#define __gen8_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd; \
if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
@@ -986,19 +927,19 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
GEN_FW_RANGE(0x30000, 0x37fff, FORCEWAKE_MEDIA),
};
-#define __fwtable_reg_write_fw_domains(offset) \
+#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
-#define __gen11_fwtable_reg_write_fw_domains(offset) \
+#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \
- __fwd = find_fw_domain(dev_priv, offset); \
+ __fwd = find_fw_domain(uncore, offset); \
__fwd; \
})
@@ -1073,21 +1014,21 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = {
};
static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
+ilk_dummy_write(struct intel_uncore *uncore)
{
/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
* the chip from rc6 before touching it for real. MI_MODE is masked,
* hence harmless to write 0 into. */
- __raw_i915_write32(dev_priv, MI_MODE, 0);
+ __raw_uncore_write32(uncore, MI_MODE, 0);
}
static void
-__unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+__unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(dev_priv) && !before,
+ if (WARN(check_for_unclaimed_mmio(uncore) && !before,
"Unclaimed %s register 0x%x\n",
read ? "read from" : "write to",
i915_mmio_reg_offset(reg)))
@@ -1096,7 +1037,7 @@ __unclaimed_reg_debug(struct drm_i915_private *dev_priv,
}
static inline void
-unclaimed_reg_debug(struct drm_i915_private *dev_priv,
+unclaimed_reg_debug(struct intel_uncore *uncore,
const i915_reg_t reg,
const bool read,
const bool before)
@@ -1104,12 +1045,12 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
if (likely(!i915_modparams.mmio_debug))
return;
- __unclaimed_reg_debug(dev_priv, reg, read, before);
+ __unclaimed_reg_debug(uncore, reg, read, before);
}
#define GEN2_READ_HEADER(x) \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv);
+ __assert_rpm_wakelock_held(uncore->rpm);
#define GEN2_READ_FOOTER \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
@@ -1117,18 +1058,18 @@ unclaimed_reg_debug(struct drm_i915_private *dev_priv,
#define __gen2_read(x) \
static u##x \
-gen2_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen2_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
#define __gen5_read(x) \
static u##x \
-gen5_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+gen5_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
GEN2_READ_HEADER(x); \
- ilk_dummy_write(dev_priv); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ ilk_dummy_write(uncore); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN2_READ_FOOTER; \
}
@@ -1151,53 +1092,53 @@ __gen2_read(64)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
u##x val = 0; \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, true, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, true)
#define GEN6_READ_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, true, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, true, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags); \
trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
return val
-static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
+static noinline void ___force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- GEM_BUG_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ GEM_BUG_ON(fw_domains & ~uncore->fw_domains);
- for_each_fw_domain_masked(domain, fw_domains, dev_priv, tmp)
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
fw_domain_arm_timer(domain);
- dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
+ uncore->funcs.force_wake_get(uncore, fw_domains);
}
-static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
+static inline void __force_wake_auto(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
if (WARN_ON(!fw_domains))
return;
/* Turn on all requested but inactive supported forcewake domains. */
- fw_domains &= dev_priv->uncore.fw_domains;
- fw_domains &= ~dev_priv->uncore.fw_domains_active;
+ fw_domains &= uncore->fw_domains;
+ fw_domains &= ~uncore->fw_domains_active;
if (fw_domains)
- ___force_wake_auto(dev_priv, fw_domains);
+ ___force_wake_auto(uncore, fw_domains);
}
#define __gen_read(func, x) \
static u##x \
-func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __##func##_reg_read_fw_domains(offset); \
+ fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
+ __force_wake_auto(uncore, fw_engine); \
+ val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
#define __gen6_read(x) __gen_read(gen6, x)
@@ -1225,24 +1166,24 @@ __gen6_read(64)
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
+ __assert_rpm_wakelock_held(uncore->rpm); \
#define GEN2_WRITE_FOOTER
#define __gen2_write(x) \
static void \
-gen2_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen2_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
#define __gen5_write(x) \
static void \
-gen5_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen5_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN2_WRITE_HEADER; \
- ilk_dummy_write(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ ilk_dummy_write(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN2_WRITE_FOOTER; \
}
@@ -1263,33 +1204,33 @@ __gen2_write(32)
u32 offset = i915_mmio_reg_offset(reg); \
unsigned long irqflags; \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_wakelock_held(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
- unclaimed_reg_debug(dev_priv, reg, false, true)
+ __assert_rpm_wakelock_held(uncore->rpm); \
+ spin_lock_irqsave(&uncore->lock, irqflags); \
+ unclaimed_reg_debug(uncore, reg, false, true)
#define GEN6_WRITE_FOOTER \
- unclaimed_reg_debug(dev_priv, reg, false, false); \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
+ unclaimed_reg_debug(uncore, reg, false, false); \
+ spin_unlock_irqrestore(&uncore->lock, irqflags)
#define __gen6_write(x) \
static void \
-gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+gen6_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
GEN6_WRITE_HEADER; \
if (NEEDS_FORCE_WAKE(offset)) \
- __gen6_gt_wait_for_fifo(dev_priv); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __gen6_gt_wait_for_fifo(uncore); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen_write(func, x) \
static void \
-func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __##func##_reg_write_fw_domains(offset); \
+ fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
+ __force_wake_auto(uncore, fw_engine); \
+ __raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
#define __gen8_write(x) __gen_write(gen8, x)
@@ -1316,23 +1257,23 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define ASSIGN_WRITE_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_WRITE_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_writeb = x##_write8; \
- (i915)->uncore.funcs.mmio_writew = x##_write16; \
- (i915)->uncore.funcs.mmio_writel = x##_write32; \
+ (uncore)->funcs.mmio_writeb = x##_write8; \
+ (uncore)->funcs.mmio_writew = x##_write16; \
+ (uncore)->funcs.mmio_writel = x##_write32; \
} while (0)
-#define ASSIGN_READ_MMIO_VFUNCS(i915, x) \
+#define ASSIGN_READ_MMIO_VFUNCS(uncore, x) \
do { \
- (i915)->uncore.funcs.mmio_readb = x##_read8; \
- (i915)->uncore.funcs.mmio_readw = x##_read16; \
- (i915)->uncore.funcs.mmio_readl = x##_read32; \
- (i915)->uncore.funcs.mmio_readq = x##_read64; \
+ (uncore)->funcs.mmio_readb = x##_read8; \
+ (uncore)->funcs.mmio_readw = x##_read16; \
+ (uncore)->funcs.mmio_readl = x##_read32; \
+ (uncore)->funcs.mmio_readq = x##_read64; \
} while (0)
-static void fw_domain_init(struct drm_i915_private *dev_priv,
+static void fw_domain_init(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id,
i915_reg_t reg_set,
i915_reg_t reg_ack)
@@ -1342,7 +1283,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
@@ -1350,8 +1291,8 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
WARN_ON(!i915_mmio_reg_valid(reg_ack));
d->wake_count = 0;
- d->reg_set = reg_set;
- d->reg_ack = reg_ack;
+ d->reg_set = uncore->regs + i915_mmio_reg_offset(reg_set);
+ d->reg_ack = uncore->regs + i915_mmio_reg_offset(reg_ack);
d->id = domain_id;
@@ -1371,12 +1312,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
hrtimer_init(&d->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
d->timer.function = intel_uncore_fw_release_timer;
- dev_priv->uncore.fw_domains |= BIT(domain_id);
+ uncore->fw_domains |= BIT(domain_id);
- fw_domain_reset(dev_priv, d);
+ fw_domain_reset(d);
}
-static void fw_domain_fini(struct drm_i915_private *dev_priv,
+static void fw_domain_fini(struct intel_uncore *uncore,
enum forcewake_domain_id domain_id)
{
struct intel_uncore_forcewake_domain *d;
@@ -1384,85 +1325,76 @@ static void fw_domain_fini(struct drm_i915_private *dev_priv,
if (WARN_ON(domain_id >= FW_DOMAIN_ID_COUNT))
return;
- d = &dev_priv->uncore.fw_domain[domain_id];
+ d = &uncore->fw_domain[domain_id];
WARN_ON(d->wake_count);
WARN_ON(hrtimer_cancel(&d->timer));
memset(d, 0, sizeof(*d));
- dev_priv->uncore.fw_domains &= ~BIT(domain_id);
+ uncore->fw_domains &= ~BIT(domain_id);
}
-static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
+static void intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
- return;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
- if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.fw_reset = 0;
- dev_priv->uncore.fw_set = FORCEWAKE_KERNEL;
- dev_priv->uncore.fw_clear = 0;
- } else {
- /* WaRsClearFWBitsAtReset:bdw,skl */
- dev_priv->uncore.fw_reset = _MASKED_BIT_DISABLE(0xffff);
- dev_priv->uncore.fw_set = _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL);
- dev_priv->uncore.fw_clear = _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL);
- }
+ if (!intel_uncore_has_forcewake(uncore))
+ return;
- if (INTEL_GEN(dev_priv) >= 11) {
+ if (INTEL_GEN(i915) >= 11) {
int i;
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
for (i = 0; i < I915_MAX_VCS; i++) {
- if (!HAS_ENGINE(dev_priv, _VCS(i)))
+ if (!HAS_ENGINE(i915, _VCS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VDBOX0 + i,
FORCEWAKE_MEDIA_VDBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VDBOX_GEN11(i));
}
for (i = 0; i < I915_MAX_VECS; i++) {
- if (!HAS_ENGINE(dev_priv, _VECS(i)))
+ if (!HAS_ENGINE(i915, _VECS(i)))
continue;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA_VEBOX0 + i,
FORCEWAKE_MEDIA_VEBOX_GEN11(i),
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_fallback;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_BLITTER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_BLITTER,
FORCEWAKE_BLITTER_GEN9,
FORCEWAKE_ACK_BLITTER_GEN9);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ uncore->funcs.force_wake_get = fw_domains_get;
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
+ fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
- } else if (IS_IVYBRIDGE(dev_priv)) {
+ } else if (IS_IVYBRIDGE(i915)) {
u32 ecobus;
/* IVB configs may use multi-threaded forcewake */
@@ -1474,9 +1406,9 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
- dev_priv->uncore.funcs.force_wake_get =
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
+ uncore->funcs.force_wake_put = fw_domains_put;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1485,41 +1417,41 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
* before the ecobus check.
*/
- __raw_i915_write32(dev_priv, FORCEWAKE, 0);
- __raw_posting_read(dev_priv, ECOBUS);
+ __raw_uncore_write32(uncore, FORCEWAKE, 0);
+ __raw_posting_read(uncore, ECOBUS);
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_MT_ACK);
- spin_lock_irq(&dev_priv->uncore.lock);
- fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_RENDER);
- ecobus = __raw_i915_read32(dev_priv, ECOBUS);
- fw_domains_put(dev_priv, FORCEWAKE_RENDER);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
+ fw_domains_get_with_thread_status(uncore, FORCEWAKE_RENDER);
+ ecobus = __raw_uncore_read32(uncore, ECOBUS);
+ fw_domains_put(uncore, FORCEWAKE_RENDER);
+ spin_unlock_irq(&uncore->lock);
if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
DRM_INFO("when using vblank-synced partial screen updates.\n");
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
- } else if (IS_GEN(dev_priv, 6)) {
- dev_priv->uncore.funcs.force_wake_get =
+ } else if (IS_GEN(i915, 6)) {
+ uncore->funcs.force_wake_get =
fw_domains_get_with_thread_status;
- dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
- fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
+ uncore->funcs.force_wake_put = fw_domains_put;
+ fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
/* All future platforms are expected to require complex power gating */
- WARN_ON(dev_priv->uncore.fw_domains == 0);
+ WARN_ON(uncore->fw_domains == 0);
}
-#define ASSIGN_FW_DOMAINS_TABLE(d) \
+#define ASSIGN_FW_DOMAINS_TABLE(uncore, d) \
{ \
- dev_priv->uncore.fw_domains_table = \
+ (uncore)->fw_domains_table = \
(struct intel_forcewake_range *)(d); \
- dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
+ (uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
@@ -1544,66 +1476,132 @@ static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
* the access.
*/
disable_rpm_wakeref_asserts(dev_priv);
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
break;
}
return NOTIFY_OK;
}
-void intel_uncore_init(struct drm_i915_private *dev_priv)
+static int uncore_mmio_setup(struct intel_uncore *uncore)
{
- i915_check_vgpu(dev_priv);
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+ int mmio_bar;
+ int mmio_size;
- intel_uncore_edram_detect(dev_priv);
- intel_uncore_fw_domains_init(dev_priv);
- __intel_uncore_early_sanitize(dev_priv, 0);
+ mmio_bar = IS_GEN(i915, 2) ? 1 : 0;
+ /*
+ * Before gen4, the registers and the GTT are behind different BARs.
+ * However, from gen4 onwards, the registers and the GTT are shared
+ * in the same BAR, so we want to restrict this ioremap from
+ * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
+ * the register BAR remains the same size for all the earlier
+ * generations up to Ironlake.
+ */
+ if (INTEL_GEN(i915) < 5)
+ mmio_size = 512 * 1024;
+ else
+ mmio_size = 2 * 1024 * 1024;
+ uncore->regs = pci_iomap(pdev, mmio_bar, mmio_size);
+ if (uncore->regs == NULL) {
+ DRM_ERROR("failed to map registers\n");
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void uncore_mmio_cleanup(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ pci_iounmap(pdev, uncore->regs);
+}
+
+void intel_uncore_init_early(struct intel_uncore *uncore)
+{
+ spin_lock_init(&uncore->lock);
+}
+
+int intel_uncore_init_mmio(struct intel_uncore *uncore)
+{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+ int ret;
+
+ ret = uncore_mmio_setup(uncore);
+ if (ret)
+ return ret;
+
+ i915_check_vgpu(i915);
+
+ if (INTEL_GEN(i915) > 5 && !intel_vgpu_active(i915))
+ uncore->flags |= UNCORE_HAS_FORCEWAKE;
- dev_priv->uncore.unclaimed_mmio_check = 1;
- dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+ intel_uncore_fw_domains_init(uncore);
+ __intel_uncore_early_sanitize(uncore, 0);
+
+ uncore->unclaimed_mmio_check = 1;
+ uncore->pmic_bus_access_nb.notifier_call =
i915_pmic_bus_access_notifier;
- if (IS_GEN_RANGE(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen2);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen2);
- } else if (IS_GEN(dev_priv, 5)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen5);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen5);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen6);
-
- if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ uncore->rpm = &i915->runtime_pm;
+
+ if (!intel_uncore_has_forcewake(uncore)) {
+ if (IS_GEN(i915, 5)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen5);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen5);
+ } else {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen2);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen2);
+ }
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
+
+ if (IS_VALLEYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN(dev_priv, 8)) {
- if (IS_CHERRYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN(i915, 8)) {
+ if (IS_CHERRYVIEW(i915)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen8);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen6);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
- } else if (IS_GEN_RANGE(dev_priv, 9, 10)) {
- ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, fwtable);
+ } else if (IS_GEN_RANGE(i915, 9, 10)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else {
- ASSIGN_FW_DOMAINS_TABLE(__gen11_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(dev_priv, gen11_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(dev_priv, gen11_fwtable);
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
}
- iosf_mbi_register_pmic_bus_access_notifier(
- &dev_priv->uncore.pmic_bus_access_nb);
+ if (HAS_FPGA_DBG_UNCLAIMED(i915))
+ uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
+ uncore->flags |= UNCORE_HAS_DBG_UNCLAIMED;
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ uncore->flags |= UNCORE_HAS_FIFO;
+
+ iosf_mbi_register_pmic_bus_access_notifier(&uncore->pmic_bus_access_nb);
+
+ return 0;
}
/*
@@ -1611,45 +1609,48 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
* the forcewake domains. Prune them, to make sure they only reference existing
* engines.
*/
-void intel_uncore_prune(struct drm_i915_private *dev_priv)
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore)
{
- if (INTEL_GEN(dev_priv) >= 11) {
- enum forcewake_domains fw_domains = dev_priv->uncore.fw_domains;
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
+
+ if (INTEL_GEN(i915) >= 11) {
+ enum forcewake_domains fw_domains = uncore->fw_domains;
enum forcewake_domain_id domain_id;
int i;
for (i = 0; i < I915_MAX_VCS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VDBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VCS(i)))
+ if (HAS_ENGINE(i915, _VCS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
for (i = 0; i < I915_MAX_VECS; i++) {
domain_id = FW_DOMAIN_ID_MEDIA_VEBOX0 + i;
- if (HAS_ENGINE(dev_priv, _VECS(i)))
+ if (HAS_ENGINE(i915, _VECS(i)))
continue;
if (fw_domains & BIT(domain_id))
- fw_domain_fini(dev_priv, domain_id);
+ fw_domain_fini(uncore, domain_id);
}
}
}
-void intel_uncore_fini(struct drm_i915_private *dev_priv)
+void intel_uncore_fini_mmio(struct intel_uncore *uncore)
{
/* Paranoia: make sure we have disabled everything before we exit. */
- intel_uncore_sanitize(dev_priv);
+ intel_uncore_sanitize(uncore_to_i915(uncore));
iosf_mbi_punit_acquire();
iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
- &dev_priv->uncore.pmic_bus_access_nb);
- intel_uncore_forcewake_reset(dev_priv);
+ &uncore->pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
+ uncore_mmio_cleanup(uncore);
}
static const struct reg_whitelist {
@@ -1717,7 +1718,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
/**
* __intel_wait_for_register_fw - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1741,7 +1742,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
*
* Returns 0 if the register matches the desired condition, or -ETIMEOUT.
*/
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
@@ -1750,7 +1751,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
u32 *out_value)
{
u32 uninitialized_var(reg_value);
-#define done (((reg_value = I915_READ_FW(reg)) & mask) == value)
+#define done (((reg_value = intel_uncore_read_fw(uncore, reg)) & mask) == value)
int ret;
/* Catch any overuse of this function */
@@ -1772,7 +1773,7 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
/**
* __intel_wait_for_register - wait until register matches expected state
- * @dev_priv: the i915 device
+ * @uncore: the struct intel_uncore
* @reg: the register to read
* @mask: mask to apply to register value
* @value: expected value
@@ -1789,33 +1790,34 @@ int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
*
* Returns 0 if the register matches the desired condition, or -ETIMEOUT.
*/
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int fast_timeout_us,
- unsigned int slow_timeout_ms,
- u32 *out_value)
+int __intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int fast_timeout_us,
+ unsigned int slow_timeout_ms,
+ u32 *out_value)
{
unsigned fw =
- intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
+ intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
u32 reg_value;
int ret;
might_sleep_if(slow_timeout_ms);
- spin_lock_irq(&dev_priv->uncore.lock);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irq(&uncore->lock);
+ intel_uncore_forcewake_get__locked(uncore, fw);
- ret = __intel_wait_for_register_fw(dev_priv,
+ ret = __intel_wait_for_register_fw(uncore,
reg, mask, value,
fast_timeout_us, 0, &reg_value);
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irq(&dev_priv->uncore.lock);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irq(&uncore->lock);
if (ret && slow_timeout_ms)
- ret = __wait_for(reg_value = I915_READ_NOTRACE(reg),
+ ret = __wait_for(reg_value = intel_uncore_read_notrace(uncore,
+ reg),
(reg_value & mask) == value,
slow_timeout_ms * 1000, 10, 1000);
@@ -1828,82 +1830,90 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
return ret;
}
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv)
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore)
{
- return check_for_unclaimed_mmio(dev_priv);
+ return check_for_unclaimed_mmio(uncore);
}
bool
-intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv)
+intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
{
bool ret = false;
- spin_lock_irq(&dev_priv->uncore.lock);
+ spin_lock_irq(&uncore->lock);
- if (unlikely(dev_priv->uncore.unclaimed_mmio_check <= 0))
+ if (unlikely(uncore->unclaimed_mmio_check <= 0))
goto out;
- if (unlikely(intel_uncore_unclaimed_mmio(dev_priv))) {
+ if (unlikely(intel_uncore_unclaimed_mmio(uncore))) {
if (!i915_modparams.mmio_debug) {
DRM_DEBUG("Unclaimed register detected, "
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
i915_modparams.mmio_debug++;
}
- dev_priv->uncore.unclaimed_mmio_check--;
+ uncore->unclaimed_mmio_check--;
ret = true;
}
out:
- spin_unlock_irq(&dev_priv->uncore.lock);
+ spin_unlock_irq(&uncore->lock);
return ret;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_read_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv)) {
- fw_domains = __fwtable_reg_read_fw_domains(offset);
- } else if (INTEL_GEN(dev_priv) >= 6) {
- fw_domains = __gen6_reg_read_fw_domains(offset);
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915)) {
+ fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
+ } else if (INTEL_GEN(i915) >= 6) {
+ fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
static enum forcewake_domains
-intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
i915_reg_t reg)
{
+ struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains;
- if (INTEL_GEN(dev_priv) >= 11) {
- fw_domains = __gen11_fwtable_reg_write_fw_domains(offset);
- } else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) {
- fw_domains = __fwtable_reg_write_fw_domains(offset);
- } else if (IS_GEN(dev_priv, 8)) {
- fw_domains = __gen8_reg_write_fw_domains(offset);
- } else if (IS_GEN_RANGE(dev_priv, 6, 7)) {
+ if (INTEL_GEN(i915) >= 11) {
+ fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
+ fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN(i915, 8)) {
+ fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
+ } else if (IS_GEN_RANGE(i915, 6, 7)) {
fw_domains = FORCEWAKE_RENDER;
} else {
- WARN_ON(!IS_GEN_RANGE(dev_priv, 2, 5));
+ /* on devices with FW we expect to hit one of the above cases */
+ if (intel_uncore_has_forcewake(uncore))
+ MISSING_CASE(INTEL_GEN(i915));
+
fw_domains = 0;
}
- WARN_ON(fw_domains & ~dev_priv->uncore.fw_domains);
+ WARN_ON(fw_domains & ~uncore->fw_domains);
return fw_domains;
}
@@ -1911,7 +1921,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
/**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register
- * @dev_priv: pointer to struct drm_i915_private
+ * @uncore: pointer to struct intel_uncore
* @reg: register in question
* @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
*
@@ -1923,21 +1933,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
* callers to do FIFO management on their own or risk losing writes.
*/
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op)
{
enum forcewake_domains fw_domains = 0;
WARN_ON(!op);
- if (intel_vgpu_active(dev_priv))
+ if (!intel_uncore_has_forcewake(uncore))
return 0;
if (op & FW_REG_READ)
- fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg);
+ fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
if (op & FW_REG_WRITE)
- fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg);
+ fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
return fw_domains;
}
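
/*
 * Editor's note: a minimal sketch (not part of the patch) of the bulk-access
 * pattern the kerneldoc above describes with the reworked API: resolve the
 * forcewake domains for a register once, hold them across the raw accesses
 * under uncore->lock, exactly as wa_list_apply() does further down.
 * The function name is illustrative only.
 */
static u32 example_read_locked(struct intel_uncore *uncore, i915_reg_t reg)
{
	enum forcewake_domains fw =
		intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&uncore->lock, flags);
	intel_uncore_forcewake_get__locked(uncore, fw);

	/* raw, untraced access; forcewake is already held */
	val = intel_uncore_read_fw(uncore, reg);

	intel_uncore_forcewake_put__locked(uncore, fw);
	spin_unlock_irqrestore(&uncore->lock, flags);

	return val;
}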
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index e5e157d288de..d6af3de70121 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -28,10 +28,13 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
#include "i915_reg.h"
struct drm_i915_private;
+struct i915_runtime_pm;
+struct intel_uncore;
enum forcewake_domain_id {
FW_DOMAIN_ID_RENDER = 0,
@@ -62,25 +65,25 @@ enum forcewake_domains {
};
struct intel_uncore_funcs {
- void (*force_wake_get)(struct drm_i915_private *dev_priv,
+ void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- void (*force_wake_put)(struct drm_i915_private *dev_priv,
+ void (*force_wake_put)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- u8 (*mmio_readb)(struct drm_i915_private *dev_priv,
+ u8 (*mmio_readb)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u16 (*mmio_readw)(struct drm_i915_private *dev_priv,
+ u16 (*mmio_readw)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u32 (*mmio_readl)(struct drm_i915_private *dev_priv,
+ u32 (*mmio_readl)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- u64 (*mmio_readq)(struct drm_i915_private *dev_priv,
+ u64 (*mmio_readq)(struct intel_uncore *uncore,
i915_reg_t r, bool trace);
- void (*mmio_writeb)(struct drm_i915_private *dev_priv,
+ void (*mmio_writeb)(struct intel_uncore *uncore,
i915_reg_t r, u8 val, bool trace);
- void (*mmio_writew)(struct drm_i915_private *dev_priv,
+ void (*mmio_writew)(struct intel_uncore *uncore,
i915_reg_t r, u16 val, bool trace);
- void (*mmio_writel)(struct drm_i915_private *dev_priv,
+ void (*mmio_writel)(struct intel_uncore *uncore,
i915_reg_t r, u32 val, bool trace);
};
@@ -92,8 +95,18 @@ struct intel_forcewake_range {
};
struct intel_uncore {
+ void __iomem *regs;
+
+ struct i915_runtime_pm *rpm;
+
spinlock_t lock; /** lock is also taken in irq contexts. */
+ unsigned int flags;
+#define UNCORE_HAS_FORCEWAKE BIT(0)
+#define UNCORE_HAS_FPGA_DBG_UNCLAIMED BIT(1)
+#define UNCORE_HAS_DBG_UNCLAIMED BIT(2)
+#define UNCORE_HAS_FIFO BIT(3)
+
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
@@ -106,18 +119,14 @@ struct intel_uncore {
enum forcewake_domains fw_domains_active;
enum forcewake_domains fw_domains_saved; /* user domains saved for S3 */
- u32 fw_set;
- u32 fw_clear;
- u32 fw_reset;
-
struct intel_uncore_forcewake_domain {
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned int wake_count;
bool active;
struct hrtimer timer;
- i915_reg_t reg_set;
- i915_reg_t reg_ack;
+ u32 __iomem *reg_set;
+ u32 __iomem *reg_ack;
} fw_domain[FW_DOMAIN_ID_COUNT];
struct {
@@ -131,86 +140,257 @@ struct intel_uncore {
};
/* Iterate over initialised fw domains */
-#define for_each_fw_domain_masked(domain__, mask__, dev_priv__, tmp__) \
+#define for_each_fw_domain_masked(domain__, mask__, uncore__, tmp__) \
for (tmp__ = (mask__); \
- tmp__ ? (domain__ = &(dev_priv__)->uncore.fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+ tmp__ ? (domain__ = &(uncore__)->fw_domain[__mask_next_bit(tmp__)]), 1 : 0;)
+
+#define for_each_fw_domain(domain__, uncore__, tmp__) \
+ for_each_fw_domain_masked(domain__, (uncore__)->fw_domains, uncore__, tmp__)
+
+static inline struct intel_uncore *
+forcewake_domain_to_uncore(const struct intel_uncore_forcewake_domain *d)
+{
+ return container_of(d, struct intel_uncore, fw_domain[d->id]);
+}
-#define for_each_fw_domain(domain__, dev_priv__, tmp__) \
- for_each_fw_domain_masked(domain__, (dev_priv__)->uncore.fw_domains, dev_priv__, tmp__)
+static inline bool
+intel_uncore_has_forcewake(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FORCEWAKE;
+}
+static inline bool
+intel_uncore_has_fpga_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FPGA_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_dbg_unclaimed(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_DBG_UNCLAIMED;
+}
+
+static inline bool
+intel_uncore_has_fifo(const struct intel_uncore *uncore)
+{
+ return uncore->flags & UNCORE_HAS_FIFO;
+}
void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-void intel_uncore_init(struct drm_i915_private *dev_priv);
-void intel_uncore_prune(struct drm_i915_private *dev_priv);
-bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
-bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
-void intel_uncore_fini(struct drm_i915_private *dev_priv);
-void intel_uncore_suspend(struct drm_i915_private *dev_priv);
-void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
-void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
-
-u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
-void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
-void assert_forcewakes_active(struct drm_i915_private *dev_priv,
+void intel_uncore_init_early(struct intel_uncore *uncore);
+int intel_uncore_init_mmio(struct intel_uncore *uncore);
+void intel_uncore_prune_mmio_domains(struct intel_uncore *uncore);
+bool intel_uncore_unclaimed_mmio(struct intel_uncore *uncore);
+bool intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore);
+void intel_uncore_fini_mmio(struct intel_uncore *uncore);
+void intel_uncore_suspend(struct intel_uncore *uncore);
+void intel_uncore_resume_early(struct intel_uncore *uncore);
+void intel_uncore_runtime_resume(struct intel_uncore *uncore);
+
+void assert_forcewakes_inactive(struct intel_uncore *uncore);
+void assert_forcewakes_active(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains
-intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
+intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1)
#define FW_REG_WRITE (2)
-void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
* Must be used with I915_READ_FW and friends.
*/
-void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
+void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
enum forcewake_domains domains);
-void intel_uncore_forcewake_user_get(struct drm_i915_private *dev_priv);
-void intel_uncore_forcewake_user_put(struct drm_i915_private *dev_priv);
+void intel_uncore_forcewake_user_get(struct intel_uncore *uncore);
+void intel_uncore_forcewake_user_put(struct intel_uncore *uncore);
-int __intel_wait_for_register(struct drm_i915_private *dev_priv,
+int __intel_wait_for_register(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
- unsigned int timeout_ms)
+static inline int
+intel_wait_for_register(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
+ unsigned int timeout_ms)
{
- return __intel_wait_for_register(dev_priv, reg, mask, value, 2,
+ return __intel_wait_for_register(uncore, reg, mask, value, 2,
timeout_ms, NULL);
}
-int __intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
+
+int __intel_wait_for_register_fw(struct intel_uncore *uncore,
i915_reg_t reg,
u32 mask,
u32 value,
unsigned int fast_timeout_us,
unsigned int slow_timeout_ms,
u32 *out_value);
-static inline
-int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
- i915_reg_t reg,
- u32 mask,
- u32 value,
+static inline int
+intel_wait_for_register_fw(struct intel_uncore *uncore,
+ i915_reg_t reg,
+ u32 mask,
+ u32 value,
unsigned int timeout_ms)
{
- return __intel_wait_for_register_fw(dev_priv, reg, mask, value,
+ return __intel_wait_for_register_fw(uncore, reg, mask, value,
2, timeout_ms, NULL);
}
+/* register access functions */
+#define __raw_read(x__, s__) \
+static inline u##x__ __raw_uncore_read##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return read##s__(uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+
+#define __raw_write(x__, s__) \
+static inline void __raw_uncore_write##x__(const struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ write##s__(val, uncore->regs + i915_mmio_reg_offset(reg)); \
+}
+__raw_read(8, b)
+__raw_read(16, w)
+__raw_read(32, l)
+__raw_read(64, q)
+
+__raw_write(8, b)
+__raw_write(16, w)
+__raw_write(32, l)
+__raw_write(64, q)
+
+#undef __raw_read
+#undef __raw_write
+
+#define __uncore_read(name__, x__, s__, trace__) \
+static inline u##x__ intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg) \
+{ \
+ return uncore->funcs.mmio_read##s__(uncore, reg, (trace__)); \
+}
+
+#define __uncore_write(name__, x__, s__, trace__) \
+static inline void intel_uncore_##name__(struct intel_uncore *uncore, \
+ i915_reg_t reg, u##x__ val) \
+{ \
+ uncore->funcs.mmio_write##s__(uncore, reg, val, (trace__)); \
+}
+
+__uncore_read(read8, 8, b, true)
+__uncore_read(read16, 16, w, true)
+__uncore_read(read, 32, l, true)
+__uncore_read(read16_notrace, 16, w, false)
+__uncore_read(read_notrace, 32, l, false)
+
+__uncore_write(write8, 8, b, true)
+__uncore_write(write16, 16, w, true)
+__uncore_write(write, 32, l, true)
+__uncore_write(write_notrace, 32, l, false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. For this reason we do not support I915_WRITE64, or
+ * uncore->funcs.mmio_writeq.
+ *
+ * When reading a 64-bit value as two 32-bit values, the delay may cause
+ * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
+ * occasionally a 64-bit register does not actually support a full readq
+ * and must be read using two 32-bit reads.
+ *
+ * You have been warned.
+ */
+__uncore_read(read64, 64, q, true)
+
+static inline u64
+intel_uncore_read64_2x32(struct intel_uncore *uncore,
+ i915_reg_t lower_reg, i915_reg_t upper_reg)
+{
+ u32 upper, lower, old_upper, loop = 0;
+ upper = intel_uncore_read(uncore, upper_reg);
+ do {
+ old_upper = upper;
+ lower = intel_uncore_read(uncore, lower_reg);
+ upper = intel_uncore_read(uncore, upper_reg);
+ } while (upper != old_upper && loop++ < 2);
+ return (u64)upper << 32 | lower;
+}
+
+#define intel_uncore_posting_read(...) ((void)intel_uncore_read_notrace(__VA_ARGS__))
+#define intel_uncore_posting_read16(...) ((void)intel_uncore_read16_notrace(__VA_ARGS__))
+
+#undef __uncore_read
+#undef __uncore_write
+
+/* These are untraced mmio-accessors that are only valid to be used inside
+ * critical sections, such as inside IRQ handlers, where forcewake is explicitly
+ * controlled.
+ *
+ * Think twice, and think again, before using these.
+ *
+ * As an example, these accessors can possibly be used between:
+ *
+ * spin_lock_irq(&uncore->lock);
+ * intel_uncore_forcewake_get__locked();
+ *
+ * and
+ *
+ * intel_uncore_forcewake_put__locked();
+ * spin_unlock_irq(&uncore->lock);
+ *
+ *
+ * Note: some registers may not need forcewake held, so
+ * intel_uncore_forcewake_{get,put} can be omitted, see
+ * intel_uncore_forcewake_for_reg().
+ *
+ * Certain architectures will die if the same cacheline is concurrently accessed
+ * by different clients (e.g. on Ivybridge). Access to registers should
+ * therefore generally be serialised, by either the dev_priv->uncore.lock or
+ * a more localised lock guarding all access to that bank of registers.
+ */
+#define intel_uncore_read_fw(...) __raw_uncore_read32(__VA_ARGS__)
+#define intel_uncore_write_fw(...) __raw_uncore_write32(__VA_ARGS__)
+#define intel_uncore_write64_fw(...) __raw_uncore_write64(__VA_ARGS__)
+#define intel_uncore_posting_read_fw(...) ((void)intel_uncore_read_fw(__VA_ARGS__))
+
+static inline void intel_uncore_rmw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write(uncore, reg, val);
+}
+
+static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore,
+ i915_reg_t reg, u32 clear, u32 set)
+{
+ u32 val;
+
+ val = intel_uncore_read_fw(uncore, reg);
+ val &= ~clear;
+ val |= set;
+ intel_uncore_write_fw(uncore, reg, val);
+}
+
#define raw_reg_read(base, reg) \
readl(base + i915_mmio_reg_offset(reg))
#define raw_reg_write(base, reg, value) \
	writel(value, base + i915_mmio_reg_offset(reg))
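
/*
 * Editor's note: a short sketch (not part of the patch) of a typical one-off
 * caller using the traced helpers declared above. The function and parameter
 * names are illustrative; intel_uncore_rmw() and intel_wait_for_register()
 * are the helpers introduced here.
 */
static int example_set_and_wait(struct intel_uncore *uncore,
				i915_reg_t reg, u32 enable_bit, u32 ack_bit)
{
	/* traced read/modify/write: clear nothing, set the enable bit */
	intel_uncore_rmw(uncore, reg, 0, enable_bit);

	/* 2us busy-wait fast path, then sleep-wait up to 10ms for the ack */
	return intel_wait_for_register(uncore, reg, ack_bit, ack_bit, 10);
}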
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index bf3662ad5fed..fdbbb9a53804 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -772,6 +772,9 @@ struct psr_table {
/* TP wake up time in multiple of 100 */
u16 tp1_wakeup_time;
u16 tp2_tp3_wakeup_time;
+
+ /* PSR2 TP2/TP3 wakeup time for 16 panels */
+ u32 psr2_tp2_tp3_wakeup_time;
} __packed;
struct bdb_psr {
diff --git a/drivers/gpu/drm/i915/intel_vdsc.c b/drivers/gpu/drm/i915/intel_vdsc.c
index 23abf03736e7..3f9921ba4a76 100644
--- a/drivers/gpu/drm/i915/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/intel_vdsc.c
@@ -317,129 +317,6 @@ static int get_column_index_for_rc_params(u8 bits_per_component)
}
}
-static int intel_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg)
-{
- unsigned long groups_per_line = 0;
- unsigned long groups_total = 0;
- unsigned long num_extra_mux_bits = 0;
- unsigned long slice_bits = 0;
- unsigned long hrd_delay = 0;
- unsigned long final_scale = 0;
- unsigned long rbs_min = 0;
-
- /* Number of groups used to code each line of a slice */
- groups_per_line = DIV_ROUND_UP(vdsc_cfg->slice_width,
- DSC_RC_PIXELS_PER_GROUP);
-
- /* chunksize in Bytes */
- vdsc_cfg->slice_chunk_size = DIV_ROUND_UP(vdsc_cfg->slice_width *
- vdsc_cfg->bits_per_pixel,
- (8 * 16));
-
- if (vdsc_cfg->convert_rgb)
- num_extra_mux_bits = 3 * (vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4)
- - 2);
- else
- num_extra_mux_bits = 3 * vdsc_cfg->mux_word_size +
- (4 * vdsc_cfg->bits_per_component + 4) +
- 2 * (4 * vdsc_cfg->bits_per_component) - 2;
- /* Number of bits in one Slice */
- slice_bits = 8 * vdsc_cfg->slice_chunk_size * vdsc_cfg->slice_height;
-
- while ((num_extra_mux_bits > 0) &&
- ((slice_bits - num_extra_mux_bits) % vdsc_cfg->mux_word_size))
- num_extra_mux_bits--;
-
- if (groups_per_line < vdsc_cfg->initial_scale_value - 8)
- vdsc_cfg->initial_scale_value = groups_per_line + 8;
-
- /* scale_decrement_interval calculation according to DSC spec 1.11 */
- if (vdsc_cfg->initial_scale_value > 8)
- vdsc_cfg->scale_decrement_interval = groups_per_line /
- (vdsc_cfg->initial_scale_value - 8);
- else
- vdsc_cfg->scale_decrement_interval = DSC_SCALE_DECREMENT_INTERVAL_MAX;
-
- vdsc_cfg->final_offset = vdsc_cfg->rc_model_size -
- (vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel + 8) / 16 + num_extra_mux_bits;
-
- if (vdsc_cfg->final_offset >= vdsc_cfg->rc_model_size) {
- DRM_DEBUG_KMS("FinalOfs < RcModelSze for this InitialXmitDelay\n");
- return -ERANGE;
- }
-
- final_scale = (vdsc_cfg->rc_model_size * 8) /
- (vdsc_cfg->rc_model_size - vdsc_cfg->final_offset);
- if (vdsc_cfg->slice_height > 1)
- /*
- * NflBpgOffset is 16 bit value with 11 fractional bits
- * hence we multiply by 2^11 for preserving the
- * fractional part
- */
- vdsc_cfg->nfl_bpg_offset = DIV_ROUND_UP((vdsc_cfg->first_line_bpg_offset << 11),
- (vdsc_cfg->slice_height - 1));
- else
- vdsc_cfg->nfl_bpg_offset = 0;
-
- /* 2^16 - 1 */
- if (vdsc_cfg->nfl_bpg_offset > 65535) {
- DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n");
- return -ERANGE;
- }
-
- /* Number of groups used to code the entire slice */
- groups_total = groups_per_line * vdsc_cfg->slice_height;
-
- /* slice_bpg_offset is 16 bit value with 11 fractional bits */
- vdsc_cfg->slice_bpg_offset = DIV_ROUND_UP(((vdsc_cfg->rc_model_size -
- vdsc_cfg->initial_offset +
- num_extra_mux_bits) << 11),
- groups_total);
-
- if (final_scale > 9) {
- /*
- * ScaleIncrementInterval =
- * finaloffset/((NflBpgOffset + SliceBpgOffset)*8(finalscale - 1.125))
- * as (NflBpgOffset + SliceBpgOffset) has 11 bit fractional value,
- * we need divide by 2^11 from pstDscCfg values
- */
- vdsc_cfg->scale_increment_interval =
- (vdsc_cfg->final_offset * (1 << 11)) /
- ((vdsc_cfg->nfl_bpg_offset +
- vdsc_cfg->slice_bpg_offset) *
- (final_scale - 9));
- } else {
- /*
- * If finalScaleValue is less than or equal to 9, a value of 0 should
- * be used to disable the scale increment at the end of the slice
- */
- vdsc_cfg->scale_increment_interval = 0;
- }
-
- if (vdsc_cfg->scale_increment_interval > 65535) {
- DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n");
- return -ERANGE;
- }
-
- /*
- * DSC spec mentions that bits_per_pixel specifies the target
- * bits/pixel (bpp) rate that is used by the encoder,
- * in steps of 1/16 of a bit per pixel
- */
- rbs_min = vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset +
- DIV_ROUND_UP(vdsc_cfg->initial_xmit_delay *
- vdsc_cfg->bits_per_pixel, 16) +
- groups_per_line * vdsc_cfg->first_line_bpg_offset;
-
- hrd_delay = DIV_ROUND_UP((rbs_min * 16), vdsc_cfg->bits_per_pixel);
- vdsc_cfg->rc_bits = (hrd_delay * vdsc_cfg->bits_per_pixel) / 16;
- vdsc_cfg->initial_dec_delay = hrd_delay - vdsc_cfg->initial_xmit_delay;
-
- return 0;
-}
-
int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config)
{
@@ -491,7 +368,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;
/* Gen 11 does not support YCbCr */
- vdsc_cfg->enable422 = false;
+ vdsc_cfg->simple_422 = false;
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
vdsc_cfg->block_pred_enable =
@@ -574,7 +451,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp,
vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) /
(vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset);
- return intel_compute_rc_parameters(vdsc_cfg);
+ return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
enum intel_display_power_domain
@@ -618,7 +495,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder,
pps_val |= DSC_BLOCK_PREDICTION;
if (vdsc_cfg->convert_rgb)
pps_val |= DSC_COLOR_SPACE_CONVERSION;
- if (vdsc_cfg->enable422)
+ if (vdsc_cfg->simple_422)
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
@@ -1004,10 +881,10 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder,
struct drm_dsc_pps_infoframe dp_dsc_pps_sdp;
/* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */
- drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp);
+ drm_dsc_dp_pps_header_init(&dp_dsc_pps_sdp.pps_header);
/* Fill the PPS payload bytes as per DSC spec 1.2 Table 4-1 */
- drm_dsc_pps_infoframe_pack(&dp_dsc_pps_sdp, vdsc_cfg);
+ drm_dsc_pps_payload_pack(&dp_dsc_pps_sdp.pps_payload, vdsc_cfg);
intel_dig_port->write_infoframe(encoder, crtc_state,
DP_SDP_PPS, &dp_dsc_pps_sdp,
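
/*
 * Editor's note: the removed helper's rate-control arithmetic now comes from
 * drm_dsc_compute_rc_parameters() in DRM core. A sketch of its first two
 * formulas for illustration only; the function names are made up, and
 * bits_per_pixel is in 1/16 bpp units (8 bpp is stored as 128).
 */
static unsigned int example_dsc_chunk_bytes(unsigned int slice_width,
					    unsigned int bits_per_pixel)
{
	/* e.g. slice_width = 1920, bits_per_pixel = 128 -> 1920 bytes */
	return DIV_ROUND_UP(slice_width * bits_per_pixel, 8 * 16);
}

static unsigned int example_dsc_groups_per_line(unsigned int slice_width)
{
	/* 3 pixels per group (DSC_RC_PIXELS_PER_GROUP): 1920 -> 640 groups */
	return DIV_ROUND_UP(slice_width, 3);
}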
diff --git a/drivers/gpu/drm/i915/intel_workarounds.c b/drivers/gpu/drm/i915/intel_workarounds.c
index 15f4a6dee5aa..9682dd575152 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/intel_workarounds.c
@@ -541,10 +541,6 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
GEN11_TDL_CLOCK_GATING_FIX_DISABLE);
- /* WaEnableStateCacheRedirectToCS:icl */
- WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN1,
- GEN11_STATE_CACHE_REDIRECT_TO_CS);
-
/* Wa_2006665173:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0))
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
@@ -555,6 +551,11 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine)
GEN10_CACHE_MODE_SS,
0, /* write-only, so skip validation */
_MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE));
+
+ /* WaDisableGPGPUMidThreadPreemption:icl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
@@ -564,26 +565,26 @@ void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
wa_init_start(wal, "context");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- bdw_ctx_workarounds_init(engine);
- else if (IS_CHERRYVIEW(i915))
- chv_ctx_workarounds_init(engine);
- else if (IS_SKYLAKE(i915))
- skl_ctx_workarounds_init(engine);
- else if (IS_BROXTON(i915))
- bxt_ctx_workarounds_init(engine);
- else if (IS_KABYLAKE(i915))
- kbl_ctx_workarounds_init(engine);
- else if (IS_GEMINILAKE(i915))
- glk_ctx_workarounds_init(engine);
- else if (IS_COFFEELAKE(i915))
- cfl_ctx_workarounds_init(engine);
+ if (IS_GEN(i915, 11))
+ icl_ctx_workarounds_init(engine);
else if (IS_CANNONLAKE(i915))
cnl_ctx_workarounds_init(engine);
- else if (IS_ICELAKE(i915))
- icl_ctx_workarounds_init(engine);
+ else if (IS_COFFEELAKE(i915))
+ cfl_ctx_workarounds_init(engine);
+ else if (IS_GEMINILAKE(i915))
+ glk_ctx_workarounds_init(engine);
+ else if (IS_KABYLAKE(i915))
+ kbl_ctx_workarounds_init(engine);
+ else if (IS_BROXTON(i915))
+ bxt_ctx_workarounds_init(engine);
+ else if (IS_SKYLAKE(i915))
+ skl_ctx_workarounds_init(engine);
+ else if (IS_CHERRYVIEW(i915))
+ chv_ctx_workarounds_init(engine);
+ else if (IS_BROADWELL(i915))
+ bdw_ctx_workarounds_init(engine);
+ else if (INTEL_GEN(i915) < 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -724,9 +725,9 @@ cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
+wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
+ const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
u32 mcr_slice_subslice_mask;
/*
@@ -742,14 +743,15 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
* something more complex that requires checking the range of every
* MMIO read).
*/
- if (INTEL_GEN(dev_priv) >= 10 &&
+ if (INTEL_GEN(i915) >= 10 &&
is_power_of_2(sseu->slice_mask)) {
/*
* read FUSE3 for enabled L3 Bank IDs, if L3 Bank matches
* enabled subslice, no need to redirect MCR packet
*/
u32 slice = fls(sseu->slice_mask);
- u32 fuse3 = I915_READ(GEN10_MIRROR_FUSE3);
+ u32 fuse3 =
+ intel_uncore_read(&i915->uncore, GEN10_MIRROR_FUSE3);
u8 ss_mask = sseu->subslice_mask[slice];
u8 enabled_mask = (ss_mask | ss_mask >>
@@ -763,7 +765,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
WARN_ON((enabled_mask & disabled_mask) != enabled_mask);
}
- if (INTEL_GEN(dev_priv) >= 11)
+ if (INTEL_GEN(i915) >= 11)
mcr_slice_subslice_mask = GEN11_MCR_SLICE_MASK |
GEN11_MCR_SUBSLICE_MASK;
else
@@ -783,7 +785,7 @@ wa_init_mcr(struct drm_i915_private *dev_priv, struct i915_wa_list *wal)
wa_write_masked_or(wal,
GEN8_MCR_SELECTOR,
mcr_slice_subslice_mask,
- intel_calculate_mcr_s_ss_select(dev_priv));
+ intel_calculate_mcr_s_ss_select(i915));
}
static void
@@ -862,26 +864,22 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
static void
gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
- else if (IS_BROXTON(i915))
- bxt_gt_workarounds_init(i915, wal);
- else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
- else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
- else if (IS_COFFEELAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ if (IS_GEN(i915, 11))
+ icl_gt_workarounds_init(i915, wal);
else if (IS_CANNONLAKE(i915))
cnl_gt_workarounds_init(i915, wal);
- else if (IS_ICELAKE(i915))
- icl_gt_workarounds_init(i915, wal);
+ else if (IS_COFFEELAKE(i915))
+ cfl_gt_workarounds_init(i915, wal);
+ else if (IS_GEMINILAKE(i915))
+ glk_gt_workarounds_init(i915, wal);
+ else if (IS_KABYLAKE(i915))
+ kbl_gt_workarounds_init(i915, wal);
+ else if (IS_BROXTON(i915))
+ bxt_gt_workarounds_init(i915, wal);
+ else if (IS_SKYLAKE(i915))
+ skl_gt_workarounds_init(i915, wal);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
}
@@ -896,15 +894,14 @@ void intel_gt_init_workarounds(struct drm_i915_private *i915)
}
static enum forcewake_domains
-wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
- const struct i915_wa_list *wal)
+wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw = 0;
struct i915_wa *wa;
unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- fw |= intel_uncore_forcewake_for_reg(dev_priv,
+ fw |= intel_uncore_forcewake_for_reg(uncore,
wa->reg,
FW_REG_READ |
FW_REG_WRITE);
@@ -913,7 +910,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
}
static void
-wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
+wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
{
enum forcewake_domains fw;
unsigned long flags;
@@ -923,27 +920,22 @@ wa_list_apply(struct drm_i915_private *dev_priv, const struct i915_wa_list *wal)
if (!wal->count)
return;
- fw = wal_get_fw_for_rmw(dev_priv, wal);
+ fw = wal_get_fw_for_rmw(uncore, wal);
- spin_lock_irqsave(&dev_priv->uncore.lock, flags);
- intel_uncore_forcewake_get__locked(dev_priv, fw);
+ spin_lock_irqsave(&uncore->lock, flags);
+ intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- u32 val = I915_READ_FW(wa->reg);
-
- val &= ~wa->mask;
- val |= wa->val;
-
- I915_WRITE_FW(wa->reg, val);
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
}
- intel_uncore_forcewake_put__locked(dev_priv, fw);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
+ intel_uncore_forcewake_put__locked(uncore, fw);
+ spin_unlock_irqrestore(&uncore->lock, flags);
}
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv)
+void intel_gt_apply_workarounds(struct drm_i915_private *i915)
{
- wa_list_apply(dev_priv, &dev_priv->gt_wa_list);
+ wa_list_apply(&i915->uncore, &i915->gt_wa_list);
}
static bool
@@ -960,7 +952,7 @@ wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
return true;
}
-static bool wa_list_verify(struct drm_i915_private *dev_priv,
+static bool wa_list_verify(struct intel_uncore *uncore,
const struct i915_wa_list *wal,
const char *from)
{
@@ -969,15 +961,17 @@ static bool wa_list_verify(struct drm_i915_private *dev_priv,
bool ok = true;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- ok &= wa_verify(wa, I915_READ(wa->reg), wal->name, from);
+ ok &= wa_verify(wa,
+ intel_uncore_read(uncore, wa->reg),
+ wal->name, from);
return ok;
}
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from)
{
- return wa_list_verify(dev_priv, &dev_priv->gt_wa_list, from);
+ return wa_list_verify(&i915->uncore, &i915->gt_wa_list, from);
}
static void
@@ -1052,6 +1046,9 @@ static void icl_whitelist_build(struct i915_wa_list *w)
/* WaAllowUMDToModifySamplerMode:icl */
whitelist_reg(w, GEN10_SAMPLER_MODE);
+
+ /* WaEnableStateCacheRedirectToCS:icl */
+ whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
}
void intel_engine_init_whitelist(struct intel_engine_cs *engine)
@@ -1059,30 +1056,26 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
struct drm_i915_private *i915 = engine->i915;
struct i915_wa_list *w = &engine->whitelist;
- GEM_BUG_ON(engine->id != RCS);
+ GEM_BUG_ON(engine->id != RCS0);
wa_init_start(w, "whitelist");
- if (INTEL_GEN(i915) < 8)
- return;
- else if (IS_BROADWELL(i915))
- return;
- else if (IS_CHERRYVIEW(i915))
- return;
- else if (IS_SKYLAKE(i915))
- skl_whitelist_build(w);
- else if (IS_BROXTON(i915))
- bxt_whitelist_build(w);
- else if (IS_KABYLAKE(i915))
- kbl_whitelist_build(w);
- else if (IS_GEMINILAKE(i915))
- glk_whitelist_build(w);
- else if (IS_COFFEELAKE(i915))
- cfl_whitelist_build(w);
+ if (IS_GEN(i915, 11))
+ icl_whitelist_build(w);
else if (IS_CANNONLAKE(i915))
cnl_whitelist_build(w);
- else if (IS_ICELAKE(i915))
- icl_whitelist_build(w);
+ else if (IS_COFFEELAKE(i915))
+ cfl_whitelist_build(w);
+ else if (IS_GEMINILAKE(i915))
+ glk_whitelist_build(w);
+ else if (IS_KABYLAKE(i915))
+ kbl_whitelist_build(w);
+ else if (IS_BROXTON(i915))
+ bxt_whitelist_build(w);
+ else if (IS_SKYLAKE(i915))
+ skl_whitelist_build(w);
+ else if (INTEL_GEN(i915) <= 8)
+ return;
else
MISSING_CASE(INTEL_GEN(i915));
@@ -1091,8 +1084,8 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
const struct i915_wa_list *wal = &engine->whitelist;
+ struct intel_uncore *uncore = engine->uncore;
const u32 base = engine->mmio_base;
struct i915_wa *wa;
unsigned int i;
@@ -1101,13 +1094,15 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
return;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(wa->reg));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(wa->reg));
/* And clear the rest just in case of garbage */
for (; i < RING_MAX_NONPRIV_SLOTS; i++)
- I915_WRITE(RING_FORCE_TO_NONPRIV(base, i),
- i915_mmio_reg_offset(RING_NOPID(base)));
+ intel_uncore_write(uncore,
+ RING_FORCE_TO_NONPRIV(base, i),
+ i915_mmio_reg_offset(RING_NOPID(base)));
}
static void
@@ -1115,7 +1110,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
struct drm_i915_private *i915 = engine->i915;
- if (IS_ICELAKE(i915)) {
+ if (IS_GEN(i915, 11)) {
/* This is not a Wa. Enable for better image quality */
wa_masked_en(wal,
_3D_CHICKEN3,
@@ -1170,8 +1165,8 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN7_DISABLE_SAMPLER_PREFETCH);
}
- if (IS_GEN(i915, 9) || IS_CANNONLAKE(i915)) {
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,cnl */
+ if (IS_GEN_RANGE(i915, 9, 11)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1236,7 +1231,7 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
return;
- if (engine->id == RCS)
+ if (engine->id == RCS0)
rcs_engine_wa_init(engine, wal);
else
xcs_engine_wa_init(engine, wal);
@@ -1256,7 +1251,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
{
- wa_list_apply(engine->i915, &engine->wa_list);
+ wa_list_apply(engine->uncore, &engine->wa_list);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/intel_workarounds.h b/drivers/gpu/drm/i915/intel_workarounds.h
index 7c734714b05e..34eee5ec511e 100644
--- a/drivers/gpu/drm/i915/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/intel_workarounds.h
@@ -9,18 +9,7 @@
#include <linux/slab.h>
-struct i915_wa {
- i915_reg_t reg;
- u32 mask;
- u32 val;
-};
-
-struct i915_wa_list {
- const char *name;
- struct i915_wa *list;
- unsigned int count;
- unsigned int wa_count;
-};
+#include "intel_workarounds_types.h"
static inline void intel_wa_list_free(struct i915_wa_list *wal)
{
@@ -31,9 +20,9 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
-void intel_gt_init_workarounds(struct drm_i915_private *dev_priv);
-void intel_gt_apply_workarounds(struct drm_i915_private *dev_priv);
-bool intel_gt_verify_workarounds(struct drm_i915_private *dev_priv,
+void intel_gt_init_workarounds(struct drm_i915_private *i915);
+void intel_gt_apply_workarounds(struct drm_i915_private *i915);
+bool intel_gt_verify_workarounds(struct drm_i915_private *i915,
const char *from);
void intel_engine_init_whitelist(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_workarounds_types.h b/drivers/gpu/drm/i915/intel_workarounds_types.h
new file mode 100644
index 000000000000..30918da180ff
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_workarounds_types.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef __INTEL_WORKAROUNDS_TYPES_H__
+#define __INTEL_WORKAROUNDS_TYPES_H__
+
+#include <linux/types.h>
+
+#include "i915_reg.h"
+
+struct i915_wa {
+ i915_reg_t reg;
+ u32 mask;
+ u32 val;
+};
+
+struct i915_wa_list {
+ const char *name;
+ struct i915_wa *list;
+ unsigned int count;
+ unsigned int wa_count;
+};
+
+#endif /* __INTEL_WORKAROUNDS_TYPES_H__ */
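
/*
 * Editor's note: with the list entries split into their own header, each
 * workaround reduces to a masked read/modify/write: applying clears wa->mask
 * and sets wa->val, and verification checks that the masked bits read back as
 * wa->val. A minimal sketch (not part of the patch), assuming the caller
 * handles forcewake and locking as wa_list_apply() does; function names are
 * illustrative.
 */
static void example_wa_apply(struct intel_uncore *uncore,
			     const struct i915_wa *wa)
{
	/* wa_list_apply() now does exactly this for every entry */
	intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
}

static bool example_wa_holds(struct intel_uncore *uncore,
			     const struct i915_wa *wa)
{
	/* the entry holds iff the masked bits read back as wa->val */
	return (intel_uncore_read(uncore, wa->reg) & wa->mask) == wa->val;
}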
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
index 391f3d9ffdf1..419fd4d6a8f0 100644
--- a/drivers/gpu/drm/i915/selftests/huge_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -122,7 +122,7 @@ huge_gem_object(struct drm_i915_private *i915,
if (overflows_type(dma_size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index a9a2fa35876f..90721b54e7ae 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -171,7 +171,7 @@ huge_pages_object(struct drm_i915_private *i915,
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -320,7 +320,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
return ERR_PTR(-ENOMEM);
@@ -908,10 +908,6 @@ gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
if (IS_ERR(obj))
return ERR_CAST(obj);
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -1449,7 +1445,7 @@ static int igt_ppgtt_pin_update(void *arg)
* huge-gtt-pages.
*/
- if (!ppgtt || !i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!ppgtt || !i915_vm_is_4lvl(&ppgtt->vm)) {
pr_info("48b PPGTT not supported, skipping\n");
return 0;
}
@@ -1535,7 +1531,7 @@ static int igt_ppgtt_pin_update(void *arg)
* land in the now stale 2M page.
*/
- err = gpu_write(vma, ctx, dev_priv->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, dev_priv->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1584,6 +1580,7 @@ static int igt_tmpfs_fallback(void *arg)
}
*vaddr = 0xdeadbeaf;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, vm, NULL);
@@ -1653,7 +1650,7 @@ static int igt_shrink_thp(void *arg)
if (err)
goto out_unpin;
- err = gpu_write(vma, ctx, i915->engine[RCS], 0, 0xdeadbeaf);
+ err = gpu_write(vma, ctx, i915->engine[RCS0], 0, 0xdeadbeaf);
if (err)
goto out_unpin;
@@ -1709,16 +1706,17 @@ int i915_gem_huge_page_mock_selftests(void)
return -ENOMEM;
/* Pretend to be a device which supports the 48b PPGTT */
- mkwrite_device_info(dev_priv)->ppgtt = INTEL_PPGTT_FULL_4LVL;
+ mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
+ mkwrite_device_info(dev_priv)->ppgtt_size = 48;
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, ERR_PTR(-ENODEV));
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
}
- if (!i915_vm_is_48bit(&ppgtt->vm)) {
+ if (!i915_vm_is_4lvl(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n");
err = -EINVAL;
goto out_close;
@@ -1734,7 +1732,6 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt);
out_close:
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
@@ -1764,7 +1761,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
return 0;
}
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
file = mock_file(dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index 337b1f98b923..27d8f853111b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -150,7 +150,7 @@ int i915_active_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_active_retire),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index e77b7ed449ae..6fd70d326468 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -84,14 +84,9 @@ static void simulate_hibernate(struct drm_i915_private *i915)
static int pm_prepare(struct drm_i915_private *i915)
{
- int err = 0;
-
- if (i915_gem_suspend(i915)) {
- pr_err("i915_gem_suspend failed\n");
- err = -EINVAL;
- }
+ i915_gem_suspend(i915);
- return err;
+ return 0;
}
static void pm_suspend(struct drm_i915_private *i915)
@@ -220,5 +215,8 @@ int i915_gem_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_gem_hibernate),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index fd89a5a33c1a..e43630b40fce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -202,7 +202,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
if (IS_ERR(vma))
return PTR_ERR(vma);
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -248,15 +248,15 @@ static bool always_valid(struct drm_i915_private *i915)
static bool needs_fence_registers(struct drm_i915_private *i915)
{
- return !i915_terminally_wedged(&i915->gpu_error);
+ return !i915_terminally_wedged(i915);
}
static bool needs_mi_store_dword(struct drm_i915_private *i915)
{
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return false;
- return intel_engine_can_store_dword(i915->engine[RCS]);
+ return intel_engine_can_store_dword(i915->engine[RCS0]);
}
static const struct igt_coherency_mode {
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index 7eb58a9d1319..4e1b6efc6b22 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -76,7 +76,7 @@ static int live_nop_switch(void *arg)
}
for (n = 0; n < nctx; n++) {
- ctx[n] = i915_gem_create_context(i915, file->driver_priv);
+ ctx[n] = live_context(i915, file);
if (IS_ERR(ctx[n])) {
err = PTR_ERR(ctx[n]);
goto out_unlock;
@@ -220,6 +220,7 @@ gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
offset += PAGE_SIZE;
}
*cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
err = i915_gem_object_set_to_gtt_domain(obj, false);
@@ -372,7 +373,8 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
return 0;
}
-static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+static noinline int cpu_check(struct drm_i915_gem_object *obj,
+ unsigned int idx, unsigned int max)
{
unsigned int n, m, needs_flush;
int err;
@@ -390,8 +392,10 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (m = 0; m < max; m++) {
if (map[m] != m) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], m);
+ pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
+ __builtin_return_address(0), idx,
+ n, real_page_count(obj), m, max,
+ map[m], m);
err = -EINVAL;
goto out_unmap;
}
@@ -399,8 +403,9 @@ static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
for (; m < DW_PER_PAGE; m++) {
if (map[m] != STACK_MAGIC) {
- pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
- n, m, map[m], STACK_MAGIC);
+ pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
+ __builtin_return_address(0), idx, n, m,
+ map[m], STACK_MAGIC);
err = -EINVAL;
goto out_unmap;
}
@@ -478,12 +483,8 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj)
static int igt_ctx_exec(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct drm_i915_gem_object *obj = NULL;
- unsigned long ncontexts, ndwords, dw;
- struct igt_live_test t;
- struct drm_file *file;
- IGT_TIMEOUT(end_time);
- LIST_HEAD(objects);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
int err = -ENODEV;
/*
@@ -495,44 +496,167 @@ static int igt_ctx_exec(void *arg)
if (!DRIVER_CAPS(i915)->has_logical_contexts)
return 0;
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_object *obj = NULL;
+ unsigned long ncontexts, ndwords, dw;
+ struct igt_live_test t;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (!engine->context_size)
+ continue; /* No logical context support in HW */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = igt_live_test_begin(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
+ intel_wakeref_t wakeref;
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ if (!obj) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ with_intel_runtime_pm(i915, wakeref)
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj)) {
+ obj = NULL;
+ dw = 0;
+ }
+
+ ndwords++;
+ ncontexts++;
+ }
+
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
+
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ if (igt_live_test_end(&t))
+ err = -EIO;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_shared_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *parent;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_live_test t;
+ struct drm_file *file;
+ int err = 0;
+
+ /*
+ * Create a few different contexts with the same mm and write
+ * through each ctx using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+ if (!DRIVER_CAPS(i915)->has_logical_contexts)
+ return 0;
+
file = mock_file(i915);
if (IS_ERR(file))
return PTR_ERR(file);
mutex_lock(&i915->drm.struct_mutex);
+ parent = live_context(i915, file);
+ if (IS_ERR(parent)) {
+ err = PTR_ERR(parent);
+ goto out_unlock;
+ }
+
+ if (!parent->ppgtt) { /* not full-ppgtt; nothing to share */
+ err = 0;
+ goto out_unlock;
+ }
+
err = igt_live_test_begin(&t, i915, __func__, "");
if (err)
goto out_unlock;
- ncontexts = 0;
- ndwords = 0;
- dw = 0;
- while (!time_after(jiffies, end_time)) {
- struct intel_engine_cs *engine;
- struct i915_gem_context *ctx;
- unsigned int id;
+ for_each_engine(engine, i915, id) {
+ unsigned long ncontexts, ndwords, dw;
+ struct drm_i915_gem_object *obj = NULL;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
- ctx = i915_gem_create_context(i915, file->driver_priv);
- if (IS_ERR(ctx)) {
- err = PTR_ERR(ctx);
- goto out_unlock;
- }
+ if (!intel_engine_can_store_dword(engine))
+ continue;
- for_each_engine(engine, i915, id) {
+ dw = 0;
+ ndwords = 0;
+ ncontexts = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct i915_gem_context *ctx;
intel_wakeref_t wakeref;
- if (!engine->context_size)
- continue; /* No logical context support in HW */
+ ctx = kernel_context(i915);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_test;
+ }
- if (!intel_engine_can_store_dword(engine))
- continue;
+ __assign_ppgtt(ctx, parent->ppgtt);
if (!obj) {
- obj = create_test_object(ctx, file, &objects);
+ obj = create_test_object(parent, file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
}
@@ -544,35 +668,39 @@ static int igt_ctx_exec(void *arg)
ndwords, dw, max_dwords(obj),
engine->name, ctx->hw_id,
yesno(!!ctx->ppgtt), err);
- goto out_unlock;
+ kernel_context_close(ctx);
+ goto out_test;
}
if (++dw == max_dwords(obj)) {
obj = NULL;
dw = 0;
}
+
ndwords++;
+ ncontexts++;
+
+ kernel_context_close(ctx);
}
- ncontexts++;
- }
- pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
- ncontexts, RUNTIME_INFO(i915)->num_rings, ndwords);
+ pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
+ ncontexts, engine->name, ndwords);
- dw = 0;
- list_for_each_entry(obj, &objects, st_link) {
- unsigned int rem =
- min_t(unsigned int, ndwords - dw, max_dwords(obj));
+ ncontexts = dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
- err = cpu_check(obj, rem);
- if (err)
- break;
+ err = cpu_check(obj, ncontexts++, rem);
+ if (err)
+ goto out_test;
- dw += rem;
+ dw += rem;
+ }
}
-
-out_unlock:
+out_test:
if (igt_live_test_end(&t))
err = -EIO;
+out_unlock:
mutex_unlock(&i915->drm.struct_mutex);
mock_file_free(i915, file);
@@ -604,12 +732,9 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
*cmd++ = upper_32_bits(vma->node.start);
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, vma->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -923,7 +1048,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
unsigned int flags)
{
struct intel_sseu default_sseu = intel_device_default_sseu(i915);
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct drm_i915_gem_object *obj;
struct i915_gem_context *ctx;
struct intel_sseu pg_sseu;
@@ -962,11 +1087,12 @@ __igt_ctx_sseu(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex);
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
ret = PTR_ERR(ctx);
goto out_unlock;
}
+ i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) {
@@ -1047,7 +1173,7 @@ static int igt_ctx_readonly(void *arg)
struct drm_i915_gem_object *obj = NULL;
struct i915_gem_context *ctx;
struct i915_hw_ppgtt *ppgtt;
- unsigned long ndwords, dw;
+ unsigned long idx, ndwords, dw;
struct igt_live_test t;
struct drm_file *file;
I915_RND_STATE(prng);
@@ -1071,7 +1197,7 @@ static int igt_ctx_readonly(void *arg)
if (err)
goto out_unlock;
- ctx = i915_gem_create_context(i915, file->driver_priv);
+ ctx = live_context(i915, file);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_unlock;
@@ -1125,9 +1251,10 @@ static int igt_ctx_readonly(void *arg)
}
}
pr_info("Submitted %lu dwords (across %u engines)\n",
- ndwords, RUNTIME_INFO(i915)->num_rings);
+ ndwords, RUNTIME_INFO(i915)->num_engines);
dw = 0;
+ idx = 0;
list_for_each_entry(obj, &objects, st_link) {
unsigned int rem =
min_t(unsigned int, ndwords - dw, max_dwords(obj));
@@ -1137,7 +1264,7 @@ static int igt_ctx_readonly(void *arg)
if (i915_gem_object_is_readonly(obj))
num_writes = 0;
- err = cpu_check(obj, num_writes);
+ err = cpu_check(obj, idx++, num_writes);
if (err)
break;
@@ -1201,12 +1328,9 @@ static int write_to_scratch(struct i915_gem_context *ctx,
}
*cmd++ = value;
*cmd = MI_BATCH_BUFFER_END;
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
-
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1298,11 +1422,9 @@ static int read_from_scratch(struct i915_gem_context *ctx,
*cmd++ = result;
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
if (IS_ERR(vma)) {
@@ -1396,13 +1518,13 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_unlock;
- ctx_a = i915_gem_create_context(i915, file->driver_priv);
+ ctx_a = live_context(i915, file);
if (IS_ERR(ctx_a)) {
err = PTR_ERR(ctx_a);
goto out_unlock;
}
- ctx_b = i915_gem_create_context(i915, file->driver_priv);
+ ctx_b = live_context(i915, file);
if (IS_ERR(ctx_b)) {
err = PTR_ERR(ctx_b);
goto out_unlock;
@@ -1432,7 +1554,7 @@ static int igt_vm_isolation(void *arg)
div64_u64_rem(i915_prandom_u64_state(&prng),
vm_total, &offset);
- offset &= ~sizeof(u32);
+ offset &= -sizeof(u32);
offset += I915_GTT_PAGE_SIZE;
err = write_to_scratch(ctx_a, engine,
@@ -1458,7 +1580,7 @@ static int igt_vm_isolation(void *arg)
count += this;
}
pr_info("Checked %lu scratch offsets across %d engines\n",
- count, RUNTIME_INFO(i915)->num_rings);
+ count, RUNTIME_INFO(i915)->num_engines);
out_rpm:
intel_runtime_pm_put(i915, wakeref);
@@ -1472,10 +1594,10 @@ out_unlock:
}
static __maybe_unused const char *
-__engine_name(struct drm_i915_private *i915, unsigned int engines)
+__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
+ intel_engine_mask_t tmp;
if (engines == ALL_ENGINES)
return "all";
@@ -1488,67 +1610,60 @@ __engine_name(struct drm_i915_private *i915, unsigned int engines)
static int __igt_switch_to_kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx,
- unsigned int engines)
+ intel_engine_mask_t engines)
{
struct intel_engine_cs *engine;
- unsigned int tmp;
- int err;
+ intel_engine_mask_t tmp;
+ int pass;
GEM_TRACE("Testing %s\n", __engine_name(i915, engines));
- for_each_engine_masked(engine, i915, engines, tmp) {
- struct i915_request *rq;
+ for (pass = 0; pass < 4; pass++) { /* Once busy; once idle; repeat */
+ bool from_idle = pass & 1;
+ int err;
- rq = i915_request_alloc(engine, ctx);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
+ if (!from_idle) {
+ for_each_engine_masked(engine, i915, engines, tmp) {
+ struct i915_request *rq;
- i915_request_add(rq);
- }
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
-
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!engine_has_kernel_context_barrier(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
+ i915_request_add(rq);
+ }
}
- }
- err = i915_gem_wait_for_idle(i915,
- I915_WAIT_LOCKED,
- MAX_SCHEDULE_TIMEOUT);
- if (err)
- return err;
+ err = i915_gem_switch_to_kernel_context(i915,
+ i915->gt.active_engines);
+ if (err)
+ return err;
- GEM_BUG_ON(i915->gt.active_requests);
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (engine->last_retired_context->gem_context != i915->kernel_context) {
- pr_err("engine %s not idling in kernel context!\n",
- engine->name);
+ if (!from_idle) {
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (err)
+ return err;
+ }
+
+ if (i915->gt.active_requests) {
+ pr_err("%d active requests remain after switching to kernel context, pass %d (%s) on %s engine%s\n",
+ i915->gt.active_requests,
+ pass, from_idle ? "idle" : "busy",
+ __engine_name(i915, engines),
+ is_power_of_2(engines) ? "" : "s");
return -EINVAL;
}
- }
- err = i915_gem_switch_to_kernel_context(i915);
- if (err)
- return err;
+ /* XXX Bonus points for proving we are the kernel context! */
- if (i915->gt.active_requests) {
- pr_err("switch-to-kernel-context emitted %d requests even though it should already be idling in the kernel context\n",
- i915->gt.active_requests);
- return -EINVAL;
+ mutex_unlock(&i915->drm.struct_mutex);
+ drain_delayed_work(&i915->gt.idle_work);
+ mutex_lock(&i915->drm.struct_mutex);
}
- for_each_engine_masked(engine, i915, engines, tmp) {
- if (!intel_engine_has_kernel_context(engine)) {
- pr_err("kernel context not last on engine %s!\n",
- engine->name);
- return -EINVAL;
- }
- }
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ return -EIO;
return 0;
}
@@ -1592,8 +1707,6 @@ static int igt_switch_to_kernel_context(void *arg)
out_unlock:
GEM_TRACE_DUMP_ON(err);
- if (igt_flush_test(i915, I915_WAIT_LOCKED))
- err = -EIO;
intel_runtime_pm_put(i915, wakeref);
mutex_unlock(&i915->drm.struct_mutex);
@@ -1602,10 +1715,117 @@ out_unlock:
return err;
}
+static void mock_barrier_task(void *data)
+{
+ unsigned int *counter = data;
+
+ ++*counter;
+}
+
+static int mock_context_barrier(void *arg)
+{
+#undef pr_fmt
+#define pr_fmt(x) "context_barrier_task():" # x
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ unsigned int counter;
+ int err;
+
+ /*
+ * The context barrier provides us with a callback after it emits
+ * a request; useful for retiring old state after loading new.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx) {
+ err = -ENOMEM;
+ goto unlock;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, 0,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately with 0 engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ if (counter == 0) {
+ pr_err("Did not retire immediately for all inactive engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ rq = ERR_PTR(-ENODEV);
+ with_intel_runtime_pm(i915, wakeref)
+ rq = i915_request_alloc(i915->engine[RCS0], ctx);
+ if (IS_ERR(rq)) {
+ pr_err("Request allocation failed!\n");
+ goto out;
+ }
+ i915_request_add(rq);
+ GEM_BUG_ON(list_empty(&ctx->active_engines));
+
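+ /* Inject a fault on rcs0: the barrier should fail with -ENXIO and the callback must not run */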
+ counter = 0;
+ context_barrier_inject_fault = BIT(RCS0);
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ context_barrier_inject_fault = 0;
+ if (err == -ENXIO)
+ err = 0;
+ else
+ pr_err("Did not hit fault injection!\n");
+ if (counter != 0) {
+ pr_err("Invoked callback on error!\n");
+ err = -EIO;
+ }
+ if (err)
+ goto out;
+
+ counter = 0;
+ err = context_barrier_task(ctx, ALL_ENGINES,
+ NULL, mock_barrier_task, &counter);
+ if (err) {
+ pr_err("Failed at line %d, err=%d\n", __LINE__, err);
+ goto out;
+ }
+ mock_device_flush(i915);
+ if (counter == 0) {
+ pr_err("Did not retire on each active engines\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+out:
+ mock_context_close(ctx);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+#undef pr_fmt
+#define pr_fmt(x) x
+}
+
int i915_gem_context_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
SUBTEST(igt_switch_to_kernel_context),
+ SUBTEST(mock_context_barrier),
};
struct drm_i915_private *i915;
int err;
@@ -1628,10 +1848,11 @@ int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
SUBTEST(igt_ctx_exec),
SUBTEST(igt_ctx_readonly),
SUBTEST(igt_ctx_sseu),
+ SUBTEST(igt_shared_ctx_exec),
SUBTEST(igt_vm_isolation),
};
- if (i915_terminally_wedged(&dev_priv->gpu_error))
+ if (i915_terminally_wedged(dev_priv))
return 0;
return i915_subtests(tests, dev_priv);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index a7055b12e53c..2b943ee246c9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -315,6 +315,7 @@ static int igt_dmabuf_export_kmap(void *arg)
goto err;
}
memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
ptr = dma_buf_kmap(dmabuf, 1);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b9b0ea4e2404..89766688e420 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -274,7 +274,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
@@ -290,7 +290,7 @@ static int igt_evict_for_cache_color(void *arg)
err = PTR_ERR(obj);
goto cleanup;
}
- i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
quirk_add(obj, &objects);
/* Neighbouring; same colour - should fit */
@@ -547,7 +547,7 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_evict_contexts),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 3850ef4a5ec8..9cca66e4420a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -120,7 +120,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
if (overflows_type(size, obj->base.size))
return ERR_PTR(-E2BIG);
- obj = i915_gem_object_alloc(i915);
+ obj = i915_gem_object_alloc();
if (!obj)
goto err;
@@ -1010,7 +1010,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
return PTR_ERR(file);
mutex_lock(&dev_priv->drm.struct_mutex);
- ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
+ ppgtt = i915_ppgtt_create(dev_priv);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1020,7 +1020,6 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
- i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt);
out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -1681,25 +1680,31 @@ int i915_gem_gtt_mock_selftests(void)
SUBTEST(igt_gtt_insert),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
index 395ae878e0f7..971148fbe6f5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -468,7 +468,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
if (err)
return err;
- rq = i915_request_alloc(i915->engine[RCS], i915->kernel_context);
+ rq = i915_request_alloc(i915->engine[RCS0], i915->kernel_context);
if (IS_ERR(rq)) {
i915_vma_unpin(vma);
return PTR_ERR(rq);
@@ -583,7 +583,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
for (loop = 0; loop < 3; loop++) {
intel_wakeref_t wakeref;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
break;
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 6733dc5b6b4c..e6ffe2240126 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -42,7 +42,7 @@ static int igt_add_request(void *arg)
/* Basic preliminary test to create a request and let it loose! */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS],
+ request = mock_request(i915->engine[RCS0],
i915->kernel_context,
HZ / 10);
if (!request)
@@ -66,7 +66,7 @@ static int igt_wait_request(void *arg)
/* Submit a request, then wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_unlock;
@@ -136,19 +136,17 @@ static int igt_fence_wait(void *arg)
/* Submit a request, treat it as a fence and wait upon it */
mutex_lock(&i915->drm.struct_mutex);
- request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ request = mock_request(i915->engine[RCS0], i915->kernel_context, T);
if (!request) {
err = -ENOMEM;
goto out_locked;
}
- mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
pr_err("fence wait success before submit (expected timeout)!\n");
- goto out_device;
+ goto out_locked;
}
- mutex_lock(&i915->drm.struct_mutex);
i915_request_add(request);
mutex_unlock(&i915->drm.struct_mutex);
@@ -195,7 +193,7 @@ static int igt_request_rewind(void *arg)
mutex_lock(&i915->drm.struct_mutex);
ctx[0] = mock_context(i915, "A");
- request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ request = mock_request(i915->engine[RCS0], ctx[0], 2 * HZ);
if (!request) {
err = -ENOMEM;
goto err_context_0;
@@ -205,7 +203,7 @@ static int igt_request_rewind(void *arg)
i915_request_add(request);
ctx[1] = mock_context(i915, "B");
- vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ vip = mock_request(i915->engine[RCS0], ctx[1], 0);
if (!vip) {
err = -ENOMEM;
goto err_context_1;
@@ -226,8 +224,7 @@ static int igt_request_rewind(void *arg)
mutex_unlock(&i915->drm.struct_mutex);
if (i915_request_wait(vip, 0, HZ) == -ETIME) {
- pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
- vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ pr_err("timed out waiting for high priority request\n");
goto err;
}
@@ -418,7 +415,7 @@ static int mock_breadcrumbs_smoketest(void *arg)
{
struct drm_i915_private *i915 = arg;
struct smoketest t = {
- .engine = i915->engine[RCS],
+ .engine = i915->engine[RCS0],
.ncontexts = 1024,
.max_batch = 1024,
.request_alloc = __mock_request_alloc
@@ -622,13 +619,11 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
}
*cmd = MI_BATCH_BUFFER_END;
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
- err = i915_gem_object_set_to_gtt_domain(obj, false);
- if (err)
- goto err;
+ i915_gem_chipset_flush(i915);
vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) {
@@ -780,10 +775,6 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
if (err)
goto err;
- err = i915_gem_object_set_to_wc_domain(obj, true);
- if (err)
- goto err;
-
cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
if (IS_ERR(cmd)) {
err = PTR_ERR(cmd);
@@ -802,10 +793,12 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
*cmd++ = lower_32_bits(vma->node.start);
}
*cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
- i915_gem_chipset_flush(i915);
+ __i915_gem_object_flush_map(obj, 0, 64);
i915_gem_object_unpin_map(obj);
+ i915_gem_chipset_flush(i915);
+
return vma;
err:
@@ -1219,7 +1212,7 @@ out_flush:
num_fences += atomic_long_read(&t[id].num_fences);
}
pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
- num_waits, num_fences, RUNTIME_INFO(i915)->num_rings, ncpus);
+ num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
mutex_lock(&i915->drm.struct_mutex);
ret = igt_live_test_end(&live) ?: ret;
@@ -1246,7 +1239,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_breadcrumbs_smoketest),
};
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
index 10ef0e636a24..b18eaefef798 100644
--- a/drivers/gpu/drm/i915/selftests/i915_selftest.c
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -133,7 +133,7 @@ static int __run_selftests(const char *name,
if (signal_pending(current))
return -EINTR;
- pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ pr_info(DRIVER_NAME ": Running %s\n", st->name);
if (data)
err = st->live(data);
else
@@ -255,7 +255,7 @@ int __i915_subtests(const char *caller,
if (!apply_subtest_filter(caller, st->name))
continue;
- pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ pr_info(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
GEM_TRACE("Running %s/%s\n", caller, st->name);
err = st->func(data);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index cdbc8f134e5e..cbf45d85cbff 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -571,21 +571,27 @@ static int test_timer(void *arg)
unsigned long target, delay;
struct timed_fence tf;
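+ /* Avoid being preempted between arming the fence and checking it, which could let the timeout expire and fail the test spuriously */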
+ preempt_disable();
timed_fence_init(&tf, target = jiffies);
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with immediate expiration not signaled\n");
goto err;
}
+ preempt_enable();
timed_fence_fini(&tf);
for_each_prime_number(delay, i915_selftest.timeout_jiffies/2) {
+ preempt_disable();
timed_fence_init(&tf, target = jiffies + delay);
if (i915_sw_fence_done(&tf.fence)) {
pr_err("Fence with future expiration (%lu jiffies) already signaled\n", delay);
goto err;
}
+ preempt_enable();
i915_sw_fence_wait(&tf.fence);
+
+ preempt_disable();
if (!i915_sw_fence_done(&tf.fence)) {
pr_err("Fence not signaled after wait\n");
goto err;
@@ -595,13 +601,14 @@ static int test_timer(void *arg)
target, jiffies);
goto err;
}
-
+ preempt_enable();
timed_fence_fini(&tf);
}
return 0;
err:
+ preempt_enable();
timed_fence_fini(&tf);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c
index 12ea69b1a1e5..bd96afcadfe7 100644
--- a/drivers/gpu/drm/i915/selftests/i915_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c
@@ -64,7 +64,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state,
unsigned long cacheline;
int err;
- tl = i915_timeline_create(state->i915, "mock", NULL);
+ tl = i915_timeline_create(state->i915, NULL);
if (IS_ERR(tl))
return PTR_ERR(tl);
@@ -476,7 +476,7 @@ checked_i915_timeline_create(struct drm_i915_private *i915)
{
struct i915_timeline *tl;
- tl = i915_timeline_create(i915, "live", NULL);
+ tl = i915_timeline_create(i915, NULL);
if (IS_ERR(tl))
return tl;
@@ -641,6 +641,118 @@ out:
#undef NUM_TIMELINES
}
+static int live_hwsp_wrap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_timeline *tl;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = 0;
+
+ /*
+ * Across a seqno wrap, we need to keep the old cacheline alive for
+ * foreign GPU references.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ tl = i915_timeline_create(i915, NULL);
+ if (IS_ERR(tl)) {
+ err = PTR_ERR(tl);
+ goto out_rpm;
+ }
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out_free;
+
+ err = i915_timeline_pin(tl);
+ if (err)
+ goto out_free;
+
+ for_each_engine(engine, i915, id) {
+ const u32 *hwsp_seqno[2];
+ struct i915_request *rq;
+ u32 seqno[2];
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ rq = i915_request_alloc(engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
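+ /* Start the timeline seqno just below the wrap point so the two allocations below straddle it */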
+ tl->seqno = -4u;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[0]:%08x, hwsp_offset:%08x\n",
+ seqno[0], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[0] = tl->hwsp_seqno;
+
+ err = i915_timeline_get_seqno(tl, rq, &seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ pr_debug("seqno[1]:%08x, hwsp_offset:%08x\n",
+ seqno[1], tl->hwsp_offset);
+
+ err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ hwsp_seqno[1] = tl->hwsp_seqno;
+
+ /* With wrap should come a new hwsp */
+ GEM_BUG_ON(seqno[1] >= seqno[0]);
+ GEM_BUG_ON(hwsp_seqno[0] == hwsp_seqno[1]);
+
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ pr_err("Wait for timeline writes timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ if (*hwsp_seqno[0] != seqno[0] || *hwsp_seqno[1] != seqno[1]) {
+ pr_err("Bad timeline values: found (%x, %x), expected (%x, %x)\n",
+ *hwsp_seqno[0], *hwsp_seqno[1],
+ seqno[0], seqno[1]);
+ err = -EINVAL;
+ goto out;
+ }
+
+ i915_retire_requests(i915); /* recycle HWSP */
+ }
+
+out:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+
+ i915_timeline_unpin(tl);
+out_free:
+ i915_timeline_put(tl);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -723,7 +835,11 @@ int i915_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_recycle),
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
+ SUBTEST(live_hwsp_wrap),
};
+ if (i915_terminally_wedged(i915))
+ return 0;
+
return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index cf1de82741fa..fc594b030f5a 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -725,24 +725,30 @@ int i915_vma_mock_selftests(void)
SUBTEST(igt_vma_partial),
};
struct drm_i915_private *i915;
- struct i915_ggtt ggtt;
+ struct i915_ggtt *ggtt;
int err;
i915 = mock_gem_device();
if (!i915)
return -ENOMEM;
- mock_init_ggtt(i915, &ggtt);
+ ggtt = kmalloc(sizeof(*ggtt), GFP_KERNEL);
+ if (!ggtt) {
+ err = -ENOMEM;
+ goto out_put;
+ }
+ mock_init_ggtt(i915, ggtt);
mutex_lock(&i915->drm.struct_mutex);
- err = i915_subtests(tests, &ggtt);
+ err = i915_subtests(tests, ggtt);
mock_device_flush(i915);
mutex_unlock(&i915->drm.struct_mutex);
i915_gem_drain_freed_objects(i915);
- mock_fini_ggtt(&ggtt);
+ mock_fini_ggtt(ggtt);
+ kfree(ggtt);
+out_put:
drm_dev_put(&i915->drm);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
index af66e3d4e23a..94aee4071a66 100644
--- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c
+++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c
@@ -14,7 +14,7 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
cond_resched();
if (flags & I915_WAIT_LOCKED &&
- i915_gem_switch_to_kernel_context(i915)) {
+ i915_gem_switch_to_kernel_context(i915, i915->gt.active_engines)) {
pr_err("Failed to switch back to kernel context; declaring wedged\n");
i915_gem_set_wedged(i915);
}
@@ -29,5 +29,5 @@ int igt_flush_test(struct drm_i915_private *i915, unsigned int flags)
i915_gem_set_wedged(i915);
}
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_terminally_wedged(i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 9ebd9225684e..16890dfe74c0 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -29,7 +29,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(spin->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -144,6 +144,13 @@ igt_spinner_create_request(struct igt_spinner *spin,
i915_gem_chipset_flush(spin->i915);
+ if (engine->emit_init_breadcrumb &&
+ rq->timeline->has_initial_breadcrumb) {
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
cancel_rq:
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index c5e0a0e98fcb..b05a21eaa8f4 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -111,7 +111,7 @@ static int validate_client(struct intel_guc_client *client,
dev_priv->preempt_context : dev_priv->kernel_context;
if (client->owner != ctx_owner ||
- client->engines != INTEL_INFO(dev_priv)->ring_mask ||
+ client->engines != INTEL_INFO(dev_priv)->engine_mask ||
client->priority != client_priority ||
client->doorbell_id == GUC_DOORBELL_INVALID)
return -EINVAL;
@@ -261,7 +261,7 @@ static int igt_guc_doorbells(void *arg)
for (i = 0; i < ATTEMPTS; i++) {
clients[i] = guc_client_alloc(dev_priv,
- INTEL_INFO(dev_priv)->ring_mask,
+ INTEL_INFO(dev_priv)->engine_mask,
i % GUC_CLIENT_PRIORITY_NUM,
dev_priv->kernel_context);
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 7b6f3bea9ef8..050bd1e19e02 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -56,6 +56,8 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
if (IS_ERR(h->ctx))
return PTR_ERR(h->ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(h->ctx));
+
h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(h->hws)) {
err = PTR_ERR(h->hws);
@@ -68,7 +70,7 @@ static int hang_init(struct hang *h, struct drm_i915_private *i915)
goto err_hws;
}
- i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(h->hws, I915_CACHE_LLC);
vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
if (IS_ERR(vaddr)) {
err = PTR_ERR(vaddr);
@@ -242,6 +244,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
*batch++ = MI_BATCH_BUFFER_END; /* not reached */
i915_gem_chipset_flush(h->i915);
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto cancel_rq;
+ }
+
flags = 0;
if (INTEL_GEN(vm->i915) <= 5)
flags |= I915_DISPATCH_SECURE;
@@ -334,7 +342,7 @@ static int igt_hang_sanitycheck(void *arg)
timeout = i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
timeout = -EIO;
i915_request_put(rq);
@@ -375,7 +383,7 @@ static int igt_global_reset(void *arg)
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
return err;
@@ -393,15 +401,13 @@ static int igt_wedged_reset(void *arg)
i915_gem_set_wedged(i915);
- mutex_lock(&i915->drm.struct_mutex);
- GEM_BUG_ON(!i915_terminally_wedged(&i915->gpu_error));
+ GEM_BUG_ON(!i915_reset_failed(i915));
i915_reset(i915, ALL_ENGINES, NULL);
- mutex_unlock(&i915->drm.struct_mutex);
intel_runtime_pm_put(i915, wakeref);
igt_global_reset_unlock(i915);
- return i915_terminally_wedged(&i915->gpu_error) ? -EIO : 0;
+ return i915_reset_failed(i915) ? -EIO : 0;
}
static bool wait_for_idle(struct intel_engine_cs *engine)
@@ -409,6 +415,222 @@ static bool wait_for_idle(struct intel_engine_cs *engine)
return wait_for(intel_engine_is_idle(engine), IGT_IDLE_TIMEOUT) == 0;
}
+static int igt_reset_nop(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int reset_count, count;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ /* Check that we can reset during non-user portions of requests */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ reset_count = i915_reset_count(&i915->gpu_error);
+ count = 0;
+ do {
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id) {
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ igt_global_reset_lock(i915);
+ i915_reset(i915, ALL_ENGINES, NULL);
+ igt_global_reset_unlock(i915);
+ if (i915_reset_failed(i915)) {
+ err = -EIO;
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) !=
+ reset_count + ++count) {
+ pr_err("Full GPU reset not recorded!\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: %d resets\n", __func__, count);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
+static int igt_reset_nop_engine(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Check that we can engine-reset during non-user portions */
+
+ if (!intel_has_reset_engine(i915))
+ return 0;
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx = live_context(i915, file);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out;
+ }
+
+ i915_gem_context_clear_bannable(ctx);
+ wakeref = intel_runtime_pm_get(i915);
+ for_each_engine(engine, i915, id) {
+ unsigned int reset_count, reset_engine_count;
+ unsigned int count;
+ IGT_TIMEOUT(end_time);
+
+ reset_count = i915_reset_count(&i915->gpu_error);
+ reset_engine_count = i915_reset_engine_count(&i915->gpu_error,
+ engine);
+ count = 0;
+
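+ /* Mark a per-engine reset as in progress on this engine for the duration of the loop */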
+ set_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ do {
+ int i;
+
+ if (!wait_for_idle(engine)) {
+ pr_err("%s failed to idle before reset\n",
+ engine->name);
+ err = -EIO;
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (i = 0; i < 16; i++) {
+ struct i915_request *rq;
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+
+ i915_request_add(rq);
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ err = i915_reset_engine(engine, NULL);
+ if (err) {
+ pr_err("i915_reset_engine failed\n");
+ break;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) != reset_count) {
+ pr_err("Full GPU reset recorded! (engine reset expected)\n");
+ err = -EINVAL;
+ break;
+ }
+
+ if (i915_reset_engine_count(&i915->gpu_error, engine) !=
+ reset_engine_count + ++count) {
+ pr_err("%s engine reset not recorded!\n",
+ engine->name);
+ err = -EINVAL;
+ break;
+ }
+
+ if (!i915_reset_flush(i915)) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("%s failed to idle after reset\n",
+ engine->name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+
+ err = -EIO;
+ break;
+ }
+ } while (time_before(jiffies, end_time));
+ clear_bit(I915_RESET_ENGINE + id, &i915->gpu_error.flags);
+ pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+
+ if (err)
+ break;
+
+ err = igt_flush_test(i915, 0);
+ if (err)
+ break;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = igt_flush_test(i915, I915_WAIT_LOCKED);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ intel_runtime_pm_put(i915, wakeref);
+out:
+ mock_file_free(i915, file);
+ if (i915_reset_failed(i915))
+ err = -EIO;
+ return err;
+}
+
static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
{
struct intel_engine_cs *engine;
@@ -523,7 +745,7 @@ static int __igt_reset_engine(struct drm_i915_private *i915, bool active)
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (active) {
@@ -565,11 +787,10 @@ static int active_request_put(struct i915_request *rq)
return 0;
if (i915_request_wait(rq, 0, 5 * HZ) < 0) {
- GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld, seqno %d.\n",
+ GEM_TRACE("%s timed out waiting for completion of fence %llx:%lld\n",
rq->engine->name,
rq->fence.context,
- rq->fence.seqno,
- i915_request_global_seqno(rq));
+ rq->fence.seqno);
GEM_TRACE_DUMP();
i915_gem_set_wedged(rq->i915);
@@ -762,7 +983,23 @@ static int __igt_reset_engines(struct drm_i915_private *i915,
count++;
if (rq) {
- i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("i915_reset_engine(%s:%s):"
+ " failed to complete request after reset\n",
+ engine->name, test_name);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ i915_request_put(rq);
+
+ GEM_TRACE_DUMP();
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ break;
+ }
+
i915_request_put(rq);
}
@@ -837,7 +1074,7 @@ unwind:
break;
}
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
if (flags & TEST_ACTIVE) {
@@ -887,7 +1124,8 @@ static int igt_reset_engines(void *arg)
return 0;
}
-static u32 fake_hangcheck(struct drm_i915_private *i915, u32 mask)
+static u32 fake_hangcheck(struct drm_i915_private *i915,
+ intel_engine_mask_t mask)
{
u32 count = i915_reset_count(&i915->gpu_error);
@@ -905,7 +1143,7 @@ static int igt_reset_wait(void *arg)
long timeout;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we detect a stuck waiter and issue a reset */
@@ -917,7 +1155,7 @@ static int igt_reset_wait(void *arg)
if (err)
goto unlock;
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto fini;
@@ -963,7 +1201,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1034,13 +1272,11 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
struct hang h;
int err;
- if (!intel_engine_can_store_dword(i915->engine[RCS]))
+ if (!intel_engine_can_store_dword(i915->engine[RCS0]))
return 0;
/* Check that we can recover an unbind stuck on a hanging request */
- igt_global_reset_lock(i915);
-
mutex_lock(&i915->drm.struct_mutex);
err = hang_init(&h, i915);
if (err)
@@ -1066,7 +1302,7 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
goto out_obj;
}
- rq = hang_create_request(&h, i915->engine[RCS]);
+ rq = hang_create_request(&h, i915->engine[RCS0]);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto out_obj;
@@ -1138,7 +1374,9 @@ static int __igt_reset_evict_vma(struct drm_i915_private *i915,
}
out_reset:
- fake_hangcheck(rq->i915, intel_engine_flag(rq->engine));
+ igt_global_reset_lock(i915);
+ fake_hangcheck(rq->i915, rq->engine->mask);
+ igt_global_reset_unlock(i915);
if (tsk) {
struct igt_wedge_me w;
@@ -1159,9 +1397,8 @@ fini:
hang_fini(&h);
unlock:
mutex_unlock(&i915->drm.struct_mutex);
- igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1317,7 +1554,7 @@ static int igt_reset_queue(void *arg)
goto fini;
}
- reset_count = fake_hangcheck(i915, ENGINE_MASK(id));
+ reset_count = fake_hangcheck(i915, BIT(id));
if (prev->fence.error != -EIO) {
pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
@@ -1367,7 +1604,7 @@ unlock:
mutex_unlock(&i915->drm.struct_mutex);
igt_global_reset_unlock(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
return -EIO;
return err;
@@ -1376,7 +1613,7 @@ unlock:
static int igt_handle_error(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
struct hang h;
struct i915_request *rq;
struct i915_gpu_state *error;
@@ -1423,7 +1660,7 @@ static int igt_handle_error(void *arg)
/* Temporarily disable error capture */
error = xchg(&i915->gpu_error.first_error, (void *)-1);
- i915_handle_error(i915, ENGINE_MASK(engine->id), 0, NULL);
+ i915_handle_error(i915, engine->mask, 0, NULL);
xchg(&i915->gpu_error.first_error, error);
@@ -1547,7 +1784,7 @@ static int igt_atomic_reset_engine(struct intel_engine_cs *engine,
i915_request_wait(rq,
I915_WAIT_LOCKED,
MAX_SCHEDULE_TIMEOUT);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
err = -EIO;
}
@@ -1586,7 +1823,7 @@ static int igt_atomic_reset(void *arg)
/* Flush any requests before we get started and check basics */
force_reset(i915);
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_reset_failed(i915))
goto unlock;
if (intel_has_gpu_reset(i915)) {
@@ -1642,6 +1879,8 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_global_reset), /* attempt to recover GPU first */
SUBTEST(igt_wedged_reset),
SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_reset_nop),
+ SUBTEST(igt_reset_nop_engine),
SUBTEST(igt_reset_idle_engine),
SUBTEST(igt_reset_active_engine),
SUBTEST(igt_reset_engines),
@@ -1660,7 +1899,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
if (!intel_has_gpu_reset(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return -EIO; /* we're long past hope of a successful reset */
wakeref = intel_runtime_pm_get(i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 58144e024751..fbee030db940 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -10,6 +10,7 @@
#include "../i915_selftest.h"
#include "igt_flush_test.h"
+#include "igt_live_test.h"
#include "igt_spinner.h"
#include "i915_random.h"
@@ -75,6 +76,185 @@ err_unlock:
return err;
}
+static int live_busywait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_gem_context *ctx_hi, *ctx_lo;
+ struct intel_engine_cs *engine;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ u32 *map;
+
+ /*
+ * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can
+ * preempt the busywaits used to synchronise between rings.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ ctx_hi = kernel_context(i915);
+ if (!ctx_hi)
+ goto err_unlock;
+ ctx_hi->sched.priority = INT_MAX;
+
+ ctx_lo = kernel_context(i915);
+ if (!ctx_lo)
+ goto err_ctx_hi;
+ ctx_lo->sched.priority = INT_MIN;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto err_ctx_lo;
+ }
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ err = PTR_ERR(map);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_map;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto err_map;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_request *lo, *hi;
+ struct igt_live_test t;
+ u32 *cs;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_vma;
+ }
+
+ /*
+ * We create two requests. The low priority request
+ * busywaits on a semaphore (inside the ringbuffer where
+ * it should be preemptible) and the high priority request
+ * uses a MI_STORE_DWORD_IMM to update the semaphore value
+ * allowing the first request to complete. If preemption
+ * fails, we hang instead.
+ */
+
+ lo = i915_request_alloc(engine, ctx_lo);
+ if (IS_ERR(lo)) {
+ err = PTR_ERR(lo);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(lo, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(lo);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ /* XXX Do we need a flush + invalidate here? */
+
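+ /* Spin until the dword at vma matches the inline compare value (0), which the high priority request will write */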
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+
+ intel_ring_advance(lo, cs);
+ i915_request_add(lo);
+
+ if (wait_for(READ_ONCE(*map), 10)) {
+ err = -ETIMEDOUT;
+ goto err_vma;
+ }
+
+ /* Low priority request should be busywaiting now */
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, 1) != -ETIME) {
+ pr_err("%s: Busywaiting request did not!\n",
+ engine->name);
+ err = -EIO;
+ goto err_vma;
+ }
+
+ hi = i915_request_alloc(engine, ctx_hi);
+ if (IS_ERR(hi)) {
+ err = PTR_ERR(hi);
+ goto err_vma;
+ }
+
+ cs = intel_ring_begin(hi, 4);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ i915_request_add(hi);
+ goto err_vma;
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(vma);
+ *cs++ = 0;
+ *cs++ = 0;
+
+ intel_ring_advance(hi, cs);
+ i915_request_add(hi);
+
+ if (i915_request_wait(lo, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p = drm_info_printer(i915->drm.dev);
+
+ pr_err("%s: Failed to preempt semaphore busywait!\n",
+ engine->name);
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ GEM_TRACE_DUMP();
+
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_vma;
+ }
+ GEM_BUG_ON(READ_ONCE(*map));
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_vma;
+ }
+ }
+
+ err = 0;
+err_vma:
+ i915_vma_unpin(vma);
+err_map:
+ i915_gem_object_unpin_map(obj);
+err_obj:
+ i915_gem_object_put(obj);
+err_ctx_lo:
+ kernel_context_close(ctx_lo);
+err_ctx_hi:
+ kernel_context_close(ctx_hi);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
static int live_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -88,6 +268,9 @@ static int live_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(i915))
return 0;
+ if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+ pr_err("Logical preemption supported, but not exposed\n");
+
mutex_lock(&i915->drm.struct_mutex);
wakeref = intel_runtime_pm_get(i915);
@@ -110,8 +293,17 @@ static int live_preempt(void *arg)
I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -147,7 +339,8 @@ static int live_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -201,8 +394,17 @@ static int live_late_preempt(void *arg)
goto err_ctx_hi;
for_each_engine(engine, i915, id) {
+ struct igt_live_test t;
struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_ctx_lo;
+ }
+
rq = igt_spinner_create_request(&spin_lo, ctx_lo, engine,
MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -241,7 +443,8 @@ static int live_late_preempt(void *arg)
igt_spinner_end(&spin_hi);
igt_spinner_end(&spin_lo);
- if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+
+ if (igt_live_test_end(&t)) {
err = -EIO;
goto err_ctx_lo;
}
@@ -335,6 +538,9 @@ static int live_suppress_self_preempt(void *arg)
struct i915_request *rq_a, *rq_b;
int depth;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
engine->execlists.preempt_hang.count = 0;
rq_a = igt_spinner_create_request(&a.spin,
@@ -407,6 +613,171 @@ err_wedged:
goto err_client_b;
}
+static int __i915_sw_fence_call
+dummy_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ return NOTIFY_DONE;
+}
+
+static struct i915_request *dummy_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = kzalloc(sizeof(*rq), GFP_KERNEL);
+ if (!rq)
+ return NULL;
+
+ INIT_LIST_HEAD(&rq->active_list);
+ rq->engine = engine;
+
+ i915_sched_node_init(&rq->sched);
+
+ /* mark this request as permanently incomplete */
+ rq->fence.seqno = 1;
+ BUILD_BUG_ON(sizeof(rq->fence.seqno) != 8); /* upper 32b == 0 */
+ rq->hwsp_seqno = (u32 *)&rq->fence.seqno + 1;
+ GEM_BUG_ON(i915_request_completed(rq));
+
+ i915_sw_fence_init(&rq->submit, dummy_notify);
+ i915_sw_fence_commit(&rq->submit);
+
+ return rq;
+}
+
+static void dummy_request_free(struct i915_request *dummy)
+{
+ i915_request_mark_complete(dummy);
+ i915_sched_node_fini(&dummy->sched);
+ i915_sw_fence_fini(&dummy->submit);
+
+ dma_fence_free(&dummy->fence);
+}
+
+static int live_suppress_wait_preempt(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct preempt_client client[4];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ int err = -ENOMEM;
+ int i;
+
+ /*
+ * Waiters are given a little priority nudge, but not enough
+ * to actually cause any preemption. Double check that we do
+ * not needlessly generate preempt-to-idle cycles.
+ */
+
+ if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ wakeref = intel_runtime_pm_get(i915);
+
+ if (preempt_client_init(i915, &client[0])) /* ELSP[0] */
+ goto err_unlock;
+ if (preempt_client_init(i915, &client[1])) /* ELSP[1] */
+ goto err_client_0;
+ if (preempt_client_init(i915, &client[2])) /* head of queue */
+ goto err_client_1;
+ if (preempt_client_init(i915, &client[3])) /* bystander */
+ goto err_client_2;
+
+ for_each_engine(engine, i915, id) {
+ int depth;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ if (!engine->emit_init_breadcrumb)
+ continue;
+
+ for (depth = 0; depth < ARRAY_SIZE(client); depth++) {
+ struct i915_request *rq[ARRAY_SIZE(client)];
+ struct i915_request *dummy;
+
+ engine->execlists.preempt_hang.count = 0;
+
+ dummy = dummy_request(engine);
+ if (!dummy)
+ goto err_client_3;
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ rq[i] = igt_spinner_create_request(&client[i].spin,
+ client[i].ctx, engine,
+ MI_NOOP);
+ if (IS_ERR(rq[i])) {
+ err = PTR_ERR(rq[i]);
+ goto err_wedged;
+ }
+
+ /* Disable NEWCLIENT promotion */
+ __i915_active_request_set(&rq[i]->timeline->last_request,
+ dummy);
+ i915_request_add(rq[i]);
+ }
+
+ dummy_request_free(dummy);
+
+ GEM_BUG_ON(i915_request_completed(rq[0]));
+ if (!igt_wait_for_spinner(&client[0].spin, rq[0])) {
+ pr_err("%s: First client failed to start\n",
+ engine->name);
+ goto err_wedged;
+ }
+ GEM_BUG_ON(!i915_request_started(rq[0]));
+
+ if (i915_request_wait(rq[depth],
+ I915_WAIT_LOCKED |
+ I915_WAIT_PRIORITY,
+ 1) != -ETIME) {
+ pr_err("%s: Waiter depth:%d completed!\n",
+ engine->name, depth);
+ goto err_wedged;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ goto err_wedged;
+
+ if (engine->execlists.preempt_hang.count) {
+ pr_err("%s: Preemption recorded x%d, depth %d; should have been suppressed!\n",
+ engine->name,
+ engine->execlists.preempt_hang.count,
+ depth);
+ err = -EINVAL;
+ goto err_client_3;
+ }
+ }
+ }
+
+ err = 0;
+err_client_3:
+ preempt_client_fini(&client[3]);
+err_client_2:
+ preempt_client_fini(&client[2]);
+err_client_1:
+ preempt_client_fini(&client[1]);
+err_client_0:
+ preempt_client_fini(&client[0]);
+err_unlock:
+ if (igt_flush_test(i915, I915_WAIT_LOCKED))
+ err = -EIO;
+ intel_runtime_pm_put(i915, wakeref);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+
+err_wedged:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ igt_spinner_end(&client[i].spin);
+ i915_gem_set_wedged(i915);
+ err = -EIO;
+ goto err_client_3;
+}
+
static int live_chain_preempt(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -438,11 +809,39 @@ static int live_chain_preempt(void *arg)
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
- int count, i;
+ struct igt_live_test t;
+ struct i915_request *rq;
+ int ring_size, count, i;
- for_each_prime_number_from(count, 1, 32) { /* must fit ring! */
- struct i915_request *rq;
+ if (!intel_engine_has_preemption(engine))
+ continue;
+ rq = igt_spinner_create_request(&lo.spin,
+ lo.ctx, engine,
+ MI_ARB_CHECK);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+
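+ /* Estimate how many requests of this size fit in the ring buffer */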
+ ring_size = rq->wa_tail - rq->head;
+ if (ring_size < 0)
+ ring_size += rq->ring->size;
+ ring_size = rq->ring->size / ring_size;
+ pr_debug("%s(%s): Using maximum of %d requests\n",
+ __func__, engine->name, ring_size);
+
+ igt_spinner_end(&lo.spin);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 2) < 0) {
+ pr_err("Timed out waiting to flush %s\n", engine->name);
+ goto err_wedged;
+ }
+
+ if (igt_live_test_begin(&t, i915, __func__, engine->name)) {
+ err = -EIO;
+ goto err_wedged;
+ }
+
+ for_each_prime_number_from(count, 1, ring_size) {
rq = igt_spinner_create_request(&hi.spin,
hi.ctx, engine,
MI_ARB_CHECK);
@@ -484,6 +883,26 @@ static int live_chain_preempt(void *arg)
goto err_wedged;
}
igt_spinner_end(&lo.spin);
+
+ rq = i915_request_alloc(engine, lo.ctx);
+ if (IS_ERR(rq))
+ goto err_wedged;
+ i915_request_add(rq);
+ if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+ struct drm_printer p =
+ drm_info_printer(i915->drm.dev);
+
+ pr_err("Failed to flush low priority chain of %d requests\n",
+ count);
+ intel_engine_dump(engine, &p,
+ "%s\n", engine->name);
+ goto err_wedged;
+ }
+ }
+
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ goto err_wedged;
}
}
@@ -767,7 +1186,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -795,7 +1214,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
count, flags,
- RUNTIME_INFO(smoke->i915)->num_rings, smoke->ncontext);
+ RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext);
return 0;
}
@@ -808,6 +1227,7 @@ static int live_preempt_smoke(void *arg)
};
const unsigned int phase[] = { 0, BATCH };
intel_wakeref_t wakeref;
+ struct igt_live_test t;
int err = -ENOMEM;
u32 *cs;
int n;
@@ -838,11 +1258,13 @@ static int live_preempt_smoke(void *arg)
for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
cs[n] = MI_ARB_CHECK;
cs[n] = MI_BATCH_BUFFER_END;
+ i915_gem_object_flush_map(smoke.batch);
i915_gem_object_unpin_map(smoke.batch);
- err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
- if (err)
+ if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) {
+ err = -EIO;
goto err_batch;
+ }
for (n = 0; n < smoke.ncontext; n++) {
smoke.contexts[n] = kernel_context(smoke.i915);
@@ -861,7 +1283,7 @@ static int live_preempt_smoke(void *arg)
}
err_ctx:
- if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
+ if (igt_live_test_end(&t))
err = -EIO;
for (n = 0; n < smoke.ncontext; n++) {
@@ -884,9 +1306,11 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
+ SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
SUBTEST(live_suppress_self_preempt),
+ SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_smoke),
@@ -895,7 +1319,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
if (!HAS_EXECLISTS(i915))
return 0;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
return i915_subtests(tests, i915);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 81d9d31042a9..e0d7ebecb215 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -119,9 +119,143 @@ int intel_uncore_mock_selftests(void)
return 0;
}
-static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+static int live_forcewake_ops(void *arg)
+{
+ static const struct reg {
+ const char *name;
+ unsigned long platforms;
+ unsigned int offset;
+ } registers[] = {
+ {
+ "RING_START",
+ INTEL_GEN_MASK(6, 7),
+ 0x38,
+ },
+ {
+ "RING_MI_MODE",
+ INTEL_GEN_MASK(8, BITS_PER_LONG),
+ 0x9c,
+ }
+ };
+ const struct reg *r;
+ struct drm_i915_private *i915 = arg;
+ struct intel_uncore_forcewake_domain *domain;
+ struct intel_uncore *uncore = &i915->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ unsigned int tmp;
+ int err = 0;
+
+ GEM_BUG_ON(i915->gt.awake);
+
+ /* vlv/chv with their pcu behave differently wrt reads */
+ if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ pr_debug("PCU fakes forcewake badly; skipping\n");
+ return 0;
+ }
+
+ /*
+ * Not quite as reliable across the gen as one would hope.
+ *
+ * Either our theory of operation is incorrect, or there remain
+ * external parties interfering with the powerwells.
+ *
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110210
+ */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ /* We have to pick carefully to get the exact behaviour we need */
+ for (r = registers; r->name; r++)
+ if (r->platforms & INTEL_INFO(i915)->gen_mask)
+ break;
+ if (!r->name) {
+ pr_debug("Forcewaked register not known for %s; skipping\n",
+ intel_platform_name(INTEL_INFO(i915)->platform));
+ return 0;
+ }
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ for_each_fw_domain(domain, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (!hrtimer_cancel(&domain->timer))
+ continue;
+
+ intel_uncore_fw_release_timer(&domain->timer);
+ }
+
+ for_each_engine(engine, i915, id) {
+ i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset);
+ u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset;
+ enum forcewake_domains fw_domains;
+ u32 val;
+
+ if (!engine->default_state)
+ continue;
+
+ fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
+ FW_REG_READ);
+ if (!fw_domains)
+ continue;
+
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ if (!domain->wake_count)
+ continue;
+
+ pr_err("fw_domain %s still active, aborting test!\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ intel_uncore_forcewake_get(uncore, fw_domains);
+ val = readl(reg);
+ intel_uncore_forcewake_put(uncore, fw_domains);
+
+ /* Flush the forcewake release (delayed onto a timer) */
+ for_each_fw_domain_masked(domain, fw_domains, uncore, tmp) {
+ smp_store_mb(domain->active, false);
+ if (hrtimer_cancel(&domain->timer))
+ intel_uncore_fw_release_timer(&domain->timer);
+
+ preempt_disable();
+ err = wait_ack_clear(domain, FORCEWAKE_KERNEL);
+ preempt_enable();
+ if (err) {
+ pr_err("Failed to clear fw_domain %s\n",
+ intel_uncore_forcewake_domain_to_str(domain->id));
+ goto out_rpm;
+ }
+ }
+
+ if (!val) {
+ pr_err("%s:%s was zero while fw was held!\n",
+ engine->name, r->name);
+ err = -EINVAL;
+ goto out_rpm;
+ }
+
+ /* We then expect the read to return 0 outside of the fw */
+ if (wait_for(readl(reg) == 0, 100)) {
+ pr_err("%s:%s=%0x, fw_domains 0x%x still up after 100ms!\n",
+ engine->name, r->name, readl(reg), fw_domains);
+ err = -ETIMEDOUT;
+ goto out_rpm;
+ }
+ }
+
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
+static int live_forcewake_domains(void *arg)
{
#define FW_RANGE 0x40000
+ struct drm_i915_private *dev_priv = arg;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long *valid;
u32 offset;
int err;
@@ -137,48 +271,52 @@ static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_pri
if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
return 0;
- valid = kcalloc(BITS_TO_LONGS(FW_RANGE), sizeof(*valid),
- GFP_KERNEL);
+ valid = bitmap_zalloc(FW_RANGE, GFP_KERNEL);
if (!valid)
return -ENOMEM;
- intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
for (offset = 0; offset < FW_RANGE; offset += 4) {
i915_reg_t reg = { offset };
(void)I915_READ_FW(reg);
- if (!check_for_unclaimed_mmio(dev_priv))
+ if (!check_for_unclaimed_mmio(uncore))
set_bit(offset, valid);
}
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
err = 0;
for_each_set_bit(offset, valid, FW_RANGE) {
i915_reg_t reg = { offset };
iosf_mbi_punit_acquire();
- intel_uncore_forcewake_reset(dev_priv);
+ intel_uncore_forcewake_reset(uncore);
iosf_mbi_punit_release();
- check_for_unclaimed_mmio(dev_priv);
+ check_for_unclaimed_mmio(uncore);
(void)I915_READ(reg);
- if (check_for_unclaimed_mmio(dev_priv)) {
+ if (check_for_unclaimed_mmio(uncore)) {
pr_err("Unclaimed mmio read to register 0x%04x\n",
offset);
err = -EINVAL;
}
}
- kfree(valid);
+ bitmap_free(valid);
return err;
}
int intel_uncore_live_selftests(struct drm_i915_private *i915)
{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_forcewake_ops),
+ SUBTEST(live_forcewake_domains),
+ };
+
int err;
/* Confirm the table we load is still valid */
@@ -188,9 +326,5 @@ int intel_uncore_live_selftests(struct drm_i915_private *i915)
if (err)
return err;
- err = intel_uncore_check_forcewake_domains(i915);
- if (err)
- return err;
-
- return 0;
+ return i915_subtests(tests, i915);
}
diff --git a/drivers/gpu/drm/i915/selftests/intel_workarounds.c b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
index b15c4f26c593..567b6f8dae86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/selftests/intel_workarounds.c
@@ -12,6 +12,14 @@
#include "igt_spinner.h"
#include "igt_wedge_me.h"
#include "mock_context.h"
+#include "mock_drm.h"
+
+static const struct wo_register {
+ enum intel_platform platform;
+ u32 reg;
+} wo_registers[] = {
+ { INTEL_GEMINILAKE, 0x731c }
+};
#define REF_NAME_MAX (INTEL_ENGINE_CS_MAX_NAME + 4)
struct wa_lists {
@@ -74,7 +82,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
if (IS_ERR(result))
return result;
- i915_gem_object_set_cache_level(result, I915_CACHE_LLC);
+ i915_gem_object_set_cache_coherency(result, I915_CACHE_LLC);
cs = i915_gem_object_pin_map(result, I915_MAP_WB);
if (IS_ERR(cs)) {
@@ -82,6 +90,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
goto err_obj;
}
memset(cs, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
@@ -181,7 +190,7 @@ static int check_whitelist(struct i915_gem_context *ctx,
err = 0;
igt_wedge_on_timeout(&wedge, ctx->i915, HZ / 5) /* a safety net! */
err = i915_gem_object_set_to_cpu_domain(results, false);
- if (i915_terminally_wedged(&ctx->i915->gpu_error))
+ if (i915_terminally_wedged(ctx->i915))
err = -EIO;
if (err)
goto out_put;
@@ -214,7 +223,7 @@ out_put:
static int do_device_reset(struct intel_engine_cs *engine)
{
- i915_reset(engine->i915, ENGINE_MASK(engine->id), "live_workarounds");
+ i915_reset(engine->i915, engine->mask, "live_workarounds");
return 0;
}
@@ -236,15 +245,11 @@ switch_to_scratch_context(struct intel_engine_cs *engine,
if (IS_ERR(ctx))
return PTR_ERR(ctx);
+ GEM_BUG_ON(i915_gem_context_is_bannable(ctx));
+
rq = ERR_PTR(-ENODEV);
- with_intel_runtime_pm(engine->i915, wakeref) {
- if (spin)
- rq = igt_spinner_create_request(spin,
- ctx, engine,
- MI_NOOP);
- else
- rq = i915_request_alloc(engine, ctx);
- }
+ with_intel_runtime_pm(engine->i915, wakeref)
+ rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP);
kernel_context_close(ctx);
@@ -273,7 +278,6 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
const char *name)
{
struct drm_i915_private *i915 = engine->i915;
- bool want_spin = reset == do_engine_reset;
struct i915_gem_context *ctx;
struct igt_spinner spin;
intel_wakeref_t wakeref;
@@ -282,11 +286,9 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
pr_info("Checking %d whitelisted registers (RING_NONPRIV) [%s]\n",
engine->whitelist.count, name);
- if (want_spin) {
- err = igt_spinner_init(&spin, i915);
- if (err)
- return err;
- }
+ err = igt_spinner_init(&spin, i915);
+ if (err)
+ return err;
ctx = kernel_context(i915);
if (IS_ERR(ctx))
@@ -298,17 +300,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine,
goto out;
}
- err = switch_to_scratch_context(engine, want_spin ? &spin : NULL);
+ err = switch_to_scratch_context(engine, &spin);
if (err)
goto out;
with_intel_runtime_pm(i915, wakeref)
err = reset(engine);
- if (want_spin) {
- igt_spinner_end(&spin);
- igt_spinner_fini(&spin);
- }
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
if (err) {
pr_err("%s reset failed\n", name);
@@ -340,10 +340,379 @@ out:
return err;
}
+static struct i915_vma *create_scratch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
+
+ ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ goto err_obj;
+ }
+ memset(ptr, 0xc5, PAGE_SIZE);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct i915_vma *create_batch(struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(ctx->i915, 16 * PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, &ctx->ppgtt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err_obj;
+
+ err = i915_gem_object_set_to_wc_domain(obj, true);
+ if (err)
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static u32 reg_write(u32 old, u32 new, u32 rsvd)
+{
+ if (rsvd == 0x0000ffff) {
+ old &= ~(new >> 16);
+ old |= new & (new >> 16);
+ } else {
+ old &= ~rsvd;
+ old |= new & rsvd;
+ }
+
+ return old;
+}
+
+static bool wo_register(struct intel_engine_cs *engine, u32 reg)
+{
+ enum intel_platform platform = INTEL_INFO(engine->i915)->platform;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(wo_registers); i++) {
+ if (wo_registers[i].platform == platform &&
+ wo_registers[i].reg == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static int check_dirty_whitelist(struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine)
+{
+ const u32 values[] = {
+ 0x00000000,
+ 0x01010101,
+ 0x10100101,
+ 0x03030303,
+ 0x30300303,
+ 0x05050505,
+ 0x50500505,
+ 0x0f0f0f0f,
+ 0xf00ff00f,
+ 0x10101010,
+ 0xf0f01010,
+ 0x30303030,
+ 0xa0a03030,
+ 0x50505050,
+ 0xc0c05050,
+ 0xf0f0f0f0,
+ 0x11111111,
+ 0x33333333,
+ 0x55555555,
+ 0x0000ffff,
+ 0x00ff00ff,
+ 0xff0000ff,
+ 0xffff00ff,
+ 0xffffffff,
+ };
+ struct i915_vma *scratch;
+ struct i915_vma *batch;
+ int err = 0, i, v;
+ u32 *cs, *results;
+
+ scratch = create_scratch(ctx);
+ if (IS_ERR(scratch))
+ return PTR_ERR(scratch);
+
+ batch = create_batch(ctx);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_scratch;
+ }
+
+ for (i = 0; i < engine->whitelist.count; i++) {
+ u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg);
+ u64 addr = scratch->node.start;
+ struct i915_request *rq;
+ u32 srm, lrm, rsvd;
+ u32 expect;
+ int idx;
+
+ if (wo_register(engine, reg))
+ continue;
+
+ srm = MI_STORE_REGISTER_MEM;
+ lrm = MI_LOAD_REGISTER_MEM;
+ if (INTEL_GEN(ctx->i915) >= 8)
+ lrm++, srm++;
+
+ pr_debug("%s: Writing garbage to %x\n",
+ engine->name, reg);
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_batch;
+ }
+
+ /* SRM original */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ /* LRI garbage */
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = reg;
+ *cs++ = ~values[v];
+
+ /* SRM result */
+ *cs++ = srm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr + sizeof(u32) * idx);
+ *cs++ = upper_32_bits(addr + sizeof(u32) * idx);
+ idx++;
+ }
+ GEM_BUG_ON(idx * sizeof(u32) > scratch->size);
+
+ /* LRM original -- don't leave garbage in the context! */
+ *cs++ = lrm;
+ *cs++ = reg;
+ *cs++ = lower_32_bits(addr);
+ *cs++ = upper_32_bits(addr);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+ i915_gem_chipset_flush(ctx->i915);
+
+ rq = i915_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_batch;
+ }
+
+ if (engine->emit_init_breadcrumb) { /* Be nice if we hang */
+ err = engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto err_request;
+ }
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, PAGE_SIZE,
+ 0);
+ if (err)
+ goto err_request;
+
+err_request:
+ i915_request_add(rq);
+ if (err)
+ goto out_batch;
+
+	if (i915_request_wait(rq, I915_WAIT_LOCKED, HZ / 5) < 0) {
+		pr_err("%s: Futzing %x timed out; cancelling test\n",
+ engine->name, reg);
+ i915_gem_set_wedged(ctx->i915);
+ err = -EIO;
+ goto out_batch;
+ }
+
+ results = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+ if (IS_ERR(results)) {
+ err = PTR_ERR(results);
+ goto out_batch;
+ }
+
+ GEM_BUG_ON(values[ARRAY_SIZE(values) - 1] != 0xffffffff);
+ rsvd = results[ARRAY_SIZE(values)]; /* detect write masking */
+ if (!rsvd) {
+ pr_err("%s: Unable to write to whitelisted register %x\n",
+ engine->name, reg);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ expect = reg_write(expect, ~values[v], rsvd);
+ if (results[idx] != expect)
+ err++;
+ idx++;
+ }
+ if (err) {
+ pr_err("%s: %d mismatch between values written to whitelisted register [%x], and values read back!\n",
+ engine->name, err, reg);
+
+ pr_info("%s: Whitelisted register: %x, original value %08x, rsvd %08x\n",
+ engine->name, reg, results[0], rsvd);
+
+ expect = results[0];
+ idx = 1;
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+ for (v = 0; v < ARRAY_SIZE(values); v++) {
+ u32 w = ~values[v];
+
+ expect = reg_write(expect, w, rsvd);
+ pr_info("Wrote %08x, read %08x, expect %08x\n",
+ w, results[idx], expect);
+ idx++;
+ }
+
+ err = -EINVAL;
+ }
+out_unpin:
+ i915_gem_object_unpin_map(scratch->obj);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED))
+ err = -EIO;
+out_batch:
+ i915_vma_unpin_and_release(&batch, 0);
+out_scratch:
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static int live_dirty_whitelist(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ enum intel_engine_id id;
+ intel_wakeref_t wakeref;
+ struct drm_file *file;
+ int err = 0;
+
+ /* Can the user write to the whitelisted registers? */
+
+ if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */
+ return 0;
+
+ wakeref = intel_runtime_pm_get(i915);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+ file = mock_file(i915);
+ mutex_lock(&i915->drm.struct_mutex);
+ if (IS_ERR(file)) {
+ err = PTR_ERR(file);
+ goto out_rpm;
+ }
+
+ ctx = live_context(i915, file);
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_file;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (engine->whitelist.count == 0)
+ continue;
+
+ err = check_dirty_whitelist(ctx, engine);
+ if (err)
+ goto out_file;
+ }
+
+out_file:
+ mutex_unlock(&i915->drm.struct_mutex);
+ mock_file_free(i915, file);
+ mutex_lock(&i915->drm.struct_mutex);
+out_rpm:
+ intel_runtime_pm_put(i915, wakeref);
+ return err;
+}
+
static int live_reset_whitelist(void *arg)
{
struct drm_i915_private *i915 = arg;
- struct intel_engine_cs *engine = i915->engine[RCS];
+ struct intel_engine_cs *engine = i915->engine[RCS0];
int err = 0;
/* If we reset the gpu, we should not lose the RING_NONPRIV */
@@ -381,10 +750,11 @@ static bool verify_gt_engine_wa(struct drm_i915_private *i915,
enum intel_engine_id id;
bool ok = true;
- ok &= wa_list_verify(i915, &lists->gt_wa_list, str);
+ ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str);
for_each_engine(engine, i915, id)
- ok &= wa_list_verify(i915, &lists->engine[id].wa_list, str);
+ ok &= wa_list_verify(engine->uncore,
+ &lists->engine[id].wa_list, str);
return ok;
}
@@ -513,13 +883,14 @@ err:
int intel_workarounds_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
+ SUBTEST(live_dirty_whitelist),
SUBTEST(live_reset_whitelist),
SUBTEST(live_gpu_reset_gt_engine_workarounds),
SUBTEST(live_engine_reset_gt_engine_workarounds),
};
int err;
- if (i915_terminally_wedged(&i915->gpu_error))
+ if (i915_terminally_wedged(i915))
return 0;
mutex_lock(&i915->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd602..0426093bf1d9 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,7 +30,6 @@ mock_context(struct drm_i915_private *i915,
const char *name)
{
struct i915_gem_context *ctx;
- unsigned int n;
int ret;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -41,25 +40,31 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ ctx->hw_contexts = RB_ROOT;
+ spin_lock_init(&ctx->hw_contexts_lock);
+
INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
INIT_LIST_HEAD(&ctx->handles_list);
INIT_LIST_HEAD(&ctx->hw_id_link);
-
- for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
- intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
+ INIT_LIST_HEAD(&ctx->active_engines);
+ mutex_init(&ctx->mutex);
ret = i915_gem_context_pin_hw_id(ctx);
if (ret < 0)
goto err_handles;
if (name) {
+ struct i915_hw_ppgtt *ppgtt;
+
ctx->name = kstrdup(name, GFP_KERNEL);
if (!ctx->name)
goto err_put;
- ctx->ppgtt = mock_ppgtt(i915, name);
- if (!ctx->ppgtt)
+ ppgtt = mock_ppgtt(i915, name);
+ if (!ppgtt)
goto err_put;
+
+ __set_ppgtt(ctx, ppgtt);
}
return ctx;
@@ -87,9 +92,24 @@ void mock_init_contexts(struct drm_i915_private *i915)
struct i915_gem_context *
live_context(struct drm_i915_private *i915, struct drm_file *file)
{
+ struct i915_gem_context *ctx;
+ int err;
+
lockdep_assert_held(&i915->drm.struct_mutex);
- return i915_gem_create_context(i915, file->driver_priv);
+ ctx = i915_gem_create_context(i915, 0);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ err = gem_context_register(ctx, file->driver_priv);
+ if (err < 0)
+ goto err_ctx;
+
+ return ctx;
+
+err_ctx:
+ context_close(ctx);
+ return ERR_PTR(err);
}
struct i915_gem_context *
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 08f0cab02e0f..61a8206ed677 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -50,13 +50,12 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
if (!ring)
return NULL;
- if (i915_timeline_init(engine->i915,
- &ring->timeline, engine->name,
- NULL)) {
+ if (i915_timeline_init(engine->i915, &ring->timeline, NULL)) {
kfree(ring);
return NULL;
}
+ kref_init(&ring->base.ref);
ring->base.size = sz;
ring->base.effective_size = sz;
ring->base.vaddr = (void *)(ring + 1);
@@ -76,28 +75,26 @@ static void mock_ring_free(struct intel_ring *base)
kfree(ring);
}
-static struct mock_request *first_request(struct mock_engine *engine)
+static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
- struct mock_request,
- link);
+ struct i915_request,
+ mock.link);
}
-static void advance(struct mock_request *request)
+static void advance(struct i915_request *request)
{
- list_del_init(&request->link);
- intel_engine_write_global_seqno(request->base.engine,
- request->base.global_seqno);
- i915_request_mark_complete(&request->base);
- GEM_BUG_ON(!i915_request_completed(&request->base));
+ list_del_init(&request->mock.link);
+ i915_request_mark_complete(request);
+ GEM_BUG_ON(!i915_request_completed(request));
- intel_engine_queue_breadcrumbs(request->base.engine);
+ intel_engine_queue_breadcrumbs(request->engine);
}
static void hw_delay_complete(struct timer_list *t)
{
struct mock_engine *engine = from_timer(engine, t, hw_delay);
- struct mock_request *request;
+ struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&engine->hw_lock, flags);
@@ -112,8 +109,9 @@ static void hw_delay_complete(struct timer_list *t)
* requeue the timer for the next delayed request.
*/
while ((request = first_request(engine))) {
- if (request->delay) {
- mod_timer(&engine->hw_delay, jiffies + request->delay);
+ if (request->mock.delay) {
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
break;
}
@@ -126,55 +124,43 @@ static void hw_delay_complete(struct timer_list *t)
static void mock_context_unpin(struct intel_context *ce)
{
mock_timeline_unpin(ce->ring->timeline);
- i915_gem_context_put(ce->gem_context);
}
-static void mock_context_destroy(struct intel_context *ce)
+static void mock_context_destroy(struct kref *ref)
{
- GEM_BUG_ON(ce->pin_count);
+ struct intel_context *ce = container_of(ref, typeof(*ce), ref);
+
+ GEM_BUG_ON(intel_context_is_pinned(ce));
if (ce->ring)
mock_ring_free(ce->ring);
-}
-static const struct intel_context_ops mock_context_ops = {
- .unpin = mock_context_unpin,
- .destroy = mock_context_destroy,
-};
+ intel_context_free(ce);
+}
-static struct intel_context *
-mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static int mock_context_pin(struct intel_context *ce)
{
- struct intel_context *ce = to_intel_context(ctx, engine);
- int err = -ENOMEM;
-
- if (ce->pin_count++)
- return ce;
-
if (!ce->ring) {
- ce->ring = mock_ring(engine);
+ ce->ring = mock_ring(ce->engine);
if (!ce->ring)
- goto err;
+ return -ENOMEM;
}
mock_timeline_pin(ce->ring->timeline);
+ return 0;
+}
- ce->ops = &mock_context_ops;
- i915_gem_context_get(ctx);
- return ce;
+static const struct intel_context_ops mock_context_ops = {
+ .pin = mock_context_pin,
+ .unpin = mock_context_unpin,
-err:
- ce->pin_count = 0;
- return ERR_PTR(err);
-}
+ .destroy = mock_context_destroy,
+};
static int mock_request_alloc(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
-
- INIT_LIST_HEAD(&mock->link);
- mock->delay = 0;
+ INIT_LIST_HEAD(&request->mock.link);
+ request->mock.delay = 0;
return 0;
}
@@ -192,25 +178,55 @@ static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
static void mock_submit_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
unsigned long flags;
i915_request_submit(request);
- GEM_BUG_ON(!request->global_seqno);
spin_lock_irqsave(&engine->hw_lock, flags);
- list_add_tail(&mock->link, &engine->hw_queue);
- if (mock->link.prev == &engine->hw_queue) {
- if (mock->delay)
- mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ list_add_tail(&request->mock.link, &engine->hw_queue);
+ if (list_is_first(&request->mock.link, &engine->hw_queue)) {
+ if (request->mock.delay)
+ mod_timer(&engine->hw_delay,
+ jiffies + request->mock.delay);
else
- advance(mock);
+ advance(request);
}
spin_unlock_irqrestore(&engine->hw_lock, flags);
}
+static void mock_reset_prepare(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_reset(struct intel_engine_cs *engine, bool stalled)
+{
+ GEM_BUG_ON(stalled);
+}
+
+static void mock_reset_finish(struct intel_engine_cs *engine)
+{
+}
+
+static void mock_cancel_requests(struct intel_engine_cs *engine)
+{
+ struct i915_request *request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&engine->timeline.lock, flags);
+
+ /* Mark all submitted requests as skipped. */
+ list_for_each_entry(request, &engine->timeline.requests, sched.link) {
+ if (!i915_request_signaled(request))
+ dma_fence_set_error(&request->fence, -EIO);
+
+ i915_request_mark_complete(request);
+ }
+
+ spin_unlock_irqrestore(&engine->timeline.lock, flags);
+}
+
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
const char *name,
int id)
@@ -227,18 +243,21 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
engine->base.i915 = i915;
snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
engine->base.id = id;
+ engine->base.mask = BIT(id);
engine->base.status_page.addr = (void *)(engine + 1);
- engine->base.context_pin = mock_context_pin;
+ engine->base.cops = &mock_context_ops;
engine->base.request_alloc = mock_request_alloc;
engine->base.emit_flush = mock_emit_flush;
engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
engine->base.submit_request = mock_submit_request;
- if (i915_timeline_init(i915,
- &engine->base.timeline,
- engine->base.name,
- NULL))
+ engine->base.reset.prepare = mock_reset_prepare;
+ engine->base.reset.reset = mock_reset;
+ engine->base.reset.finish = mock_reset_finish;
+ engine->base.cancel_requests = mock_cancel_requests;
+
+ if (i915_timeline_init(i915, &engine->base.timeline, NULL))
goto err_free;
i915_timeline_set_subclass(&engine->base.timeline, TIMELINE_ENGINE);
@@ -249,7 +268,8 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
timer_setup(&engine->hw_delay, hw_delay_complete, 0);
INIT_LIST_HEAD(&engine->hw_queue);
- if (IS_ERR(intel_context_pin(i915->kernel_context, &engine->base)))
+ if (pin_context(i915->kernel_context, &engine->base,
+ &engine->base.kernel_context))
goto err_breadcrumbs;
return &engine->base;
@@ -266,19 +286,18 @@ void mock_engine_flush(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
- struct mock_request *request, *rn;
+ struct i915_request *request, *rn;
del_timer_sync(&mock->hw_delay);
spin_lock_irq(&mock->hw_lock);
- list_for_each_entry_safe(request, rn, &mock->hw_queue, link)
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
advance(request);
spin_unlock_irq(&mock->hw_lock);
}
void mock_engine_reset(struct intel_engine_cs *engine)
{
- intel_engine_write_global_seqno(engine, 0);
}
void mock_engine_free(struct intel_engine_cs *engine)
@@ -293,7 +312,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
if (ce)
intel_context_unpin(ce);
- __intel_context_unpin(engine->i915->kernel_context, engine);
+ intel_context_unpin(engine->kernel_context);
intel_engine_fini_breadcrumbs(engine);
i915_timeline_fini(&engine->timeline);
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 14ae46fda49f..60bbf8b4df40 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -79,12 +79,6 @@ static void mock_device_release(struct drm_device *dev)
destroy_workqueue(i915->wq);
- kmem_cache_destroy(i915->priorities);
- kmem_cache_destroy(i915->dependencies);
- kmem_cache_destroy(i915->requests);
- kmem_cache_destroy(i915->vmas);
- kmem_cache_destroy(i915->objects);
-
i915_gemfs_fini(i915);
drm_mode_config_cleanup(&i915->drm);
@@ -115,6 +109,10 @@ static void mock_retire_work_handler(struct work_struct *work)
static void mock_idle_work_handler(struct work_struct *work)
{
+ struct drm_i915_private *i915 =
+ container_of(work, typeof(*i915), gt.idle_work.work);
+
+ i915->gt.active_engines = 0;
}
static int pm_domain_resume(struct device *dev)
@@ -184,11 +182,12 @@ struct drm_i915_private *mock_gem_device(void)
I915_GTT_PAGE_SIZE_64K |
I915_GTT_PAGE_SIZE_2M;
- mock_uncore_init(i915);
+ mock_uncore_init(&i915->uncore);
i915_gem_init__mm(i915);
init_waitqueue_head(&i915->gpu_error.wait_queue);
init_waitqueue_head(&i915->gpu_error.reset_queue);
+ init_srcu_struct(&i915->gpu_error.reset_backoff_srcu);
mutex_init(&i915->gpu_error.wedge_mutex);
i915->wq = alloc_ordered_workqueue("mock", 0);
@@ -202,31 +201,6 @@ struct drm_i915_private *mock_gem_device(void)
i915->gt.awake = true;
- i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
- if (!i915->objects)
- goto err_wq;
-
- i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
- if (!i915->vmas)
- goto err_objects;
-
- i915->requests = KMEM_CACHE(mock_request,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT |
- SLAB_TYPESAFE_BY_RCU);
- if (!i915->requests)
- goto err_vmas;
-
- i915->dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!i915->dependencies)
- goto err_requests;
-
- i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
- if (!i915->priorities)
- goto err_dependencies;
-
i915_timelines_init(i915);
INIT_LIST_HEAD(&i915->gt.active_rings);
@@ -236,13 +210,13 @@ struct drm_i915_private *mock_gem_device(void)
mock_init_ggtt(i915, &i915->ggtt);
- mkwrite_device_info(i915)->ring_mask = BIT(0);
+ mkwrite_device_info(i915)->engine_mask = BIT(0);
i915->kernel_context = mock_context(i915, NULL);
if (!i915->kernel_context)
goto err_unlock;
- i915->engine[RCS] = mock_engine(i915, "mock", RCS);
- if (!i915->engine[RCS])
+ i915->engine[RCS0] = mock_engine(i915, "mock", RCS0);
+ if (!i915->engine[RCS0])
goto err_context;
mutex_unlock(&i915->drm.struct_mutex);
@@ -256,16 +230,6 @@ err_context:
err_unlock:
mutex_unlock(&i915->drm.struct_mutex);
i915_timelines_fini(i915);
- kmem_cache_destroy(i915->priorities);
-err_dependencies:
- kmem_cache_destroy(i915->dependencies);
-err_requests:
- kmem_cache_destroy(i915->requests);
-err_vmas:
- kmem_cache_destroy(i915->vmas);
-err_objects:
- kmem_cache_destroy(i915->objects);
-err_wq:
destroy_workqueue(i915->wq);
err_drv:
drm_mode_config_cleanup(&i915->drm);
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
index 0dc29e242597..d1a7c9608712 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.c
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -31,29 +31,25 @@ mock_request(struct intel_engine_cs *engine,
unsigned long delay)
{
struct i915_request *request;
- struct mock_request *mock;
/* NB the i915->requests slab cache is enlarged to fit mock_request */
request = i915_request_alloc(engine, context);
if (IS_ERR(request))
return NULL;
- mock = container_of(request, typeof(*mock), base);
- mock->delay = delay;
-
- return &mock->base;
+ request->mock.delay = delay;
+ return request;
}
bool mock_cancel_request(struct i915_request *request)
{
- struct mock_request *mock = container_of(request, typeof(*mock), base);
struct mock_engine *engine =
container_of(request->engine, typeof(*engine), base);
bool was_queued;
spin_lock_irq(&engine->hw_lock);
- was_queued = !list_empty(&mock->link);
- list_del_init(&mock->link);
+ was_queued = !list_empty(&request->mock.link);
+ list_del_init(&request->mock.link);
spin_unlock_irq(&engine->hw_lock);
if (was_queued)
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
index 995fb728380c..4acf0211df20 100644
--- a/drivers/gpu/drm/i915/selftests/mock_request.h
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -29,13 +29,6 @@
#include "../i915_request.h"
-struct mock_request {
- struct i915_request base;
-
- struct list_head link;
- unsigned long delay;
-};
-
struct i915_request *
mock_request(struct intel_engine_cs *engine,
struct i915_gem_context *context,
diff --git a/drivers/gpu/drm/i915/selftests/mock_timeline.c b/drivers/gpu/drm/i915/selftests/mock_timeline.c
index d2de9ece2118..e084476469ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_timeline.c
+++ b/drivers/gpu/drm/i915/selftests/mock_timeline.c
@@ -14,8 +14,8 @@ void mock_timeline_init(struct i915_timeline *timeline, u64 context)
timeline->fence_context = context;
spin_lock_init(&timeline->lock);
+ mutex_init(&timeline->mutex);
- INIT_ACTIVE_REQUEST(&timeline->barrier);
INIT_ACTIVE_REQUEST(&timeline->last_request);
INIT_LIST_HEAD(&timeline->requests);
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c
index 8ef14c7e5e38..ff8999c63a12 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c
@@ -26,21 +26,21 @@
#define __nop_write(x) \
static void \
-nop_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { }
+nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { }
__nop_write(8)
__nop_write(16)
__nop_write(32)
#define __nop_read(x) \
static u##x \
-nop_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { return 0; }
+nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; }
__nop_read(8)
__nop_read(16)
__nop_read(32)
__nop_read(64)
-void mock_uncore_init(struct drm_i915_private *i915)
+void mock_uncore_init(struct intel_uncore *uncore)
{
- ASSIGN_WRITE_MMIO_VFUNCS(i915, nop);
- ASSIGN_READ_MMIO_VFUNCS(i915, nop);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, nop);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, nop);
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h
index d79aa3ca4d51..dacb36b5ffcd 100644
--- a/drivers/gpu/drm/i915/selftests/mock_uncore.h
+++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h
@@ -25,6 +25,6 @@
#ifndef __MOCK_UNCORE_H
#define __MOCK_UNCORE_H
-void mock_uncore_init(struct drm_i915_private *i915);
+void mock_uncore_init(struct intel_uncore *uncore);
#endif /* !__MOCK_UNCORE_H */
diff --git a/drivers/gpu/drm/i915/vlv_dsi.c b/drivers/gpu/drm/i915/vlv_dsi.c
index 31c93c3ccd00..e0b1ec821960 100644
--- a/drivers/gpu/drm/i915/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/vlv_dsi.c
@@ -23,16 +23,20 @@
* Author: Jani Nikula <jani.nikula@intel.com>
*/
+#include <linux/gpio/consumer.h>
+#include <linux/slab.h>
+
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <drm/drm_mipi_dsi.h>
-#include <linux/slab.h>
-#include <linux/gpio/consumer.h>
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
+#include "intel_connector.h"
#include "intel_drv.h"
#include "intel_dsi.h"
+#include "intel_panel.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -78,7 +82,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port), mask, mask,
100))
DRM_ERROR("DPI FIFOs are not empty\n");
@@ -148,7 +152,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* note: this is never true for reads */
if (packet.payload_length) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
data_mask, 0,
50))
@@ -162,7 +166,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
}
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 0,
50)) {
@@ -174,7 +178,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port),
data_mask, data_mask,
50))
@@ -234,7 +238,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_INTR_STAT(port), mask, mask,
100))
DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
@@ -375,16 +379,18 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
- GLK_MIPIIO_PORT_POWERED, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED,
+ GLK_MIPIIO_PORT_POWERED,
+ 20))
DRM_ERROR("MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
- cold_boot |= !(I915_READ(MIPI_DEVICE_READY(port)) &
- DEVICE_READY);
+ cold_boot |=
+ !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@@ -399,9 +405,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to set */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
- GLK_PHY_STATUS_PORT_READY, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY,
+ GLK_PHY_STATUS_PORT_READY,
+ 20))
DRM_ERROR("PHY is not ON\n");
}
@@ -425,8 +433,11 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
I915_WRITE(MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE, 0, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_ULPS_NOT_ACTIVE,
+ 0,
+ 20))
DRM_ERROR("ULPS not active\n");
/* Exit ULPS */
@@ -449,17 +460,21 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
/* Wait for Stop state */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
- GLK_DATA_LANE_STOP_STATE, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ MIPI_CTRL(port),
+ GLK_DATA_LANE_STOP_STATE,
+ GLK_DATA_LANE_STOP_STATE,
+ 20))
DRM_ERROR("Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
- BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
- AFE_LATCHOUT, 20))
+ if (intel_wait_for_register(&dev_priv->uncore,
+ BXT_MIPI_PORT_CTRL(port),
+ AFE_LATCHOUT,
+ AFE_LATCHOUT,
+ 20))
DRM_ERROR("D-PHY not entering LP-11 state\n");
}
}
@@ -559,7 +574,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -567,7 +582,7 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 0, 20))
DRM_ERROR("MIPI IO Port is not powergated\n");
@@ -588,7 +603,7 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 0, 20))
DRM_ERROR("PHY is not turning OFF\n");
@@ -638,7 +653,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* Port A only. MIPI Port C has no similar bit for checking.
*/
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
- intel_wait_for_register(dev_priv,
+ intel_wait_for_register(&dev_priv->uncore,
port_ctrl, AFE_LATCHOUT, 0,
30))
DRM_ERROR("DSI LP not going Low\n");
@@ -1682,7 +1697,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
struct drm_connector *connector;
- struct drm_display_mode *scan, *fixed_mode = NULL;
+ struct drm_display_mode *fixed_mode;
enum port port;
DRM_DEBUG_KMS("\n");
@@ -1793,13 +1808,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
mutex_lock(&dev->mode_config.mutex);
- intel_dsi_vbt_get_modes(intel_dsi);
- list_for_each_entry(scan, &connector->probed_modes, head) {
- if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
- fixed_mode = drm_mode_duplicate(dev, scan);
- break;
- }
- }
+ fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
@@ -1807,9 +1816,6 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
goto err;
}
- connector->display_info.width_mm = fixed_mode->width_mm;
- connector->display_info.height_mm = fixed_mode->height_mm;
-
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
intel_panel_setup_backlight(connector, INVALID_PIPE);
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index 954d5a8c4fa7..5e7b1fb2db5d 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -244,7 +244,7 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
* PLL lock should deassert within 200us.
* Wait up to 1ms before timing out.
*/
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
0,
@@ -528,7 +528,7 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
I915_WRITE(BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
- if (intel_wait_for_register(dev_priv,
+ if (intel_wait_for_register(&dev_priv->uncore,
BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED,
BXT_DSI_PLL_LOCKED,
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index c935cbe059a7..3e8bece620df 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -185,7 +185,7 @@ static int compare_of(struct device *dev, void *data)
}
/* Special case for LDB, one device for two channels */
- if (of_node_cmp(np->name, "lvds-channel") == 0) {
+ if (of_node_name_eq(np, "lvds-channel")) {
np = of_get_parent(np);
of_node_put(np);
}
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 54011df8c2e8..9cc1d678674f 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -295,7 +295,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
/* Default to driving pixel data on negative clock edges */
sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
- DRM_BUS_FLAG_PIXDATA_POSEDGE);
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
sig_cfg.bus_format = imx_crtc_state->bus_format;
sig_cfg.v_to_h_sync = 0;
sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig
new file mode 100644
index 000000000000..bb4ddc6bb0a6
--- /dev/null
+++ b/drivers/gpu/drm/lima/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+config DRM_LIMA
+ tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
+ depends on DRM
+ depends on ARM || ARM64 || COMPILE_TEST
+ depends on MMU
+ depends on COMMON_CLK
+ depends on OF
+ select DRM_SCHED
+ help
+ DRM driver for ARM Mali 400/450 GPUs.
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile
new file mode 100644
index 000000000000..38cc70281ba5
--- /dev/null
+++ b/drivers/gpu/drm/lima/Makefile
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+
+lima-y := \
+ lima_drv.o \
+ lima_device.o \
+ lima_pmu.o \
+ lima_l2_cache.o \
+ lima_mmu.o \
+ lima_gp.o \
+ lima_pp.o \
+ lima_gem.o \
+ lima_vm.o \
+ lima_sched.o \
+ lima_ctx.o \
+ lima_gem_prime.o \
+ lima_dlbu.o \
+ lima_bcast.o \
+ lima_object.o
+
+obj-$(CONFIG_DRM_LIMA) += lima.o
diff --git a/drivers/gpu/drm/lima/lima_bcast.c b/drivers/gpu/drm/lima/lima_bcast.c
new file mode 100644
index 000000000000..288398027bfa
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_bcast.h"
+#include "lima_regs.h"
+
+#define bcast_write(reg, data) writel(data, ip->iomem + reg)
+#define bcast_read(reg) readl(ip->iomem + reg)
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_bcast;
+ int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
+}
+
+int lima_bcast_init(struct lima_ip *ip)
+{
+ int i, mask = 0;
+
+ for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
+ if (ip->dev->ip[i].present)
+ mask |= 1 << (i - lima_ip_pp0);
+ }
+
+ bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
+ bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
+ return 0;
+}
+
+void lima_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
diff --git a/drivers/gpu/drm/lima/lima_bcast.h b/drivers/gpu/drm/lima/lima_bcast.h
new file mode 100644
index 000000000000..c47e58563d0a
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_bcast.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_BCAST_H__
+#define __LIMA_BCAST_H__
+
+struct lima_ip;
+
+int lima_bcast_init(struct lima_ip *ip);
+void lima_bcast_fini(struct lima_ip *ip);
+
+void lima_bcast_enable(struct lima_device *dev, int num_pp);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_ctx.c b/drivers/gpu/drm/lima/lima_ctx.c
new file mode 100644
index 000000000000..22fff6caa961
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+
+#include "lima_device.h"
+#include "lima_ctx.h"
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
+{
+ struct lima_ctx *ctx;
+ int i, err;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dev = dev;
+ kref_init(&ctx->refcnt);
+
+ for (i = 0; i < lima_pipe_num; i++) {
+ err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
+ if (err)
+ goto err_out0;
+ }
+
+ err = xa_alloc(&mgr->handles, id, ctx, xa_limit_32b, GFP_KERNEL);
+ if (err < 0)
+ goto err_out0;
+
+ return 0;
+
+err_out0:
+ for (i--; i >= 0; i--)
+ lima_sched_context_fini(dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+ return err;
+}
+
+static void lima_ctx_do_release(struct kref *ref)
+{
+ struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
+ int i;
+
+ for (i = 0; i < lima_pipe_num; i++)
+ lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
+ kfree(ctx);
+}
+
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+ int ret = 0;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_erase(&mgr->handles, id);
+ if (ctx)
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ else
+ ret = -EINVAL;
+ mutex_unlock(&mgr->lock);
+ return ret;
+}
+
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
+{
+ struct lima_ctx *ctx;
+
+ mutex_lock(&mgr->lock);
+ ctx = xa_load(&mgr->handles, id);
+ if (ctx)
+ kref_get(&ctx->refcnt);
+ mutex_unlock(&mgr->lock);
+ return ctx;
+}
+
+void lima_ctx_put(struct lima_ctx *ctx)
+{
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+}
+
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
+{
+ mutex_init(&mgr->lock);
+ xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
+}
+
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
+{
+ struct lima_ctx *ctx;
+ unsigned long id;
+
+ xa_for_each(&mgr->handles, id, ctx) {
+ kref_put(&ctx->refcnt, lima_ctx_do_release);
+ }
+
+ xa_destroy(&mgr->handles);
+ mutex_destroy(&mgr->lock);
+}
diff --git a/drivers/gpu/drm/lima/lima_ctx.h b/drivers/gpu/drm/lima/lima_ctx.h
new file mode 100644
index 000000000000..6154e5c9bfe4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_ctx.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_CTX_H__
+#define __LIMA_CTX_H__
+
+#include <linux/xarray.h>
+
+#include "lima_device.h"
+
+struct lima_ctx {
+ struct kref refcnt;
+ struct lima_device *dev;
+ struct lima_sched_context context[lima_pipe_num];
+ atomic_t guilty;
+};
+
+struct lima_ctx_mgr {
+ struct mutex lock;
+ struct xarray handles;
+};
+
+int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
+int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
+struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
+void lima_ctx_put(struct lima_ctx *ctx);
+void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
+void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c
new file mode 100644
index 000000000000..570d0e93f9a9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_pp.h"
+#include "lima_mmu.h"
+#include "lima_pmu.h"
+#include "lima_l2_cache.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+
+struct lima_ip_desc {
+ char *name;
+ char *irq_name;
+ bool must_have[lima_gpu_num];
+ int offset[lima_gpu_num];
+
+ int (*init)(struct lima_ip *ip);
+ void (*fini)(struct lima_ip *ip);
+};
+
+#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
+ [lima_ip_##ipname] = { \
+ .name = #ipname, \
+ .irq_name = irq, \
+ .must_have = { \
+ [lima_gpu_mali400] = mst0, \
+ [lima_gpu_mali450] = mst1, \
+ }, \
+ .offset = { \
+ [lima_gpu_mali400] = off0, \
+ [lima_gpu_mali450] = off1, \
+ }, \
+ .init = lima_##func##_init, \
+ .fini = lima_##func##_fini, \
+ }
+
+static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
+ LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
+ LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
+ LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
+ LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
+ LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
+ LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
+ LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
+ LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
+ LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
+ LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
+ LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
+ LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
+ LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
+ LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
+ LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
+ LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
+ LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
+ LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
+ LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
+ LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
+ LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
+ LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
+ LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
+ LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
+ LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
+};
+
+const char *lima_ip_name(struct lima_ip *ip)
+{
+ return lima_ip_desc[ip->id].name;
+}
+
+static int lima_clk_init(struct lima_device *dev)
+{
+ int err;
+ unsigned long bus_rate, gpu_rate;
+
+ dev->clk_bus = devm_clk_get(dev->dev, "bus");
+ if (IS_ERR(dev->clk_bus)) {
+ dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
+ return PTR_ERR(dev->clk_bus);
+ }
+
+ dev->clk_gpu = devm_clk_get(dev->dev, "core");
+ if (IS_ERR(dev->clk_gpu)) {
+ dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
+ return PTR_ERR(dev->clk_gpu);
+ }
+
+ bus_rate = clk_get_rate(dev->clk_bus);
+ dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
+
+ gpu_rate = clk_get_rate(dev->clk_gpu);
+ dev_info(dev->dev, "mod rate = %lu", gpu_rate);
+
+ err = clk_prepare_enable(dev->clk_bus);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(dev->clk_gpu);
+ if (err)
+ goto error_out0;
+
+ dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
+ if (IS_ERR(dev->reset)) {
+ err = PTR_ERR(dev->reset);
+ goto error_out1;
+ } else if (dev->reset != NULL) {
+ err = reset_control_deassert(dev->reset);
+ if (err)
+ goto error_out1;
+ }
+
+ return 0;
+
+error_out1:
+ clk_disable_unprepare(dev->clk_gpu);
+error_out0:
+ clk_disable_unprepare(dev->clk_bus);
+ return err;
+}
+
+static void lima_clk_fini(struct lima_device *dev)
+{
+ if (dev->reset != NULL)
+ reset_control_assert(dev->reset);
+ clk_disable_unprepare(dev->clk_gpu);
+ clk_disable_unprepare(dev->clk_bus);
+}
+
+static int lima_regulator_init(struct lima_device *dev)
+{
+ int ret;
+
+ dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
+ if (IS_ERR(dev->regulator)) {
+ ret = PTR_ERR(dev->regulator);
+ dev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(dev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(dev->regulator);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lima_regulator_fini(struct lima_device *dev)
+{
+ if (dev->regulator)
+ regulator_disable(dev->regulator);
+}
+
+static int lima_init_ip(struct lima_device *dev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = dev->ip + index;
+ int offset = desc->offset[dev->id];
+ bool must = desc->must_have[dev->id];
+ int err;
+
+ if (offset < 0)
+ return 0;
+
+ ip->dev = dev;
+ ip->id = index;
+ ip->iomem = dev->iomem + offset;
+ if (desc->irq_name) {
+ err = platform_get_irq_byname(dev->pdev, desc->irq_name);
+ if (err < 0)
+ goto out;
+ ip->irq = err;
+ }
+
+ err = desc->init(ip);
+ if (!err) {
+ ip->present = true;
+ return 0;
+ }
+
+out:
+ return must ? err : 0;
+}
+
+static void lima_fini_ip(struct lima_device *ldev, int index)
+{
+ struct lima_ip_desc *desc = lima_ip_desc + index;
+ struct lima_ip *ip = ldev->ip + index;
+
+ if (ip->present)
+ desc->fini(ip);
+}
+
+static int lima_init_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ int err;
+
+ err = lima_sched_pipe_init(pipe, "gp");
+ if (err)
+ return err;
+
+ pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
+ pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
+ pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
+
+ err = lima_gp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_gp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+ lima_gp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+static int lima_init_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ int err, i;
+
+ err = lima_sched_pipe_init(pipe, "pp");
+ if (err)
+ return err;
+
+ for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
+ struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
+ struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
+ struct lima_ip *l2_cache;
+
+ if (dev->id == lima_gpu_mali400)
+ l2_cache = dev->ip + lima_ip_l2_cache0;
+ else
+ l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
+
+ if (pp->present && ppmmu->present && l2_cache->present) {
+ pipe->mmu[pipe->num_mmu++] = ppmmu;
+ pipe->processor[pipe->num_processor++] = pp;
+ if (!pipe->l2_cache[i >> 2])
+ pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
+ }
+ }
+
+ if (dev->ip[lima_ip_bcast].present) {
+ pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
+ pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
+ }
+
+ err = lima_pp_pipe_init(dev);
+ if (err) {
+ lima_sched_pipe_fini(pipe);
+ return err;
+ }
+
+ return 0;
+}
+
+static void lima_fini_pp_pipe(struct lima_device *dev)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ lima_pp_pipe_fini(dev);
+ lima_sched_pipe_fini(pipe);
+}
+
+int lima_device_init(struct lima_device *ldev)
+{
+ int err, i;
+ struct resource *res;
+
+ dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
+
+ err = lima_clk_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "clk init fail %d\n", err);
+ return err;
+ }
+
+ err = lima_regulator_init(ldev);
+ if (err) {
+ dev_err(ldev->dev, "regulator init fail %d\n", err);
+ goto err_out0;
+ }
+
+ ldev->empty_vm = lima_vm_create(ldev);
+ if (!ldev->empty_vm) {
+ err = -ENOMEM;
+ goto err_out1;
+ }
+
+ ldev->va_start = 0;
+ if (ldev->id == lima_gpu_mali450) {
+ ldev->va_end = LIMA_VA_RESERVE_START;
+ ldev->dlbu_cpu = dma_alloc_wc(
+ ldev->dev, LIMA_PAGE_SIZE,
+ &ldev->dlbu_dma, GFP_KERNEL);
+ if (!ldev->dlbu_cpu) {
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ } else
+ ldev->va_end = LIMA_VA_RESERVE_END;
+
+ res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
+ ldev->iomem = devm_ioremap_resource(ldev->dev, res);
+ if (IS_ERR(ldev->iomem)) {
+ dev_err(ldev->dev, "fail to ioremap iomem\n");
+ err = PTR_ERR(ldev->iomem);
+ goto err_out3;
+ }
+
+ for (i = 0; i < lima_ip_num; i++) {
+ err = lima_init_ip(ldev, i);
+ if (err)
+ goto err_out4;
+ }
+
+ err = lima_init_gp_pipe(ldev);
+ if (err)
+ goto err_out4;
+
+ err = lima_init_pp_pipe(ldev);
+ if (err)
+ goto err_out5;
+
+ return 0;
+
+err_out5:
+ lima_fini_gp_pipe(ldev);
+err_out4:
+ while (--i >= 0)
+ lima_fini_ip(ldev, i);
+err_out3:
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+err_out2:
+ lima_vm_put(ldev->empty_vm);
+err_out1:
+ lima_regulator_fini(ldev);
+err_out0:
+ lima_clk_fini(ldev);
+ return err;
+}
+
+void lima_device_fini(struct lima_device *ldev)
+{
+ int i;
+
+ lima_fini_pp_pipe(ldev);
+ lima_fini_gp_pipe(ldev);
+
+ for (i = lima_ip_num - 1; i >= 0; i--)
+ lima_fini_ip(ldev, i);
+
+ if (ldev->dlbu_cpu)
+ dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
+ ldev->dlbu_cpu, ldev->dlbu_dma);
+
+ lima_vm_put(ldev->empty_vm);
+
+ lima_regulator_fini(ldev);
+
+ lima_clk_fini(ldev);
+}
diff --git a/drivers/gpu/drm/lima/lima_device.h b/drivers/gpu/drm/lima/lima_device.h
new file mode 100644
index 000000000000..31158d86271c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_device.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DEVICE_H__
+#define __LIMA_DEVICE_H__
+
+#include <drm/drm_device.h>
+#include <linux/delay.h>
+
+#include "lima_sched.h"
+
+enum lima_gpu_id {
+ lima_gpu_mali400 = 0,
+ lima_gpu_mali450,
+ lima_gpu_num,
+};
+
+enum lima_ip_id {
+ lima_ip_pmu,
+ lima_ip_gpmmu,
+ lima_ip_ppmmu0,
+ lima_ip_ppmmu1,
+ lima_ip_ppmmu2,
+ lima_ip_ppmmu3,
+ lima_ip_ppmmu4,
+ lima_ip_ppmmu5,
+ lima_ip_ppmmu6,
+ lima_ip_ppmmu7,
+ lima_ip_gp,
+ lima_ip_pp0,
+ lima_ip_pp1,
+ lima_ip_pp2,
+ lima_ip_pp3,
+ lima_ip_pp4,
+ lima_ip_pp5,
+ lima_ip_pp6,
+ lima_ip_pp7,
+ lima_ip_l2_cache0,
+ lima_ip_l2_cache1,
+ lima_ip_l2_cache2,
+ lima_ip_dlbu,
+ lima_ip_bcast,
+ lima_ip_pp_bcast,
+ lima_ip_ppmmu_bcast,
+ lima_ip_num,
+};
+
+struct lima_device;
+
+struct lima_ip {
+ struct lima_device *dev;
+ enum lima_ip_id id;
+ bool present;
+
+ void __iomem *iomem;
+ int irq;
+
+ union {
+ /* gp/pp */
+ bool async_reset;
+ /* l2 cache */
+ spinlock_t lock;
+ } data;
+};
+
+enum lima_pipe_id {
+ lima_pipe_gp,
+ lima_pipe_pp,
+ lima_pipe_num,
+};
+
+struct lima_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ enum lima_gpu_id id;
+ u32 gp_version;
+ u32 pp_version;
+ int num_pp;
+
+ void __iomem *iomem;
+ struct clk *clk_bus;
+ struct clk *clk_gpu;
+ struct reset_control *reset;
+ struct regulator *regulator;
+
+ struct lima_ip ip[lima_ip_num];
+ struct lima_sched_pipe pipe[lima_pipe_num];
+
+ struct lima_vm *empty_vm;
+ uint64_t va_start;
+ uint64_t va_end;
+
+ u32 *dlbu_cpu;
+ dma_addr_t dlbu_dma;
+};
+
+static inline struct lima_device *
+to_lima_dev(struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
+int lima_device_init(struct lima_device *ldev);
+void lima_device_fini(struct lima_device *ldev);
+
+const char *lima_ip_name(struct lima_ip *ip);
+
+typedef int (*lima_poll_func_t)(struct lima_ip *);
+
+static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
+ int sleep_us, int timeout_us)
+{
+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ might_sleep_if(sleep_us);
+ while (1) {
+ if (func(ip))
+ return 0;
+
+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
+ return -ETIMEDOUT;
+
+ if (sleep_us)
+ usleep_range((sleep_us >> 2) + 1, sleep_us);
+ }
+ return 0;
+}
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_dlbu.c b/drivers/gpu/drm/lima/lima_dlbu.c
new file mode 100644
index 000000000000..8399ceffb94b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.c
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/io.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_dlbu.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define dlbu_write(reg, data) writel(data, ip->iomem + reg)
+#define dlbu_read(reg) readl(ip->iomem + reg)
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp)
+{
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+ int i, mask = 0;
+
+ for (i = 0; i < num_pp; i++) {
+ struct lima_ip *pp = pipe->processor[i];
+
+ mask |= 1 << (pp->id - lima_ip_pp0);
+ }
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask);
+}
+
+void lima_dlbu_disable(struct lima_device *dev)
+{
+ struct lima_ip *ip = dev->ip + lima_ip_dlbu;
+
+ dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0);
+}
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
+{
+ dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]);
+ dlbu_write(LIMA_DLBU_FB_DIM, reg[1]);
+ dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]);
+ dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
+}
+
+int lima_dlbu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
+ dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
+
+ return 0;
+}
+
+void lima_dlbu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_dlbu.h b/drivers/gpu/drm/lima/lima_dlbu.h
new file mode 100644
index 000000000000..16f877984466
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_dlbu.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DLBU_H__
+#define __LIMA_DLBU_H__
+
+struct lima_ip;
+struct lima_device;
+
+void lima_dlbu_enable(struct lima_device *dev, int num_pp);
+void lima_dlbu_disable(struct lima_device *dev);
+
+void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
+
+int lima_dlbu_init(struct lima_ip *ip);
+void lima_dlbu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
new file mode 100644
index 000000000000..f9a281a62083
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -0,0 +1,376 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+
+int lima_sched_timeout_ms;
+
+MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
+module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+
+static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_get_param *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->pad)
+ return -EINVAL;
+
+ switch (args->param) {
+ case DRM_LIMA_PARAM_GPU_ID:
+ switch (ldev->id) {
+ case lima_gpu_mali400:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
+ break;
+ case lima_gpu_mali450:
+ args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
+ break;
+ default:
+ args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
+ break;
+ }
+ break;
+
+ case DRM_LIMA_PARAM_NUM_PP:
+ args->value = ldev->pipe[lima_pipe_pp].num_processor;
+ break;
+
+ case DRM_LIMA_PARAM_GP_VERSION:
+ args->value = ldev->gp_version;
+ break;
+
+ case DRM_LIMA_PARAM_PP_VERSION:
+ args->value = ldev->pp_version;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_create *args = data;
+
+ if (args->pad)
+ return -EINVAL;
+
+ if (args->flags)
+ return -EINVAL;
+
+ if (args->size == 0)
+ return -EINVAL;
+
+ return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
+}
+
+static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_info *args = data;
+
+ return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
+}
+
+static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_submit *args = data;
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_sched_pipe *pipe;
+ struct lima_sched_task *task;
+ struct lima_ctx *ctx;
+ struct lima_submit submit = {0};
+ size_t size;
+ int err = 0;
+
+ if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
+ return -EINVAL;
+
+ if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
+ return -EINVAL;
+
+ pipe = ldev->pipe + args->pipe;
+ if (args->frame_size != pipe->frame_size)
+ return -EINVAL;
+
+ bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
+ if (!bos)
+ return -ENOMEM;
+
+ size = args->nr_bos * sizeof(*submit.bos);
+ if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
+ err = -EFAULT;
+ goto out0;
+ }
+
+ task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
+ if (!task) {
+ err = -ENOMEM;
+ goto out0;
+ }
+
+ task->frame = task + 1;
+ if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
+ err = -EFAULT;
+ goto out1;
+ }
+
+ err = pipe->task_validate(pipe, task);
+ if (err)
+ goto out1;
+
+ ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
+ if (!ctx) {
+ err = -ENOENT;
+ goto out1;
+ }
+
+ submit.pipe = args->pipe;
+ submit.bos = bos;
+ submit.lbos = (void *)bos + size;
+ submit.nr_bos = args->nr_bos;
+ submit.task = task;
+ submit.ctx = ctx;
+ submit.flags = args->flags;
+ submit.in_sync[0] = args->in_sync[0];
+ submit.in_sync[1] = args->in_sync[1];
+ submit.out_sync = args->out_sync;
+
+ err = lima_gem_submit(file, &submit);
+
+ lima_ctx_put(ctx);
+out1:
+ if (err)
+ kmem_cache_free(pipe->task_slab, task);
+out0:
+ kvfree(bos);
+ return err;
+}
+
+static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_gem_wait *args = data;
+
+ if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
+ return -EINVAL;
+
+ return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
+}
+
+static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
+}
+
+static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
+{
+ struct drm_lima_ctx_create *args = data;
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ if (args->_pad)
+ return -EINVAL;
+
+ return lima_ctx_free(&priv->ctx_mgr, args->id);
+}
+
+static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ int err;
+ struct lima_drm_priv *priv;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->vm = lima_vm_create(ldev);
+ if (!priv->vm) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ lima_ctx_mgr_init(&priv->ctx_mgr);
+
+ file->driver_priv = priv;
+ return 0;
+
+err_out0:
+ kfree(priv);
+ return err;
+}
+
+static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct lima_drm_priv *priv = file->driver_priv;
+
+ lima_ctx_mgr_fini(&priv->ctx_mgr);
+ lima_vm_put(priv->vm);
+ kfree(priv);
+}
+
+static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
+};
+
+static const struct file_operations lima_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .mmap = lima_gem_mmap,
+};
+
+static struct drm_driver lima_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
+ .open = lima_drm_driver_open,
+ .postclose = lima_drm_driver_postclose,
+ .ioctls = lima_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
+ .fops = &lima_drm_driver_fops,
+ .gem_free_object_unlocked = lima_gem_free_object,
+ .gem_open_object = lima_gem_object_open,
+ .gem_close_object = lima_gem_object_close,
+ .gem_vm_ops = &lima_gem_vm_ops,
+ .name = "lima",
+ .desc = "lima DRM",
+ .date = "20190217",
+ .major = 1,
+ .minor = 0,
+ .patchlevel = 0,
+
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
+ .gem_prime_mmap = lima_gem_prime_mmap,
+};
+
+static int lima_pdev_probe(struct platform_device *pdev)
+{
+ struct lima_device *ldev;
+ struct drm_device *ddev;
+ int err;
+
+ err = lima_sched_slab_init();
+ if (err)
+ return err;
+
+ ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
+ if (!ldev) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ ldev->pdev = pdev;
+ ldev->dev = &pdev->dev;
+ ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
+
+ platform_set_drvdata(pdev, ldev);
+
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->dev_private = ldev;
+ ldev->ddev = ddev;
+
+ err = lima_device_init(ldev);
+ if (err) {
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out2;
+
+ return 0;
+
+err_out2:
+ lima_device_fini(ldev);
+err_out1:
+ drm_dev_put(ddev);
+err_out0:
+ lima_sched_slab_fini();
+ return err;
+}
+
+static int lima_pdev_remove(struct platform_device *pdev)
+{
+ struct lima_device *ldev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = ldev->ddev;
+
+ drm_dev_unregister(ddev);
+ lima_device_fini(ldev);
+ drm_dev_put(ddev);
+ lima_sched_slab_fini();
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
+ { .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static struct platform_driver lima_platform_driver = {
+ .probe = lima_pdev_probe,
+ .remove = lima_pdev_remove,
+ .driver = {
+ .name = "lima",
+ .of_match_table = dt_match,
+ },
+};
+
+static int __init lima_init(void)
+{
+ return platform_driver_register(&lima_platform_driver);
+}
+module_init(lima_init);
+
+static void __exit lima_exit(void)
+{
+ platform_driver_unregister(&lima_platform_driver);
+}
+module_exit(lima_exit);
+
+MODULE_AUTHOR("Lima Project Developers");
+MODULE_DESCRIPTION("Lima DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
new file mode 100644
index 000000000000..69c7344715c9
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_DRV_H__
+#define __LIMA_DRV_H__
+
+#include <drm/drm_file.h>
+
+#include "lima_ctx.h"
+
+extern int lima_sched_timeout_ms;
+
+struct lima_vm;
+struct lima_bo;
+struct lima_sched_task;
+
+struct drm_lima_gem_submit_bo;
+
+struct lima_drm_priv {
+ struct lima_vm *vm;
+ struct lima_ctx_mgr ctx_mgr;
+};
+
+struct lima_submit {
+ struct lima_ctx *ctx;
+ int pipe;
+ u32 flags;
+
+ struct drm_lima_gem_submit_bo *bos;
+ struct lima_bo **lbos;
+ u32 nr_bos;
+
+ u32 in_sync[2];
+ u32 out_sync;
+
+ struct lima_sched_task *task;
+};
+
+static inline struct lima_drm_priv *
+to_lima_drm_priv(struct drm_file *file)
+{
+ return file->driver_priv;
+}
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
new file mode 100644
index 000000000000..477c0f766663
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/mm.h>
+#include <linux/sync_file.h>
+#include <linux/pfn_t.h>
+
+#include <drm/drm_file.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_drv.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle)
+{
+ int err;
+ struct lima_bo *bo;
+ struct lima_device *ldev = to_lima_dev(dev);
+
+ bo = lima_bo_create(ldev, size, flags, NULL, NULL);
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
+
+ err = drm_gem_handle_create(file, &bo->gem, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put_unlocked(&bo->gem);
+
+ return err;
+}
+
+void lima_gem_free_object(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (!list_empty(&bo->va))
+ dev_err(obj->dev->dev, "lima gem free bo still has va\n");
+
+ lima_bo_destroy(bo);
+}
+
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ return lima_vm_bo_add(vm, bo, true);
+}
+
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+
+ lima_vm_bo_del(vm, bo);
+}
+
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ int err;
+
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj)
+ return -ENOENT;
+
+ bo = to_lima_bo(obj);
+
+ *va = lima_vm_get_va(vm, bo);
+
+ err = drm_gem_create_mmap_offset(obj);
+ if (!err)
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
+
+ drm_gem_object_put_unlocked(obj);
+ return err;
+}
+
+static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct lima_bo *bo = to_lima_bo(obj);
+ pfn_t pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
+ pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+
+ return vmf_insert_mixed(vma, vmf->address, pfn);
+}
+
+const struct vm_operations_struct lima_gem_vm_ops = {
+ .fault = lima_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+void lima_set_vma_flags(struct vm_area_struct *vma)
+{
+ pgprot_t prot = vm_get_page_prot(vma->vm_flags);
+
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_page_prot = pgprot_writecombine(prot);
+}
+
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
+
+static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
+ bool write, bool explicit)
+{
+ int err = 0;
+
+ if (!write) {
+ err = reservation_object_reserve_shared(bo->gem.resv, 1);
+ if (err)
+ return err;
+ }
+
+	/* explicit sync uses the user-passed dependency fences */
+ if (explicit)
+ return 0;
+
+ return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write);
+}
+
+static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i, ret = 0, contended, slow_locked = -1;
+
+ ww_acquire_init(ctx, &reservation_ww_class);
+
+retry:
+ for (i = 0; i < nr_bos; i++) {
+ if (i == slow_locked) {
+ slow_locked = -1;
+ continue;
+ }
+
+ ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
+ if (ret < 0) {
+ contended = i;
+ goto err;
+ }
+ }
+
+ ww_acquire_done(ctx);
+ return 0;
+
+err:
+ for (i--; i >= 0; i--)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+
+ if (slow_locked >= 0)
+ ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
+
+ if (ret == -EDEADLK) {
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(
+ &bos[contended]->gem.resv->lock, ctx);
+ if (!ret) {
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+ ww_acquire_fini(ctx);
+
+ return ret;
+}
+
+static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
+ struct ww_acquire_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < nr_bos; i++)
+ ww_mutex_unlock(&bos[i]->gem.resv->lock);
+ ww_acquire_fini(ctx);
+}
+
+static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
+ struct dma_fence *fence = NULL;
+
+ if (!submit->in_sync[i])
+ continue;
+
+ err = drm_syncobj_find_fence(file, submit->in_sync[i],
+ 0, 0, &fence);
+ if (err)
+ return err;
+
+ err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ if (err) {
+ dma_fence_put(fence);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
+{
+ int i, err = 0;
+ struct ww_acquire_ctx ctx;
+ struct lima_drm_priv *priv = to_lima_drm_priv(file);
+ struct lima_vm *vm = priv->vm;
+ struct drm_syncobj *out_sync = NULL;
+ struct dma_fence *fence;
+ struct lima_bo **bos = submit->lbos;
+
+ if (submit->out_sync) {
+ out_sync = drm_syncobj_find(file, submit->out_sync);
+ if (!out_sync)
+ return -ENOENT;
+ }
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct lima_bo *bo;
+
+ obj = drm_gem_object_lookup(file, submit->bos[i].handle);
+ if (!obj) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ bo = to_lima_bo(obj);
+
+		/* increase the refcnt of the gpu va map so it is not unmapped while
+		 * the task is executing; it will be decreased when the task is done
+		 */
+ err = lima_vm_bo_add(vm, bo, false);
+ if (err) {
+ drm_gem_object_put_unlocked(obj);
+ goto err_out0;
+ }
+
+ bos[i] = bo;
+ }
+
+ err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
+ if (err)
+ goto err_out0;
+
+ err = lima_sched_task_init(
+ submit->task, submit->ctx->context + submit->pipe,
+ bos, submit->nr_bos, vm);
+ if (err)
+ goto err_out1;
+
+ err = lima_gem_add_deps(file, submit);
+ if (err)
+ goto err_out2;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ err = lima_gem_sync_bo(
+ submit->task, bos[i],
+ submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
+ submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
+ if (err)
+ goto err_out2;
+ }
+
+ fence = lima_sched_context_queue_task(
+ submit->ctx->context + submit->pipe, submit->task);
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
+ reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
+ else
+ reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
+ }
+
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+
+ for (i = 0; i < submit->nr_bos; i++)
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+
+ if (out_sync) {
+ drm_syncobj_replace_fence(out_sync, fence);
+ drm_syncobj_put(out_sync);
+ }
+
+ dma_fence_put(fence);
+
+ return 0;
+
+err_out2:
+ lima_sched_task_fini(submit->task);
+err_out1:
+ lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
+err_out0:
+ for (i = 0; i < submit->nr_bos; i++) {
+ if (!bos[i])
+ break;
+ lima_vm_bo_del(vm, bos[i]);
+ drm_gem_object_put_unlocked(&bos[i]->gem);
+ }
+ if (out_sync)
+ drm_syncobj_put(out_sync);
+ return err;
+}
+
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
+{
+ bool write = op & LIMA_GEM_WAIT_WRITE;
+ long ret, timeout;
+
+ if (!op)
+ return 0;
+
+ timeout = drm_timeout_abs_to_jiffies(timeout_ns);
+
+ ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
+ if (ret == 0)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
new file mode 100644
index 000000000000..556111a01135
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_H__
+#define __LIMA_GEM_H__
+
+struct lima_bo;
+struct lima_submit;
+
+extern const struct vm_operations_struct lima_gem_vm_ops;
+
+struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
+int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
+ u32 size, u32 flags, u32 *handle);
+void lima_gem_free_object(struct drm_gem_object *obj);
+int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
+void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
+int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
+int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
+int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
+
+void lima_set_vma_flags(struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c
new file mode 100644
index 000000000000..9c6d9f1dba55
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/dma-buf.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+
+#include "lima_device.h"
+#include "lima_object.h"
+#include "lima_gem.h"
+#include "lima_gem_prime.h"
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct lima_device *ldev = to_lima_dev(dev);
+ struct lima_bo *bo;
+
+ bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
+ attach->dmabuf->resv);
+ if (IS_ERR(bo))
+ return ERR_CAST(bo);
+
+ return &bo->gem;
+}
+
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ return drm_prime_pages_to_sg(bo->pages, npages);
+}
+
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap_obj(obj, obj->size, vma);
+ if (ret)
+ return ret;
+
+ lima_set_vma_flags(vma);
+ return 0;
+}
diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h
new file mode 100644
index 000000000000..34b4d35c21e3
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gem_prime.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GEM_PRIME_H__
+#define __LIMA_GEM_PRIME_H__
+
+struct drm_gem_object *lima_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
+int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
new file mode 100644
index 000000000000..ccf49faedebf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_gp.h"
+#include "lima_regs.h"
+
+#define gp_write(reg, data) writel(data, ip->iomem + reg)
+#define gp_read(reg) readl(ip->iomem + reg)
+
+static irqreturn_t lima_gp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ u32 state = gp_read(LIMA_GP_INT_STAT);
+ u32 status = gp_read(LIMA_GP_STATUS);
+ bool done = false;
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & LIMA_GP_IRQ_MASK_ERROR) {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+
+ /* mask all interrupts before hard reset */
+ gp_write(LIMA_GP_INT_MASK, 0);
+
+ pipe->error = true;
+ done = true;
+ } else {
+ bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
+ LIMA_GP_IRQ_PLBU_END_CMD_LST);
+ bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
+ LIMA_GP_STATUS_PLBU_ACTIVE);
+ done = valid && !active;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, state);
+
+ if (done)
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static void lima_gp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
+ v & LIMA_GP_IRQ_RESET_COMPLETED,
+ 0, 100);
+ if (err) {
+ dev_err(dev->dev, "gp soft reset time out\n");
+ return err;
+ }
+
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+
+ ip->data.async_reset = false;
+ return 0;
+}
+
+static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ (void)pipe;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
+ f[LIMA_GP_VSCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
+ return -EINVAL;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
+ f[LIMA_GP_VSCL_END_ADDR >> 2] &&
+ f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_gp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ u32 cmd = 0;
+ int i;
+
+ if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
+ f[LIMA_GP_VSCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_VS;
+ if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
+ f[LIMA_GP_PLBUCL_END_ADDR >> 2])
+ cmd |= LIMA_GP_CMD_START_PLBU;
+
+	/* before any hw ops, wait for the last successful task's async soft reset to finish */
+ lima_gp_soft_reset_async_wait(ip);
+
+ for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
+ writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);
+
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ gp_write(LIMA_GP_CMD, cmd);
+}
+
+static int lima_gp_hard_reset_poll(struct lima_ip *ip)
+{
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_gp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ gp_write(LIMA_GP_INT_MASK, 0);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
+ ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "gp hard reset timeout\n");
+ return ret;
+ }
+
+ gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
+ gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
+{
+ lima_gp_soft_reset_async(pipe->processor[0]);
+}
+
+static void lima_gp_task_error(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+
+ dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
+ gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
+
+ lima_gp_hard_reset(ip);
+}
+
+static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ lima_sched_pipe_task_done(pipe);
+}
+
+static void lima_gp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = gp_read(LIMA_GP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xA07:
+ name = "mali200";
+ break;
+ case 0xC07:
+ name = "mali300";
+ break;
+ case 0xB07:
+ name = "mali400";
+ break;
+ case 0xD07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+static struct kmem_cache *lima_gp_task_slab;
+static int lima_gp_task_slab_refcnt;
+
+int lima_gp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_gp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_gp_soft_reset_async(ip);
+ err = lima_gp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "gp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->gp_version = gp_read(LIMA_GP_VERSION);
+
+ return 0;
+}
+
+void lima_gp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_gp_pipe_init(struct lima_device *dev)
+{
+ int frame_size = sizeof(struct drm_lima_gp_frame);
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+
+ if (!lima_gp_task_slab) {
+ lima_gp_task_slab = kmem_cache_create_usercopy(
+ "lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_gp_task_slab)
+ return -ENOMEM;
+ }
+ lima_gp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_gp_task_slab;
+
+ pipe->task_validate = lima_gp_task_validate;
+ pipe->task_run = lima_gp_task_run;
+ pipe->task_fini = lima_gp_task_fini;
+ pipe->task_error = lima_gp_task_error;
+ pipe->task_mmu_error = lima_gp_task_mmu_error;
+
+ return 0;
+}
+
+void lima_gp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_gp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_gp_task_slab);
+ lima_gp_task_slab = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_gp.h b/drivers/gpu/drm/lima/lima_gp.h
new file mode 100644
index 000000000000..516e5c1babbb
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_gp.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_GP_H__
+#define __LIMA_GP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_gp_init(struct lima_ip *ip);
+void lima_gp_fini(struct lima_ip *ip);
+
+int lima_gp_pipe_init(struct lima_device *dev);
+void lima_gp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.c b/drivers/gpu/drm/lima/lima_l2_cache.c
new file mode 100644
index 000000000000..6873a7af5a5c
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_l2_cache.h"
+#include "lima_regs.h"
+
+#define l2_cache_write(reg, data) writel(data, ip->iomem + reg)
+#define l2_cache_read(reg) readl(ip->iomem + reg)
+
+static int lima_l2_cache_wait_idle(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v,
+ !(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
+ 0, 1000);
+ if (err) {
+ dev_err(dev->dev, "l2 cache wait command timeout\n");
+ return err;
+ }
+ return 0;
+}
+
+int lima_l2_cache_flush(struct lima_ip *ip)
+{
+ int ret;
+
+ spin_lock(&ip->data.lock);
+ l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
+ ret = lima_l2_cache_wait_idle(ip);
+ spin_unlock(&ip->data.lock);
+ return ret;
+}
+
+int lima_l2_cache_init(struct lima_ip *ip)
+{
+ int i, err;
+ u32 size;
+ struct lima_device *dev = ip->dev;
+
+	/* l2_cache2 only exists when one of PP4-7 is present */
+ if (ip->id == lima_ip_l2_cache2) {
+ for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
+ if (dev->ip[i].present)
+ break;
+ }
+ if (i > lima_ip_pp7)
+ return -ENODEV;
+ }
+
+ spin_lock_init(&ip->data.lock);
+
+ size = l2_cache_read(LIMA_L2_CACHE_SIZE);
+ dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
+ 1 << (((size >> 16) & 0xff) - 10),
+ 1 << ((size >> 8) & 0xff),
+ 1 << (size & 0xff),
+ 1 << ((size >> 24) & 0xff));
+
+ err = lima_l2_cache_flush(ip);
+ if (err)
+ return err;
+
+ l2_cache_write(LIMA_L2_CACHE_ENABLE,
+ LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
+ l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
+
+ return 0;
+}
+
+void lima_l2_cache_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_l2_cache.h b/drivers/gpu/drm/lima/lima_l2_cache.h
new file mode 100644
index 000000000000..c63fb676ff14
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_l2_cache.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_L2_CACHE_H__
+#define __LIMA_L2_CACHE_H__
+
+struct lima_ip;
+
+int lima_l2_cache_init(struct lima_ip *ip);
+void lima_l2_cache_fini(struct lima_ip *ip);
+
+int lima_l2_cache_flush(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
new file mode 100644
index 000000000000..8e1651d6a61f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_mmu.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+#define mmu_write(reg, data) writel(data, ip->iomem + reg)
+#define mmu_read(reg) readl(ip->iomem + reg)
+
+#define lima_mmu_send_command(cmd, addr, val, cond) \
+({ \
+ int __ret; \
+ \
+ mmu_write(LIMA_MMU_COMMAND, cmd); \
+ __ret = readl_poll_timeout(ip->iomem + (addr), val, \
+ cond, 0, 100); \
+ if (__ret) \
+ dev_err(dev->dev, \
+ "mmu command %x timeout\n", cmd); \
+ __ret; \
+})
+
+static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_INT_STATUS);
+ struct lima_sched_pipe *pipe;
+
+ /* for shared irq case */
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & LIMA_MMU_INT_PAGE_FAULT) {
+ u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
+
+ dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
+ fault, LIMA_MMU_STATUS_BUS_ID(status),
+ status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
+ lima_ip_name(ip));
+ }
+
+ if (status & LIMA_MMU_INT_READ_BUS_ERROR)
+ dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
+
+ /* mask all interrupts before resume */
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_INT_CLEAR, status);
+
+ pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
+ lima_sched_pipe_mmu_error(pipe);
+
+ return IRQ_HANDLED;
+}
+
+int lima_mmu_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ if (ip->id == lima_ip_ppmmu_bcast)
+ return 0;
+
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
+ dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
+ return -EIO;
+ }
+
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
+ err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
+ return err;
+ }
+
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+}
+
+void lima_mmu_fini(struct lima_ip *ip)
+{
+
+}
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
+{
+ struct lima_device *dev = ip->dev;
+ u32 v;
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_STALL_ACTIVE);
+
+ if (vm)
+ mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
+
+ /* flush the TLB */
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+
+ lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
+ LIMA_MMU_STATUS, v,
+ !(v & LIMA_MMU_STATUS_STALL_ACTIVE));
+}
+
+void lima_mmu_page_fault_resume(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ u32 status = mmu_read(LIMA_MMU_STATUS);
+ u32 v;
+
+ if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
+ dev_info(dev->dev, "mmu resume\n");
+
+ mmu_write(LIMA_MMU_INT_MASK, 0);
+ mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
+ LIMA_MMU_DTE_ADDR, v, v == 0);
+ mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
+ mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
+ lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
+ LIMA_MMU_STATUS, v,
+ v & LIMA_MMU_STATUS_PAGING_ENABLED);
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
new file mode 100644
index 000000000000..8c78319bcc8e
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_MMU_H__
+#define __LIMA_MMU_H__
+
+struct lima_ip;
+struct lima_vm;
+
+int lima_mmu_init(struct lima_ip *ip);
+void lima_mmu_fini(struct lima_ip *ip);
+
+void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
+void lima_mmu_page_fault_resume(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c
new file mode 100644
index 000000000000..5c41f859a72f
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <drm/drm_prime.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_object.h"
+
+void lima_bo_destroy(struct lima_bo *bo)
+{
+ if (bo->sgt) {
+ kfree(bo->pages);
+ drm_prime_gem_destroy(&bo->gem, bo->sgt);
+ } else {
+ if (bo->pages_dma_addr) {
+ int i, npages = bo->gem.size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (bo->pages_dma_addr[i])
+ dma_unmap_page(bo->gem.dev->dev,
+ bo->pages_dma_addr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ }
+
+ if (bo->pages)
+ drm_gem_put_pages(&bo->gem, bo->pages, true, true);
+ }
+
+ kfree(bo->pages_dma_addr);
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
+ struct reservation_object *resv)
+{
+ struct lima_bo *bo;
+ int err;
+
+ size = PAGE_ALIGN(size);
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&bo->lock);
+ INIT_LIST_HEAD(&bo->va);
+ bo->gem.resv = resv;
+
+ err = drm_gem_object_init(dev->ddev, &bo->gem, size);
+ if (err) {
+ kfree(bo);
+ return ERR_PTR(err);
+ }
+
+ return bo;
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv)
+{
+ int i, err;
+ size_t npages;
+ struct lima_bo *bo, *ret;
+
+ bo = lima_bo_create_struct(dev, size, flags, resv);
+ if (IS_ERR(bo))
+ return bo;
+
+ npages = bo->gem.size >> PAGE_SHIFT;
+
+ bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
+ if (!bo->pages_dma_addr) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ if (sgt) {
+ bo->sgt = sgt;
+
+ bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
+ if (!bo->pages) {
+ ret = ERR_PTR(-ENOMEM);
+ goto err_out;
+ }
+
+ err = drm_prime_sg_to_page_addr_arrays(
+ sgt, bo->pages, bo->pages_dma_addr, npages);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_out;
+ }
+ } else {
+ mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
+ bo->pages = drm_gem_get_pages(&bo->gem);
+ if (IS_ERR(bo->pages)) {
+ ret = ERR_CAST(bo->pages);
+ bo->pages = NULL;
+ goto err_out;
+ }
+
+ for (i = 0; i < npages; i++) {
+ dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->dev, addr)) {
+ ret = ERR_PTR(-EFAULT);
+ goto err_out;
+ }
+ bo->pages_dma_addr[i] = addr;
+ }
+
+ }
+
+ return bo;
+
+err_out:
+ lima_bo_destroy(bo);
+ return ret;
+}
diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h
new file mode 100644
index 000000000000..6738724afb7b
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_object.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_OBJECT_H__
+#define __LIMA_OBJECT_H__
+
+#include <drm/drm_gem.h>
+
+#include "lima_device.h"
+
+struct lima_bo {
+ struct drm_gem_object gem;
+
+ struct page **pages;
+ dma_addr_t *pages_dma_addr;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct mutex lock;
+ struct list_head va;
+};
+
+static inline struct lima_bo *
+to_lima_bo(struct drm_gem_object *obj)
+{
+ return container_of(obj, struct lima_bo, gem);
+}
+
+struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
+ u32 flags, struct sg_table *sgt,
+ struct reservation_object *resv);
+void lima_bo_destroy(struct lima_bo *bo);
+void *lima_bo_vmap(struct lima_bo *bo);
+void lima_bo_vunmap(struct lima_bo *bo);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pmu.c b/drivers/gpu/drm/lima/lima_pmu.c
new file mode 100644
index 000000000000..571f6d661581
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/iopoll.h>
+#include <linux/device.h>
+
+#include "lima_device.h"
+#include "lima_pmu.h"
+#include "lima_regs.h"
+
+#define pmu_write(reg, data) writel(data, ip->iomem + reg)
+#define pmu_read(reg) readl(ip->iomem + reg)
+
+static int lima_pmu_wait_cmd(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+ u32 v;
+
+ err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT,
+ v, v & LIMA_PMU_INT_CMD_MASK,
+ 100, 100000);
+ if (err) {
+ dev_err(dev->dev, "timeout wait pmd cmd\n");
+ return err;
+ }
+
+ pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
+ return 0;
+}
+
+int lima_pmu_init(struct lima_ip *ip)
+{
+ int err;
+ u32 stat;
+
+ pmu_write(LIMA_PMU_INT_MASK, 0);
+
+	/* If this value is too low, the GPU will be unstable
+	 * at high GPU clock frequencies.
+	 */
+ pmu_write(LIMA_PMU_SW_DELAY, 0xffff);
+
+ /* status reg 1=off 0=on */
+ stat = pmu_read(LIMA_PMU_STATUS);
+
+ /* power up all ip */
+ if (stat) {
+ pmu_write(LIMA_PMU_POWER_UP, stat);
+ err = lima_pmu_wait_cmd(ip);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+void lima_pmu_fini(struct lima_ip *ip)
+{
+
+}
diff --git a/drivers/gpu/drm/lima/lima_pmu.h b/drivers/gpu/drm/lima/lima_pmu.h
new file mode 100644
index 000000000000..a2a18775eb07
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PMU_H__
+#define __LIMA_PMU_H__
+
+struct lima_ip;
+
+int lima_pmu_init(struct lima_ip *ip);
+void lima_pmu_fini(struct lima_ip *ip);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
new file mode 100644
index 000000000000..d29721e177bf
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <drm/lima_drm.h>
+
+#include "lima_device.h"
+#include "lima_pp.h"
+#include "lima_dlbu.h"
+#include "lima_bcast.h"
+#include "lima_vm.h"
+#include "lima_regs.h"
+
+#define pp_write(reg, data) writel(data, ip->iomem + reg)
+#define pp_read(reg) readl(ip->iomem + reg)
+
+static void lima_pp_handle_irq(struct lima_ip *ip, u32 state)
+{
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (state & LIMA_PP_IRQ_MASK_ERROR) {
+ u32 status = pp_read(LIMA_PP_STATUS);
+
+ dev_err(dev->dev, "pp error irq state=%x status=%x\n",
+ state, status);
+
+ pipe->error = true;
+
+ /* mask all interrupts before hard reset */
+ pp_write(LIMA_PP_INT_MASK, 0);
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, state);
+}
+
+static irqreturn_t lima_pp_irq_handler(int irq, void *data)
+{
+ struct lima_ip *ip = data;
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ u32 state = pp_read(LIMA_PP_INT_STATUS);
+
+ /* for shared irq case */
+ if (!state)
+ return IRQ_NONE;
+
+ lima_pp_handle_irq(ip, state);
+
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t lima_pp_bcast_irq_handler(int irq, void *data)
+{
+ int i;
+ irqreturn_t ret = IRQ_NONE;
+ struct lima_ip *pp_bcast = data;
+ struct lima_device *dev = pp_bcast->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+ u32 status, state;
+
+ if (pipe->done & (1 << i))
+ continue;
+
+		/* read status first in case the int state changes in between,
+		 * which could cause the interrupt handling to be missed
+		 */
+ status = pp_read(LIMA_PP_STATUS);
+ state = pp_read(LIMA_PP_INT_STATUS);
+
+ if (state) {
+ lima_pp_handle_irq(ip, state);
+ ret = IRQ_HANDLED;
+ } else {
+ if (status & LIMA_PP_STATUS_RENDERING_ACTIVE)
+ continue;
+ }
+
+ pipe->done |= (1 << i);
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+ }
+
+ return ret;
+}
+
+static void lima_pp_soft_reset_async(struct lima_ip *ip)
+{
+ if (ip->data.async_reset)
+ return;
+
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_INT_RAWSTAT, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_SOFT_RESET);
+ ip->data.async_reset = true;
+}
+
+static int lima_pp_soft_reset_poll(struct lima_ip *ip)
+{
+ return !(pp_read(LIMA_PP_STATUS) & LIMA_PP_STATUS_RENDERING_ACTIVE) &&
+ pp_read(LIMA_PP_INT_RAWSTAT) == LIMA_PP_IRQ_RESET_COMPLETED;
+}
+
+static int lima_pp_soft_reset_async_wait_one(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ ret = lima_poll_timeout(ip, lima_pp_soft_reset_poll, 0, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp %s reset time out\n", lima_ip_name(ip));
+ return ret;
+ }
+
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static int lima_pp_soft_reset_async_wait(struct lima_ip *ip)
+{
+ int i, err = 0;
+
+ if (!ip->data.async_reset)
+ return 0;
+
+ if (ip->id == lima_ip_pp_bcast) {
+ struct lima_device *dev = ip->dev;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+ struct drm_lima_m450_pp_frame *frame = pipe->current_task->frame;
+
+ for (i = 0; i < frame->num_pp; i++)
+ err |= lima_pp_soft_reset_async_wait_one(pipe->processor[i]);
+ } else
+ err = lima_pp_soft_reset_async_wait_one(ip);
+
+ ip->data.async_reset = false;
+ return err;
+}
+
+static void lima_pp_write_frame(struct lima_ip *ip, u32 *frame, u32 *wb)
+{
+ int i, j, n = 0;
+
+ for (i = 0; i < LIMA_PP_FRAME_REG_NUM; i++)
+ writel(frame[i], ip->iomem + LIMA_PP_FRAME + i * 4);
+
+ for (i = 0; i < 3; i++) {
+ for (j = 0; j < LIMA_PP_WB_REG_NUM; j++)
+ writel(wb[n++], ip->iomem + LIMA_PP_WB(i) + j * 4);
+ }
+}
+
+static int lima_pp_hard_reset_poll(struct lima_ip *ip)
+{
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC01A0000);
+ return pp_read(LIMA_PP_PERF_CNT_0_LIMIT) == 0xC01A0000;
+}
+
+static int lima_pp_hard_reset(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int ret;
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0xC0FFE000);
+ pp_write(LIMA_PP_INT_MASK, 0);
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_FORCE_RESET);
+ ret = lima_poll_timeout(ip, lima_pp_hard_reset_poll, 10, 100);
+ if (ret) {
+ dev_err(dev->dev, "pp hard reset timeout\n");
+ return ret;
+ }
+
+ pp_write(LIMA_PP_PERF_CNT_0_LIMIT, 0);
+ pp_write(LIMA_PP_INT_CLEAR, LIMA_PP_IRQ_MASK_ALL);
+ pp_write(LIMA_PP_INT_MASK, LIMA_PP_IRQ_MASK_USED);
+ return 0;
+}
+
+static void lima_pp_print_version(struct lima_ip *ip)
+{
+ u32 version, major, minor;
+ char *name;
+
+ version = pp_read(LIMA_PP_VERSION);
+ major = (version >> 8) & 0xFF;
+ minor = version & 0xFF;
+ switch (version >> 16) {
+ case 0xC807:
+ name = "mali200";
+ break;
+ case 0xCE07:
+ name = "mali300";
+ break;
+ case 0xCD07:
+ name = "mali400";
+ break;
+ case 0xCF07:
+ name = "mali450";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
+ lima_ip_name(ip), name, major, minor);
+}
+
+int lima_pp_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ lima_pp_print_version(ip);
+
+ ip->data.async_reset = false;
+ lima_pp_soft_reset_async(ip);
+ err = lima_pp_soft_reset_async_wait(ip);
+ if (err)
+ return err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ dev->pp_version = pp_read(LIMA_PP_VERSION);
+
+ return 0;
+}
+
+void lima_pp_fini(struct lima_ip *ip)
+{
+
+}
+
+int lima_pp_bcast_init(struct lima_ip *ip)
+{
+ struct lima_device *dev = ip->dev;
+ int err;
+
+ err = devm_request_irq(dev->dev, ip->irq, lima_pp_bcast_irq_handler,
+ IRQF_SHARED, lima_ip_name(ip), ip);
+ if (err) {
+ dev_err(dev->dev, "pp %s fail to request irq\n",
+ lima_ip_name(ip));
+ return err;
+ }
+
+ return 0;
+}
+
+void lima_pp_bcast_fini(struct lima_ip *ip)
+{
+
+}
+
+static int lima_pp_task_validate(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ u32 num_pp;
+
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+
+ if (f->_pad)
+ return -EINVAL;
+ } else {
+ struct drm_lima_m400_pp_frame *f = task->frame;
+
+ num_pp = f->num_pp;
+ }
+
+ if (num_pp == 0 || num_pp > pipe->num_processor)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void lima_pp_task_run(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ if (pipe->bcast_processor) {
+ struct drm_lima_m450_pp_frame *frame = task->frame;
+ struct lima_device *dev = pipe->bcast_processor->dev;
+ struct lima_ip *ip = pipe->bcast_processor;
+ int i;
+
+ pipe->done = 0;
+ atomic_set(&pipe->task, frame->num_pp);
+
+ if (frame->use_dlbu) {
+ lima_dlbu_enable(dev, frame->num_pp);
+
+ frame->frame[LIMA_PP_FRAME >> 2] = LIMA_VA_RESERVE_DLBU;
+ lima_dlbu_set_reg(dev->ip + lima_ip_dlbu, frame->dlbu_regs);
+ } else
+ lima_dlbu_disable(dev);
+
+ lima_bcast_enable(dev, frame->num_pp);
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ pp_write(LIMA_PP_STACK, frame->fragment_stack_address[i]);
+ if (!frame->use_dlbu)
+ pp_write(LIMA_PP_FRAME, frame->plbu_array_address[i]);
+ }
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ } else {
+ struct drm_lima_m400_pp_frame *frame = task->frame;
+ int i;
+
+ atomic_set(&pipe->task, frame->num_pp);
+
+ for (i = 0; i < frame->num_pp; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ frame->frame[LIMA_PP_FRAME >> 2] =
+ frame->plbu_array_address[i];
+ frame->frame[LIMA_PP_STACK >> 2] =
+ frame->fragment_stack_address[i];
+
+ lima_pp_soft_reset_async_wait(ip);
+
+ lima_pp_write_frame(ip, frame->frame, frame->wb);
+
+ pp_write(LIMA_PP_CTRL, LIMA_PP_CTRL_START_RENDERING);
+ }
+ }
+}
+
+static void lima_pp_task_fini(struct lima_sched_pipe *pipe)
+{
+ if (pipe->bcast_processor)
+ lima_pp_soft_reset_async(pipe->bcast_processor);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++)
+ lima_pp_soft_reset_async(pipe->processor[i]);
+ }
+}
+
+static void lima_pp_task_error(struct lima_sched_pipe *pipe)
+{
+ int i;
+
+ for (i = 0; i < pipe->num_processor; i++) {
+ struct lima_ip *ip = pipe->processor[i];
+
+ dev_err(ip->dev->dev, "pp task error %d int_state=%x status=%x\n",
+ i, pp_read(LIMA_PP_INT_STATUS), pp_read(LIMA_PP_STATUS));
+
+ lima_pp_hard_reset(ip);
+ }
+}
+
+static void lima_pp_task_mmu_error(struct lima_sched_pipe *pipe)
+{
+ if (atomic_dec_and_test(&pipe->task))
+ lima_sched_pipe_task_done(pipe);
+}
+
+static struct kmem_cache *lima_pp_task_slab;
+static int lima_pp_task_slab_refcnt;
+
+int lima_pp_pipe_init(struct lima_device *dev)
+{
+ int frame_size;
+ struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
+
+ if (dev->id == lima_gpu_mali400)
+ frame_size = sizeof(struct drm_lima_m400_pp_frame);
+ else
+ frame_size = sizeof(struct drm_lima_m450_pp_frame);
+
+ if (!lima_pp_task_slab) {
+ lima_pp_task_slab = kmem_cache_create_usercopy(
+ "lima_pp_task", sizeof(struct lima_sched_task) + frame_size,
+ 0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
+ frame_size, NULL);
+ if (!lima_pp_task_slab)
+ return -ENOMEM;
+ }
+ lima_pp_task_slab_refcnt++;
+
+ pipe->frame_size = frame_size;
+ pipe->task_slab = lima_pp_task_slab;
+
+ pipe->task_validate = lima_pp_task_validate;
+ pipe->task_run = lima_pp_task_run;
+ pipe->task_fini = lima_pp_task_fini;
+ pipe->task_error = lima_pp_task_error;
+ pipe->task_mmu_error = lima_pp_task_mmu_error;
+
+ return 0;
+}
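
A minimal sketch (not part of the patch) of what the kmem_cache_create_usercopy() whitelist above enables: only the frame bytes appended after struct lima_sched_task may be the target of a user copy. The helper name and user_frame pointer below are hypothetical, and the snippet assumes the usual submit-ioctl context plus <linux/uaccess.h>.

	static int lima_pp_task_fill(struct lima_sched_pipe *pipe,
				     struct lima_sched_task *task,
				     void __user *user_frame)
	{
		/* the frame lives right after the task struct, inside the
		 * usercopy whitelist declared by kmem_cache_create_usercopy()
		 */
		task->frame = task + 1;
		if (copy_from_user(task->frame, user_frame, pipe->frame_size))
			return -EFAULT;

		return 0;
	}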
+
+void lima_pp_pipe_fini(struct lima_device *dev)
+{
+ if (!--lima_pp_task_slab_refcnt) {
+ kmem_cache_destroy(lima_pp_task_slab);
+ lima_pp_task_slab = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_pp.h b/drivers/gpu/drm/lima/lima_pp.h
new file mode 100644
index 000000000000..bf60c77b2633
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_pp.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_PP_H__
+#define __LIMA_PP_H__
+
+struct lima_ip;
+struct lima_device;
+
+int lima_pp_init(struct lima_ip *ip);
+void lima_pp_fini(struct lima_ip *ip);
+
+int lima_pp_bcast_init(struct lima_ip *ip);
+void lima_pp_bcast_fini(struct lima_ip *ip);
+
+int lima_pp_pipe_init(struct lima_device *dev);
+void lima_pp_pipe_fini(struct lima_device *dev);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
new file mode 100644
index 000000000000..ace8ecefbe90
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2010-2017 ARM Limited. All rights reserved.
+ * Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
+ */
+
+#ifndef __LIMA_REGS_H__
+#define __LIMA_REGS_H__
+
+/* The register definitions in this file are collected from the
+ * official ARM Mali Utgard GPU kernel driver source code.
+ */
+
+/* PMU regs */
+#define LIMA_PMU_POWER_UP 0x00
+#define LIMA_PMU_POWER_DOWN 0x04
+#define LIMA_PMU_POWER_GP0_MASK BIT(0)
+#define LIMA_PMU_POWER_L2_MASK BIT(1)
+#define LIMA_PMU_POWER_PP_MASK(i) BIT(2 + i)
+
+/*
+ * On Mali450 each block automatically starts up its corresponding L2
+ * and the PPs are not fully independently controllable.
+ * Instead, PP0, PP1-3 and PP4-7 can be turned on or off.
+ */
+#define LIMA450_PMU_POWER_PP0_MASK BIT(1)
+#define LIMA450_PMU_POWER_PP13_MASK BIT(2)
+#define LIMA450_PMU_POWER_PP47_MASK BIT(3)
+
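
A short sketch (not part of the patch) of how the Mali450 power domains above might be combined into a power-up mask; the helper name is hypothetical.

static u32 lima450_pp_power_up_mask(int num_pp)
{
	/* PP0 has its own domain; PP1-3 and PP4-7 are grouped */
	u32 mask = LIMA450_PMU_POWER_PP0_MASK;

	if (num_pp > 1)
		mask |= LIMA450_PMU_POWER_PP13_MASK;
	if (num_pp > 4)
		mask |= LIMA450_PMU_POWER_PP47_MASK;

	return mask;
}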
+#define LIMA_PMU_STATUS 0x08
+#define LIMA_PMU_INT_MASK 0x0C
+#define LIMA_PMU_INT_RAWSTAT 0x10
+#define LIMA_PMU_INT_CLEAR 0x18
+#define LIMA_PMU_INT_CMD_MASK BIT(0)
+#define LIMA_PMU_SW_DELAY 0x1C
+
+/* L2 cache regs */
+#define LIMA_L2_CACHE_SIZE 0x0004
+#define LIMA_L2_CACHE_STATUS 0x0008
+#define LIMA_L2_CACHE_STATUS_COMMAND_BUSY BIT(0)
+#define LIMA_L2_CACHE_STATUS_DATA_BUSY BIT(1)
+#define LIMA_L2_CACHE_COMMAND 0x0010
+#define LIMA_L2_CACHE_COMMAND_CLEAR_ALL BIT(0)
+#define LIMA_L2_CACHE_CLEAR_PAGE 0x0014
+#define LIMA_L2_CACHE_MAX_READS 0x0018
+#define LIMA_L2_CACHE_ENABLE 0x001C
+#define LIMA_L2_CACHE_ENABLE_ACCESS BIT(0)
+#define LIMA_L2_CACHE_ENABLE_READ_ALLOCATE BIT(1)
+#define LIMA_L2_CACHE_PERFCNT_SRC0 0x0020
+#define LIMA_L2_CACHE_PERFCNT_VAL0 0x0024
+#define LIMA_L2_CACHE_PERFCNT_SRC1 0x0028
+#define LIMA_L2_CACHE_ERFCNT_VAL1 0x002C
+
+/* GP regs */
+#define LIMA_GP_VSCL_START_ADDR 0x00
+#define LIMA_GP_VSCL_END_ADDR 0x04
+#define LIMA_GP_PLBUCL_START_ADDR 0x08
+#define LIMA_GP_PLBUCL_END_ADDR 0x0c
+#define LIMA_GP_PLBU_ALLOC_START_ADDR 0x10
+#define LIMA_GP_PLBU_ALLOC_END_ADDR 0x14
+#define LIMA_GP_CMD 0x20
+#define LIMA_GP_CMD_START_VS BIT(0)
+#define LIMA_GP_CMD_START_PLBU BIT(1)
+#define LIMA_GP_CMD_UPDATE_PLBU_ALLOC BIT(4)
+#define LIMA_GP_CMD_RESET BIT(5)
+#define LIMA_GP_CMD_FORCE_HANG BIT(6)
+#define LIMA_GP_CMD_STOP_BUS BIT(9)
+#define LIMA_GP_CMD_SOFT_RESET BIT(10)
+#define LIMA_GP_INT_RAWSTAT 0x24
+#define LIMA_GP_INT_CLEAR 0x28
+#define LIMA_GP_INT_MASK 0x2C
+#define LIMA_GP_INT_STAT 0x30
+#define LIMA_GP_IRQ_VS_END_CMD_LST BIT(0)
+#define LIMA_GP_IRQ_PLBU_END_CMD_LST BIT(1)
+#define LIMA_GP_IRQ_PLBU_OUT_OF_MEM BIT(2)
+#define LIMA_GP_IRQ_VS_SEM_IRQ BIT(3)
+#define LIMA_GP_IRQ_PLBU_SEM_IRQ BIT(4)
+#define LIMA_GP_IRQ_HANG BIT(5)
+#define LIMA_GP_IRQ_FORCE_HANG BIT(6)
+#define LIMA_GP_IRQ_PERF_CNT_0_LIMIT BIT(7)
+#define LIMA_GP_IRQ_PERF_CNT_1_LIMIT BIT(8)
+#define LIMA_GP_IRQ_WRITE_BOUND_ERR BIT(9)
+#define LIMA_GP_IRQ_SYNC_ERROR BIT(10)
+#define LIMA_GP_IRQ_AXI_BUS_ERROR BIT(11)
+#define LIMA_GP_IRQ_AXI_BUS_STOPPED BIT(12)
+#define LIMA_GP_IRQ_VS_INVALID_CMD BIT(13)
+#define LIMA_GP_IRQ_PLB_INVALID_CMD BIT(14)
+#define LIMA_GP_IRQ_RESET_COMPLETED BIT(19)
+#define LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW BIT(20)
+#define LIMA_GP_IRQ_SEMAPHORE_OVERFLOW BIT(21)
+#define LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS BIT(22)
+#define LIMA_GP_WRITE_BOUND_LOW 0x34
+#define LIMA_GP_PERF_CNT_0_ENABLE 0x3C
+#define LIMA_GP_PERF_CNT_1_ENABLE 0x40
+#define LIMA_GP_PERF_CNT_0_SRC 0x44
+#define LIMA_GP_PERF_CNT_1_SRC 0x48
+#define LIMA_GP_PERF_CNT_0_VALUE 0x4C
+#define LIMA_GP_PERF_CNT_1_VALUE 0x50
+#define LIMA_GP_PERF_CNT_0_LIMIT 0x54
+#define LIMA_GP_STATUS 0x68
+#define LIMA_GP_STATUS_VS_ACTIVE BIT(1)
+#define LIMA_GP_STATUS_BUS_STOPPED BIT(2)
+#define LIMA_GP_STATUS_PLBU_ACTIVE BIT(3)
+#define LIMA_GP_STATUS_BUS_ERROR BIT(6)
+#define LIMA_GP_STATUS_WRITE_BOUND_ERR BIT(8)
+#define LIMA_GP_VERSION 0x6C
+#define LIMA_GP_VSCL_START_ADDR_READ 0x80
+#define LIMA_GP_PLBCL_START_ADDR_READ 0x84
+#define LIMA_GP_CONTR_AXI_BUS_ERROR_STAT 0x94
+
+#define LIMA_GP_IRQ_MASK_ALL \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_VS_SEM_IRQ | \
+ LIMA_GP_IRQ_PLBU_SEM_IRQ | \
+ LIMA_GP_IRQ_HANG | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_PERF_CNT_0_LIMIT | \
+ LIMA_GP_IRQ_PERF_CNT_1_LIMIT | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_STOPPED | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_RESET_COMPLETED | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM | \
+ LIMA_GP_IRQ_FORCE_HANG | \
+ LIMA_GP_IRQ_WRITE_BOUND_ERR | \
+ LIMA_GP_IRQ_SYNC_ERROR | \
+ LIMA_GP_IRQ_AXI_BUS_ERROR | \
+ LIMA_GP_IRQ_VS_INVALID_CMD | \
+ LIMA_GP_IRQ_PLB_INVALID_CMD | \
+ LIMA_GP_IRQ_SEMAPHORE_UNDERFLOW | \
+ LIMA_GP_IRQ_SEMAPHORE_OVERFLOW | \
+ LIMA_GP_IRQ_PTR_ARRAY_OUT_OF_BOUNDS)
+
+#define LIMA_GP_IRQ_MASK_USED \
+ ( \
+ LIMA_GP_IRQ_VS_END_CMD_LST | \
+ LIMA_GP_IRQ_PLBU_END_CMD_LST | \
+ LIMA_GP_IRQ_MASK_ERROR)
+
+/* PP regs */
+#define LIMA_PP_FRAME 0x0000
+#define LIMA_PP_RSW 0x0004
+#define LIMA_PP_STACK 0x0030
+#define LIMA_PP_STACK_SIZE 0x0034
+#define LIMA_PP_ORIGIN_OFFSET_X 0x0040
+#define LIMA_PP_WB(i) (0x0100 * (i + 1))
+#define LIMA_PP_WB_SOURCE_SELECT 0x0000
+#define LIMA_PP_WB_SOURCE_ADDR 0x0004
+
+#define LIMA_PP_VERSION 0x1000
+#define LIMA_PP_CURRENT_REND_LIST_ADDR 0x1004
+#define LIMA_PP_STATUS 0x1008
+#define LIMA_PP_STATUS_RENDERING_ACTIVE BIT(0)
+#define LIMA_PP_STATUS_BUS_STOPPED BIT(4)
+#define LIMA_PP_CTRL 0x100c
+#define LIMA_PP_CTRL_STOP_BUS BIT(0)
+#define LIMA_PP_CTRL_FLUSH_CACHES BIT(3)
+#define LIMA_PP_CTRL_FORCE_RESET BIT(5)
+#define LIMA_PP_CTRL_START_RENDERING BIT(6)
+#define LIMA_PP_CTRL_SOFT_RESET BIT(7)
+#define LIMA_PP_INT_RAWSTAT 0x1020
+#define LIMA_PP_INT_CLEAR 0x1024
+#define LIMA_PP_INT_MASK 0x1028
+#define LIMA_PP_INT_STATUS 0x102c
+#define LIMA_PP_IRQ_END_OF_FRAME BIT(0)
+#define LIMA_PP_IRQ_END_OF_TILE BIT(1)
+#define LIMA_PP_IRQ_HANG BIT(2)
+#define LIMA_PP_IRQ_FORCE_HANG BIT(3)
+#define LIMA_PP_IRQ_BUS_ERROR BIT(4)
+#define LIMA_PP_IRQ_BUS_STOP BIT(5)
+#define LIMA_PP_IRQ_CNT_0_LIMIT BIT(6)
+#define LIMA_PP_IRQ_CNT_1_LIMIT BIT(7)
+#define LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR BIT(8)
+#define LIMA_PP_IRQ_INVALID_PLIST_COMMAND BIT(9)
+#define LIMA_PP_IRQ_CALL_STACK_UNDERFLOW BIT(10)
+#define LIMA_PP_IRQ_CALL_STACK_OVERFLOW BIT(11)
+#define LIMA_PP_IRQ_RESET_COMPLETED BIT(12)
+#define LIMA_PP_WRITE_BOUNDARY_LOW 0x1044
+#define LIMA_PP_BUS_ERROR_STATUS 0x1050
+#define LIMA_PP_PERF_CNT_0_ENABLE 0x1080
+#define LIMA_PP_PERF_CNT_0_SRC 0x1084
+#define LIMA_PP_PERF_CNT_0_LIMIT 0x1088
+#define LIMA_PP_PERF_CNT_0_VALUE 0x108c
+#define LIMA_PP_PERF_CNT_1_ENABLE 0x10a0
+#define LIMA_PP_PERF_CNT_1_SRC 0x10a4
+#define LIMA_PP_PERF_CNT_1_LIMIT 0x10a8
+#define LIMA_PP_PERF_CNT_1_VALUE 0x10ac
+#define LIMA_PP_PERFMON_CONTR 0x10b0
+#define LIMA_PP_PERFMON_BASE 0x10b4
+
+#define LIMA_PP_IRQ_MASK_ALL \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_END_OF_TILE | \
+ LIMA_PP_IRQ_HANG | \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_BUS_STOP | \
+ LIMA_PP_IRQ_CNT_0_LIMIT | \
+ LIMA_PP_IRQ_CNT_1_LIMIT | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW | \
+ LIMA_PP_IRQ_RESET_COMPLETED)
+
+#define LIMA_PP_IRQ_MASK_ERROR \
+ ( \
+ LIMA_PP_IRQ_FORCE_HANG | \
+ LIMA_PP_IRQ_BUS_ERROR | \
+ LIMA_PP_IRQ_WRITE_BOUNDARY_ERROR | \
+ LIMA_PP_IRQ_INVALID_PLIST_COMMAND | \
+ LIMA_PP_IRQ_CALL_STACK_UNDERFLOW | \
+ LIMA_PP_IRQ_CALL_STACK_OVERFLOW)
+
+#define LIMA_PP_IRQ_MASK_USED \
+ ( \
+ LIMA_PP_IRQ_END_OF_FRAME | \
+ LIMA_PP_IRQ_MASK_ERROR)
+
+/* MMU regs */
+#define LIMA_MMU_DTE_ADDR 0x0000
+#define LIMA_MMU_STATUS 0x0004
+#define LIMA_MMU_STATUS_PAGING_ENABLED BIT(0)
+#define LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
+#define LIMA_MMU_STATUS_STALL_ACTIVE BIT(2)
+#define LIMA_MMU_STATUS_IDLE BIT(3)
+#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
+#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
+#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_COMMAND 0x0008
+#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
+#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
+#define LIMA_MMU_COMMAND_ENABLE_STALL 0x02
+#define LIMA_MMU_COMMAND_DISABLE_STALL 0x03
+#define LIMA_MMU_COMMAND_ZAP_CACHE 0x04
+#define LIMA_MMU_COMMAND_PAGE_FAULT_DONE 0x05
+#define LIMA_MMU_COMMAND_HARD_RESET 0x06
+#define LIMA_MMU_PAGE_FAULT_ADDR 0x000C
+#define LIMA_MMU_ZAP_ONE_LINE 0x0010
+#define LIMA_MMU_INT_RAWSTAT 0x0014
+#define LIMA_MMU_INT_CLEAR 0x0018
+#define LIMA_MMU_INT_MASK 0x001C
+#define LIMA_MMU_INT_PAGE_FAULT BIT(0)
+#define LIMA_MMU_INT_READ_BUS_ERROR BIT(1)
+#define LIMA_MMU_INT_STATUS 0x0020
+
+#define LIMA_VM_FLAG_PRESENT BIT(0)
+#define LIMA_VM_FLAG_READ_PERMISSION BIT(1)
+#define LIMA_VM_FLAG_WRITE_PERMISSION BIT(2)
+#define LIMA_VM_FLAG_OVERRIDE_CACHE BIT(3)
+#define LIMA_VM_FLAG_WRITE_CACHEABLE BIT(4)
+#define LIMA_VM_FLAG_WRITE_ALLOCATE BIT(5)
+#define LIMA_VM_FLAG_WRITE_BUFFERABLE BIT(6)
+#define LIMA_VM_FLAG_READ_CACHEABLE BIT(7)
+#define LIMA_VM_FLAG_READ_ALLOCATE BIT(8)
+#define LIMA_VM_FLAG_MASK 0x1FF
+
+#define LIMA_VM_FLAGS_CACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION | \
+ LIMA_VM_FLAG_OVERRIDE_CACHE | \
+ LIMA_VM_FLAG_WRITE_CACHEABLE | \
+ LIMA_VM_FLAG_WRITE_BUFFERABLE | \
+ LIMA_VM_FLAG_READ_CACHEABLE | \
+ LIMA_VM_FLAG_READ_ALLOCATE)
+
+#define LIMA_VM_FLAGS_UNCACHE ( \
+ LIMA_VM_FLAG_PRESENT | \
+ LIMA_VM_FLAG_READ_PERMISSION | \
+ LIMA_VM_FLAG_WRITE_PERMISSION)
+
+/* DLBU regs */
+#define LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR 0x0000
+#define LIMA_DLBU_MASTER_TLLIST_VADDR 0x0004
+#define LIMA_DLBU_TLLIST_VBASEADDR 0x0008
+#define LIMA_DLBU_FB_DIM 0x000C
+#define LIMA_DLBU_TLLIST_CONF 0x0010
+#define LIMA_DLBU_START_TILE_POS 0x0014
+#define LIMA_DLBU_PP_ENABLE_MASK 0x0018
+
+/* BCAST regs */
+#define LIMA_BCAST_BROADCAST_MASK 0x0
+#define LIMA_BCAST_INTERRUPT_MASK 0x4
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
new file mode 100644
index 000000000000..d53bd45f8d96
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/xarray.h>
+
+#include "lima_drv.h"
+#include "lima_sched.h"
+#include "lima_vm.h"
+#include "lima_mmu.h"
+#include "lima_l2_cache.h"
+#include "lima_object.h"
+
+struct lima_fence {
+ struct dma_fence base;
+ struct lima_sched_pipe *pipe;
+};
+
+static struct kmem_cache *lima_fence_slab;
+static int lima_fence_slab_refcnt;
+
+int lima_sched_slab_init(void)
+{
+ if (!lima_fence_slab) {
+ lima_fence_slab = kmem_cache_create(
+ "lima_fence", sizeof(struct lima_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!lima_fence_slab)
+ return -ENOMEM;
+ }
+
+ lima_fence_slab_refcnt++;
+ return 0;
+}
+
+void lima_sched_slab_fini(void)
+{
+ if (!--lima_fence_slab_refcnt) {
+ kmem_cache_destroy(lima_fence_slab);
+ lima_fence_slab = NULL;
+ }
+}
+
+static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
+{
+ return container_of(fence, struct lima_fence, base);
+}
+
+static const char *lima_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "lima";
+}
+
+static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
+ return f->pipe->base.name;
+}
+
+static void lima_fence_release_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct lima_fence *fence = to_lima_fence(f);
+
+ kmem_cache_free(lima_fence_slab, fence);
+}
+
+static void lima_fence_release(struct dma_fence *fence)
+{
+ struct lima_fence *f = to_lima_fence(fence);
+
+ call_rcu(&f->base.rcu, lima_fence_release_rcu);
+}
+
+static const struct dma_fence_ops lima_fence_ops = {
+ .get_driver_name = lima_fence_get_driver_name,
+ .get_timeline_name = lima_fence_get_timeline_name,
+ .release = lima_fence_release,
+};
+
+static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
+{
+ struct lima_fence *fence;
+
+ fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
+ if (!fence)
+ return NULL;
+
+ fence->pipe = pipe;
+ dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
+ pipe->fence_context, ++pipe->fence_seqno);
+
+ return fence;
+}
+
+static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
+{
+ return container_of(job, struct lima_sched_task, base);
+}
+
+static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
+{
+ return container_of(sched, struct lima_sched_pipe, base);
+}
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm)
+{
+ int err, i;
+
+ task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
+ if (!task->bos)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bos; i++)
+ drm_gem_object_get(&bos[i]->gem);
+
+ err = drm_sched_job_init(&task->base, &context->base, vm);
+ if (err) {
+ kfree(task->bos);
+ return err;
+ }
+
+ task->num_bos = num_bos;
+ task->vm = lima_vm_get(vm);
+
+ xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
+
+ return 0;
+}
+
+void lima_sched_task_fini(struct lima_sched_task *task)
+{
+ struct dma_fence *fence;
+ unsigned long index;
+ int i;
+
+ drm_sched_job_cleanup(&task->base);
+
+ xa_for_each(&task->deps, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&task->deps);
+
+ if (task->bos) {
+ for (i = 0; i < task->num_bos; i++)
+ drm_gem_object_put_unlocked(&task->bos[i]->gem);
+ kfree(task->bos);
+ }
+
+ lima_vm_put(task->vm);
+}
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty)
+{
+ struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
+
+ return drm_sched_entity_init(&context->base, &rq, 1, guilty);
+}
+
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context)
+{
+ drm_sched_entity_fini(&context->base);
+}
+
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task)
+{
+ struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
+
+ drm_sched_entity_push_job(&task->base, &context->base);
+ return fence;
+}
+
+static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+
+ if (!xa_empty(&task->deps))
+ return xa_erase(&task->deps, task->last_dep++);
+
+ return NULL;
+}
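
task->deps acts as a FIFO here: fences are stored at increasing xarray indices elsewhere in the driver and drained one per lima_sched_dependency() call via last_dep. A hedged sketch of how a dependency fence could be queued before the job is pushed; the helper name is hypothetical.

	static int lima_task_add_dep(struct lima_sched_task *task,
				     struct dma_fence *fence)
	{
		u32 id;

		/* hold a reference for the scheduler; it is dropped after
		 * lima_sched_dependency() hands the fence out, or in
		 * lima_sched_task_fini() for unconsumed entries
		 */
		return xa_alloc(&task->deps, &id, dma_fence_get(fence),
				xa_limit_32b, GFP_KERNEL);
	}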
+
+static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_fence *fence;
+ struct dma_fence *ret;
+ struct lima_vm *vm = NULL, *last_vm = NULL;
+ int i;
+
+ /* after GPU reset */
+ if (job->s_fence->finished.error < 0)
+ return NULL;
+
+ fence = lima_fence_create(pipe);
+ if (!fence)
+ return NULL;
+ task->fence = &fence->base;
+
+	/* take an extra reference for the caller's use of the fence,
+	 * otherwise the irq handler may consume it before the caller
+	 * gets a chance to use it
+	 */
+ ret = dma_fence_get(task->fence);
+
+ pipe->current_task = task;
+
+	/* this is needed for the MMU to work correctly, otherwise GP/PP
+	 * will hang or page fault for an unknown reason after running for
+	 * a while.
+	 *
+	 * Need to investigate:
+	 * 1. is it related to the TLB?
+	 * 2. how much is performance affected by the L2 cache flush?
+	 * 3. can we call this function less often, given that all
+	 *    GP/PP share the same L2 cache on mali400?
+	 *
+	 * TODO:
+	 * 1. move this to task fini to save some wait time?
+	 * 2. when GP/PP use different L2 caches, does the PP need to
+	 *    wait for the GP L2 cache flush?
+	 */
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (task->vm != pipe->current_vm) {
+ vm = lima_vm_get(task->vm);
+ last_vm = pipe->current_vm;
+ pipe->current_vm = task->vm;
+ }
+
+ if (pipe->bcast_mmu)
+ lima_mmu_switch_vm(pipe->bcast_mmu, vm);
+ else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_switch_vm(pipe->mmu[i], vm);
+ }
+
+ if (last_vm)
+ lima_vm_put(last_vm);
+
+ pipe->error = false;
+ pipe->task_run(pipe, task);
+
+ return task->fence;
+}
+
+static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
+ struct lima_sched_task *task)
+{
+ drm_sched_stop(&pipe->base);
+
+ if (task)
+ drm_sched_increase_karma(&task->base);
+
+ pipe->task_error(pipe);
+
+ if (pipe->bcast_mmu)
+ lima_mmu_page_fault_resume(pipe->bcast_mmu);
+ else {
+ int i;
+
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_page_fault_resume(pipe->mmu[i]);
+ }
+
+ if (pipe->current_vm)
+ lima_vm_put(pipe->current_vm);
+
+ pipe->current_vm = NULL;
+ pipe->current_task = NULL;
+
+ drm_sched_resubmit_jobs(&pipe->base);
+ drm_sched_start(&pipe->base, true);
+}
+
+static void lima_sched_timedout_job(struct drm_sched_job *job)
+{
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_sched_task *task = to_lima_task(job);
+
+ DRM_ERROR("lima job timeout\n");
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+static void lima_sched_free_job(struct drm_sched_job *job)
+{
+ struct lima_sched_task *task = to_lima_task(job);
+ struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
+ struct lima_vm *vm = task->vm;
+ struct lima_bo **bos = task->bos;
+ int i;
+
+ dma_fence_put(task->fence);
+
+ for (i = 0; i < task->num_bos; i++)
+ lima_vm_bo_del(vm, bos[i]);
+
+ lima_sched_task_fini(task);
+ kmem_cache_free(pipe->task_slab, task);
+}
+
+static const struct drm_sched_backend_ops lima_sched_ops = {
+ .dependency = lima_sched_dependency,
+ .run_job = lima_sched_run_job,
+ .timedout_job = lima_sched_timedout_job,
+ .free_job = lima_sched_free_job,
+};
+
+static void lima_sched_error_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, error_work);
+ struct lima_sched_task *task = pipe->current_task;
+
+ lima_sched_handle_error_task(pipe, task);
+}
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
+{
+ long timeout;
+
+ if (lima_sched_timeout_ms <= 0)
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ else
+ timeout = msecs_to_jiffies(lima_sched_timeout_ms);
+
+ pipe->fence_context = dma_fence_context_alloc(1);
+ spin_lock_init(&pipe->fence_lock);
+
+ INIT_WORK(&pipe->error_work, lima_sched_error_work);
+
+ return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
+}
+
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
+{
+ drm_sched_fini(&pipe->base);
+}
+
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
+{
+ if (pipe->error)
+ schedule_work(&pipe->error_work);
+ else {
+ struct lima_sched_task *task = pipe->current_task;
+
+ pipe->task_fini(pipe);
+ dma_fence_signal(task->fence);
+ }
+}
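
A rough sketch (not part of the patch) of the submit flow built on the functions above, assuming a task already allocated from pipe->task_slab and hypothetical context/bos/vm variables; error handling is trimmed.

	struct dma_fence *fence;
	int err;

	err = lima_sched_task_init(task, context, bos, num_bos, vm);
	if (err)
		return err;

	fence = lima_sched_context_queue_task(context, task);

	/* the returned reference to the finished fence can be installed
	 * into a sync object for userspace before being dropped here
	 */
	dma_fence_put(fence);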
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
new file mode 100644
index 000000000000..928af91c1118
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_SCHED_H__
+#define __LIMA_SCHED_H__
+
+#include <drm/gpu_scheduler.h>
+
+struct lima_vm;
+
+struct lima_sched_task {
+ struct drm_sched_job base;
+
+ struct lima_vm *vm;
+ void *frame;
+
+ struct xarray deps;
+ unsigned long last_dep;
+
+ struct lima_bo **bos;
+ int num_bos;
+
+ /* pipe fence */
+ struct dma_fence *fence;
+};
+
+struct lima_sched_context {
+ struct drm_sched_entity base;
+};
+
+#define LIMA_SCHED_PIPE_MAX_MMU 8
+#define LIMA_SCHED_PIPE_MAX_L2_CACHE 2
+#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8
+
+struct lima_ip;
+
+struct lima_sched_pipe {
+ struct drm_gpu_scheduler base;
+
+ u64 fence_context;
+ u32 fence_seqno;
+ spinlock_t fence_lock;
+
+ struct lima_sched_task *current_task;
+ struct lima_vm *current_vm;
+
+ struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
+ int num_mmu;
+
+ struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
+ int num_l2_cache;
+
+ struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
+ int num_processor;
+
+ struct lima_ip *bcast_processor;
+ struct lima_ip *bcast_mmu;
+
+ u32 done;
+ bool error;
+ atomic_t task;
+
+ int frame_size;
+ struct kmem_cache *task_slab;
+
+ int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
+ void (*task_fini)(struct lima_sched_pipe *pipe);
+ void (*task_error)(struct lima_sched_pipe *pipe);
+ void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+
+ struct work_struct error_work;
+};
+
+int lima_sched_task_init(struct lima_sched_task *task,
+ struct lima_sched_context *context,
+ struct lima_bo **bos, int num_bos,
+ struct lima_vm *vm);
+void lima_sched_task_fini(struct lima_sched_task *task);
+
+int lima_sched_context_init(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context,
+ atomic_t *guilty);
+void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+ struct lima_sched_context *context);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
+ struct lima_sched_task *task);
+
+int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
+void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
+void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
+
+static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
+{
+ pipe->error = true;
+ pipe->task_mmu_error(pipe);
+}
+
+int lima_sched_slab_init(void);
+void lima_sched_slab_fini(void);
+
+#endif
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
new file mode 100644
index 000000000000..19e88ca16527
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include "lima_device.h"
+#include "lima_vm.h"
+#include "lima_object.h"
+#include "lima_regs.h"
+
+struct lima_bo_va {
+ struct list_head list;
+ unsigned int ref_count;
+
+ struct drm_mm_node node;
+
+ struct lima_vm *vm;
+};
+
+#define LIMA_VM_PD_SHIFT 22
+#define LIMA_VM_PT_SHIFT 12
+#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
+
+#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
+#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
+
+#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
+#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
+#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
+#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
+
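
A worked example (not part of the patch) of how the macros above split a GPU virtual address, using the shifts just defined:

	/*
	 * For va = 0x12345000:
	 *   LIMA_PDE(va) = 0x12345000 >> 22              = 0x48
	 *   LIMA_PTE(va) = (va & LIMA_VM_PT_MASK) >> 12  = 0x345
	 *   LIMA_PBE(va) = 0x12345000 >> 25              = 0x9
	 *   LIMA_BTE(va) = (va & LIMA_VM_BT_MASK) >> 12  = 0x345
	 * i.e. the BT index selects a block of LIMA_VM_NUM_PT_PER_BT (8)
	 * page tables, and 0x48 == 0x9 * 8 for the PD entry covering it.
	 */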
+
+static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
+{
+ u32 addr;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ vm->bts[pbe].cpu[bte] = 0;
+ }
+}
+
+static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
+ u32 start, u32 end)
+{
+ u64 addr;
+ int i = 0;
+
+ for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
+ u32 pbe = LIMA_PBE(addr);
+ u32 bte = LIMA_BTE(addr);
+
+ if (!vm->bts[pbe].cpu) {
+ dma_addr_t pts;
+ u32 *pd;
+ int j;
+
+ vm->bts[pbe].cpu = dma_alloc_wc(
+ vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
+ if (!vm->bts[pbe].cpu) {
+ if (addr != start)
+ lima_vm_unmap_page_table(vm, start, addr - 1);
+ return -ENOMEM;
+ }
+
+ pts = vm->bts[pbe].dma;
+ pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ pd[j] = pts | LIMA_VM_FLAG_PRESENT;
+ pts += LIMA_PAGE_SIZE;
+ }
+ }
+
+ vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
+ }
+
+ return 0;
+}
+
+static struct lima_bo_va *
+lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va, *ret = NULL;
+
+ list_for_each_entry(bo_va, &bo->va, list) {
+ if (bo_va->vm == vm) {
+ ret = bo_va;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
+{
+ struct lima_bo_va *bo_va;
+ int err;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (bo_va) {
+ bo_va->ref_count++;
+ mutex_unlock(&bo->lock);
+ return 0;
+ }
+
+	/* do not create a new bo_va if the caller did not ask for one */
+ if (!create) {
+ mutex_unlock(&bo->lock);
+ return -ENOENT;
+ }
+
+ bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
+ if (!bo_va) {
+ err = -ENOMEM;
+ goto err_out0;
+ }
+
+ bo_va->vm = vm;
+ bo_va->ref_count = 1;
+
+ mutex_lock(&vm->lock);
+
+ err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
+ if (err)
+ goto err_out1;
+
+ err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+ if (err)
+ goto err_out2;
+
+ mutex_unlock(&vm->lock);
+
+ list_add_tail(&bo_va->list, &bo->va);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out2:
+ drm_mm_remove_node(&bo_va->node);
+err_out1:
+ mutex_unlock(&vm->lock);
+ kfree(bo_va);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
+
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (--bo_va->ref_count > 0) {
+ mutex_unlock(&bo->lock);
+ return;
+ }
+
+ mutex_lock(&vm->lock);
+
+ lima_vm_unmap_page_table(vm, bo_va->node.start,
+ bo_va->node.start + bo_va->node.size - 1);
+
+ drm_mm_remove_node(&bo_va->node);
+
+ mutex_unlock(&vm->lock);
+
+ list_del(&bo_va->list);
+
+ mutex_unlock(&bo->lock);
+
+ kfree(bo_va);
+}
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
+{
+ struct lima_bo_va *bo_va;
+ u32 ret;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ ret = bo_va->node.start;
+
+ mutex_unlock(&bo->lock);
+
+ return ret;
+}
+
+struct lima_vm *lima_vm_create(struct lima_device *dev)
+{
+ struct lima_vm *vm;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return NULL;
+
+ vm->dev = dev;
+ mutex_init(&vm->lock);
+ kref_init(&vm->refcount);
+
+ vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!vm->pd.cpu)
+ goto err_out0;
+
+ if (dev->dlbu_cpu) {
+ int err = lima_vm_map_page_table(
+ vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
+ LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
+ if (err)
+ goto err_out1;
+ }
+
+ drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);
+
+ return vm;
+
+err_out1:
+ dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+err_out0:
+ kfree(vm);
+ return NULL;
+}
+
+void lima_vm_release(struct kref *kref)
+{
+ struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
+ int i;
+
+ drm_mm_takedown(&vm->mm);
+
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (vm->bts[i].cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
+ vm->bts[i].cpu, vm->bts[i].dma);
+ }
+
+ if (vm->pd.cpu)
+ dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
+
+ kfree(vm);
+}
+
+void lima_vm_print(struct lima_vm *vm)
+{
+ int i, j, k;
+ u32 *pd, *pt;
+
+ if (!vm->pd.cpu)
+ return;
+
+ pd = vm->pd.cpu;
+ for (i = 0; i < LIMA_VM_NUM_BT; i++) {
+ if (!vm->bts[i].cpu)
+ continue;
+
+ pt = vm->bts[i].cpu;
+ for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
+ int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;
+
+ printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);
+
+ for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
+ u32 pte = *pt++;
+
+ if (pte)
+ printk(KERN_INFO " pt %03x:%08x\n", k, pte);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
new file mode 100644
index 000000000000..caee2f8a29b4
--- /dev/null
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
+
+#ifndef __LIMA_VM_H__
+#define __LIMA_VM_H__
+
+#include <drm/drm_mm.h>
+#include <linux/kref.h>
+
+#define LIMA_PAGE_SIZE 4096
+#define LIMA_PAGE_MASK (LIMA_PAGE_SIZE - 1)
+#define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32))
+
+#define LIMA_VM_NUM_PT_PER_BT_SHIFT 3
+#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
+#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
+
+#define LIMA_VA_RESERVE_START 0xFFF00000
+#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
+#define LIMA_VA_RESERVE_END 0x100000000
+
+struct lima_device;
+
+struct lima_vm_page {
+ u32 *cpu;
+ dma_addr_t dma;
+};
+
+struct lima_vm {
+ struct mutex lock;
+ struct kref refcount;
+
+ struct drm_mm mm;
+
+ struct lima_device *dev;
+
+ struct lima_vm_page pd;
+ struct lima_vm_page bts[LIMA_VM_NUM_BT];
+};
+
+int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create);
+void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo);
+
+u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo);
+
+struct lima_vm *lima_vm_create(struct lima_device *dev);
+void lima_vm_release(struct kref *kref);
+
+static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
+{
+ kref_get(&vm->refcount);
+ return vm;
+}
+
+static inline void lima_vm_put(struct lima_vm *vm)
+{
+ kref_put(&vm->refcount, lima_vm_release);
+}
+
+void lima_vm_print(struct lima_vm *vm);
+
+#endif
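
A brief sketch of the mapping lifecycle exposed by this header, with hypothetical vm/bo variables and error handling trimmed.

	int err;
	u32 va;

	err = lima_vm_bo_add(vm, bo, true);  /* create the mapping; drm_mm picks the VA */
	if (err)
		return err;

	va = lima_vm_get_va(vm, bo);         /* GPU virtual address to use in job frames */

	lima_vm_bo_del(vm, bo);              /* unmap once the per-VM ref count drops to 0 */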
diff --git a/drivers/gpu/drm/meson/Makefile b/drivers/gpu/drm/meson/Makefile
index 7709f2fbb9f7..d4ea82fc493b 100644
--- a/drivers/gpu/drm/meson/Makefile
+++ b/drivers/gpu/drm/meson/Makefile
@@ -1,5 +1,5 @@
meson-drm-y := meson_drv.o meson_plane.o meson_crtc.o meson_venc_cvbs.o
-meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_canvas.o meson_overlay.o
+meson-drm-y += meson_viu.o meson_vpp.o meson_venc.o meson_vclk.o meson_overlay.o
obj-$(CONFIG_DRM_MESON) += meson-drm.o
obj-$(CONFIG_DRM_MESON_DW_HDMI) += meson_dw_hdmi.o
diff --git a/drivers/gpu/drm/meson/meson_canvas.c b/drivers/gpu/drm/meson/meson_canvas.c
deleted file mode 100644
index 5de11aa7c775..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2015 Amlogic, Inc. All rights reserved.
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include "meson_drv.h"
-#include "meson_canvas.h"
-#include "meson_registers.h"
-
-/**
- * DOC: Canvas
- *
- * CANVAS is a memory zone where physical memory frames information
- * are stored for the VIU to scanout.
- */
-
-/* DMC Registers */
-#define DMC_CAV_LUT_DATAL 0x48 /* 0x12 offset in data sheet */
-#define CANVAS_WIDTH_LBIT 29
-#define CANVAS_WIDTH_LWID 3
-#define DMC_CAV_LUT_DATAH 0x4c /* 0x13 offset in data sheet */
-#define CANVAS_WIDTH_HBIT 0
-#define CANVAS_HEIGHT_BIT 9
-#define CANVAS_BLKMODE_BIT 24
-#define CANVAS_ENDIAN_BIT 26
-#define DMC_CAV_LUT_ADDR 0x50 /* 0x14 offset in data sheet */
-#define CANVAS_LUT_WR_EN (0x2 << 8)
-#define CANVAS_LUT_RD_EN (0x1 << 8)
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian)
-{
- unsigned int val;
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAL,
- (((addr + 7) >> 3)) |
- (((stride + 7) >> 3) << CANVAS_WIDTH_LBIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_DATAH,
- ((((stride + 7) >> 3) >> CANVAS_WIDTH_LWID) <<
- CANVAS_WIDTH_HBIT) |
- (height << CANVAS_HEIGHT_BIT) |
- (wrap << 22) |
- (blkmode << CANVAS_BLKMODE_BIT) |
- (endian << CANVAS_ENDIAN_BIT));
-
- regmap_write(priv->dmc, DMC_CAV_LUT_ADDR,
- CANVAS_LUT_WR_EN | canvas_index);
-
- /* Force a read-back to make sure everything is flushed. */
- regmap_read(priv->dmc, DMC_CAV_LUT_DATAH, &val);
-}
diff --git a/drivers/gpu/drm/meson/meson_canvas.h b/drivers/gpu/drm/meson/meson_canvas.h
deleted file mode 100644
index 85dbf26e2826..000000000000
--- a/drivers/gpu/drm/meson/meson_canvas.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2016 BayLibre, SAS
- * Author: Neil Armstrong <narmstrong@baylibre.com>
- * Copyright (C) 2014 Endless Mobile
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* Canvas LUT Memory */
-
-#ifndef __MESON_CANVAS_H
-#define __MESON_CANVAS_H
-
-#define MESON_CANVAS_ID_OSD1 0x4e
-#define MESON_CANVAS_ID_VD1_0 0x60
-#define MESON_CANVAS_ID_VD1_1 0x61
-#define MESON_CANVAS_ID_VD1_2 0x62
-
-/* Canvas configuration. */
-#define MESON_CANVAS_WRAP_NONE 0x00
-#define MESON_CANVAS_WRAP_X 0x01
-#define MESON_CANVAS_WRAP_Y 0x02
-
-#define MESON_CANVAS_BLKMODE_LINEAR 0x00
-#define MESON_CANVAS_BLKMODE_32x32 0x01
-#define MESON_CANVAS_BLKMODE_64x64 0x02
-
-#define MESON_CANVAS_ENDIAN_SWAP16 0x1
-#define MESON_CANVAS_ENDIAN_SWAP32 0x3
-#define MESON_CANVAS_ENDIAN_SWAP64 0x7
-#define MESON_CANVAS_ENDIAN_SWAP128 0xf
-
-void meson_canvas_setup(struct meson_drm *priv,
- uint32_t canvas_index, uint32_t addr,
- uint32_t stride, uint32_t height,
- unsigned int wrap,
- unsigned int blkmode,
- unsigned int endian);
-
-#endif /* __MESON_CANVAS_H */
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 43e29984f8b1..5579f8ac3e3f 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -37,15 +37,19 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
+#define MESON_G12A_VIU_OFFSET 0x5ec0
+
/* CRTC definition */
struct meson_crtc {
struct drm_crtc base;
struct drm_pending_vblank_event *event;
struct meson_drm *priv;
+ void (*enable_osd1)(struct meson_drm *priv);
+ void (*enable_vd1)(struct meson_drm *priv);
+ unsigned int viu_offset;
};
#define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -81,6 +85,44 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
};
+static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct drm_crtc_state *crtc_state = crtc->state;
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ if (!crtc_state) {
+ DRM_ERROR("Invalid crtc_state\n");
+ return;
+ }
+
+ /* VD1 Preblend vertical start/end */
+ writel(FIELD_PREP(GENMASK(11, 0), 2303),
+ priv->io_base + _REG(VPP_PREBLEND_VD1_V_START_END));
+
+ /* Setup Blender */
+ writel(crtc_state->mode.hdisplay |
+ crtc_state->mode.vdisplay << 16,
+ priv->io_base + _REG(VPP_POSTBLEND_H_SIZE));
+
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.hdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_H_SCOPE));
+ writel_relaxed(0 << 16 |
+ (crtc_state->mode.vdisplay - 1),
+ priv->io_base + _REG(VPP_OSD1_BLD_V_SCOPE));
+ writel_relaxed(crtc_state->mode.hdisplay << 16 |
+ crtc_state->mode.vdisplay,
+ priv->io_base + _REG(VPP_OUT_H_V_SIZE));
+
+ drm_crtc_vblank_on(crtc);
+
+ priv->viu.osd1_enabled = true;
+}
+
static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -111,6 +153,31 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
priv->viu.osd1_enabled = true;
}
+static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
+ struct meson_drm *priv = meson_crtc->priv;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_crtc_vblank_off(crtc);
+
+ priv->viu.osd1_enabled = false;
+ priv->viu.osd1_commit = false;
+
+ priv->viu.vd1_enabled = false;
+ priv->viu.vd1_commit = false;
+
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+
+ crtc->state->event = NULL;
+ }
+}
+
static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -174,6 +241,53 @@ static const struct drm_crtc_helper_funcs meson_crtc_helper_funcs = {
.atomic_disable = meson_crtc_atomic_disable,
};
+static const struct drm_crtc_helper_funcs meson_g12a_crtc_helper_funcs = {
+ .atomic_begin = meson_crtc_atomic_begin,
+ .atomic_flush = meson_crtc_atomic_flush,
+ .atomic_enable = meson_g12a_crtc_atomic_enable,
+ .atomic_disable = meson_g12a_crtc_atomic_disable,
+};
+
+static void meson_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
+{
+ writel_relaxed(priv->viu.osd_blend_din0_scope_h,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_H));
+ writel_relaxed(priv->viu.osd_blend_din0_scope_v,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_DIN0_SCOPE_V));
+ writel_relaxed(priv->viu.osb_blend0_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND0_SIZE));
+ writel_relaxed(priv->viu.osb_blend1_size,
+ priv->io_base +
+ _REG(VIU_OSD_BLEND_BLEND1_SIZE));
+}
+
+static void meson_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
+ VPP_COLOR_MNG_ENABLE,
+ priv->io_base + _REG(VPP_MISC));
+}
+
+static void meson_g12a_crtc_enable_vd1(struct meson_drm *priv)
+{
+ writel_relaxed(((1 << 16) | /* post bld premult*/
+ (1 << 8) | /* post src */
+ (1 << 4) | /* pre bld premult*/
+ (1 << 0)),
+ priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+}
+
void meson_crtc_irq(struct meson_drm *priv)
{
struct meson_crtc *meson_crtc = to_meson_crtc(priv->crtc);
@@ -214,20 +328,14 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(priv->viu.osd_sc_v_ctrl0,
priv->io_base + _REG(VPP_OSD_VSC_CTRL0));
- if (priv->canvas)
- meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
- priv->viu.osd1_addr, priv->viu.osd1_stride,
- priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR, 0);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ meson_canvas_config(priv->canvas, priv->canvas_id_osd1,
priv->viu.osd1_addr, priv->viu.osd1_stride,
priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
MESON_CANVAS_BLKMODE_LINEAR, 0);
/* Enable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_osd1)
+ meson_crtc->enable_osd1(priv);
priv->viu.osd1_commit = false;
}
@@ -237,147 +345,164 @@ void meson_crtc_irq(struct meson_drm *priv)
switch (priv->viu.vd1_planes) {
case 3:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_2,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_2,
+ priv->viu.vd1_addr2,
+ priv->viu.vd1_stride2,
+ priv->viu.vd1_height2,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 2:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_1,
- priv->viu.vd1_addr1,
- priv->viu.vd1_stride1,
- priv->viu.vd1_height1,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_1,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_1,
+ priv->viu.vd1_addr1,
+ priv->viu.vd1_stride1,
+ priv->viu.vd1_height1,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
/* fallthrough */
case 1:
- if (priv->canvas)
- meson_canvas_config(priv->canvas,
- priv->canvas_id_vd1_0,
- priv->viu.vd1_addr0,
- priv->viu.vd1_stride0,
- priv->viu.vd1_height0,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
- else
- meson_canvas_setup(priv, MESON_CANVAS_ID_VD1_0,
- priv->viu.vd1_addr2,
- priv->viu.vd1_stride2,
- priv->viu.vd1_height2,
- MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR,
- MESON_CANVAS_ENDIAN_SWAP64);
+ meson_canvas_config(priv->canvas,
+ priv->canvas_id_vd1_0,
+ priv->viu.vd1_addr0,
+ priv->viu.vd1_stride0,
+ priv->viu.vd1_height0,
+ MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR,
+ MESON_CANVAS_ENDIAN_SWAP64);
};
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD1_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg,
- priv->io_base + _REG(VD2_IF0_GEN_REG));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_GEN_REG));
writel_relaxed(priv->viu.vd1_if0_gen_reg2,
- priv->io_base + _REG(VD1_IF0_GEN_REG2));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_GEN_REG2));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD1_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_ctrl,
- priv->io_base + _REG(VIU_VD2_FMT_CTRL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_CTRL));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD1_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD1_FMT_W));
writel_relaxed(priv->viu.viu_vd1_fmt_w,
- priv->io_base + _REG(VIU_VD2_FMT_W));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VIU_VD2_FMT_W));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD1_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS0));
writel_relaxed(priv->viu.vd1_if0_canvas0,
- priv->io_base + _REG(VD2_IF0_CANVAS1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CANVAS1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD1_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X0));
writel_relaxed(priv->viu.vd1_if0_luma_x0,
- priv->io_base + _REG(VD2_IF0_LUMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_X1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD1_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y0));
writel_relaxed(priv->viu.vd1_if0_luma_y0,
- priv->io_base + _REG(VD2_IF0_LUMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD1_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X0));
writel_relaxed(priv->viu.vd1_if0_chroma_x0,
- priv->io_base + _REG(VD2_IF0_CHROMA_X1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_X1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD1_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y0));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y0));
writel_relaxed(priv->viu.vd1_if0_chroma_y0,
- priv->io_base + _REG(VD2_IF0_CHROMA_Y1));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_Y1));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD1_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_repeat_loop,
- priv->io_base + _REG(VD2_IF0_RPT_LOOP));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_RPT_LOOP));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_luma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_LUMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA0_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA0_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD1_IF0_CHROMA1_RPT_PAT));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA1_RPT_PAT));
writel_relaxed(priv->viu.vd1_if0_chroma0_rpt_pat,
- priv->io_base + _REG(VD2_IF0_CHROMA1_RPT_PAT));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD1_IF0_CHROMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_LUMA_PSEL));
- writel_relaxed(0, priv->io_base + _REG(VD2_IF0_CHROMA_PSEL));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA1_RPT_PAT));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_CHROMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_LUMA_PSEL));
+ writel_relaxed(0, priv->io_base + meson_crtc->viu_offset +
+ _REG(VD2_IF0_CHROMA_PSEL));
writel_relaxed(priv->viu.vd1_range_map_y,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_Y));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_Y));
writel_relaxed(priv->viu.vd1_range_map_cb,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CB));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CB));
writel_relaxed(priv->viu.vd1_range_map_cr,
- priv->io_base + _REG(VD1_IF0_RANGE_MAP_CR));
+ priv->io_base + meson_crtc->viu_offset +
+ _REG(VD1_IF0_RANGE_MAP_CR));
writel_relaxed(0x78404,
priv->io_base + _REG(VPP_SC_MISC));
writel_relaxed(priv->viu.vpp_pic_in_height,
@@ -423,11 +548,8 @@ void meson_crtc_irq(struct meson_drm *priv)
writel_relaxed(0x42, priv->io_base + _REG(VPP_SCALE_COEF_IDX));
/* Enable VD1 */
- writel_bits_relaxed(VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- VPP_VD1_PREBLEND | VPP_VD1_POSTBLEND |
- VPP_COLOR_MNG_ENABLE,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_crtc->enable_vd1)
+ meson_crtc->enable_vd1(priv);
priv->viu.vd1_commit = false;
}
@@ -464,7 +586,16 @@ int meson_crtc_create(struct meson_drm *priv)
return ret;
}
- drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ meson_crtc->enable_osd1 = meson_g12a_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_g12a_crtc_enable_vd1;
+ meson_crtc->viu_offset = MESON_G12A_VIU_OFFSET;
+ drm_crtc_helper_add(crtc, &meson_g12a_crtc_helper_funcs);
+ } else {
+ meson_crtc->enable_osd1 = meson_crtc_enable_osd1;
+ meson_crtc->enable_vd1 = meson_crtc_enable_vd1;
+ drm_crtc_helper_add(crtc, &meson_crtc_helper_funcs);
+ }
priv->crtc = crtc;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 8a4ebcb6405c..72b01e6be0d9 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -48,7 +48,6 @@
#include "meson_vpp.h"
#include "meson_viu.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
#define DRIVER_NAME "meson"
@@ -91,6 +90,18 @@ static irqreturn_t meson_irq(int irq, void *arg)
return IRQ_HANDLED;
}
+static int meson_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ /*
+	 * We need a 64-byte aligned stride and a PAGE-aligned size
+ */
+ args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), SZ_64);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ return drm_gem_cma_dumb_create_internal(file, dev, args);
+}
+
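
A worked example of the alignment above (not part of the patch): for a 1920x1080, 32 bpp dumb buffer the helper computes

	/*
	 * pitch = ALIGN(DIV_ROUND_UP(1920 * 32, 8), 64)
	 *       = ALIGN(7680, 64) = 7680 bytes (already 64-byte aligned)
	 * size  = PAGE_ALIGN(7680 * 1080)
	 *       = PAGE_ALIGN(8294400) = 8294400 bytes (a multiple of 4 KiB)
	 */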
DEFINE_DRM_GEM_CMA_FOPS(fops);
static struct drm_driver meson_driver = {
@@ -113,7 +124,7 @@ static struct drm_driver meson_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
/* GEM Ops */
- .dumb_create = drm_gem_cma_dumb_create,
+ .dumb_create = meson_dumb_create,
.gem_free_object_unlocked = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
@@ -231,50 +242,31 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
priv->canvas = meson_canvas_get(dev);
- if (!IS_ERR(priv->canvas)) {
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
- if (ret)
- goto free_drm;
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- goto free_drm;
- }
- ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
- if (ret) {
- meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
- meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
- goto free_drm;
- }
- } else {
- priv->canvas = NULL;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res) {
- ret = -EINVAL;
- goto free_drm;
- }
- /* Simply ioremap since it may be a shared register zone */
- regs = devm_ioremap(dev, res->start, resource_size(res));
- if (!regs) {
- ret = -EADDRNOTAVAIL;
- goto free_drm;
- }
+ if (IS_ERR(priv->canvas)) {
+ ret = PTR_ERR(priv->canvas);
+ goto free_drm;
+ }
- priv->dmc = devm_regmap_init_mmio(dev, regs,
- &meson_regmap_config);
- if (IS_ERR(priv->dmc)) {
- dev_err(&pdev->dev, "Couldn't create the DMC regmap\n");
- ret = PTR_ERR(priv->dmc);
- goto free_drm;
- }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_osd1);
+ if (ret)
+ goto free_drm;
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_0);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_1);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ goto free_drm;
+ }
+ ret = meson_canvas_alloc(priv->canvas, &priv->canvas_id_vd1_2);
+ if (ret) {
+ meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_0);
+ meson_canvas_free(priv->canvas, priv->canvas_id_vd1_1);
+ goto free_drm;
}
priv->vsync_irq = platform_get_irq(pdev, 0);
@@ -467,6 +459,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "amlogic,meson-gxbb-vpu" },
{ .compatible = "amlogic,meson-gxl-vpu" },
{ .compatible = "amlogic,meson-gxm-vpu" },
+ { .compatible = "amlogic,meson-g12a-vpu" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 4dccf4cd042a..9614baa836b9 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -29,7 +29,6 @@ struct meson_drm {
struct device *dev;
void __iomem *io_base;
struct regmap *hhi;
- struct regmap *dmc;
int vsync_irq;
struct meson_canvas *canvas;
@@ -63,6 +62,10 @@ struct meson_drm {
uint32_t osd_sc_h_phase_step;
uint32_t osd_sc_h_ctrl0;
uint32_t osd_sc_v_ctrl0;
+ uint32_t osd_blend_din0_scope_h;
+ uint32_t osd_blend_din0_scope_v;
+ uint32_t osb_blend0_size;
+ uint32_t osb_blend1_size;
bool vd1_enabled;
bool vd1_commit;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 563953ec6ad0..779da21143b9 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/component.h>
+#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/reset.h>
#include <linux/clk.h>
@@ -105,6 +106,7 @@
#define HDMITX_TOP_ADDR_REG 0x0
#define HDMITX_TOP_DATA_REG 0x4
#define HDMITX_TOP_CTRL_REG 0x8
+#define HDMITX_TOP_G12A_OFFSET 0x8000
/* Controller Communication Channel */
#define HDMITX_DWC_ADDR_REG 0x10
@@ -118,6 +120,8 @@
#define HHI_HDMI_PHY_CNTL1 0x3a4 /* 0xe9 */
#define HHI_HDMI_PHY_CNTL2 0x3a8 /* 0xea */
#define HHI_HDMI_PHY_CNTL3 0x3ac /* 0xeb */
+#define HHI_HDMI_PHY_CNTL4 0x3b0 /* 0xec */
+#define HHI_HDMI_PHY_CNTL5 0x3b4 /* 0xed */
static DEFINE_SPINLOCK(reg_lock);
@@ -127,12 +131,26 @@ enum meson_venc_source {
MESON_VENC_SOURCE_ENCP = 2,
};
+struct meson_dw_hdmi;
+
+struct meson_dw_hdmi_data {
+ unsigned int (*top_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*top_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+ unsigned int (*dwc_read)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr);
+ void (*dwc_write)(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data);
+};
+
struct meson_dw_hdmi {
struct drm_encoder encoder;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
void __iomem *hdmitx;
+ const struct meson_dw_hdmi_data *data;
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
@@ -174,6 +192,12 @@ static unsigned int dw_hdmi_top_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_top_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readl(dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -191,18 +215,24 @@ static inline void dw_hdmi_top_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_top_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writel(data, dw_hdmi->hdmitx + HDMITX_TOP_G12A_OFFSET + (addr << 2));
+}
+
/* Helper to change specific bits in PHY registers */
static inline void dw_hdmi_top_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_top_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->top_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_top_write(dw_hdmi, addr, data);
+ dw_hdmi->data->top_write(dw_hdmi, addr, data);
}
static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
@@ -226,6 +256,12 @@ static unsigned int dw_hdmi_dwc_read(struct meson_dw_hdmi *dw_hdmi,
return data;
}
+static unsigned int dw_hdmi_g12a_dwc_read(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr)
+{
+ return readb(dw_hdmi->hdmitx + addr);
+}
+
static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr, unsigned int data)
{
@@ -243,18 +279,24 @@ static inline void dw_hdmi_dwc_write(struct meson_dw_hdmi *dw_hdmi,
spin_unlock_irqrestore(&reg_lock, flags);
}
+static inline void dw_hdmi_g12a_dwc_write(struct meson_dw_hdmi *dw_hdmi,
+ unsigned int addr, unsigned int data)
+{
+ writeb(data, dw_hdmi->hdmitx + addr);
+}
+
/* Helper to change specific bits in controller registers */
static inline void dw_hdmi_dwc_write_bits(struct meson_dw_hdmi *dw_hdmi,
unsigned int addr,
unsigned int mask,
unsigned int val)
{
- unsigned int data = dw_hdmi_dwc_read(dw_hdmi, addr);
+ unsigned int data = dw_hdmi->data->dwc_read(dw_hdmi, addr);
data &= ~mask;
data |= val;
- dw_hdmi_dwc_write(dw_hdmi, addr, data);
+ dw_hdmi->data->dwc_write(dw_hdmi, addr, data);
}
/* Bridge */
@@ -300,6 +342,24 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33632122);
regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2000115b);
}
+ } else if (dw_hdmi_is_compatible(dw_hdmi,
+ "amlogic,meson-g12a-dw-hdmi")) {
+ if (pixel_clock >= 371250) {
+ /* 5.94Gbps, 3.7125Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x37eb65c4);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x0000080b);
+ } else if (pixel_clock >= 297000) {
+ /* 2.97Gbps */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb6262);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ } else {
+ /* 1.485Gbps, and below */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0x33eb4242);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL3, 0x2ab0ff3b);
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL5, 0x00000003);
+ }
}
}
@@ -375,7 +435,7 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0);
/* Bring out of reset */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_SW_RESET, 0);
/* Enable internal pixclk, tmds_clk, spdif_clk, i2s_clk, cecclk */
dw_hdmi_top_write_bits(dw_hdmi, HDMITX_TOP_CLK_CNTL,
@@ -384,24 +444,25 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
0x3 << 4, 0x3 << 4);
/* Enable normal output to PHY */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
/* TMDS pattern setup (TOFIX Handle the YUV420 case) */
if (mode->clock > 340000) {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01, 0);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ 0);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x03ff03ff);
} else {
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0x001f001f);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
0x001f001f);
}
/* Load TMDS pattern */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x1);
msleep(20);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_CNTL, 0x2);
/* Setup PHY parameters */
meson_hdmi_phy_setup_mode(dw_hdmi, mode);
@@ -412,7 +473,8 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* BIT_INVERT */
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
- dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi"))
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
+ dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-g12a-dw-hdmi"))
regmap_update_bits(priv->hhi, HHI_HDMI_PHY_CNTL1,
BIT(17), 0);
else
@@ -480,7 +542,7 @@ static enum drm_connector_status dw_hdmi_read_hpd(struct dw_hdmi *hdmi,
{
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
- return !!dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
+ return !!dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_STAT0) ?
connector_status_connected : connector_status_disconnected;
}
@@ -490,11 +552,11 @@ static void dw_hdmi_setup_hpd(struct dw_hdmi *hdmi,
struct meson_dw_hdmi *dw_hdmi = (struct meson_dw_hdmi *)data;
/* Setup HPD Filter */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_HPD_FILTER,
(0xa << 12) | 0xa0);
/* Clear interrupts */
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL);
/* Unmask interrupts */
@@ -515,8 +577,8 @@ static irqreturn_t dw_hdmi_top_irq(int irq, void *dev_id)
struct meson_dw_hdmi *dw_hdmi = dev_id;
u32 stat;
- stat = dw_hdmi_top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
- dw_hdmi_top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
+ stat = dw_hdmi->data->top_read(dw_hdmi, HDMITX_TOP_INTR_STAT);
+ dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, stat);
/* HPD Events, handle in the threaded interrupt handler */
if (stat & (HDMITX_TOP_INTR_HPD_RISE | HDMITX_TOP_INTR_HPD_FALL)) {
@@ -686,7 +748,9 @@ static const struct drm_encoder_helper_funcs
static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
unsigned int *result)
{
- *result = dw_hdmi_dwc_read(context, reg);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ *result = dw_hdmi->data->dwc_read(dw_hdmi, reg);
return 0;
@@ -695,7 +759,9 @@ static int meson_dw_hdmi_reg_read(void *context, unsigned int reg,
static int meson_dw_hdmi_reg_write(void *context, unsigned int reg,
unsigned int val)
{
- dw_hdmi_dwc_write(context, reg, val);
+ struct meson_dw_hdmi *dw_hdmi = context;
+
+ dw_hdmi->data->dwc_write(dw_hdmi, reg, val);
return 0;
}
@@ -709,6 +775,20 @@ static const struct regmap_config meson_dw_hdmi_regmap_config = {
.fast_io = true,
};
+static const struct meson_dw_hdmi_data meson_dw_hdmi_gx_data = {
+ .top_read = dw_hdmi_top_read,
+ .top_write = dw_hdmi_top_write,
+ .dwc_read = dw_hdmi_dwc_read,
+ .dwc_write = dw_hdmi_dwc_write,
+};
+
+static const struct meson_dw_hdmi_data meson_dw_hdmi_g12a_data = {
+ .top_read = dw_hdmi_g12a_top_read,
+ .top_write = dw_hdmi_g12a_top_write,
+ .dwc_read = dw_hdmi_g12a_dwc_read,
+ .dwc_write = dw_hdmi_g12a_dwc_write,
+};
+
static bool meson_hdmi_connector_is_available(struct device *dev)
{
struct device_node *ep, *remote;
@@ -735,6 +815,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
struct platform_device *pdev = to_platform_device(dev);
+ const struct meson_dw_hdmi_data *match;
struct meson_dw_hdmi *meson_dw_hdmi;
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
@@ -751,6 +832,12 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return -ENODEV;
}
+ match = of_device_get_match_data(&pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ return -ENODEV;
+ }
+
meson_dw_hdmi = devm_kzalloc(dev, sizeof(*meson_dw_hdmi),
GFP_KERNEL);
if (!meson_dw_hdmi)
@@ -758,6 +845,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
meson_dw_hdmi->priv = priv;
meson_dw_hdmi->dev = dev;
+ meson_dw_hdmi->data = match;
dw_plat_data = &meson_dw_hdmi->dw_plat_data;
encoder = &meson_dw_hdmi->encoder;
@@ -858,24 +946,28 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
reset_control_reset(meson_dw_hdmi->hdmitx_phy);
/* Enable APB3 fail on error */
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
- writel_bits_relaxed(BIT(15), BIT(15),
- meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG);
+ writel_bits_relaxed(BIT(15), BIT(15),
+ meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG);
+ }
/* Bring out of reset */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_SW_RESET, 0);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_SW_RESET, 0);
msleep(20);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_CLK_CNTL, 0xff);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi,
+ HDMITX_TOP_CLK_CNTL, 0xff);
/* Enable HDMI-TX Interrupt */
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR,
+ HDMITX_TOP_INTR_CORE);
- dw_hdmi_top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
- HDMITX_TOP_INTR_CORE);
+ meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN,
+ HDMITX_TOP_INTR_CORE);
/* Bridge / Connector */
@@ -924,9 +1016,14 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev)
}
static const struct of_device_id meson_dw_hdmi_of_table[] = {
- { .compatible = "amlogic,meson-gxbb-dw-hdmi" },
- { .compatible = "amlogic,meson-gxl-dw-hdmi" },
- { .compatible = "amlogic,meson-gxm-dw-hdmi" },
+ { .compatible = "amlogic,meson-gxbb-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxl-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-gxm-dw-hdmi",
+ .data = &meson_dw_hdmi_gx_data },
+ { .compatible = "amlogic,meson-g12a-dw-hdmi",
+ .data = &meson_dw_hdmi_g12a_data },
{ }
};
MODULE_DEVICE_TABLE(of, meson_dw_hdmi_of_table);
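
A standalone C sketch (userspace analogue with hypothetical names) of the dispatch pattern the table above enables: of_device_get_match_data() hands the driver a per-SoC set of register accessors, and every TOP/DWC access then goes through those function pointers instead of the GX-only helpers.

#include <stdio.h>
#include <string.h>

struct hdmi_ops {
	unsigned int (*top_read)(unsigned int addr);
	void (*top_write)(unsigned int addr, unsigned int data);
};

/* GX-style accessors (indirect address/data register pair in the real driver) */
static unsigned int gx_top_read(unsigned int addr)
{ printf("GX   read  TOP[0x%x]\n", addr); return 0; }
static void gx_top_write(unsigned int addr, unsigned int data)
{ printf("GX   write TOP[0x%x] = 0x%x\n", addr, data); }

/* G12A-style accessors (flat MMIO at a fixed offset in the real driver) */
static unsigned int g12a_top_read(unsigned int addr)
{ printf("G12A read  TOP[0x%x]\n", addr); return 0; }
static void g12a_top_write(unsigned int addr, unsigned int data)
{ printf("G12A write TOP[0x%x] = 0x%x\n", addr, data); }

static const struct hdmi_ops gx_ops   = { gx_top_read,   gx_top_write };
static const struct hdmi_ops g12a_ops = { g12a_top_read, g12a_top_write };

/* Stand-in for of_device_get_match_data(): pick the ops from the compatible */
static const struct hdmi_ops *get_match_data(const char *compatible)
{
	if (!strcmp(compatible, "amlogic,meson-g12a-dw-hdmi"))
		return &g12a_ops;
	return &gx_ops;
}

int main(void)
{
	const struct hdmi_ops *ops = get_match_data("amlogic,meson-g12a-dw-hdmi");
	unsigned int v;

	/* A read-modify-write now goes through the selected accessors */
	v = ops->top_read(0x1);
	ops->top_write(0x1, v | 0xff);
	return 0;
}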
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.h b/drivers/gpu/drm/meson/meson_dw_hdmi.h
index 0b81183125e3..03e2f0c1a2d5 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.h
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.h
@@ -21,9 +21,12 @@
#define __MESON_DW_HDMI_H
/*
- * Bit 7 RW Reserved. Default 1.
- * Bit 6 RW Reserved. Default 1.
- * Bit 5 RW Reserved. Default 1.
+ * Bit 15-10: RW Reserved. Default 1 starting from G12A
+ * Bit 9 RW sw_reset_i2c starting from G12A
+ * Bit 8 RW sw_reset_axiarb starting from G12A
+ * Bit 7 RW Reserved. Default 1, sw_reset_emp starting from G12A
+ * Bit 6 RW Reserved. Default 1, sw_reset_flt starting from G12A
+ * Bit 5 RW Reserved. Default 1, sw_reset_hdcp22 starting from G12A
* Bit 4 RW sw_reset_phyif: PHY interface. 1=Apply reset; 0=Release from reset.
* Default 1.
* Bit 3 RW sw_reset_intr: interrupt module. 1=Apply reset;
@@ -39,12 +42,16 @@
#define HDMITX_TOP_SW_RESET (0x000)
/*
+ * Bit 31 RW free_clk_en: 0=Enable clock gating for power saving; 1=Disable
* Bit 12 RW i2s_ws_inv:1=Invert i2s_ws; 0=No invert. Default 0.
* Bit 11 RW i2s_clk_inv: 1=Invert i2s_clk; 0=No invert. Default 0.
* Bit 10 RW spdif_clk_inv: 1=Invert spdif_clk; 0=No invert. Default 0.
* Bit 9 RW tmds_clk_inv: 1=Invert tmds_clk; 0=No invert. Default 0.
* Bit 8 RW pixel_clk_inv: 1=Invert pixel_clk; 0=No invert. Default 0.
- * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0.
+ * Bit 7 RW hdcp22_skpclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 6 RW hdcp22_esmclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 5 RW hdcp22_tmdsclk_en: starting from G12A, 1=enable; 0=disable
+ * Bit 4 RW cec_clk_en: 1=enable cec_clk; 0=disable. Default 0. Reserved for G12A
* Bit 3 RW i2s_clk_en: 1=enable i2s_clk; 0=disable. Default 0.
* Bit 2 RW spdif_clk_en: 1=enable spdif_clk; 0=disable. Default 0.
* Bit 1 RW tmds_clk_en: 1=enable tmds_clk; 0=disable. Default 0.
@@ -53,6 +60,8 @@
#define HDMITX_TOP_CLK_CNTL (0x001)
/*
+ * Bit 31:28 RW rxsense_glitch_width: starting from G12A
+ * Bit 27:16 RW rxsense_valid_width: starting from G12A
* Bit 11: 0 RW hpd_valid_width: filter out width <= M*1024. Default 0.
* Bit 15:12 RW hpd_glitch_width: filter out glitch <= N. Default 0.
*/
@@ -61,6 +70,9 @@
/*
* intr_maskn: MASK_N, one bit per interrupt source.
* 1=Enable interrupt source; 0=Disable interrupt source. Default 0.
+ * [ 7] rxsense_fall starting from G12A
+ * [ 6] rxsense_rise starting from G12A
+ * [ 5] err_i2c_timeout starting from G12A
* [ 4] hdcp22_rndnum_err
* [ 3] nonce_rfrsh_rise
* [ 2] hpd_fall_intr
@@ -73,6 +85,9 @@
* Bit 30: 0 RW intr_stat: For each bit, write 1 to manually set the interrupt
* bit, read back the interrupt status.
* Bit 31 R IP interrupt status
+ * Bit 7 RW rxsense_fall starting from G12A
+ * Bit 6 RW rxsense_rise starting from G12A
+ * Bit 5 RW err_i2c_timeout starting from G12A
* Bit 2 RW hpd_fall
* Bit 1 RW hpd_rise
* Bit 0 RW IP interrupt
@@ -80,6 +95,9 @@
#define HDMITX_TOP_INTR_STAT (0x004)
/*
+ * [7] rxsense_fall starting from G12A
+ * [6] rxsense_rise starting from G12A
+ * [5] err_i2c_timeout starting from G12A
* [4] hdcp22_rndnum_err
* [3] nonce_rfrsh_rise
* [2] hpd_fall
@@ -91,6 +109,8 @@
#define HDMITX_TOP_INTR_CORE BIT(0)
#define HDMITX_TOP_INTR_HPD_RISE BIT(1)
#define HDMITX_TOP_INTR_HPD_FALL BIT(2)
+#define HDMITX_TOP_INTR_RXSENSE_RISE BIT(6)
+#define HDMITX_TOP_INTR_RXSENSE_FALL BIT(7)
/* Bit 14:12 RW tmds_sel: 3'b000=Output zero; 3'b001=Output normal TMDS data;
* 3'b010=Output PRBS data; 3'b100=Output shift pattern. Default 0.
@@ -140,7 +160,9 @@
*/
#define HDMITX_TOP_REVOCMEM_STAT (0x00D)
-/* Bit 0 R filtered HPD status. */
+/* Bit 1 R filtered RxSense status
+ * Bit 0 R filtered HPD status.
+ */
#define HDMITX_TOP_STAT0 (0x00E)
#endif /* __MESON_DW_HDMI_H */
diff --git a/drivers/gpu/drm/meson/meson_overlay.c b/drivers/gpu/drm/meson/meson_overlay.c
index 691a9fd16b36..bdbf925ff3e8 100644
--- a/drivers/gpu/drm/meson/meson_overlay.c
+++ b/drivers/gpu/drm/meson/meson_overlay.c
@@ -22,7 +22,6 @@
#include "meson_overlay.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* VD1_IF0_GEN_REG */
@@ -350,13 +349,6 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
DRM_DEBUG_DRIVER("\n");
- /* Fallback is canvas provider is not available */
- if (!priv->canvas) {
- priv->canvas_id_vd1_0 = MESON_CANVAS_ID_VD1_0;
- priv->canvas_id_vd1_1 = MESON_CANVAS_ID_VD1_1;
- priv->canvas_id_vd1_2 = MESON_CANVAS_ID_VD1_2;
- }
-
interlace_mode = state->crtc->mode.flags & DRM_MODE_FLAG_INTERLACE;
spin_lock_irqsave(&priv->drm->event_lock, flags);
@@ -524,8 +516,14 @@ static void meson_overlay_atomic_disable(struct drm_plane *plane,
priv->viu.vd1_enabled = false;
/* Disable VD1 */
- writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_IF0_GEN_REG + 0x17b0));
+ writel_relaxed(0, priv->io_base + _REG(VD2_IF0_GEN_REG + 0x17b0));
+ } else
+ writel_bits_relaxed(VPP_VD1_POSTBLEND | VPP_VD1_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
}
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 6119a0224278..bf8f1fab63aa 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -38,7 +38,6 @@
#include "meson_plane.h"
#include "meson_vpp.h"
#include "meson_viu.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/* OSD_SCI_WH_M1 */
@@ -148,10 +147,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
(0xFF << OSD_GLOBAL_ALPHA_SHIFT) |
OSD_BLK0_ENABLE;
- if (priv->canvas)
- canvas_id_osd1 = priv->canvas_id_osd1;
- else
- canvas_id_osd1 = MESON_CANVAS_ID_OSD1;
+ canvas_id_osd1 = priv->canvas_id_osd1;
/* Set up BLK0 to point to the right canvas */
priv->viu.osd1_blk0_cfg[0] = ((canvas_id_osd1 << OSD_CANVAS_SEL) |
@@ -298,6 +294,13 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
priv->viu.osd1_blk0_cfg[3] = ((dest.x2 - 1) << 16) | dest.x1;
priv->viu.osd1_blk0_cfg[4] = ((dest.y2 - 1) << 16) | dest.y1;
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ priv->viu.osd_blend_din0_scope_h = ((dest.x2 - 1) << 16) | dest.x1;
+ priv->viu.osd_blend_din0_scope_v = ((dest.y2 - 1) << 16) | dest.y1;
+ priv->viu.osb_blend0_size = dst_h << 16 | dst_w;
+ priv->viu.osb_blend1_size = dst_h << 16 | dst_w;
+ }
+
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
@@ -324,8 +327,12 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
struct meson_drm *priv = meson_plane->priv;
/* Disable OSD1 */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_bits_relaxed(BIT(0) | BIT(21), 0,
+ priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+ else
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
meson_plane->enabled = false;
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 5c7e02c703bc..cfaf90501bb1 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -216,6 +216,29 @@
#define VIU_OSD2_FIFO_CTRL_STAT 0x1a4b
#define VIU_OSD2_TEST_RDDATA 0x1a4c
#define VIU_OSD2_PROT_CTRL 0x1a4e
+#define VIU_OSD2_MALI_UNPACK_CTRL 0x1abd
+#define VIU_OSD2_DIMM_CTRL 0x1acf
+
+#define VIU_OSD3_CTRL_STAT 0x3d80
+#define VIU_OSD3_CTRL_STAT2 0x3d81
+#define VIU_OSD3_COLOR_ADDR 0x3d82
+#define VIU_OSD3_COLOR 0x3d83
+#define VIU_OSD3_TCOLOR_AG0 0x3d84
+#define VIU_OSD3_TCOLOR_AG1 0x3d85
+#define VIU_OSD3_TCOLOR_AG2 0x3d86
+#define VIU_OSD3_TCOLOR_AG3 0x3d87
+#define VIU_OSD3_BLK0_CFG_W0 0x3d88
+#define VIU_OSD3_BLK0_CFG_W1 0x3d8c
+#define VIU_OSD3_BLK0_CFG_W2 0x3d90
+#define VIU_OSD3_BLK0_CFG_W3 0x3d94
+#define VIU_OSD3_BLK0_CFG_W4 0x3d98
+#define VIU_OSD3_BLK1_CFG_W4 0x3d99
+#define VIU_OSD3_BLK2_CFG_W4 0x3d9a
+#define VIU_OSD3_FIFO_CTRL_STAT 0x3d9c
+#define VIU_OSD3_TEST_RDDATA 0x3d9d
+#define VIU_OSD3_PROT_CTRL 0x3d9e
+#define VIU_OSD3_MALI_UNPACK_CTRL 0x3d9f
+#define VIU_OSD3_DIMM_CTRL 0x3da0
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
@@ -287,6 +310,27 @@
#define VIU_OSD1_MATRIX_COEF31_32 0x1a9e
#define VIU_OSD1_MATRIX_COEF40_41 0x1a9f
#define VD1_IF0_GEN_REG3 0x1aa7
+
+#define VIU_OSD_BLENDO_H_START_END 0x1aa9
+#define VIU_OSD_BLENDO_V_START_END 0x1aaa
+#define VIU_OSD_BLEND_GEN_CTRL0 0x1aab
+#define VIU_OSD_BLEND_GEN_CTRL1 0x1aac
+#define VIU_OSD_BLEND_DUMMY_DATA 0x1aad
+#define VIU_OSD_BLEND_CURRENT_XY 0x1aae
+
+#define VIU_OSD2_MATRIX_CTRL 0x1ab0
+#define VIU_OSD2_MATRIX_COEF00_01 0x1ab1
+#define VIU_OSD2_MATRIX_COEF02_10 0x1ab2
+#define VIU_OSD2_MATRIX_COEF11_12 0x1ab3
+#define VIU_OSD2_MATRIX_COEF20_21 0x1ab4
+#define VIU_OSD2_MATRIX_COEF22 0x1ab5
+#define VIU_OSD2_MATRIX_OFFSET0_1 0x1ab6
+#define VIU_OSD2_MATRIX_OFFSET2 0x1ab7
+#define VIU_OSD2_MATRIX_PRE_OFFSET0_1 0x1ab8
+#define VIU_OSD2_MATRIX_PRE_OFFSET2 0x1ab9
+#define VIU_OSD2_MATRIX_PROBE_COLOR 0x1aba
+#define VIU_OSD2_MATRIX_HL_COLOR 0x1abb
+#define VIU_OSD2_MATRIX_PROBE_POS 0x1abc
#define VIU_OSD1_EOTF_CTL 0x1ad4
#define VIU_OSD1_EOTF_COEF00_01 0x1ad5
#define VIU_OSD1_EOTF_COEF02_10 0x1ad6
@@ -481,6 +525,82 @@
#define VPP_OSD_SCALE_COEF 0x1dcd
#define VPP_INT_LINE_NUM 0x1dce
+#define VPP_WRAP_OSD1_MATRIX_COEF00_01 0x3d60
+#define VPP_WRAP_OSD1_MATRIX_COEF02_10 0x3d61
+#define VPP_WRAP_OSD1_MATRIX_COEF11_12 0x3d62
+#define VPP_WRAP_OSD1_MATRIX_COEF20_21 0x3d63
+#define VPP_WRAP_OSD1_MATRIX_COEF22 0x3d64
+#define VPP_WRAP_OSD1_MATRIX_COEF13_14 0x3d65
+#define VPP_WRAP_OSD1_MATRIX_COEF23_24 0x3d66
+#define VPP_WRAP_OSD1_MATRIX_COEF15_25 0x3d67
+#define VPP_WRAP_OSD1_MATRIX_CLIP 0x3d68
+#define VPP_WRAP_OSD1_MATRIX_OFFSET0_1 0x3d69
+#define VPP_WRAP_OSD1_MATRIX_OFFSET2 0x3d6a
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1 0x3d6b
+#define VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2 0x3d6c
+#define VPP_WRAP_OSD1_MATRIX_EN_CTRL 0x3d6d
+
+#define VPP_WRAP_OSD2_MATRIX_COEF00_01 0x3d70
+#define VPP_WRAP_OSD2_MATRIX_COEF02_10 0x3d71
+#define VPP_WRAP_OSD2_MATRIX_COEF11_12 0x3d72
+#define VPP_WRAP_OSD2_MATRIX_COEF20_21 0x3d73
+#define VPP_WRAP_OSD2_MATRIX_COEF22 0x3d74
+#define VPP_WRAP_OSD2_MATRIX_COEF13_14 0x3d75
+#define VPP_WRAP_OSD2_MATRIX_COEF23_24 0x3d76
+#define VPP_WRAP_OSD2_MATRIX_COEF15_25 0x3d77
+#define VPP_WRAP_OSD2_MATRIX_CLIP 0x3d78
+#define VPP_WRAP_OSD2_MATRIX_OFFSET0_1 0x3d79
+#define VPP_WRAP_OSD2_MATRIX_OFFSET2 0x3d7a
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET0_1 0x3d7b
+#define VPP_WRAP_OSD2_MATRIX_PRE_OFFSET2 0x3d7c
+#define VPP_WRAP_OSD2_MATRIX_EN_CTRL 0x3d7d
+
+#define VPP_WRAP_OSD3_MATRIX_COEF00_01 0x3db0
+#define VPP_WRAP_OSD3_MATRIX_COEF02_10 0x3db1
+#define VPP_WRAP_OSD3_MATRIX_COEF11_12 0x3db2
+#define VPP_WRAP_OSD3_MATRIX_COEF20_21 0x3db3
+#define VPP_WRAP_OSD3_MATRIX_COEF22 0x3db4
+#define VPP_WRAP_OSD3_MATRIX_COEF13_14 0x3db5
+#define VPP_WRAP_OSD3_MATRIX_COEF23_24 0x3db6
+#define VPP_WRAP_OSD3_MATRIX_COEF15_25 0x3db7
+#define VPP_WRAP_OSD3_MATRIX_CLIP 0x3db8
+#define VPP_WRAP_OSD3_MATRIX_OFFSET0_1 0x3db9
+#define VPP_WRAP_OSD3_MATRIX_OFFSET2 0x3dba
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET0_1 0x3dbb
+#define VPP_WRAP_OSD3_MATRIX_PRE_OFFSET2 0x3dbc
+#define VPP_WRAP_OSD3_MATRIX_EN_CTRL 0x3dbd
+
+/* osd2 scaler */
+#define OSD2_VSC_PHASE_STEP 0x3d00
+#define OSD2_VSC_INI_PHASE 0x3d01
+#define OSD2_VSC_CTRL0 0x3d02
+#define OSD2_HSC_PHASE_STEP 0x3d03
+#define OSD2_HSC_INI_PHASE 0x3d04
+#define OSD2_HSC_CTRL0 0x3d05
+#define OSD2_HSC_INI_PAT_CTRL 0x3d06
+#define OSD2_SC_DUMMY_DATA 0x3d07
+#define OSD2_SC_CTRL0 0x3d08
+#define OSD2_SCI_WH_M1 0x3d09
+#define OSD2_SCO_H_START_END 0x3d0a
+#define OSD2_SCO_V_START_END 0x3d0b
+#define OSD2_SCALE_COEF_IDX 0x3d18
+#define OSD2_SCALE_COEF 0x3d19
+
+/* osd34 scaler */
+#define OSD34_SCALE_COEF_IDX 0x3d1e
+#define OSD34_SCALE_COEF 0x3d1f
+#define OSD34_VSC_PHASE_STEP 0x3d20
+#define OSD34_VSC_INI_PHASE 0x3d21
+#define OSD34_VSC_CTRL0 0x3d22
+#define OSD34_HSC_PHASE_STEP 0x3d23
+#define OSD34_HSC_INI_PHASE 0x3d24
+#define OSD34_HSC_CTRL0 0x3d25
+#define OSD34_HSC_INI_PAT_CTRL 0x3d26
+#define OSD34_SC_DUMMY_DATA 0x3d27
+#define OSD34_SC_CTRL0 0x3d28
+#define OSD34_SCI_WH_M1 0x3d29
+#define OSD34_SCO_H_START_END 0x3d2a
+#define OSD34_SCO_V_START_END 0x3d2b
/* viu2 */
#define VIU2_ADDR_START 0x1e00
#define VIU2_ADDR_END 0x1eff
@@ -1400,4 +1520,131 @@
#define OSDSR_YBIC_VCOEF0 0x3149
#define OSDSR_CBIC_VCOEF0 0x314a
+/* osd afbcd on gxtvbb */
+#define OSD1_AFBCD_ENABLE 0x31a0
+#define OSD1_AFBCD_MODE 0x31a1
+#define OSD1_AFBCD_SIZE_IN 0x31a2
+#define OSD1_AFBCD_HDR_PTR 0x31a3
+#define OSD1_AFBCD_FRAME_PTR 0x31a4
+#define OSD1_AFBCD_CHROMA_PTR 0x31a5
+#define OSD1_AFBCD_CONV_CTRL 0x31a6
+#define OSD1_AFBCD_STATUS 0x31a8
+#define OSD1_AFBCD_PIXEL_HSCOPE 0x31a9
+#define OSD1_AFBCD_PIXEL_VSCOPE 0x31aa
+#define VIU_MISC_CTRL1 0x1a07
+
+/* add for gxm and 962e dv core2 */
+#define DOLBY_CORE2A_SWAP_CTRL1 0x3434
+#define DOLBY_CORE2A_SWAP_CTRL2 0x3435
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_BLOCK_ID 0x3a00
+#define VPU_MAFBC_IRQ_RAW_STATUS 0x3a01
+#define VPU_MAFBC_IRQ_CLEAR 0x3a02
+#define VPU_MAFBC_IRQ_MASK 0x3a03
+#define VPU_MAFBC_IRQ_STATUS 0x3a04
+#define VPU_MAFBC_COMMAND 0x3a05
+#define VPU_MAFBC_STATUS 0x3a06
+#define VPU_MAFBC_SURFACE_CFG 0x3a07
+
+/* osd afbc on g12a */
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S0 0x3a10
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S0 0x3a11
+#define VPU_MAFBC_FORMAT_SPECIFIER_S0 0x3a12
+#define VPU_MAFBC_BUFFER_WIDTH_S0 0x3a13
+#define VPU_MAFBC_BUFFER_HEIGHT_S0 0x3a14
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S0 0x3a15
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S0 0x3a16
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S0 0x3a17
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S0 0x3a18
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S0 0x3a19
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S0 0x3a1a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S0 0x3a1b
+#define VPU_MAFBC_PREFETCH_CFG_S0 0x3a1c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S1 0x3a30
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S1 0x3a31
+#define VPU_MAFBC_FORMAT_SPECIFIER_S1 0x3a32
+#define VPU_MAFBC_BUFFER_WIDTH_S1 0x3a33
+#define VPU_MAFBC_BUFFER_HEIGHT_S1 0x3a34
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S1 0x3a35
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S1 0x3a36
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S1 0x3a37
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S1 0x3a38
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S1 0x3a39
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S1 0x3a3a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S1 0x3a3b
+#define VPU_MAFBC_PREFETCH_CFG_S1 0x3a3c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S2 0x3a50
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S2 0x3a51
+#define VPU_MAFBC_FORMAT_SPECIFIER_S2 0x3a52
+#define VPU_MAFBC_BUFFER_WIDTH_S2 0x3a53
+#define VPU_MAFBC_BUFFER_HEIGHT_S2 0x3a54
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S2 0x3a55
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S2 0x3a56
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S2 0x3a57
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S2 0x3a58
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S2 0x3a59
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S2 0x3a5a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S2 0x3a5b
+#define VPU_MAFBC_PREFETCH_CFG_S2 0x3a5c
+
+#define VPU_MAFBC_HEADER_BUF_ADDR_LOW_S3 0x3a70
+#define VPU_MAFBC_HEADER_BUF_ADDR_HIGH_S3 0x3a71
+#define VPU_MAFBC_FORMAT_SPECIFIER_S3 0x3a72
+#define VPU_MAFBC_BUFFER_WIDTH_S3 0x3a73
+#define VPU_MAFBC_BUFFER_HEIGHT_S3 0x3a74
+#define VPU_MAFBC_BOUNDING_BOX_X_START_S3 0x3a75
+#define VPU_MAFBC_BOUNDING_BOX_X_END_S3 0x3a76
+#define VPU_MAFBC_BOUNDING_BOX_Y_START_S3 0x3a77
+#define VPU_MAFBC_BOUNDING_BOX_Y_END_S3 0x3a78
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_LOW_S3 0x3a79
+#define VPU_MAFBC_OUTPUT_BUF_ADDR_HIGH_S3 0x3a7a
+#define VPU_MAFBC_OUTPUT_BUF_STRIDE_S3 0x3a7b
+#define VPU_MAFBC_PREFETCH_CFG_S3 0x3a7c
+
+#define DOLBY_PATH_CTRL 0x1a0c
+#define OSD_PATH_MISC_CTRL 0x1a0e
+#define MALI_AFBCD_TOP_CTRL 0x1a0f
+
+#define VIU_OSD_BLEND_CTRL 0x39b0
+#define VIU_OSD_BLEND_CTRL1 0x39c0
+#define VIU_OSD_BLEND_DIN0_SCOPE_H 0x39b1
+#define VIU_OSD_BLEND_DIN0_SCOPE_V 0x39b2
+#define VIU_OSD_BLEND_DIN1_SCOPE_H 0x39b3
+#define VIU_OSD_BLEND_DIN1_SCOPE_V 0x39b4
+#define VIU_OSD_BLEND_DIN2_SCOPE_H 0x39b5
+#define VIU_OSD_BLEND_DIN2_SCOPE_V 0x39b6
+#define VIU_OSD_BLEND_DIN3_SCOPE_H 0x39b7
+#define VIU_OSD_BLEND_DIN3_SCOPE_V 0x39b8
+#define VIU_OSD_BLEND_DUMMY_DATA0 0x39b9
+#define VIU_OSD_BLEND_DUMMY_ALPHA 0x39ba
+#define VIU_OSD_BLEND_BLEND0_SIZE 0x39bb
+#define VIU_OSD_BLEND_BLEND1_SIZE 0x39bc
+#define VIU_OSD_BLEND_RO_CURRENT_XY 0x39bf
+
+#define VPP_OUT_H_V_SIZE 0x1da5
+
+#define VPP_VD2_HDR_IN_SIZE 0x1df0
+#define VPP_OSD1_IN_SIZE 0x1df1
+#define VPP_GCLK_CTRL2 0x1df2
+#define VD2_PPS_DUMMY_DATA 0x1df4
+#define VPP_OSD1_BLD_H_SCOPE 0x1df5
+#define VPP_OSD1_BLD_V_SCOPE 0x1df6
+#define VPP_OSD2_BLD_H_SCOPE 0x1df7
+#define VPP_OSD2_BLD_V_SCOPE 0x1df8
+#define VPP_WRBAK_CTRL 0x1df9
+#define VPP_SLEEP_CTRL 0x1dfa
+#define VD1_BLEND_SRC_CTRL 0x1dfb
+#define VD2_BLEND_SRC_CTRL 0x1dfc
+#define OSD1_BLEND_SRC_CTRL 0x1dfd
+#define OSD2_BLEND_SRC_CTRL 0x1dfe
+
+#define VPP_POST_BLEND_BLEND_DUMMY_DATA 0x3968
+#define VPP_POST_BLEND_DUMMY_ALPHA 0x3969
+#define VPP_RDARB_MODE 0x3978
+#define VPP_RDARB_REQEN_SLV 0x3979
+#define VPU_RDARB_MODE_L2C1 0x279d
+
#endif /* __MESON_REGISTERS_H */
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f6ba35a405f8..b39034745444 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -113,9 +113,12 @@
#define HHI_HDMI_PLL_CNTL4 0x32C /* 0xcb offset in data sheet */
#define HHI_HDMI_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
#define HHI_HDMI_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_HDMI_PLL_CNTL7 0x338 /* 0xce offset in data sheet */
#define HDMI_PLL_RESET BIT(28)
+#define HDMI_PLL_RESET_G12A BIT(29)
#define HDMI_PLL_LOCK BIT(31)
+#define HDMI_PLL_LOCK_G12A (3 << 30)
#define FREQ_1000_1001(_freq) DIV_ROUND_CLOSEST(_freq * 1000, 1001)
@@ -257,6 +260,10 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x71486980);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x00000e55);
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4800023d);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x4000027b);
@@ -271,11 +278,26 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
HDMI_PLL_RESET, HDMI_PLL_RESET);
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
HDMI_PLL_RESET, 0);
- }
- /* Poll for lock bit */
- regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
- (val & HDMI_PLL_LOCK), 10, 0);
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ (val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, 0x00010000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x6a28dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x56540000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x3a0504f7);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x1a0504f7);
+
+ /* Poll for lock bit */
+ regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A) == HDMI_PLL_LOCK_G12A),
+ 10, 0);
+ }
/* Disable VCLK2 */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, 0);
@@ -288,8 +310,13 @@ static void meson_venci_cvbs_clock_config(struct meson_drm *priv)
VCLK2_DIV_MASK, (55 - 1));
/* select vid_pll for vclk2 */
- regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
- VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (0 << VCLK2_SEL_SHIFT));
+ else
+ regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL,
+ VCLK2_SEL_MASK, (4 << VCLK2_SEL_SHIFT));
+
/* enable vclk2 gate */
regmap_update_bits(priv->hhi, HHI_VIID_CLK_CNTL, VCLK2_EN, VCLK2_EN);
@@ -396,8 +423,8 @@ struct meson_vclk_params {
},
[MESON_VCLK_HDMI_297000] = {
.pixel_freq = 297000,
- .pll_base_freq = 2970000,
- .pll_od1 = 1,
+ .pll_base_freq = 5940000,
+ .pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
.vid_pll_div = VID_PLL_DIV_5,
@@ -476,32 +503,80 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
/* Poll for lock bit */
regmap_read_poll_timeout(priv->hhi, HHI_HDMI_PLL_CNTL, val,
(val & HDMI_PLL_LOCK), 10, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL, 0x0b3a0400 | m);
+
+ /* Enable and reset */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 0x3 << 28, 0x3 << 28);
+
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL2, frac);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL3, 0x00000000);
+
+ /* G12A HDMI PLL needs specific parameters for 5.4GHz */
+ if (m >= 0xf7) {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
+ } else {
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0x0a691c00);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x33771290);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39270000);
+ regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x50540000);
+ }
+
+ do {
+ /* Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, HDMI_PLL_RESET_G12A);
+
+ /* UN-Reset PLL */
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ HDMI_PLL_RESET_G12A, 0);
+
+ /* Poll for lock bits */
+ if (!regmap_read_poll_timeout(priv->hhi,
+ HHI_HDMI_PLL_CNTL, val,
+ ((val & HDMI_PLL_LOCK_G12A)
+ == HDMI_PLL_LOCK_G12A),
+ 10, 100))
+ break;
+ } while (1);
}
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 16, pll_od_to_reg(od1) << 16);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 21, pll_od_to_reg(od1) << 21);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 16, pll_od_to_reg(od1) << 16);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 22, pll_od_to_reg(od2) << 22);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 23, pll_od_to_reg(od2) << 23);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 18, pll_od_to_reg(od2) << 18);
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL2,
3 << 18, pll_od_to_reg(od3) << 18);
else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL3,
3 << 19, pll_od_to_reg(od3) << 19);
-
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ regmap_update_bits(priv->hhi, HHI_HDMI_PLL_CNTL,
+ 3 << 20, pll_od_to_reg(od3) << 20);
}
#define XTAL_FREQ 24000
@@ -518,6 +593,7 @@ static unsigned int meson_hdmi_pll_get_m(struct meson_drm *priv,
#define HDMI_FRAC_MAX_GXBB 4096
#define HDMI_FRAC_MAX_GXL 1024
+#define HDMI_FRAC_MAX_G12A 131072
static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
unsigned int m,
@@ -534,6 +610,9 @@ static unsigned int meson_hdmi_pll_get_frac(struct meson_drm *priv,
parent_freq *= 2;
}
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ frac_max = HDMI_FRAC_MAX_G12A;
+
/* We can have a perfect match !*/
if (pll_freq / m == parent_freq &&
pll_freq % m == 0)
@@ -559,7 +638,8 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv,
if (frac >= HDMI_FRAC_MAX_GXBB)
return false;
} else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
/* Empiric supported min/max dividers */
if (m < 106 || m > 247)
return false;
@@ -714,6 +794,23 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ switch (pll_base_freq) {
+ case 2970000:
+ m = 0x7b;
+ frac = vic_alternate_clock ? 0x140b4 : 0x18000;
+ break;
+ case 4320000:
+ m = vic_alternate_clock ? 0xb3 : 0xb4;
+ frac = vic_alternate_clock ? 0x1a3ee : 0;
+ break;
+ case 5940000:
+ m = 0xf7;
+ frac = vic_alternate_clock ? 0x8148 : 0x10000;
+ break;
+ }
+
+ meson_hdmi_pll_set_params(priv, m, frac, od1, od2, od3);
}
/* Setup vid_pll divider */
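
A standalone C sketch of why the G12A lock test above compares against the full mask instead of using a bare (val & mask): HDMI_PLL_LOCK_G12A spans two bits (3 << 30), and the PLL is only considered locked once both are set.

#include <stdio.h>

#define HDMI_PLL_LOCK_G12A  (3u << 30)  /* two lock bits on G12A */

static int locked_g12a(unsigned int val)
{
	/* Both bits 31 and 30 must be set: compare against the mask itself */
	return (val & HDMI_PLL_LOCK_G12A) == HDMI_PLL_LOCK_G12A;
}

int main(void)
{
	unsigned int only_bit31 = 1u << 31;  /* partially locked */
	unsigned int both_bits  = 3u << 30;  /* fully locked */

	/* A bare (val & mask) test would wrongly accept only_bit31 */
	printf("bit31 only: bare test=%d, full compare=%d\n",
	       !!(only_bit31 & HDMI_PLL_LOCK_G12A), locked_g12a(only_bit31));
	printf("both bits : bare test=%d, full compare=%d\n",
	       !!(both_bits & HDMI_PLL_LOCK_G12A), locked_g12a(both_bits));
	return 0;
}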
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 66d73a932d19..6faca7313339 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -73,7 +73,9 @@
/* HHI Registers */
#define HHI_GCLK_MPEG2 0x148 /* 0x52 offset in data sheet */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
@@ -1675,8 +1677,13 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 8);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
/* Power Down Dacs */
writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index d622d817b6df..2c5341c881c4 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -37,7 +37,9 @@
/* HHI VDAC Registers */
#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL0_G12A 0x2EC /* 0xbd offset in data sheet */
#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_VDAC_CNTL1_G12A 0x2F0 /* 0xbe offset in data sheet */
struct meson_venc_cvbs {
struct drm_encoder encoder;
@@ -166,8 +168,13 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
struct meson_drm *priv = meson_venc_cvbs->priv;
/* Disable CVBS VDAC */
- regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ } else {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+ }
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
@@ -179,13 +186,17 @@ static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
/* VDAC0 source is not from ATV */
writel_bits_relaxed(BIT(5), 0, priv->io_base + _REG(VENC_VDAC_DACSEL0));
- if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu"))
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 1);
- else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
- meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
+ meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu")) {
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0xf0001);
-
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0_G12A, 0x906001);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1_G12A, 0);
+ }
}
static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index e46e05f50bad..b59072342cae 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -25,7 +25,6 @@
#include "meson_viu.h"
#include "meson_vpp.h"
#include "meson_venc.h"
-#include "meson_canvas.h"
#include "meson_registers.h"
/**
@@ -91,8 +90,36 @@ static int eotf_bypass_coeff[EOTF_COEFF_SIZE] = {
EOTF_COEFF_RIGHTSHIFT /* right shift */
};
-void meson_viu_set_osd_matrix(struct meson_drm *priv,
- enum viu_matrix_sel_e m_select,
+static void meson_viu_set_g12a_osd1_matrix(struct meson_drm *priv,
+ int *m, bool csc_on)
+{
+ /* VPP WRAP OSD1 matrix */
+ writel(((m[0] & 0xfff) << 16) | (m[1] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET0_1));
+ writel(m[2] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_PRE_OFFSET2));
+ writel(((m[3] & 0x1fff) << 16) | (m[4] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF00_01));
+ writel(((m[5] & 0x1fff) << 16) | (m[6] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF02_10));
+ writel(((m[7] & 0x1fff) << 16) | (m[8] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF11_12));
+ writel(((m[9] & 0x1fff) << 16) | (m[10] & 0x1fff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF20_21));
+ writel((m[11] & 0x1fff) << 16,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_COEF22));
+
+ writel(((m[18] & 0xfff) << 16) | (m[19] & 0xfff),
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET0_1));
+ writel(m[20] & 0xfff,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_OFFSET2));
+
+ writel_bits_relaxed(BIT(0), csc_on ? BIT(0) : 0,
+ priv->io_base + _REG(VPP_WRAP_OSD1_MATRIX_EN_CTRL));
+}
+
+static void meson_viu_set_osd_matrix(struct meson_drm *priv,
+ enum viu_matrix_sel_e m_select,
int *m, bool csc_on)
{
if (m_select == VIU_MATRIX_OSD) {
@@ -160,10 +187,10 @@ void meson_viu_set_osd_matrix(struct meson_drm *priv,
#define OSD_EOTF_LUT_SIZE 33
#define OSD_OETF_LUT_SIZE 41
-void meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
- unsigned int *r_map, unsigned int *g_map,
- unsigned int *b_map,
- bool csc_on)
+static void
+meson_viu_set_osd_lut(struct meson_drm *priv, enum viu_lut_sel_e lut_sel,
+ unsigned int *r_map, unsigned int *g_map,
+ unsigned int *b_map, bool csc_on)
{
unsigned int addr_port;
unsigned int data_port;
@@ -337,14 +364,24 @@ void meson_viu_init(struct meson_drm *priv)
if (meson_vpu_is_compatible(priv, "amlogic,meson-gxm-vpu") ||
meson_vpu_is_compatible(priv, "amlogic,meson-gxl-vpu"))
meson_viu_load_matrix(priv);
+ else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ meson_viu_set_g12a_osd1_matrix(priv, RGB709_to_YUV709l_coeff,
+ true);
/* Initialize OSD1 fifo control register */
reg = BIT(0) | /* Urgent DDR request priority */
- (4 << 5) | /* hold_fifo_lines */
- (3 << 10) | /* burst length 64 */
- (32 << 12) | /* fifo_depth_val: 32*8=256 */
- (2 << 22) | /* 4 words in 1 burst */
- (2 << 24);
+ (4 << 5); /* hold_fifo_lines */
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ reg |= (1 << 10) | /* burst length 32 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24) |
+ (1 << 31);
+ else
+ reg |= (3 << 10) | /* burst length 64 */
+ (32 << 12) | /* fifo_depth_val: 32*8=256 */
+ (2 << 22) | /* 4 words in 1 burst */
+ (2 << 24);
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
@@ -370,6 +407,30 @@ void meson_viu_init(struct meson_drm *priv)
writel_relaxed(0x00FF00C0,
priv->io_base + _REG(VD2_IF0_LUMA_FIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ writel_relaxed(4 << 29 |
+ 1 << 27 |
+ 1 << 26 | /* blend_din0 input to blend0 */
+ 1 << 25 | /* blend1_dout to blend2 */
+ 1 << 24 | /* blend1_din3 input to blend1 */
+ 1 << 20 |
+ 0 << 16 |
+ 1,
+ priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
+ writel_relaxed(3 << 8 |
+ 1 << 20,
+ priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
+ writel_relaxed(1 << 20,
+ priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD1_BLEND_SRC_CTRL));
+ writel_relaxed(0, priv->io_base + _REG(VD2_BLEND_SRC_CTRL));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_DATA0));
+ writel_relaxed(0,
+ priv->io_base + _REG(VIU_OSD_BLEND_DUMMY_ALPHA));
+ writel_bits_relaxed(0x3 << 2, 0x3 << 2,
+ priv->io_base + _REG(DOLBY_PATH_CTRL));
+ }
priv->viu.osd1_enabled = false;
priv->viu.osd1_commit = false;
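
A standalone C sketch of how the per-SoC VIU_OSD1_FIFO_CTRL_STAT value above is composed from shifted fields, reproducing only the GX vs. G12A difference visible in the patch (burst length 64 vs. 32, plus the extra bit 31 on G12A); no register layout beyond what the patch shows is assumed.

#include <stdio.h>

#define BIT(n) (1u << (n))

static unsigned int osd1_fifo_ctrl(int is_g12a)
{
	unsigned int reg = BIT(0) |	/* urgent DDR request priority */
			   (4u << 5);	/* hold_fifo_lines */

	if (is_g12a)
		reg |= (1u << 10) |	/* burst length 32 */
		       (32u << 12) |	/* fifo_depth_val: 32*8=256 */
		       (2u << 22) |	/* 4 words in 1 burst */
		       (2u << 24) |
		       (1u << 31);
	else
		reg |= (3u << 10) |	/* burst length 64 */
		       (32u << 12) |
		       (2u << 22) |
		       (2u << 24);

	return reg;
}

int main(void)
{
	printf("GX   VIU_OSD1_FIFO_CTRL_STAT = 0x%08x\n", osd1_fifo_ctrl(0));
	printf("G12A VIU_OSD1_FIFO_CTRL_STAT = 0x%08x\n", osd1_fifo_ctrl(1));
	return 0;
}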
diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c
index f9efb431e953..8c52a3455ef4 100644
--- a/drivers/gpu/drm/meson/meson_vpp.c
+++ b/drivers/gpu/drm/meson/meson_vpp.c
@@ -112,32 +112,39 @@ void meson_vpp_init(struct meson_drm *priv)
writel_relaxed(0x20000, priv->io_base + _REG(VPP_DOLBY_CTRL));
writel_relaxed(0x1020080,
priv->io_base + _REG(VPP_DUMMY_DATA1));
- }
+ } else if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL));
/* Initialize vpu fifo control registers */
- writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
- 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
+ if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
+ writel_relaxed(0xfff << 20 | 0x1000,
+ priv->io_base + _REG(VPP_OFIFO_SIZE));
+ else
+ writel_relaxed(readl_relaxed(priv->io_base + _REG(VPP_OFIFO_SIZE)) |
+ 0x77f, priv->io_base + _REG(VPP_OFIFO_SIZE));
writel_relaxed(0x08080808, priv->io_base + _REG(VPP_HOLD_LINES));
- /* Turn off preblend */
- writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Turn off POSTBLEND */
- writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Force all planes off */
- writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
- VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
- VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
- priv->io_base + _REG(VPP_MISC));
-
- /* Setup default VD settings */
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
- writel_relaxed(4096,
- priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ if (!meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu")) {
+ /* Turn off preblend */
+ writel_bits_relaxed(VPP_PREBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Turn off POSTBLEND */
+ writel_bits_relaxed(VPP_POSTBLEND_ENABLE, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Force all planes off */
+ writel_bits_relaxed(VPP_OSD1_POSTBLEND | VPP_OSD2_POSTBLEND |
+ VPP_VD1_POSTBLEND | VPP_VD2_POSTBLEND |
+ VPP_VD1_PREBLEND | VPP_VD2_PREBLEND, 0,
+ priv->io_base + _REG(VPP_MISC));
+
+ /* Setup default VD settings */
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_PREBLEND_VD1_H_START_END));
+ writel_relaxed(4096,
+ priv->io_base + _REG(VPP_BLEND_VD2_H_START_END));
+ }
/* Disable Scalers */
writel_relaxed(0, priv->io_base + _REG(VPP_OSD_SC_CTRL0));
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 0aaedc554879..8c31e4422cae 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -113,7 +113,7 @@ struct mga_framebuffer {
};
struct mga_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct mga_framebuffer mfb;
void *sysram;
int size;
@@ -269,7 +269,6 @@ mgag200_dumb_mmap_offset(struct drm_file *file,
struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev);
void mgag200_i2c_destroy(struct mga_i2c_chan *i2c);
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
void mgag200_ttm_placement(struct mgag200_bo *bo, int domain);
static inline int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 6893934b26c0..5b7e64cac004 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -195,8 +195,6 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
goto err_alloc_fbi;
}
- info->par = mfbdev;
-
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
goto err_alloc_fbi;
@@ -209,17 +207,13 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
/* setup helper */
mfbdev->helper.fb = fb;
- strcpy(info->fix.id, "mgadrmfb");
-
info->fbops = &mgag200fb_ops;
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base;
info->apertures->ranges[0].size = mdev->mc.vram_size;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width,
- sizes->fb_height);
+ drm_fb_helper_fill_info(info, &mfbdev->helper, sizes);
info->screen_base = sysram;
info->screen_size = size;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index d96a9b32455e..bd42365a8aa8 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -178,7 +178,6 @@ int mgag200_mm_init(struct mga_device *mdev)
ret = ttm_bo_device_init(&mdev->ttm.bdev,
&mgag200_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
true);
if (ret) {
DRM_ERROR("Error initialising bo driver; %d\n", ret);
@@ -345,13 +344,8 @@ int mgag200_bo_push_sysram(struct mgag200_bo *bo)
int mgag200_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct mga_device *mdev;
+ struct drm_file *file_priv = filp->private_data;
+ struct mga_device *mdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- mdev = file_priv->minor->dev->dev_private;
return ttm_bo_mmap(filp, vma, &mdev->ttm.bdev);
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 78c9e5a5e793..9f2029eca39f 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -21,6 +21,11 @@ config DRM_MSM
help
DRM/KMS driver for MSM/snapdragon.
+config DRM_MSM_GPU_STATE
+ bool
+ depends on DRM_MSM && (DEBUG_FS || DEV_COREDUMP)
+ default y
+
config DRM_MSM_REGISTER_LOGGING
bool "MSM DRM register logging"
depends on DRM_MSM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 56a70c74af4e..7a05cbf2f820 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Idrivers/gpu/drm/msm
-ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
-ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y := -I $(srctree)/$(src)
+ccflags-y += -I $(srctree)/$(src)/disp/dpu1
+ccflags-$(CONFIG_DRM_MSM_DSI) += -I $(srctree)/$(src)/dsi
msm-y := \
adreno/adreno_device.o \
@@ -15,7 +15,6 @@ msm-y := \
adreno/a6xx_gpu.o \
adreno/a6xx_gmu.o \
adreno/a6xx_hfi.o \
- adreno/a6xx_gpu_state.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -96,6 +95,8 @@ msm-y := \
msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DRM_MSM_GPU_STATE) += adreno/a6xx_gpu_state.o
+
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d5f5e56422f5..e5fcefa49f19 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -15,9 +15,6 @@
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
-#include <linux/dma-mapping.h>
-#include <linux/of_address.h>
-#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>
@@ -30,94 +27,6 @@ static void a5xx_dump(struct msm_gpu *gpu);
#define GPU_PAS_ID 13
-static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
-{
- struct device *dev = &gpu->pdev->dev;
- const struct firmware *fw;
- struct device_node *np;
- struct resource r;
- phys_addr_t mem_phys;
- ssize_t mem_size;
- void *mem_region = NULL;
- int ret;
-
- if (!IS_ENABLED(CONFIG_ARCH_QCOM))
- return -EINVAL;
-
- np = of_get_child_by_name(dev->of_node, "zap-shader");
- if (!np)
- return -ENODEV;
-
- np = of_parse_phandle(np, "memory-region", 0);
- if (!np)
- return -EINVAL;
-
- ret = of_address_to_resource(np, 0, &r);
- if (ret)
- return ret;
-
- mem_phys = r.start;
- mem_size = resource_size(&r);
-
- /* Request the MDT file for the firmware */
- fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
- if (IS_ERR(fw)) {
- DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
- return PTR_ERR(fw);
- }
-
- /* Figure out how much memory we need */
- mem_size = qcom_mdt_get_size(fw);
- if (mem_size < 0) {
- ret = mem_size;
- goto out;
- }
-
- /* Allocate memory for the firmware image */
- mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
- if (!mem_region) {
- ret = -ENOMEM;
- goto out;
- }
-
- /*
- * Load the rest of the MDT
- *
- * Note that we could be dealing with two different paths, since
- * with upstream linux-firmware it would be in a qcom/ subdir..
- * adreno_request_fw() handles this, but qcom_mdt_load() does
- * not. But since we've already gotten thru adreno_request_fw()
- * we know which of the two cases it is:
- */
- if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
- ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- } else {
- char *newname;
-
- newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
-
- ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
- mem_region, mem_phys, mem_size, NULL);
- kfree(newname);
- }
- if (ret)
- goto out;
-
- /* Send the image to the secure world */
- ret = qcom_scm_pas_auth_and_reset(GPU_PAS_ID);
- if (ret)
- DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
-
-out:
- if (mem_region)
- memunmap(mem_region);
-
- release_firmware(fw);
-
- return ret;
-}
-
static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -563,8 +472,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
static int a5xx_zap_shader_init(struct msm_gpu *gpu)
{
static bool loaded;
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct platform_device *pdev = gpu->pdev;
int ret;
/*
@@ -574,23 +481,9 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
if (loaded)
return a5xx_zap_shader_resume(gpu);
- /* We need SCM to be able to load the firmware */
- if (!qcom_scm_is_available()) {
- DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
- return -EPROBE_DEFER;
- }
-
- /* Each GPU has a target specific zap shader firmware name to use */
- if (!adreno_gpu->info->zapfw) {
- DRM_DEV_ERROR(&pdev->dev,
- "Zap shader firmware file not specified for this target\n");
- return -ENODEV;
- }
-
- ret = zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw);
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
loaded = !ret;
-
return ret;
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index d1662a75c7ec..9155dafae2a9 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -3,12 +3,31 @@
#include <linux/clk.h>
#include <linux/interconnect.h>
+#include <linux/pm_domain.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include "a6xx_gpu.h"
#include "a6xx_gmu.xml.h"
+static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* FIXME: add a banner here */
+ gmu->hung = true;
+
+ /* Turn off the hangcheck timer while we are resetting */
+ del_timer(&gpu->hangcheck_timer);
+
+ /* Queue the GPU handler because we need to treat this as a recovery */
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
static irqreturn_t a6xx_gmu_irq(int irq, void *data)
{
struct a6xx_gmu *gmu = data;
@@ -20,8 +39,7 @@ static irqreturn_t a6xx_gmu_irq(int irq, void *data)
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
@@ -45,8 +63,7 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
- /* Temporary until we can recover safely */
- BUG();
+ a6xx_gmu_fault(gmu);
}
return IRQ_HANDLED;
@@ -165,10 +182,8 @@ static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
}
/* Wait for the GMU to get to its most idle state */
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
-
return spin_until(a6xx_gmu_check_idle_level(gmu));
}
@@ -567,7 +582,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
if (!rpmh_init) {
a6xx_gmu_rpmh_init(gmu);
rpmh_init = true;
- } else if (state != GMU_RESET) {
+ } else {
ret = a6xx_rpmh_start(gmu);
if (ret)
return ret;
@@ -633,20 +648,6 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
-static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
-{
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
-
- gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
- ~A6XX_GMU_IRQ_MASK);
- gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
- ~A6XX_HFI_IRQ_MASK);
-
- enable_irq(gmu->gmu_irq);
- enable_irq(gmu->hfi_irq);
-}
-
static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
{
disable_irq(gmu->gmu_irq);
@@ -656,21 +657,10 @@ static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
}
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
{
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
- int ret;
u32 val;
- /* Flush all the queues */
- a6xx_hfi_stop(gmu);
-
- /* Stop the interrupts */
- a6xx_gmu_irq_disable(gmu);
-
- /* Force off SPTP in case the GMU is managing it */
- a6xx_sptprac_disable(gmu);
-
/* Make sure there are no outstanding RPMh votes */
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
(val & 1), 100, 10000);
@@ -680,37 +670,22 @@ int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
(val & 1), 100, 10000);
gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
(val & 1), 100, 1000);
+}
- /* Force off the GX GSDC */
- regulator_force_disable(gmu->gx);
-
- /* Disable the resources */
- clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
- pm_runtime_put_sync(gmu->dev);
-
- /* Re-enable the resources */
- pm_runtime_get_sync(gmu->dev);
-
- /* Use a known rate to bring up the GMU */
- clk_set_rate(gmu->core_clk, 200000000);
- ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
-
- a6xx_gmu_irq_enable(gmu);
-
- ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
- if (!ret)
- ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+/* Force the GMU off in case it isn't responsive */
+static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
+{
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
- /* Set the GPU back to the highest power frequency */
- __a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
-out:
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
- return ret;
+ /* Make sure there are no outstanding RPMh votes */
+ a6xx_gmu_rpmh_off(gmu);
}
int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
@@ -723,19 +698,26 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
return 0;
+ gmu->hung = false;
+
/* Turn on the resources */
pm_runtime_get_sync(gmu->dev);
/* Use a known rate to bring up the GMU */
clk_set_rate(gmu->core_clk, 200000000);
ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
- if (ret)
- goto out;
+ if (ret) {
+ pm_runtime_put(gmu->dev);
+ return ret;
+ }
/* Set the bus quota to a reasonable value for boot */
icc_set_bw(gpu->icc_path, 0, MBps_to_icc(3072));
- a6xx_gmu_irq_enable(gmu);
+ /* Enable the GMU interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~A6XX_GMU_IRQ_MASK);
+ enable_irq(gmu->gmu_irq);
/* Check to see if we are doing a cold or warm boot */
status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
@@ -746,14 +728,35 @@ int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
goto out;
ret = a6xx_hfi_start(gmu, status);
+ if (ret)
+ goto out;
+
+ /*
+ * Turn on the GMU firmware fault interrupt after we know the boot
+ * sequence is successful
+ */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~A6XX_HFI_IRQ_MASK);
+ enable_irq(gmu->hfi_irq);
/* Set the GPU to the highest power frequency */
__a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+ /*
+ * "enable" the GX power domain which won't actually do anything but it
+ * will make sure that the refcounting is correct in case we need to
+ * bring down the GX after a GMU failure
+ */
+ if (!IS_ERR(gmu->gxpd))
+ pm_runtime_get(gmu->gxpd);
+
out:
- /* Make sure to turn off the boot OOB request on error */
- if (ret)
- a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ /* On failure, shut down the GMU to leave it in a good state */
+ if (ret) {
+ disable_irq(gmu->gmu_irq);
+ a6xx_rpmh_stop(gmu);
+ pm_runtime_put(gmu->dev);
+ }
return ret;
}
@@ -773,11 +776,12 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
-int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+/* Gracefully try to shut down the GMU and by extension the GPU */
+static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
u32 val;
/*
@@ -787,10 +791,19 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
if (val != 0xf) {
- int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+ int ret = a6xx_gmu_wait_for_idle(gmu);
- /* Temporary until we can recover safely */
- BUG_ON(ret);
+ /* If the GMU isn't responding assume it is hung */
+ if (ret) {
+ a6xx_gmu_force_off(gmu);
+ return;
+ }
+
+ /* Clear the VBIF pipe before shutting down */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
+ == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
@@ -822,10 +835,37 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
/* Tell RPMh to power off the GPU */
a6xx_rpmh_stop(gmu);
+}
+
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct msm_gpu *gpu = &a6xx_gpu->base.base;
+
+ if (!pm_runtime_active(gmu->dev))
+ return 0;
+
+ /*
+ * Force the GMU off if we detected a hang, otherwise try to shut it
+ * down gracefully
+ */
+ if (gmu->hung)
+ a6xx_gmu_force_off(gmu);
+ else
+ a6xx_gmu_shutdown(gmu);
/* Remove the bus vote */
icc_set_bw(gpu->icc_path, 0, 0);
+ /*
+ * Make sure the GX domain is off before turning off the GMU (CX)
+ * domain. Usually the GMU does this but only if the shutdown sequence
+ * was successful
+ */
+ if (!IS_ERR(gmu->gxpd))
+ pm_runtime_put_sync(gmu->gxpd);
+
clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
pm_runtime_put_sync(gmu->dev);
@@ -948,25 +988,20 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
}
/* Return the 'arc-level' for the given frequency */
-static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+static unsigned int a6xx_gmu_get_arc_level(struct device *dev,
+ unsigned long freq)
{
struct dev_pm_opp *opp;
- struct device_node *np;
- u32 val = 0;
+ unsigned int val;
if (!freq)
return 0;
- opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
if (IS_ERR(opp))
return 0;
- np = dev_pm_opp_get_of_node(opp);
-
- if (np) {
- of_property_read_u32(np, "opp-level", &val);
- of_node_put(np);
- }
+ val = dev_pm_opp_get_level(opp);
dev_pm_opp_put(opp);
@@ -1002,7 +1037,7 @@ static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
/* Construct a vote for each frequency */
for (i = 0; i < freqs_count; i++) {
u8 pindex = 0, sindex = 0;
- u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+ unsigned int level = a6xx_gmu_get_arc_level(dev, freqs[i]);
/* Get the primary index that matches the arc level */
for (j = 0; j < pri_count; j++) {
@@ -1195,9 +1230,15 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
if (IS_ERR_OR_NULL(gmu->mmio))
return;
- pm_runtime_disable(gmu->dev);
a6xx_gmu_stop(a6xx_gpu);
+ pm_runtime_disable(gmu->dev);
+
+ if (!IS_ERR(gmu->gxpd)) {
+ pm_runtime_disable(gmu->gxpd);
+ dev_pm_domain_detach(gmu->gxpd, false);
+ }
+
a6xx_gmu_irq_disable(gmu);
a6xx_gmu_memory_free(gmu, gmu->hfi);
@@ -1223,7 +1264,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
pm_runtime_enable(gmu->dev);
- gmu->gx = devm_regulator_get(gmu->dev, "vdd");
/* Get the list of clocks */
ret = a6xx_gmu_clocks_probe(gmu);
@@ -1257,6 +1297,12 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
+ /*
+ * Get a link to the GX power domain to reset the GPU in case of GMU
+ * crash
+ */
+ gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");
+
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
index c721d9165d8e..bedd8e6a63aa 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -27,9 +27,6 @@ struct a6xx_gmu_bo {
/* the GMU is coming up for the first time or back from a power collapse */
#define GMU_COLD_BOOT 1
-/* The GMU is being soft reset after a fault */
-#define GMU_RESET 2
-
/*
* These define the level of control that the GMU has - the higher the number
* the more things that the GMU hardware controls on its own.
@@ -52,11 +49,11 @@ struct a6xx_gmu {
int hfi_irq;
int gmu_irq;
- struct regulator *gx;
-
struct iommu_domain *domain;
u64 uncached_iova_base;
+ struct device *gxpd;
+
int idle_level;
struct a6xx_gmu_bo *hfi;
@@ -78,7 +75,7 @@ struct a6xx_gmu {
struct a6xx_hfi_queue queues[2];
- struct tasklet_struct hfi_tasklet;
+ bool hung;
};
static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index fefe773c989e..e74dce474250 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -10,6 +10,8 @@
#include <linux/devfreq.h>
+#define GPU_PAS_ID 13
+
static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -343,6 +345,20 @@ static int a6xx_ucode_init(struct msm_gpu *gpu)
return 0;
}
+static int a6xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ static bool loaded;
+ int ret;
+
+ if (loaded)
+ return 0;
+
+ ret = adreno_zap_shader_load(gpu, GPU_PAS_ID);
+
+ loaded = !ret;
+ return ret;
+}
+
#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
@@ -491,7 +507,28 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
if (ret)
goto out;
- gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ /*
+ * Try to load a zap shader into the secure world. If successful
+ * we can use the CP to switch out of secure mode. If not then we
+ * have no recourse but to try to switch ourselves out manually. If we
+ * guessed wrong then access to the RBBM_SECVID_TRUST_CNTL register will
+ * be blocked and a permissions violation will soon follow.
+ */
+ ret = a6xx_zap_shader_init(gpu);
+ if (!ret) {
+ OUT_PKT7(gpu->rb[0], CP_SET_SECURE_MODE, 1);
+ OUT_RING(gpu->rb[0], 0x00000000);
+
+ a6xx_flush(gpu, gpu->rb[0]);
+ if (!a6xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ ret = 0;
+ }
out:
/*
@@ -678,13 +715,15 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- ret = a6xx_gmu_resume(a6xx_gpu);
-
gpu->needs_hw_init = true;
+ ret = a6xx_gmu_resume(a6xx_gpu);
+ if (ret)
+ return ret;
+
msm_gpu_resume_devfreq(gpu);
- return ret;
+ return 0;
}
static int a6xx_pm_suspend(struct msm_gpu *gpu)
@@ -694,18 +733,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(a6xx_gpu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
return a6xx_gmu_stop(a6xx_gpu);
}
@@ -781,14 +808,16 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a6xx_active_ring,
.irq = a6xx_irq,
.destroy = a6xx_destroy,
-#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.show = a6xx_show,
#endif
.gpu_busy = a6xx_gpu_busy,
.gpu_get_freq = a6xx_gmu_get_freq,
.gpu_set_freq = a6xx_gmu_set_freq,
+#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
+#endif
},
.get_timestamp = a6xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 528a4cfe07cd..b46279eb18c5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -46,9 +46,8 @@ struct a6xx_gpu {
int a6xx_gmu_resume(struct a6xx_gpu *gpu);
int a6xx_gmu_stop(struct a6xx_gpu *gpu);
-int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
-int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 714ed6505e47..b907245d3d96 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -155,6 +155,7 @@ static const struct adreno_info gpulist[] = {
.gmem = SZ_1M,
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a6xx_gpu_init,
+ .zapfw = "a630_zap.mdt",
},
};
@@ -229,6 +230,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_sync(&pdev->dev);
DRM_DEV_ERROR(dev->dev, "Couldn't power up the GPU: %d\n", ret);
return NULL;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 27898475cdf4..6f7f4114afcf 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -19,13 +19,148 @@
#include <linux/ascii85.h>
#include <linux/interconnect.h>
+#include <linux/qcom_scm.h>
#include <linux/kernel.h>
+#include <linux/of_address.h>
#include <linux/pm_opp.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/mdt_loader.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
+static bool zap_available = true;
+
+static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
+ u32 pasid)
+{
+ struct device *dev = &gpu->pdev->dev;
+ const struct firmware *fw;
+ struct device_node *np, *mem_np;
+ struct resource r;
+ phys_addr_t mem_phys;
+ ssize_t mem_size;
+ void *mem_region = NULL;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_ARCH_QCOM)) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ np = of_get_child_by_name(dev->of_node, "zap-shader");
+ if (!np) {
+ zap_available = false;
+ return -ENODEV;
+ }
+
+ mem_np = of_parse_phandle(np, "memory-region", 0);
+ of_node_put(np);
+ if (!mem_np) {
+ zap_available = false;
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(mem_np, 0, &r);
+ of_node_put(mem_np);
+ if (ret)
+ return ret;
+
+ mem_phys = r.start;
+ mem_size = resource_size(&r);
+
+ /* Request the MDT file for the firmware */
+ fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
+ if (IS_ERR(fw)) {
+ DRM_DEV_ERROR(dev, "Unable to load %s\n", fwname);
+ return PTR_ERR(fw);
+ }
+
+ /* Figure out how much memory we need */
+ mem_size = qcom_mdt_get_size(fw);
+ if (mem_size < 0) {
+ ret = mem_size;
+ goto out;
+ }
+
+ /* Allocate memory for the firmware image */
+ mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
+ if (!mem_region) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Load the rest of the MDT
+ *
+ * Note that we could be dealing with two different paths, since
+ * with upstream linux-firmware it would be in a qcom/ subdir.
+ * adreno_request_fw() handles this, but qcom_mdt_load() does
+ * not. But since we've already gotten through adreno_request_fw()
+ * we know which of the two cases it is:
+ */
+ if (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY) {
+ ret = qcom_mdt_load(dev, fw, fwname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ } else {
+ char *newname;
+
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+
+ ret = qcom_mdt_load(dev, fw, newname, pasid,
+ mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
+ }
+ if (ret)
+ goto out;
+
+ /* Send the image to the secure world */
+ ret = qcom_scm_pas_auth_and_reset(pasid);
+
+ /*
+ * If the scm call returns -EOPNOTSUPP we assume that this target
+ * doesn't need/support the zap shader so quietly fail
+ */
+ if (ret == -EOPNOTSUPP)
+ zap_available = false;
+ else if (ret)
+ DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
+
+out:
+ if (mem_region)
+ memunmap(mem_region);
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct platform_device *pdev = gpu->pdev;
+
+ /* Short cut if we determine the zap shader isn't available/needed */
+ if (!zap_available)
+ return -ENODEV;
+
+ /* We need SCM to be able to load the firmware */
+ if (!qcom_scm_is_available()) {
+ DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n");
+ return -EPROBE_DEFER;
+ }
+
+ /* Each GPU has a target specific zap shader firmware name to use */
+ if (!adreno_gpu->info->zapfw) {
+ zap_available = false;
+ DRM_DEV_ERROR(&pdev->dev,
+ "Zap shader firmware file not specified for this target\n");
+ return -ENODEV;
+ }
+
+ return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -63,6 +198,12 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_NR_RINGS:
*value = gpu->nr_rings;
return 0;
+ case MSM_PARAM_PP_PGTABLE:
+ *value = 0;
+ return 0;
+ case MSM_PARAM_FAULTS:
+ *value = gpu->global_faults;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 5db459bc28a7..0925606ec9b5 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -252,6 +252,12 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+/*
+ * For a5xx and a6xx targets load the zap shader that is used to pull the GPU
+ * out of secure mode
+ */
+int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid);
+
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index b776fca571f3..dfdfa766da8f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -46,6 +46,9 @@
#define LEFT_MIXER 0
#define RIGHT_MIXER 1
+/* timeout in ms waiting for frame done */
+#define DPU_CRTC_FRAME_DONE_TIMEOUT_MS 60
+
static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
{
struct msm_drm_private *priv = crtc->dev->dev_private;
@@ -425,65 +428,6 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc,
trace_dpu_crtc_complete_commit(DRMID(crtc));
}
-static void _dpu_crtc_setup_mixer_for_encoder(
- struct drm_crtc *crtc,
- struct drm_encoder *enc)
-{
- struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
- struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
- struct dpu_rm *rm = &dpu_kms->rm;
- struct dpu_crtc_mixer *mixer;
- struct dpu_hw_ctl *last_valid_ctl = NULL;
- int i;
- struct dpu_rm_hw_iter lm_iter, ctl_iter;
-
- dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
- dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
-
- /* Set up all the mixers and ctls reserved by this encoder */
- for (i = cstate->num_mixers; i < ARRAY_SIZE(cstate->mixers); i++) {
- mixer = &cstate->mixers[i];
-
- if (!dpu_rm_get_hw(rm, &lm_iter))
- break;
- mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
-
- /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
- if (!dpu_rm_get_hw(rm, &ctl_iter)) {
- DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
- mixer->hw_lm->idx - LM_0);
- mixer->lm_ctl = last_valid_ctl;
- } else {
- mixer->lm_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
- last_valid_ctl = mixer->lm_ctl;
- }
-
- /* Shouldn't happen, mixers are always >= ctls */
- if (!mixer->lm_ctl) {
- DPU_ERROR("no valid ctls found for lm %d\n",
- mixer->hw_lm->idx - LM_0);
- return;
- }
-
- cstate->num_mixers++;
- DPU_DEBUG("setup mixer %d: lm %d\n",
- i, mixer->hw_lm->idx - LM_0);
- DPU_DEBUG("setup mixer %d: ctl %d\n",
- i, mixer->lm_ctl->idx - CTL_0);
- }
-}
-
-static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
-{
- struct drm_encoder *enc;
-
- WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
-
- /* Check for mixers on all encoders attached to this crtc */
- drm_for_each_encoder_mask(enc, crtc->dev, crtc->state->encoder_mask)
- _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
-}
-
static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
@@ -533,10 +477,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
dev = crtc->dev;
smmu_state = &dpu_crtc->smmu_state;
- if (!cstate->num_mixers) {
- _dpu_crtc_setup_mixers(crtc);
- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
- }
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
if (dpu_crtc->event) {
WARN_ON(dpu_crtc->event);
@@ -683,7 +624,7 @@ static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
DPU_ATRACE_BEGIN("frame done completion wait");
ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
- msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ msecs_to_jiffies(DPU_CRTC_FRAME_DONE_TIMEOUT_MS));
if (!ret) {
DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
rc = -ETIMEDOUT;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 5aa3307f3f0c..82bf16d61a45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -69,6 +69,9 @@
#define MAX_VDISPLAY_SPLIT 1080
+/* timeout in frames waiting for frame done */
+#define DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES 5
+
/**
* enum dpu_enc_rc_events - events for resource control state machine
* @DPU_ENC_RC_EVENT_KICKOFF:
@@ -158,7 +161,7 @@ enum dpu_enc_rc_states {
* Bit0 = phys_encs[0] etc.
* @crtc_frame_event_cb: callback handler for frame event
* @crtc_frame_event_cb_data: callback handler private data
- * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timeout_ms: frame done timeout in ms
* @frame_done_timer: watchdog timer for frame done event
* @vsync_event_timer: vsync timer
* @disp_info: local copy of msm_display_info struct
@@ -196,7 +199,7 @@ struct dpu_encoder_virt {
void (*crtc_frame_event_cb)(void *, u32 event);
void *crtc_frame_event_cb_data;
- atomic_t frame_done_timeout;
+ atomic_t frame_done_timeout_ms;
struct timer_list frame_done_timer;
struct timer_list vsync_event_timer;
@@ -520,8 +523,8 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
list_for_each_entry(cur_mode, &connector->modes, head) {
if (cur_mode->vdisplay == adj_mode->vdisplay &&
- cur_mode->hdisplay == adj_mode->hdisplay &&
- cur_mode->vrefresh == adj_mode->vrefresh) {
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
@@ -959,10 +962,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
struct dpu_kms *dpu_kms;
struct list_head *connector_list;
struct drm_connector *conn = NULL, *conn_iter;
- struct dpu_rm_hw_iter pp_iter, ctl_iter;
+ struct drm_crtc *drm_crtc;
+ struct dpu_crtc_state *cstate;
+ struct dpu_rm_hw_iter hw_iter;
struct msm_display_topology topology;
struct dpu_hw_ctl *hw_ctl[MAX_CHANNELS_PER_ENC] = { NULL };
- int i = 0, ret;
+ struct dpu_hw_mixer *hw_lm[MAX_CHANNELS_PER_ENC] = { NULL };
+ int num_lm = 0, num_ctl = 0;
+ int i, j, ret;
if (!drm_enc) {
DPU_ERROR("invalid encoder\n");
@@ -990,10 +997,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
+ drm_for_each_crtc(drm_crtc, drm_enc->dev)
+ if (drm_crtc->state->encoder_mask & drm_encoder_mask(drm_enc))
+ break;
+
topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
- ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
topology, false);
if (ret) {
DPU_ERROR_ENC(dpu_enc,
@@ -1001,21 +1012,41 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
return;
}
- dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
dpu_enc->hw_pp[i] = NULL;
- if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) hw_iter.hw;
+ }
+
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ hw_ctl[i] = (struct dpu_hw_ctl *)hw_iter.hw;
+ num_ctl++;
}
- dpu_rm_init_hw_iter(&ctl_iter, drm_enc->base.id, DPU_HW_BLK_CTL);
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id, DPU_HW_BLK_LM);
for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
- if (!dpu_rm_get_hw(&dpu_kms->rm, &ctl_iter))
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
break;
- hw_ctl[i] = (struct dpu_hw_ctl *)ctl_iter.hw;
+ hw_lm[i] = (struct dpu_hw_mixer *)hw_iter.hw;
+ num_lm++;
}
+ cstate = to_dpu_crtc_state(drm_crtc->state);
+
+ for (i = 0; i < num_lm; i++) {
+ int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+ cstate->mixers[i].hw_lm = hw_lm[i];
+ cstate->mixers[i].lm_ctl = hw_ctl[ctl_idx];
+ }
+
+ cstate->num_mixers = num_lm;
+
for (i = 0; i < dpu_enc->num_phys_encs; i++) {
struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
@@ -1023,18 +1054,38 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
if (!dpu_enc->hw_pp[i]) {
DPU_ERROR_ENC(dpu_enc, "no pp block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
if (!hw_ctl[i]) {
DPU_ERROR_ENC(dpu_enc, "no ctl block assigned"
"at idx: %d\n", i);
- return;
+ goto error;
}
phys->hw_pp = dpu_enc->hw_pp[i];
phys->hw_ctl = hw_ctl[i];
+ dpu_rm_init_hw_iter(&hw_iter, drm_enc->base.id,
+ DPU_HW_BLK_INTF);
+ for (j = 0; j < MAX_CHANNELS_PER_ENC; j++) {
+ struct dpu_hw_intf *hw_intf;
+
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &hw_iter))
+ break;
+
+ hw_intf = (struct dpu_hw_intf *)hw_iter.hw;
+ if (hw_intf->idx == phys->intf_idx)
+ phys->hw_intf = hw_intf;
+ }
+
+ if (!phys->hw_intf) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no intf block assigned at idx: %d\n",
+ i);
+ goto error;
+ }
+
phys->connector = conn->state->connector;
if (phys->ops.mode_set)
phys->ops.mode_set(phys, mode, adj_mode);
@@ -1042,6 +1093,9 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
}
dpu_enc->mode_set_complete = true;
+
+error:
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
}
static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1182,7 +1236,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
}
/* after phys waits for frame-done, should be no more frames pending */
- if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ if (atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
del_timer_sync(&dpu_enc->frame_done_timer);
}
@@ -1339,7 +1393,7 @@ static void dpu_encoder_frame_done_callback(
}
if (!dpu_enc->frame_busy_mask[0]) {
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
del_timer(&dpu_enc->frame_done_timer);
dpu_encoder_resource_control(drm_enc,
@@ -1547,8 +1601,14 @@ static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc,
if (!ctl)
continue;
- if (phys->split_role != ENC_ROLE_SLAVE)
+ /*
+ * This is cleared in frame_done worker, which isn't invoked
+ * for async commits. So don't set this for async, since it'll
+ * roll over to the next commit.
+ */
+ if (!async && phys->split_role != ENC_ROLE_SLAVE)
set_bit(i, dpu_enc->frame_busy_mask);
+
if (!phys->ops.needs_single_flush ||
!phys->ops.needs_single_flush(phys))
_dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0,
@@ -1800,11 +1860,20 @@ void dpu_encoder_kickoff(struct drm_encoder *drm_enc, bool async)
trace_dpu_enc_kickoff(DRMID(drm_enc));
- atomic_set(&dpu_enc->frame_done_timeout,
- DPU_FRAME_DONE_TIMEOUT * 1000 /
- drm_enc->crtc->state->adjusted_mode.vrefresh);
- mod_timer(&dpu_enc->frame_done_timer, jiffies +
- ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+ /*
+ * Asynchronous frames don't handle FRAME_DONE events. As such, they
+ * shouldn't enable the frame_done watchdog since it will always time
+ * out.
+ */
+ if (!async) {
+ unsigned long timeout_ms;
+ timeout_ms = DPU_ENCODER_FRAME_DONE_TIMEOUT_FRAMES * 1000 /
+ drm_mode_vrefresh(&drm_enc->crtc->state->adjusted_mode);
+
+ atomic_set(&dpu_enc->frame_done_timeout_ms, timeout_ms);
+ mod_timer(&dpu_enc->frame_done_timer,
+ jiffies + msecs_to_jiffies(timeout_ms));
+ }
/* All phys encs are ready to go, trigger the kickoff */
_dpu_encoder_kickoff_phys(dpu_enc, async);
@@ -2124,7 +2193,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
return;
- } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout_ms, 0)) {
DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
return;
}
@@ -2170,7 +2239,7 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
spin_lock_init(&dpu_enc->enc_spinlock);
- atomic_set(&dpu_enc->frame_done_timeout, 0);
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
timer_setup(&dpu_enc->frame_done_timer,
dpu_encoder_frame_done_timeout, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
index db94f3d3bea3..97fb868a4ef6 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -200,6 +200,7 @@ struct dpu_encoder_irq {
* @hw_mdptop: Hardware interface to the top registers
* @hw_ctl: Hardware interface to the ctl registers
* @hw_pp: Hardware interface to the ping pong registers
+ * @hw_intf: Hardware interface to the intf registers
* @dpu_kms: Pointer to the dpu_kms top level
* @cached_mode: DRM mode cached at mode_set time, acted on in enable
* @enabled: Whether the encoder has enabled and running a mode
@@ -228,6 +229,7 @@ struct dpu_encoder_phys {
struct dpu_hw_mdp *hw_mdptop;
struct dpu_hw_ctl *hw_ctl;
struct dpu_hw_pingpong *hw_pp;
+ struct dpu_hw_intf *hw_intf;
struct dpu_kms *dpu_kms;
struct drm_display_mode cached_mode;
enum dpu_enc_split_role split_role;
@@ -251,19 +253,6 @@ static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
}
/**
- * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
- * mode specific operations
- * @base: Baseclass physical encoder structure
- * @hw_intf: Hardware interface to the intf registers
- * @timing_params: Current timing parameter
- */
-struct dpu_encoder_phys_vid {
- struct dpu_encoder_phys base;
- struct dpu_hw_intf *hw_intf;
- struct intf_timing_params timing_params;
-};
-
-/**
* struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
* mode specific operations
* @base: Baseclass physical encoder structure
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index a399e1edd313..973737fb5c9f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -404,7 +404,8 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
return;
}
- tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+ tc_cfg.vsync_count = vsync_hz /
+ (mode->vtotal * drm_mode_vrefresh(mode));
/* enable external TE after kickoff to avoid premature autorefresh */
tc_cfg.hw_vsync_mode = 0;
@@ -424,7 +425,7 @@ static void dpu_encoder_phys_cmd_tearcheck_config(
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
- mode->vtotal, mode->vrefresh);
+ mode->vtotal, drm_mode_vrefresh(mode));
DPU_DEBUG_CMDENC(cmd_enc,
"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 3c4eb470a82c..1b7a335a6140 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -18,14 +18,14 @@
#include "dpu_trace.h"
#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
- (e) && (e)->base.parent ? \
- (e)->base.parent->base.id : -1, \
+ (e) && (e)->parent ? \
+ (e)->parent->base.id : -1, \
(e) && (e)->hw_intf ? \
(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
@@ -44,7 +44,7 @@ static bool dpu_encoder_phys_vid_is_master(
}
static void drm_mode_to_intf_timing_params(
- const struct dpu_encoder_phys_vid *vid_enc,
+ const struct dpu_encoder_phys *phys_enc,
const struct drm_display_mode *mode,
struct intf_timing_params *timing)
{
@@ -92,7 +92,7 @@ static void drm_mode_to_intf_timing_params(
timing->hsync_skew = mode->hskew;
/* DSI controller cannot handle active-low sync signals. */
- if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ if (phys_enc->hw_intf->cap->type == INTF_DSI) {
timing->hsync_polarity = 0;
timing->vsync_polarity = 0;
}
@@ -143,11 +143,11 @@ static u32 get_vertical_total(const struct intf_timing_params *timing)
* lines based on the chip worst case latencies.
*/
static u32 programmable_fetch_get_num_lines(
- struct dpu_encoder_phys_vid *vid_enc,
+ struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
u32 worst_case_needed_lines =
- vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ phys_enc->hw_intf->cap->prog_fetch_lines_worst_case;
u32 start_of_frame_lines =
timing->v_back_porch + timing->vsync_pulse_width;
u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
@@ -155,26 +155,26 @@ static u32 programmable_fetch_get_num_lines(
/* Fetch must be outside active lines, otherwise undefined. */
if (start_of_frame_lines >= worst_case_needed_lines) {
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"prog fetch is not needed, large vbp+vsw\n");
actual_vfp_lines = 0;
} else if (timing->v_front_porch < needed_vfp_lines) {
/* Warn fetch needed, but not enough porch in panel config */
pr_warn_once
("low vbp+vfp may lead to perf issues in some cases\n");
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"less vfp than fetch req, using entire vfp\n");
actual_vfp_lines = timing->v_front_porch;
} else {
- DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ DPU_DEBUG_VIDENC(phys_enc, "room in vfp for needed prefetch\n");
actual_vfp_lines = needed_vfp_lines;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
timing->v_front_porch, timing->v_back_porch,
timing->vsync_pulse_width);
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
@@ -194,8 +194,6 @@ static u32 programmable_fetch_get_num_lines(
static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
const struct intf_timing_params *timing)
{
- struct dpu_encoder_phys_vid *vid_enc =
- to_dpu_encoder_phys_vid(phys_enc);
struct intf_prog_fetch f = { 0 };
u32 vfp_fetch_lines = 0;
u32 horiz_total = 0;
@@ -203,10 +201,10 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
u32 vfp_fetch_start_vsync_counter = 0;
unsigned long lock_flags;
- if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ if (WARN_ON_ONCE(!phys_enc->hw_intf->ops.setup_prg_fetch))
return;
- vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ vfp_fetch_lines = programmable_fetch_get_num_lines(phys_enc, timing);
if (vfp_fetch_lines) {
vert_total = get_vertical_total(timing);
horiz_total = get_horizontal_total(timing);
@@ -216,12 +214,12 @@ static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
f.fetch_start = vfp_fetch_start_vsync_counter;
}
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
vfp_fetch_lines, vfp_fetch_start_vsync_counter);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ phys_enc->hw_intf->ops.setup_prg_fetch(phys_enc->hw_intf, &f);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
}
@@ -231,7 +229,7 @@ static bool dpu_encoder_phys_vid_mode_fixup(
struct drm_display_mode *adj_mode)
{
if (phys_enc)
- DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
/*
* Modifying mode has consequences when the mode comes back to us
@@ -242,7 +240,6 @@ static bool dpu_encoder_phys_vid_mode_fixup(
static void dpu_encoder_phys_vid_setup_timing_engine(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct drm_display_mode mode;
struct intf_timing_params timing_params = { 0 };
const struct dpu_format *fmt = NULL;
@@ -256,13 +253,12 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
}
mode = phys_enc->cached_mode;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ if (!phys_enc->hw_intf->ops.setup_timing_gen) {
DPU_ERROR("timing engine setup is not supported\n");
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "enabling mode:\n");
drm_mode_debug_printmodeline(&mode);
if (phys_enc->split_role != ENC_ROLE_SOLO) {
@@ -271,32 +267,30 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
mode.hsync_start >>= 1;
mode.hsync_end >>= 1;
- DPU_DEBUG_VIDENC(vid_enc,
+ DPU_DEBUG_VIDENC(phys_enc,
"split_role %d, halve horizontal %d %d %d %d\n",
phys_enc->split_role,
mode.hdisplay, mode.htotal,
mode.hsync_start, mode.hsync_end);
}
- drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+ drm_mode_to_intf_timing_params(phys_enc, &mode, &timing_params);
fmt = dpu_get_dpu_format(fmt_fourcc);
- DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+ DPU_DEBUG_VIDENC(phys_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
- intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf = phys_enc->hw_intf->idx;
intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
intf_cfg.stream_sel = 0; /* Don't care value for video mode */
intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ phys_enc->hw_intf->ops.setup_timing_gen(phys_enc->hw_intf,
&timing_params, fmt);
phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
programmable_fetch_config(phys_enc, &timing_params);
-
- vid_enc->timing_params = timing_params;
}
static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
@@ -353,22 +347,10 @@ static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
phys_enc);
}
-static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
-{
- struct dpu_crtc_state *dpu_cstate;
-
- if (!phys_enc)
- return false;
-
- dpu_cstate = to_dpu_crtc_state(phys_enc->parent->crtc->state);
-
- return dpu_cstate->num_ctls > 1;
-}
-
static bool dpu_encoder_phys_vid_needs_single_flush(
struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+ return phys_enc->split_role != ENC_ROLE_SOLO;
}
static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
@@ -396,19 +378,15 @@ static void dpu_encoder_phys_vid_mode_set(
struct drm_display_mode *mode,
struct drm_display_mode *adj_mode)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc || !phys_enc->dpu_kms) {
DPU_ERROR("invalid encoder/kms\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
if (adj_mode) {
phys_enc->cached_mode = *adj_mode;
drm_mode_debug_printmodeline(adj_mode);
- DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ DPU_DEBUG_VIDENC(phys_enc, "caching mode:\n");
}
_dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
@@ -419,7 +397,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
bool enable)
{
int ret = 0;
- struct dpu_encoder_phys_vid *vid_enc;
int refcount;
if (!phys_enc) {
@@ -428,7 +405,6 @@ static int dpu_encoder_phys_vid_control_vblank_irq(
}
refcount = atomic_read(&phys_enc->vblank_refcount);
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
/* Slave encoders don't report vblank */
if (!dpu_encoder_phys_vid_is_master(phys_enc))
@@ -453,7 +429,7 @@ end:
if (ret) {
DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ phys_enc->hw_intf->idx - INTF_0, ret, enable,
refcount);
}
return ret;
@@ -461,43 +437,17 @@ end:
static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
{
- struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
- struct dpu_rm_hw_iter iter;
struct dpu_hw_ctl *ctl;
u32 flush_mask = 0;
- if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
- !phys_enc->parent->dev->dev_private) {
- DPU_ERROR("invalid encoder/device\n");
- return;
- }
- priv = phys_enc->parent->dev->dev_private;
-
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
- dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_INTF);
- while (dpu_rm_get_hw(&phys_enc->dpu_kms->rm, &iter)) {
- struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
-
- if (hw_intf->idx == phys_enc->intf_idx) {
- vid_enc->hw_intf = hw_intf;
- break;
- }
- }
-
- if (!vid_enc->hw_intf) {
- DPU_ERROR("hw_intf not assigned\n");
- return;
- }
-
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
- dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->hw_intf->idx);
dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
@@ -510,12 +460,13 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
!dpu_encoder_phys_vid_is_master(phys_enc))
goto skip_flush;
- ctl->ops.get_bitmask_intf(ctl, &flush_mask, vid_enc->hw_intf->idx);
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->hw_intf->idx);
ctl->ops.update_pending_flush(ctl, flush_mask);
skip_flush:
- DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
- ctl->idx - CTL_0, flush_mask);
+ DPU_DEBUG_VIDENC(phys_enc,
+ "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
/* ctl_flush & timing engine enable will be triggered by framework */
if (phys_enc->enable_state == DPU_ENC_DISABLED)
@@ -524,16 +475,13 @@ skip_flush:
static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "\n");
- kfree(vid_enc);
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
+ kfree(phys_enc);
}
static void dpu_encoder_phys_vid_get_hw_resources(
@@ -589,7 +537,6 @@ static int dpu_encoder_phys_vid_wait_for_vblank(
static void dpu_encoder_phys_vid_prepare_for_kickoff(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
struct dpu_hw_ctl *ctl;
int rc;
@@ -597,7 +544,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
DPU_ERROR("invalid encoder/parameters\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
ctl = phys_enc->hw_ctl;
if (!ctl || !ctl->ops.wait_reset_status)
@@ -609,7 +555,7 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
*/
rc = ctl->ops.wait_reset_status(ctl);
if (rc) {
- DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ DPU_ERROR_VIDENC(phys_enc, "ctl %d reset failure: %d\n",
ctl->idx, rc);
dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
}
@@ -618,7 +564,6 @@ static void dpu_encoder_phys_vid_prepare_for_kickoff(
static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
{
struct msm_drm_private *priv;
- struct dpu_encoder_phys_vid *vid_enc;
unsigned long lock_flags;
int ret;
@@ -629,16 +574,13 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
priv = phys_enc->parent->dev->dev_private;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ if (!phys_enc->hw_intf || !phys_enc->hw_ctl) {
DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
- vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+ phys_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
return;
}
- DPU_DEBUG_VIDENC(vid_enc, "\n");
-
- if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ if (WARN_ON(!phys_enc->hw_intf->ops.enable_timing))
return;
if (phys_enc->enable_state == DPU_ENC_DISABLED) {
@@ -647,7 +589,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
}
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 0);
if (dpu_encoder_phys_vid_is_master(phys_enc))
dpu_encoder_phys_inc_pending(phys_enc);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
@@ -666,7 +608,7 @@ static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
atomic_set(&phys_enc->pending_kickoff_cnt, 0);
DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0, ret);
+ phys_enc->hw_intf->idx - INTF_0, ret);
}
}
@@ -677,25 +619,21 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
struct dpu_encoder_phys *phys_enc)
{
unsigned long lock_flags;
- struct dpu_encoder_phys_vid *vid_enc;
if (!phys_enc) {
DPU_ERROR("invalid encoder\n");
return;
}
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
-
/*
* Video mode must flush CTL before enabling timing engine
* Video encoders need to turn on their interfaces now
*/
if (phys_enc->enable_state == DPU_ENC_ENABLING) {
trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0);
+ phys_enc->hw_intf->idx - INTF_0);
spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
- vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ phys_enc->hw_intf->ops.enable_timing(phys_enc->hw_intf, 1);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
phys_enc->enable_state = DPU_ENC_ENABLED;
}
@@ -704,16 +642,13 @@ static void dpu_encoder_phys_vid_handle_post_kickoff(
static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
bool enable)
{
- struct dpu_encoder_phys_vid *vid_enc;
int ret;
if (!phys_enc)
return;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
-
trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
- vid_enc->hw_intf->idx - INTF_0,
+ phys_enc->hw_intf->idx - INTF_0,
enable,
atomic_read(&phys_enc->vblank_refcount));
@@ -732,19 +667,16 @@ static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
static int dpu_encoder_phys_vid_get_line_count(
struct dpu_encoder_phys *phys_enc)
{
- struct dpu_encoder_phys_vid *vid_enc;
-
if (!phys_enc)
return -EINVAL;
if (!dpu_encoder_phys_vid_is_master(phys_enc))
return -EINVAL;
- vid_enc = to_dpu_encoder_phys_vid(phys_enc);
- if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ if (!phys_enc->hw_intf || !phys_enc->hw_intf->ops.get_line_count)
return -EINVAL;
- return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+ return phys_enc->hw_intf->ops.get_line_count(phys_enc->hw_intf);
}
static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
@@ -771,7 +703,6 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
struct dpu_enc_phys_init_params *p)
{
struct dpu_encoder_phys *phys_enc = NULL;
- struct dpu_encoder_phys_vid *vid_enc = NULL;
struct dpu_encoder_irq *irq;
int i, ret = 0;
@@ -780,18 +711,16 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
goto fail;
}
- vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
- if (!vid_enc) {
+ phys_enc = kzalloc(sizeof(*phys_enc), GFP_KERNEL);
+ if (!phys_enc) {
ret = -ENOMEM;
goto fail;
}
- phys_enc = &vid_enc->base;
-
phys_enc->hw_mdptop = p->dpu_kms->hw_mdp;
phys_enc->intf_idx = p->intf_idx;
- DPU_DEBUG_VIDENC(vid_enc, "\n");
+ DPU_DEBUG_VIDENC(phys_enc, "\n");
dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
phys_enc->parent = p->parent;
@@ -825,13 +754,13 @@ struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
init_waitqueue_head(&phys_enc->pending_kickoff_wq);
phys_enc->enable_state = DPU_ENC_DISABLED;
- DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+ DPU_DEBUG_VIDENC(phys_enc, "created intf idx:%d\n", p->intf_idx);
return phys_enc;
fail:
DPU_ERROR("failed to create encoder\n");
- if (vid_enc)
+ if (phys_enc)
dpu_encoder_phys_vid_destroy(phys_enc);
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index ac75cfc267f4..31e9ef96ca5d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -73,9 +73,6 @@
#define DPU_NAME_SIZE 12
-/* timeout in frames waiting for frame done */
-#define DPU_FRAME_DONE_TIMEOUT 60
-
/*
* struct dpu_irq_callback - IRQ callback handlers
* @list: list to callback
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index b01183b309b9..da1f727d7495 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -387,7 +387,7 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
- ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
ot_params.rd = true;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
index 9bf9d6065c55..7b9edc21bc2c 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c
@@ -59,10 +59,10 @@ static int pingpong_tearcheck_setup(struct drm_encoder *encoder,
return -EINVAL;
}
- total_lines_x100 = mode->vtotal * mode->vrefresh;
+ total_lines_x100 = mode->vtotal * drm_mode_vrefresh(mode);
if (!total_lines_x100) {
DRM_DEV_ERROR(dev, "%s: vtotal(%d) or vrefresh(%d) is 0\n",
- __func__, mode->vtotal, mode->vrefresh);
+ __func__, mode->vtotal, drm_mode_vrefresh(mode));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index fb423d309e91..67ef300559cf 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -75,7 +75,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
struct msm_gpu_show_priv *show_priv;
int ret;
- if (!gpu)
+ if (!gpu || !gpu->funcs->gpu_state_get)
return -ENODEV;
show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0bdd93648761..31deb87abfc6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -39,9 +39,10 @@
* MSM_GEM_INFO ioctl.
* - 1.4.0 - softpin, MSM_RELOC_BO_DUMP, and GEM_INFO support to set/get
* GEM object's debug name
+ * - 1.5.0 - Add SUBMITQUERY_QUERY ioctl
*/
#define MSM_VERSION_MAJOR 1
-#define MSM_VERSION_MINOR 4
+#define MSM_VERSION_MINOR 5
#define MSM_VERSION_PATCHLEVEL 0
static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -457,6 +458,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv->wq = alloc_ordered_workqueue("msm", 0);
+ INIT_WORK(&priv->free_work, msm_gem_free_work);
+ init_llist_head(&priv->free_list);
+
INIT_LIST_HEAD(&priv->inactive_list);
drm_mode_config_init(ddev);
@@ -964,6 +968,11 @@ static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
args->flags, &args->id);
}
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ return msm_submitqueue_query(dev, file->driver_priv, data);
+}
static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
struct drm_file *file)
@@ -984,6 +993,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct vm_operations_struct vm_ops = {
@@ -1019,7 +1029,7 @@ static struct drm_driver msm_driver = {
.irq_uninstall = msm_irq_uninstall,
.enable_vblank = msm_enable_vblank,
.disable_vblank = msm_disable_vblank,
- .gem_free_object = msm_gem_free_object,
+ .gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
.dumb_map_offset = msm_gem_dumb_map_offset,
@@ -1027,7 +1037,6 @@ static struct drm_driver msm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
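
The bump to driver version 1.5.0 earlier in this file advertises the new SUBMITQUEUE_QUERY ioctl to userspace. A hedged userspace sketch of gating its use on that version with libdrm's drmGetVersion(); the helper name is illustrative, not part of any existing library:

#include <stdbool.h>
#include <xf86drm.h>

/* Only use MSM_SUBMITQUEUE_QUERY when the msm driver reports uabi >= 1.5 */
static bool msm_has_submitqueue_query(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	ok = (v->version_major > 1) ||
	     (v->version_major == 1 && v->version_minor >= 5);
	drmFreeVersion(v);
	return ok;
}
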
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c56dade2c1dc..eb33d2d00d77 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -185,6 +185,10 @@ struct msm_drm_private {
/* list of GEM objects: */
struct list_head inactive_list;
+ /* worker for delayed free of objects: */
+ struct work_struct free_work;
+ struct llist_head free_list;
+
struct workqueue_struct *wq;
unsigned int num_planes;
@@ -292,7 +296,6 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -325,6 +328,7 @@ void msm_gem_kernel_put(struct drm_gem_object *bo,
struct msm_gem_address_space *aspace, bool locked);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
struct dma_buf *dmabuf, struct sg_table *sgt);
+void msm_gem_free_work(struct work_struct *work);
__printf(2, 3)
void msm_gem_object_set_name(struct drm_gem_object *bo, const char *fmt, ...);
@@ -420,6 +424,8 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
u32 id);
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
u32 prio, u32 flags, u32 *id);
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args);
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
void msm_submitqueue_close(struct msm_file_private *ctx);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c03e860ba737..d088299babf3 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -122,13 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &msm_fb_ops;
- strcpy(fbi->fix.id, "msm");
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = paddr;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 18ca651ab942..31d5a744d84f 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -672,14 +672,13 @@ void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass)
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object_list *fobj;
struct dma_fence *fence;
int i, ret;
- fobj = reservation_object_get_list(msm_obj->resv);
+ fobj = reservation_object_get_list(obj->resv);
if (!fobj || (fobj->shared_count == 0)) {
- fence = reservation_object_get_excl(msm_obj->resv);
+ fence = reservation_object_get_excl(obj->resv);
/* don't need to wait on our own fences, since ring is fifo */
if (fence && (fence->context != fctx->context)) {
ret = dma_fence_wait(fence, true);
@@ -693,7 +692,7 @@ int msm_gem_sync_object(struct drm_gem_object *obj,
for (i = 0; i < fobj->shared_count; i++) {
fence = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(msm_obj->resv));
+ reservation_object_held(obj->resv));
if (fence->context != fctx->context) {
ret = dma_fence_wait(fence, true);
if (ret)
@@ -711,9 +710,9 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
msm_obj->gpu = gpu;
if (exclusive)
- reservation_object_add_excl_fence(msm_obj->resv, fence);
+ reservation_object_add_excl_fence(obj->resv, fence);
else
- reservation_object_add_shared_fence(msm_obj->resv, fence);
+ reservation_object_add_shared_fence(obj->resv, fence);
list_del_init(&msm_obj->mm_list);
list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}
@@ -733,13 +732,12 @@ void msm_gem_move_to_inactive(struct drm_gem_object *obj)
int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
bool write = !!(op & MSM_PREP_WRITE);
unsigned long remain =
op & MSM_PREP_NOSYNC ? 0 : timeout_to_jiffies(timeout);
long ret;
- ret = reservation_object_wait_timeout_rcu(msm_obj->resv, write,
+ ret = reservation_object_wait_timeout_rcu(obj->resv, write,
true, remain);
if (ret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
@@ -771,7 +769,7 @@ static void describe_fence(struct dma_fence *fence, const char *type,
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct reservation_object *robj = msm_obj->resv;
+ struct reservation_object *robj = obj->resv;
struct reservation_object_list *fobj;
struct dma_fence *fence;
struct msm_gem_vma *vma;
@@ -853,8 +851,18 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
/* don't call directly! Use drm_gem_object_put() and friends */
void msm_gem_free_object(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (llist_add(&msm_obj->freed, &priv->free_list))
+ queue_work(priv->wq, &priv->free_work);
+}
+
+static void free_object(struct msm_gem_object *msm_obj)
+{
+ struct drm_gem_object *obj = &msm_obj->base;
+ struct drm_device *dev = obj->dev;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -883,15 +891,35 @@ void msm_gem_free_object(struct drm_gem_object *obj)
put_pages(obj);
}
- if (msm_obj->resv == &msm_obj->_resv)
- reservation_object_fini(msm_obj->resv);
-
drm_gem_object_release(obj);
mutex_unlock(&msm_obj->lock);
kfree(msm_obj);
}
+void msm_gem_free_work(struct work_struct *work)
+{
+ struct msm_drm_private *priv =
+ container_of(work, struct msm_drm_private, free_work);
+ struct drm_device *dev = priv->dev;
+ struct llist_node *freed;
+ struct msm_gem_object *msm_obj, *next;
+
+ while ((freed = llist_del_all(&priv->free_list))) {
+
+ mutex_lock(&dev->struct_mutex);
+
+ llist_for_each_entry_safe(msm_obj, next,
+ freed, freed)
+ free_object(msm_obj);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (need_resched())
+ break;
+ }
+}
+
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle,
@@ -945,12 +973,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
- if (resv) {
- msm_obj->resv = resv;
- } else {
- msm_obj->resv = &msm_obj->_resv;
- reservation_object_init(msm_obj->resv);
- }
+ if (resv)
+ msm_obj->base.resv = resv;
INIT_LIST_HEAD(&msm_obj->submit_entry);
INIT_LIST_HEAD(&msm_obj->vmas);
@@ -1026,6 +1050,13 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
ret = drm_gem_object_init(dev, obj, size);
if (ret)
goto fail;
+ /*
+ * Our buffers are kept pinned, so allocating them from the
+ * MOVABLE zone is a really bad idea, and conflicts with CMA.
+ * See comments above new_inode() for why this is required _and_
+ * See comments above new_inode() for why this is required _and_
+ * expected if you're going to pin these pages.
+ */
+ mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
}
return obj;
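
The msm_gem.c changes above move final object teardown off the calling context: msm_gem_free_object() now only pushes the object onto a lock-free llist and kicks a worker, and the worker takes struct_mutex and frees whatever has accumulated. A minimal, generic sketch of that pattern under assumed names (not the msm structures):

#include <linux/kernel.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_item {
	struct llist_node node;
	/* ... payload ... */
};

struct deferred_ctx {
	struct llist_head free_list;
	struct work_struct free_work;
	struct workqueue_struct *wq;
	struct mutex lock;		/* stands in for struct_mutex */
};

/* Callable from any context: queue the item, and kick the worker only
 * when the list was previously empty, exactly as the hunk above does. */
static void deferred_free(struct deferred_ctx *ctx, struct deferred_item *it)
{
	if (llist_add(&it->node, &ctx->free_list))
		queue_work(ctx->wq, &ctx->free_work);
}

static void deferred_free_work(struct work_struct *work)
{
	struct deferred_ctx *ctx =
		container_of(work, struct deferred_ctx, free_work);
	struct llist_node *freed;
	struct deferred_item *it, *next;

	while ((freed = llist_del_all(&ctx->free_list))) {
		mutex_lock(&ctx->lock);
		llist_for_each_entry_safe(it, next, freed, node)
			kfree(it);
		mutex_unlock(&ctx->lock);

		if (need_resched())
			break;
	}
}
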
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2064fac871b8..c5ac781dffee 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -84,6 +84,8 @@ struct msm_gem_object {
struct list_head vmas; /* list of msm_gem_vma */
+ struct llist_node freed;
+
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
struct reservation_object _resv;
@@ -133,6 +135,7 @@ enum msm_gem_lock {
void msm_gem_purge(struct drm_gem_object *obj, enum msm_gem_lock subclass);
void msm_gem_vunmap(struct drm_gem_object *obj, enum msm_gem_lock subclass);
+void msm_gem_free_work(struct work_struct *work);
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -163,7 +166,10 @@ struct msm_gem_submit {
} *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
- struct msm_gem_object *obj;
+ union {
+ struct msm_gem_object *obj;
+ uint32_t handle;
+ };
uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 13403c6da6c7..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,10 +70,3 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj)
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
-
-struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
-{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
-
- return msm_obj->resv;
-}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 12b983fc0b56..1b681306aca3 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -74,27 +74,14 @@ void msm_gem_submit_free(struct msm_gem_submit *submit)
kfree(submit);
}
-static inline unsigned long __must_check
-copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
-{
- if (access_ok(from, n))
- return __copy_from_user_inatomic(to, from, n);
- return -EFAULT;
-}
-
static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
int ret = 0;
- spin_lock(&file->table_lock);
- pagefault_disable();
-
for (i = 0; i < args->nr_bos; i++) {
struct drm_msm_gem_submit_bo submit_bo;
- struct drm_gem_object *obj;
- struct msm_gem_object *msm_obj;
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
@@ -103,15 +90,10 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
*/
submit->bos[i].flags = 0;
- if (copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo))) {
- pagefault_enable();
- spin_unlock(&file->table_lock);
- if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
- ret = -EFAULT;
- goto out;
- }
- spin_lock(&file->table_lock);
- pagefault_disable();
+ if (copy_from_user(&submit_bo, userptr, sizeof(submit_bo))) {
+ ret = -EFAULT;
+ i = 0;
+ goto out;
}
/* at least one of READ and/or WRITE flags should be set: */
@@ -121,19 +103,28 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
!(submit_bo.flags & MANDATORY_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
- goto out_unlock;
+ i = 0;
+ goto out;
}
+ submit->bos[i].handle = submit_bo.handle;
submit->bos[i].flags = submit_bo.flags;
/* in validate_objects() we figure out if this is true: */
submit->bos[i].iova = submit_bo.presumed;
+ }
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
/* normally use drm_gem_object_lookup(), but for bulk lookup
* all under single table_lock just hit object_idr directly:
*/
- obj = idr_find(&file->object_idr, submit_bo.handle);
+ obj = idr_find(&file->object_idr, submit->bos[i].handle);
if (!obj) {
- DRM_ERROR("invalid handle %u at index %u\n", submit_bo.handle, i);
+ DRM_ERROR("invalid handle %u at index %u\n", submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -142,7 +133,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
- submit_bo.handle, i);
+ submit->bos[i].handle, i);
ret = -EINVAL;
goto out_unlock;
}
@@ -155,7 +146,6 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
}
out_unlock:
- pagefault_enable();
spin_unlock(&file->table_lock);
out:
@@ -173,7 +163,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit,
msm_gem_unpin_iova(&msm_obj->base, submit->gpu->aspace);
if (submit->bos[i].flags & BO_LOCKED)
- ww_mutex_unlock(&msm_obj->resv->lock);
+ ww_mutex_unlock(&msm_obj->base.resv->lock);
if (backoff && !(submit->bos[i].flags & BO_VALID))
submit->bos[i].iova = 0;
@@ -196,7 +186,7 @@ retry:
contended = i;
if (!(submit->bos[i].flags & BO_LOCKED)) {
- ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (ret)
goto fail;
@@ -218,7 +208,7 @@ fail:
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
/* we lost out in a seqno race, lock and retry.. */
- ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->base.resv->lock,
&submit->ticket);
if (!ret) {
submit->bos[contended].flags |= BO_LOCKED;
@@ -244,7 +234,7 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
* strange place to call it. OTOH this is a
* convenient can-fail point to hook it in.
*/
- ret = reservation_object_reserve_shared(msm_obj->resv,
+ ret = reservation_object_reserve_shared(msm_obj->base.resv,
1);
if (ret)
return ret;
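
The submit_lookup_objects() rework above splits BO lookup into two passes so that copy_from_user() never runs under the table_lock spinlock; the old code needed the open-coded inatomic variant, pagefault_disable(), and a drop-the-lock retry dance. A condensed sketch of the two-pass shape, with hypothetical struct names standing in for the msm descriptors:

#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>

struct bo_desc {		/* stand-in for drm_msm_gem_submit_bo */
	u32 handle;
	u32 flags;
	u64 presumed;
};

struct bo_slot {		/* stand-in for the submit->bos[] entry */
	u32 handle;
	u32 flags;
	struct drm_gem_object *obj;
};

static int lookup_two_pass(struct drm_file *file, u64 user_bos,
			   unsigned int nr, struct bo_slot *slots)
{
	unsigned int i;
	int ret = 0;

	/* Pass 1: copy descriptors while faulting/sleeping is still allowed */
	for (i = 0; i < nr; i++) {
		struct bo_desc desc;
		void __user *ptr = u64_to_user_ptr(user_bos + i * sizeof(desc));

		if (copy_from_user(&desc, ptr, sizeof(desc)))
			return -EFAULT;
		slots[i].handle = desc.handle;
		slots[i].flags = desc.flags;
	}

	/* Pass 2: resolve handles under the IDR spinlock only */
	spin_lock(&file->table_lock);
	for (i = 0; i < nr; i++) {
		struct drm_gem_object *obj =
			idr_find(&file->object_idr, slots[i].handle);

		if (!obj) {
			ret = -EINVAL;
			break;
		}
		slots[i].obj = obj;	/* reference counting elided here */
	}
	spin_unlock(&file->table_lock);

	return ret;
}
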
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 49c04829cf34..fcf7a83f0e6f 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -85,7 +85,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace,
vma->mapped = true;
- if (aspace->mmu)
+ if (aspace && aspace->mmu)
ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
size, prot);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 10babd18e286..bf4ee2766431 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -443,24 +443,15 @@ static void recover_worker(struct work_struct *work)
if (submit) {
struct task_struct *task;
+ /* Increment the fault counts */
+ gpu->global_faults++;
+ submit->queue->faults++;
+
task = get_pid_task(submit->pid, PIDTYPE_PID);
if (task) {
comm = kstrdup(task->comm, GFP_KERNEL);
-
- /*
- * So slightly annoying, in other paths like
- * mmap'ing gem buffers, mmap_sem is acquired
- * before struct_mutex, which means we can't
- * hold struct_mutex across the call to
- * get_cmdline(). But submits are retired
- * from the same in-order workqueue, so we can
- * safely drop the lock here without worrying
- * about the submit going away.
- */
- mutex_unlock(&dev->struct_mutex);
cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
put_task_struct(task);
- mutex_lock(&dev->struct_mutex);
}
if (comm && cmd) {
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 6241986bab51..f2739cd97cea 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -104,6 +104,9 @@ struct msm_gpu {
/* does gpu need hw_init? */
bool needs_hw_init;
+ /* number of GPU hangs (for all contexts) */
+ int global_faults;
+
/* worker for handling active-list retiring: */
struct work_struct retire_work;
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 4d62790cd425..12bb54cefd46 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -38,13 +38,8 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- int ret;
- pm_runtime_get_sync(mmu->dev);
- ret = iommu_attach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
-
- return ret;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
@@ -52,9 +47,7 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_detach_device(iommu->domain, mmu->dev);
- pm_runtime_put_sync(mmu->dev);
}
static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
@@ -63,9 +56,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct msm_iommu *iommu = to_msm_iommu(mmu);
size_t ret;
-// pm_runtime_get_sync(mmu->dev);
ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);
-// pm_runtime_put_sync(mmu->dev);
WARN_ON(!ret);
return (ret == len) ? 0 : -EINVAL;
@@ -75,9 +66,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, unsigned len)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- pm_runtime_get_sync(mmu->dev);
iommu_unmap(iommu->domain, iova, len);
- pm_runtime_put_sync(mmu->dev);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 5115f75b5b7f..f160ec40a39b 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -120,6 +120,47 @@ int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}
+static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
+ struct drm_msm_submitqueue_query *args)
+{
+ size_t size = min_t(size_t, args->len, sizeof(queue->faults));
+ int ret;
+
+ /* If a zero length was passed in, return the data size we expect */
+ if (!args->len) {
+ args->len = sizeof(queue->faults);
+ return 0;
+ }
+
+ /* Set the length to the actual size of the data */
+ args->len = size;
+
+ ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);
+
+ return ret ? -EFAULT : 0;
+}
+
+int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
+ struct drm_msm_submitqueue_query *args)
+{
+ struct msm_gpu_submitqueue *queue;
+ int ret = -EINVAL;
+
+ if (args->pad)
+ return -EINVAL;
+
+ queue = msm_submitqueue_get(ctx, args->id);
+ if (!queue)
+ return -ENOENT;
+
+ if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
+ ret = msm_submitqueue_query_faults(queue, args);
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
struct msm_gpu_submitqueue *entry;
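
Together with the new global_faults/queue->faults counters bumped in recover_worker() above, msm_submitqueue_query() lets userspace poll how often a given queue has hung. A hypothetical userspace sketch; DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, MSM_SUBMITQUEUE_PARAM_FAULTS, and the struct field names are assumed to match the uapi additions accompanying this series:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* libdrm's copy of the msm uapi header */

/* Returns 0 on success and fills *faults with the per-queue hang count */
static int query_queue_faults(int fd, uint32_t queue_id, uint32_t *faults)
{
	struct drm_msm_submitqueue_query q;

	memset(&q, 0, sizeof(q));
	q.id = queue_id;
	q.param = MSM_SUBMITQUEUE_PARAM_FAULTS;
	q.data = (uintptr_t)faults;
	q.len = sizeof(*faults);	/* pass 0 instead to query the size */

	return drmIoctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_QUERY, &q);
}
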
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index 0ee1ca8a316a..98e9bda91e80 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -253,12 +253,12 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
/*
- * DRM_BUS_FLAG_PIXDATA_ defines are controller centric,
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_ defines are controller centric,
* controllers VDCTRL0_DOTCLK is display centric.
* Drive on positive edge -> display samples on falling edge
- * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ * DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
*/
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
diff --git a/drivers/gpu/drm/nouveau/Kbuild b/drivers/gpu/drm/nouveau/Kbuild
index 581404e6544d..378c5dd692b0 100644
--- a/drivers/gpu/drm/nouveau/Kbuild
+++ b/drivers/gpu/drm/nouveau/Kbuild
@@ -1,7 +1,7 @@
-ccflags-y += -I$(src)/include
-ccflags-y += -I$(src)/include/nvkm
-ccflags-y += -I$(src)/nvkm
-ccflags-y += -I$(src)
+ccflags-y += -I $(srctree)/$(src)/include
+ccflags-y += -I $(srctree)/$(src)/include/nvkm
+ccflags-y += -I $(srctree)/$(src)/nvkm
+ccflags-y += -I $(srctree)/$(src)
# NVKM - HW resource manager
#- code also used by various userspace tools/tests
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 00cd9ab8948d..553c7da5e8e0 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -17,10 +17,21 @@ config DRM_NOUVEAU
select INPUT if ACPI && X86
select THERMAL if ACPI && X86
select ACPI_VIDEO if ACPI && X86
- select DRM_VM
help
Choose this option for open-source NVIDIA support.
+config NOUVEAU_LEGACY_CTX_SUPPORT
+ bool "Nouveau legacy context support"
+ depends on DRM_NOUVEAU
+ select DRM_LEGACY
+ default y
+ help
+ There was a version of the nouveau DDX that relied on the legacy
+ context ioctls not erroring out. That was a long time ago, so this
+ option provides a way to disable them. For uapi compatibility with
+ the old nouveau DDX it should default to on, but modern distros
+ should consider turning it off.
+
config NOUVEAU_PLATFORM_DRIVER
bool "Nouveau (NVIDIA) SoC GPUs"
depends on DRM_NOUVEAU && ARCH_TEGRA
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index eef54e9b5d77..7957eafa5f0e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -38,6 +38,7 @@ struct nvkm_i2c_bus {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
};
int nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *);
@@ -57,6 +58,7 @@ struct nvkm_i2c_aux {
struct mutex mutex;
struct list_head head;
struct i2c_adapter i2c;
+ u8 enabled;
u32 intr;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 55c0fa451163..832da8e0020d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -358,15 +358,6 @@ nouveau_display_hpd_work(struct work_struct *work)
#ifdef CONFIG_ACPI
-/*
- * Hans de Goede: This define belongs in acpi/video.h, I've submitted a patch
- * to the acpi subsys to move it there from drivers/acpi/acpi_video.c .
- * This should be dropped once that is merged.
- */
-#ifndef ACPI_VIDEO_NOTIFY_PROBE
-#define ACPI_VIDEO_NOTIFY_PROBE 0x81
-#endif
-
static int
nouveau_display_acpi_ntfy(struct notifier_block *nb, unsigned long val,
void *data)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 5020265bfbd9..22cd45845e07 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -802,10 +802,15 @@ fail_display:
static int
nouveau_do_resume(struct drm_device *dev, bool runtime)
{
+ int ret = 0;
struct nouveau_drm *drm = nouveau_drm(dev);
NV_DEBUG(drm, "resuming object tree...\n");
- nvif_client_resume(&drm->master.base);
+ ret = nvif_client_resume(&drm->master.base);
+ if (ret) {
+ NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
+ return ret;
+ }
NV_DEBUG(drm, "resuming fence...\n");
if (drm->fence && nouveau_fence(drm)->resume)
@@ -925,6 +930,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
int ret;
@@ -941,6 +947,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
+ if (ret) {
+ NV_ERROR(drm, "resume failed with: %d\n", ret);
+ return ret;
+ }
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
@@ -1094,8 +1104,11 @@ nouveau_driver_fops = {
static struct drm_driver
driver_stub = {
.driver_features =
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER |
- DRIVER_KMS_LEGACY_CONTEXT,
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER
+#if defined(CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT)
+ | DRIVER_KMS_LEGACY_CONTEXT
+#endif
+ ,
.open = nouveau_drm_open,
.postclose = nouveau_drm_postclose,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index da847244479d..35ff0ca01a3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -60,8 +60,6 @@
struct nouveau_channel;
struct platform_device;
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
#include "nouveau_fence.h"
#include "nouveau_bios.h"
#include "nouveau_vmm.h"
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0d3cd4e05728..73cc3217068a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -365,14 +365,10 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_unlock;
}
- info->skip_vt_switch = 1;
-
- info->par = fbcon;
/* setup helper */
fbcon->helper.fb = &fb->base;
- strcpy(info->fix.id, "nouveaufb");
if (!chan)
info->flags = FBINFO_HWACCEL_DISABLED;
else
@@ -387,9 +383,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo);
info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT;
- drm_fb_helper_fill_fix(info, fb->base.pitches[0],
- fb->base.format->depth);
- drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index db9d52047ef8..73a7eeba3973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -32,7 +32,7 @@
#include "nouveau_display.h"
struct nouveau_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
unsigned int saved_flags;
struct nvif_object surf2d;
struct nvif_object clip;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1543c2f8d3d3..f0daf958e03a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -168,9 +168,6 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_file *file_priv = filp->private_data;
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return drm_legacy_mmap(filp, vma);
-
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
@@ -239,7 +236,6 @@ nouveau_ttm_init(struct nouveau_drm *drm)
ret = ttm_bo_device_init(&drm->ttm.bdev,
&nouveau_bo_driver,
dev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
drm->client.mmu.dmabits <= 32 ? true : false);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index a3ba7f50198b..a3dcb09a40ee 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -94,6 +94,8 @@ gf100_bar_oneinit_bar(struct gf100_bar *bar, struct gf100_barN *bar_vm,
return ret;
bar_len = device->func->resource_size(device, bar_nr);
+ if (!bar_len)
+ return -ENOMEM;
if (bar_nr == 3 && bar->bar2_halve)
bar_len >>= 1;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
index 157b076a1272..f23a0ccc2bec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
@@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
struct nvkm_device *device = bar->base.subdev.device;
static struct lock_class_key bar1_lock;
static struct lock_class_key bar2_lock;
- u64 start, limit;
+ u64 start, limit, size;
int ret;
ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
@@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR2 */
start = 0x0100000000ULL;
- limit = start + device->func->resource_size(device, 3);
+ size = device->func->resource_size(device, 3);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar2_lock, "bar2", &bar->bar2_vmm);
@@ -164,10 +167,15 @@ nv50_bar_oneinit(struct nvkm_bar *base)
/* BAR1 */
start = 0x0000000000ULL;
- limit = start + device->func->resource_size(device, 1);
+ size = device->func->resource_size(device, 1);
+ if (!size)
+ return -ENOMEM;
+ limit = start + size;
ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
&bar1_lock, "bar1", &bar->bar1_vmm);
+ if (ret)
+ return ret;
atomic_inc(&bar->bar1_vmm->engref[NVKM_SUBDEV_BAR]);
bar->bar1_vmm->debug = bar->base.subdev.debug;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index 8bcb7e79a0cb..456aed1f2a02 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1070,7 +1070,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
nvkm_error(subdev, "unable to calc plls\n");
return -EINVAL;
}
- nvkm_debug(subdev, "sucessfully calced PLLs for clock %i kHz"
+ nvkm_debug(subdev, "successfully calced PLLs for clock %i kHz"
" (refclock: %i kHz)\n", next->freq, ret);
} else {
/* calculate refpll coefficients */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 4c1f547da463..b4e7404fe660 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -105,9 +105,15 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
{
struct nvkm_i2c_pad *pad = aux->pad;
int ret;
+
AUX_TRACE(aux, "acquire");
mutex_lock(&aux->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+
+ if (aux->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_AUX);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&aux->mutex);
return ret;
@@ -145,6 +151,24 @@ nvkm_i2c_aux_del(struct nvkm_i2c_aux **paux)
}
}
+void
+nvkm_i2c_aux_init(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "init");
+ mutex_lock(&aux->mutex);
+ aux->enabled = true;
+ mutex_unlock(&aux->mutex);
+}
+
+void
+nvkm_i2c_aux_fini(struct nvkm_i2c_aux *aux)
+{
+ AUX_TRACE(aux, "fini");
+ mutex_lock(&aux->mutex);
+ aux->enabled = false;
+ mutex_unlock(&aux->mutex);
+}
+
int
nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *func,
struct nvkm_i2c_pad *pad, int id,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index 7d56c4ba693c..08f6b2ee64ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -16,6 +16,8 @@ int nvkm_i2c_aux_ctor(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
+void nvkm_i2c_aux_init(struct nvkm_i2c_aux *);
+void nvkm_i2c_aux_fini(struct nvkm_i2c_aux *);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
u32 addr, u8 *data, u8 *size);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
index 4f197b15acf6..ecacb22834d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c
@@ -160,8 +160,18 @@ nvkm_i2c_fini(struct nvkm_subdev *subdev, bool suspend)
{
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_bus *bus;
+ struct nvkm_i2c_aux *aux;
u32 mask;
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_fini(aux);
+ }
+
+ list_for_each_entry(bus, &i2c->bus, head) {
+ nvkm_i2c_bus_fini(bus);
+ }
+
if ((mask = (1 << i2c->func->aux) - 1), i2c->func->aux_stat) {
i2c->func->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
i2c->func->aux_stat(i2c, &mask, &mask, &mask, &mask);
@@ -180,6 +190,7 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
struct nvkm_i2c *i2c = nvkm_i2c(subdev);
struct nvkm_i2c_bus *bus;
struct nvkm_i2c_pad *pad;
+ struct nvkm_i2c_aux *aux;
list_for_each_entry(pad, &i2c->pad, head) {
nvkm_i2c_pad_init(pad);
@@ -189,6 +200,10 @@ nvkm_i2c_init(struct nvkm_subdev *subdev)
nvkm_i2c_bus_init(bus);
}
+ list_for_each_entry(aux, &i2c->aux, head) {
+ nvkm_i2c_aux_init(aux);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
index 807a2b67bd64..ed50cc3736b9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.c
@@ -110,6 +110,19 @@ nvkm_i2c_bus_init(struct nvkm_i2c_bus *bus)
BUS_TRACE(bus, "init");
if (bus->func->init)
bus->func->init(bus);
+
+ mutex_lock(&bus->mutex);
+ bus->enabled = true;
+ mutex_unlock(&bus->mutex);
+}
+
+void
+nvkm_i2c_bus_fini(struct nvkm_i2c_bus *bus)
+{
+ BUS_TRACE(bus, "fini");
+ mutex_lock(&bus->mutex);
+ bus->enabled = false;
+ mutex_unlock(&bus->mutex);
}
void
@@ -126,9 +139,15 @@ nvkm_i2c_bus_acquire(struct nvkm_i2c_bus *bus)
{
struct nvkm_i2c_pad *pad = bus->pad;
int ret;
+
BUS_TRACE(bus, "acquire");
mutex_lock(&bus->mutex);
- ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+
+ if (bus->enabled)
+ ret = nvkm_i2c_pad_acquire(pad, NVKM_I2C_PAD_I2C);
+ else
+ ret = -EIO;
+
if (ret)
mutex_unlock(&bus->mutex);
return ret;
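
The aux.c and bus.c hunks above introduce the same pattern: init/fini toggle an enabled flag under the channel's mutex, and acquire refuses with -EIO while the subdev is suspended instead of touching powered-down hardware. A minimal sketch of that gate with illustrative names (not the nvkm API):

#include <linux/errno.h>
#include <linux/mutex.h>

struct gated_res {
	struct mutex mutex;
	bool enabled;
};

/* On success the mutex stays held, mirroring the nvkm acquire style;
 * on failure it is dropped so the caller never sees the lock. */
static int gated_res_acquire(struct gated_res *res)
{
	int ret = 0;

	mutex_lock(&res->mutex);
	if (!res->enabled)
		ret = -EIO;
	if (ret)
		mutex_unlock(&res->mutex);
	return ret;
}

static void gated_res_fini(struct gated_res *res)
{
	mutex_lock(&res->mutex);
	res->enabled = false;	/* subsequent acquires fail until re-init */
	mutex_unlock(&res->mutex);
}
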
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
index bea0dd33961e..465464bba58b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/bus.h
@@ -18,6 +18,7 @@ int nvkm_i2c_bus_new_(const struct nvkm_i2c_bus_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_bus **);
void nvkm_i2c_bus_del(struct nvkm_i2c_bus **);
void nvkm_i2c_bus_init(struct nvkm_i2c_bus *);
+void nvkm_i2c_bus_fini(struct nvkm_i2c_bus *);
int nvkm_i2c_bit_xfer(struct nvkm_i2c_bus *, struct i2c_msg *, int);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index fa93f964e6a4..41640e0584ac 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1783,7 +1783,7 @@ nvkm_vmm_get(struct nvkm_vmm *vmm, u8 page, u64 size, struct nvkm_vma **pvma)
void
nvkm_vmm_part(struct nvkm_vmm *vmm, struct nvkm_memory *inst)
{
- if (inst && vmm->func->part) {
+ if (inst && vmm && vmm->func->part) {
mutex_lock(&vmm->mutex);
vmm->func->part(vmm, inst);
mutex_unlock(&vmm->mutex);
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index a349cb61961e..7b0bcb494b5c 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -6,23 +6,12 @@ config DRM_OMAP_ENCODER_OPA362
Driver for OPA362 external analog TV amplifier controlled
through a GPIO.
-config DRM_OMAP_ENCODER_TFP410
- tristate "TFP410 DPI to DVI Encoder"
- help
- Driver for TFP410 DPI to DVI encoder.
-
config DRM_OMAP_ENCODER_TPD12S015
tristate "TPD12S015 HDMI ESD protection and level shifter"
help
Driver for TPD12S015, which offers HDMI ESD protection and level
shifting.
-config DRM_OMAP_CONNECTOR_DVI
- tristate "DVI Connector"
- depends on I2C
- help
- Driver for a generic DVI connector.
-
config DRM_OMAP_CONNECTOR_HDMI
tristate "HDMI Connector"
help
@@ -33,12 +22,6 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
help
Driver for a generic analog TV connector.
-config DRM_OMAP_PANEL_DPI
- tristate "Generic DPI panel"
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Driver for generic DPI panels.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index d99659e1381b..1db34d4fed64 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,11 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
-obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 28a3ce8f88d2..6c0561101874 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -35,50 +35,9 @@ static void tvc_disconnect(struct omap_dss_device *src,
{
}
-static int tvc_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tvc_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static const struct omap_dss_device_ops tvc_ops = {
.connect = tvc_connect,
.disconnect = tvc_disconnect,
-
- .enable = tvc_enable,
- .disable = tvc_disable,
};
static int tvc_probe(struct platform_device *pdev)
@@ -97,6 +56,7 @@ static int tvc_probe(struct platform_device *pdev)
dssdev->ops = &tvc_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
@@ -109,12 +69,9 @@ static int tvc_probe(struct platform_device *pdev)
static int __exit tvc_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- tvc_disable(dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
deleted file mode 100644
index 24b14f44248e..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ /dev/null
@@ -1,330 +0,0 @@
-/*
- * Generic DVI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <drm/drm_edid.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct i2c_adapter *i2c_adapter;
-
- struct gpio_desc *hpd_gpio;
-
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- bool hpd_enabled;
- /* mutex for hpd fields above */
- struct mutex hpd_lock;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int dvic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void dvic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int dvic_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void dvic_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int dvic_ddc_read(struct i2c_adapter *adapter,
- unsigned char *buf, u16 count, u8 offset)
-{
- int r, retries;
-
- for (retries = 3; retries > 0; retries--) {
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &offset,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = count,
- .buf = buf,
- }
- };
-
- r = i2c_transfer(adapter, msgs, 2);
- if (r == 2)
- return 0;
-
- if (r != -EAGAIN)
- break;
- }
-
- return r < 0 ? r : -EIO;
-}
-
-static int dvic_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r, l, bytes_read;
-
- l = min(EDID_LENGTH, len);
- r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
- if (r)
- return r;
-
- bytes_read = l;
-
- /* if there are extensions, read second block */
- if (len > EDID_LENGTH && edid[0x7e] > 0) {
- l = min(EDID_LENGTH, len - EDID_LENGTH);
-
- r = dvic_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
- l, EDID_LENGTH);
- if (r)
- return r;
-
- bytes_read += l;
- }
-
- return bytes_read;
-}
-
-static bool dvic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- unsigned char out;
- int r;
-
- if (ddata->hpd_gpio)
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-
- if (!ddata->i2c_adapter)
- return true;
-
- r = dvic_ddc_read(ddata->i2c_adapter, &out, 1, 0);
-
- return r == 0;
-}
-
-static void dvic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops dvic_ops = {
- .connect = dvic_connect,
- .disconnect = dvic_disconnect,
-
- .enable = dvic_enable,
- .disable = dvic_disable,
-
- .read_edid = dvic_read_edid,
- .detect = dvic_detect,
-
- .register_hpd_cb = dvic_register_hpd_cb,
- .unregister_hpd_cb = dvic_unregister_hpd_cb,
-};
-
-static irqreturn_t dvic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (dvic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int dvic_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- struct device_node *adapter_node;
- struct i2c_adapter *adapter;
- struct gpio_desc *gpio;
- int r;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio), NULL, dvic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "DVI HPD", ddata);
- if (r)
- return r;
- }
-
- adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
- if (adapter_node) {
- adapter = of_get_i2c_adapter_by_node(adapter_node);
- of_node_put(adapter_node);
- if (adapter == NULL) {
- dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- return 0;
-}
-
-static int dvic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = dvic_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &dvic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- if (ddata->hpd_gpio)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
- if (ddata->i2c_adapter)
- dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_EDID;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit dvic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(&ddata->dssdev);
-
- dvic_disable(dssdev);
-
- i2c_put_adapter(ddata->i2c_adapter);
-
- mutex_destroy(&ddata->hpd_lock);
-
- return 0;
-}
-
-static const struct of_device_id dvic_of_match[] = {
- { .compatible = "omapdss,dvi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, dvic_of_match);
-
-static struct platform_driver dvi_connector_driver = {
- .probe = dvic_probe,
- .remove = __exit_p(dvic_remove),
- .driver = {
- .name = "connector-dvi",
- .of_match_table = dvic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(dvi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic DVI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index e602fa4a50a4..68d6f6e44b03 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -41,44 +41,6 @@ static void hdmic_disconnect(struct omap_dss_device *src,
{
}
-static int hdmic_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(ddata->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void hdmic_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(ddata->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -113,9 +75,6 @@ static const struct omap_dss_device_ops hdmic_ops = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
- .enable = hdmic_enable,
- .disable = hdmic_disable,
-
.detect = hdmic_detect,
.register_hpd_cb = hdmic_register_hpd_cb,
.unregister_hpd_cb = hdmic_unregister_hpd_cb,
@@ -181,6 +140,7 @@ static int hdmic_probe(struct platform_device *pdev)
dssdev->ops = &hdmic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
dssdev->ops_flags = ddata->hpd_gpio
@@ -196,12 +156,9 @@ static int hdmic_probe(struct platform_device *pdev)
static int __exit hdmic_remove(struct platform_device *pdev)
{
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
omapdss_device_unregister(&ddata->dssdev);
- hdmic_disable(dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 4fefd80f53bb..29a5a130ebd1 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -41,48 +41,20 @@ static void opa362_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int opa362_enable(struct omap_dss_device *dssdev)
+static void opa362_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- dev_dbg(dssdev->dev, "enable\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void opa362_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- dev_dbg(dssdev->dev, "disable\n");
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static const struct omap_dss_device_ops opa362_ops = {
@@ -116,7 +88,6 @@ static int opa362_probe(struct platform_device *pdev)
dssdev->ops = &opa362_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
@@ -141,13 +112,7 @@ static int __exit opa362_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- opa362_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
+ opa362_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
deleted file mode 100644
index f1a748353279..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ /dev/null
@@ -1,170 +0,0 @@
-/*
- * TFP410 DPI-to-DVI encoder driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *pd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tfp410_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void tfp410_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static int tfp410_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void tfp410_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- if (ddata->pd_gpio)
- gpiod_set_value_cansleep(ddata->pd_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static const struct omap_dss_device_ops tfp410_ops = {
- .connect = tfp410_connect,
- .disconnect = tfp410_disconnect,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static int tfp410_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- /* Powerdown GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse powerdown gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->pd_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tfp410_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tfp410_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tfp410_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tfp410_of_match[] = {
- { .compatible = "omapdss,ti,tfp410", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tfp410_of_match);
-
-static struct platform_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = __exit_p(tfp410_remove),
- .driver = {
- .name = "tfp410",
- .of_match_table = tfp410_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tfp410_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("TFP410 DPI to DVI encoder driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 94de55fd8884..bc03752d2762 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -62,35 +62,6 @@ static void tpd_disconnect(struct omap_dss_device *src,
omapdss_device_disconnect(dst, dst->next);
}
-static int tpd_enable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-}
-
-static void tpd_disable(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *src = dssdev->src;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -124,8 +95,6 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
static const struct omap_dss_device_ops tpd_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
- .enable = tpd_enable,
- .disable = tpd_disable,
.detect = tpd_detect,
.register_hpd_cb = tpd_register_hpd_cb,
.unregister_hpd_cb = tpd_unregister_hpd_cb,
@@ -198,7 +167,6 @@ static int tpd_probe(struct platform_device *pdev)
dssdev->ops = &tpd_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(1) | BIT(0);
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
@@ -225,14 +193,6 @@ static int __exit tpd_remove(struct platform_device *pdev)
omapdss_device_put(dssdev->next);
omapdss_device_unregister(&ddata->dssdev);
- WARN_ON(omapdss_device_is_enabled(dssdev));
- if (omapdss_device_is_enabled(dssdev))
- tpd_disable(dssdev);
-
- WARN_ON(omapdss_device_is_connected(dssdev));
- if (omapdss_device_is_connected(dssdev))
- omapdss_device_disconnect(NULL, dssdev);
-
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
deleted file mode 100644
index 465120809eb3..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ /dev/null
@@ -1,221 +0,0 @@
-/*
- * Generic MIPI DPI Panel Driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/regulator/consumer.h>
-#include <linux/backlight.h>
-
-#include <video/of_display_timing.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct videomode vm;
-
- struct backlight_device *backlight;
-
- struct gpio_desc *enable_gpio;
- struct regulator *vcc_supply;
-};
-
-#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-
-static int panel_dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void panel_dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static int panel_dpi_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
- r = regulator_enable(ddata->vcc_supply);
- if (r) {
- src->ops->disable(src);
- return r;
- }
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
- backlight_enable(ddata->backlight);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void panel_dpi_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- backlight_disable(ddata->backlight);
-
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- regulator_disable(ddata->vcc_supply);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static const struct omap_dss_device_ops panel_dpi_ops = {
- .connect = panel_dpi_connect,
- .disconnect = panel_dpi_disconnect,
-
- .enable = panel_dpi_enable,
- .disable = panel_dpi_disable,
-
- .get_timings = panel_dpi_get_timings,
-};
-
-static int panel_dpi_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int r;
- struct display_timing timing;
- struct gpio_desc *gpio;
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- /*
- * Many different panels are supported by this driver and there are
- * probably very different needs for their reset pins in regards to
- * timing and order relative to the enable gpio. So for now it's just
- * ensured that the reset line isn't active.
- */
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
- if (IS_ERR(ddata->vcc_supply))
- return PTR_ERR(ddata->vcc_supply);
-
- ddata->backlight = devm_of_find_backlight(&pdev->dev);
-
- if (IS_ERR(ddata->backlight))
- return PTR_ERR(ddata->backlight);
-
- r = of_get_display_timing(node, "panel-timing", &timing);
- if (r) {
- dev_err(&pdev->dev, "failed to get video timing\n");
- return r;
- }
-
- videomode_from_timing(&timing, &ddata->vm);
-
- return 0;
-}
-
-static int panel_dpi_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (ddata == NULL)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- r = panel_dpi_probe_of(pdev);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->dev = &pdev->dev;
- dssdev->ops = &panel_dpi_ops;
- dssdev->type = OMAP_DISPLAY_TYPE_DPI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- drm_bus_flags_from_videomode(&ddata->vm, &dssdev->bus_flags);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit panel_dpi_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- omapdss_device_unregister(dssdev);
-
- panel_dpi_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id panel_dpi_of_match[] = {
- { .compatible = "omapdss,panel-dpi", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, panel_dpi_of_match);
-
-static struct platform_driver panel_dpi_driver = {
- .probe = panel_dpi_probe,
- .remove = __exit_p(panel_dpi_remove),
- .driver = {
- .name = "panel-dpi",
- .of_match_table = panel_dpi_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(panel_dpi_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
-MODULE_DESCRIPTION("Generic MIPI DPI Panel Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 29692a5217c5..741a5e324767 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -24,6 +24,8 @@
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>
+#include <drm/drm_connector.h>
+
#include <video/mipi_display.h>
#include <video/of_display_timing.h>
@@ -41,6 +43,7 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
+ struct omap_dss_device *src;
struct videomode vm;
@@ -141,7 +144,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u8 buf[1];
@@ -157,14 +160,14 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 buf[2] = { dcs_cmd, param };
return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
@@ -173,7 +176,7 @@ static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 cmd;
int r;
@@ -228,7 +231,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
static int dsicm_set_update_window(struct panel_drv_data *ddata,
u16 x, u16 y, u16 w, u16 h)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
u16 x1 = x;
u16 x2 = x + w - 1;
@@ -275,7 +278,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (ddata->ulps_enabled)
@@ -309,18 +312,13 @@ err:
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (!ddata->ulps_enabled)
return 0;
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err1;
- }
-
+ src->ops->enable(src);
src->ops->dsi.enable_hs(src, ddata->channel, true);
r = _dsicm_enable_te(ddata, true);
@@ -347,7 +345,7 @@ err2:
enable_irq(gpiod_to_irq(ddata->ext_te_gpio));
ddata->ulps_enabled = false;
}
-err1:
+
dsicm_queue_ulps_work(ddata);
return r;
@@ -366,7 +364,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata)
static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r = 0;
int level;
@@ -414,7 +412,7 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 errors = 0;
int r;
@@ -446,7 +444,7 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
@@ -478,7 +476,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -528,7 +526,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
unsigned long t;
int r;
@@ -603,7 +601,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata)
static int dsicm_power_on(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
@@ -649,11 +647,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
goto err_vddi;
}
- r = src->ops->enable(src);
- if (r) {
- dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
- goto err_vddi;
- }
+ src->ops->enable(src);
dsicm_hw_reset(ddata);
@@ -722,7 +716,7 @@ err_vpnl:
static void dsicm_power_off(struct panel_drv_data *ddata)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
src->ops->dsi.disable_video_output(src, ddata->channel);
@@ -776,6 +770,7 @@ static int dsicm_connect(struct omap_dss_device *src,
return r;
}
+ ddata->src = src;
return 0;
}
@@ -785,28 +780,17 @@ static void dsicm_disconnect(struct omap_dss_device *src,
struct panel_drv_data *ddata = to_panel_data(dst);
src->ops->dsi.release_vc(src, ddata->channel);
+ ddata->src = NULL;
}
-static int dsicm_enable(struct omap_dss_device *dssdev)
+static void dsicm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "enable\n");
-
mutex_lock(&ddata->lock);
- if (!omapdss_device_is_connected(dssdev)) {
- r = -ENODEV;
- goto err;
- }
-
- if (omapdss_device_is_enabled(dssdev)) {
- r = 0;
- goto err;
- }
-
src->ops->dsi.bus_lock(src);
r = dsicm_power_on(ddata);
@@ -816,27 +800,22 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
if (r)
goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
mutex_unlock(&ddata->lock);
dsicm_bl_power(ddata, true);
- return 0;
+ return;
err:
- dev_dbg(&ddata->pdev->dev, "enable failed\n");
+ dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r);
mutex_unlock(&ddata->lock);
- return r;
}
static void dsicm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
- dev_dbg(&ddata->pdev->dev, "disable\n");
-
dsicm_bl_power(ddata, false);
mutex_lock(&ddata->lock);
@@ -845,23 +824,19 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
src->ops->dsi.bus_lock(src);
- if (omapdss_device_is_enabled(dssdev)) {
- r = dsicm_wake_up(ddata);
- if (!r)
- dsicm_power_off(ddata);
- }
+ r = dsicm_wake_up(ddata);
+ if (!r)
+ dsicm_power_off(ddata);
src->ops->dsi.bus_unlock(src);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
mutex_unlock(&ddata->lock);
}
static void dsicm_framedone_cb(int err, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
src->ops->dsi.bus_unlock(src);
@@ -870,7 +845,7 @@ static void dsicm_framedone_cb(int err, void *data)
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int old;
int r;
@@ -896,7 +871,7 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
te_timeout_work.work);
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
@@ -908,7 +883,7 @@ static int dsicm_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
@@ -954,7 +929,7 @@ err:
static int dsicm_sync(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
dev_dbg(&ddata->pdev->dev, "sync\n");
@@ -970,7 +945,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev)
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
- struct omap_dss_device *src = ddata->dssdev.src;
+ struct omap_dss_device *src = ddata->src;
int r;
if (enable)
@@ -990,7 +965,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
mutex_lock(&ddata->lock);
@@ -1041,7 +1016,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
int r;
int first = 1;
int plen;
@@ -1123,7 +1098,7 @@ static void dsicm_ulps_work(struct work_struct *work)
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
ulps_work.work);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *src = dssdev->src;
+ struct omap_dss_device *src = ddata->src;
mutex_lock(&ddata->lock);
@@ -1140,29 +1115,32 @@ static void dsicm_ulps_work(struct work_struct *work)
mutex_unlock(&ddata->lock);
}
-static void dsicm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int dsicm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ connector->display_info.width_mm = ddata->width_mm;
+ connector->display_info.height_mm = ddata->height_mm;
+
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static int dsicm_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
int ret = 0;
- if (vm->hactive != ddata->vm.hactive)
+ if (mode->hdisplay != ddata->vm.hactive)
ret = -EINVAL;
- if (vm->vactive != ddata->vm.vactive)
+ if (mode->vdisplay != ddata->vm.vactive)
ret = -EINVAL;
if (ret) {
dev_warn(dssdev->dev, "wrong resolution: %d x %d",
- vm->hactive, vm->vactive);
+ mode->hdisplay, mode->vdisplay);
dev_warn(dssdev->dev, "panel resolution: %d x %d",
ddata->vm.hactive, ddata->vm.vactive);
}
@@ -1170,15 +1148,6 @@ static int dsicm_check_timings(struct omap_dss_device *dssdev,
return ret;
}
-static void dsicm_get_size(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *width = ddata->width_mm;
- *height = ddata->height_mm;
-}
-
static const struct omap_dss_device_ops dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
@@ -1186,7 +1155,7 @@ static const struct omap_dss_device_ops dsicm_ops = {
.enable = dsicm_enable,
.disable = dsicm_disable,
- .get_timings = dsicm_get_timings,
+ .get_modes = dsicm_get_modes,
.check_timings = dsicm_check_timings,
};
@@ -1194,8 +1163,6 @@ static const struct omap_dss_driver dsicm_dss_driver = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_size = dsicm_get_size,
-
.enable_te = dsicm_enable_te,
.get_te = dsicm_get_te,
@@ -1305,8 +1272,10 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->ops = &dsicm_ops;
dssdev->driver = &dsicm_dss_driver;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -1385,8 +1354,9 @@ static int __exit dsicm_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- dsicm_disable(dssdev);
- omapdss_device_disconnect(dssdev->src, dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ dsicm_disable(dssdev);
+ omapdss_device_disconnect(ddata->src, dssdev);
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index f6ef8ff964dd..99f2350d462c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -123,52 +123,28 @@ static void lb035q02_disconnect(struct omap_dss_device *src,
{
}
-static int lb035q02_enable(struct omap_dss_device *dssdev)
+static void lb035q02_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void lb035q02_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void lb035q02_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int lb035q02_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops lb035q02_ops = {
@@ -178,7 +154,7 @@ static const struct omap_dss_device_ops lb035q02_ops = {
.enable = lb035q02_enable,
.disable = lb035q02_disable,
- .get_timings = lb035q02_get_timings,
+ .get_modes = lb035q02_get_modes,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -221,16 +197,19 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DE is active LOW
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index f445de6369f7..c2409815a204 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -118,50 +118,26 @@ static void nec_8048_disconnect(struct omap_dss_device *src,
{
}
-static int nec_8048_enable(struct omap_dss_device *dssdev)
+static void nec_8048_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
gpiod_set_value_cansleep(ddata->res_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void nec_8048_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
gpiod_set_value_cansleep(ddata->res_gpio, 0);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void nec_8048_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int nec_8048_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops nec_8048_ops = {
@@ -171,7 +147,7 @@ static const struct omap_dss_device_ops nec_8048_ops = {
.enable = nec_8048_enable,
.disable = nec_8048_disable,
- .get_timings = nec_8048_get_timings,
+ .get_modes = nec_8048_get_modes,
};
static int nec_8048_probe(struct spi_device *spi)
@@ -216,10 +192,13 @@ static int nec_8048_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 64b1369cb274..9c545de430f6 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -62,29 +62,22 @@ static void sharp_ls_disconnect(struct omap_dss_device *src,
{
}
-static int sharp_ls_enable(struct omap_dss_device *dssdev)
+static void sharp_ls_pre_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
- if (r != 0)
- return r;
+ if (r)
+ dev_err(dssdev->dev, "%s: failed to enable regulator\n",
+ __func__);
}
+}
- r = src->ops->enable(src);
- if (r) {
- regulator_disable(ddata->vcc);
- return r;
- }
+static void sharp_ls_enable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
@@ -94,19 +87,11 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 1);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void sharp_ls_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
if (ddata->ini_gpio)
gpiod_set_value_cansleep(ddata->ini_gpio, 0);
@@ -115,33 +100,35 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->resb_gpio, 0);
/* wait at least 5 vsyncs after disabling the LCD */
-
msleep(100);
+}
- src->ops->disable(src);
+static void sharp_ls_post_disable(struct omap_dss_device *dssdev)
+{
+ struct panel_drv_data *ddata = to_panel_data(dssdev);
if (ddata->vcc)
regulator_disable(ddata->vcc);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int sharp_ls_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops sharp_ls_ops = {
.connect = sharp_ls_connect,
.disconnect = sharp_ls_disconnect,
+ .pre_enable = sharp_ls_pre_enable,
.enable = sharp_ls_enable,
.disable = sharp_ls_disable,
+ .post_disable = sharp_ls_post_disable,
- .get_timings = sharp_ls_get_timings,
+ .get_modes = sharp_ls_get_modes,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -220,15 +207,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev->dev = &pdev->dev;
dssdev->ops = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* DATA needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -243,7 +233,10 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
omapdss_device_unregister(dssdev);
- sharp_ls_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev)) {
+ sharp_ls_disable(dssdev);
+ sharp_ls_post_disable(dssdev);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index e04663856b31..2038def14ba1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -516,17 +516,9 @@ static void acx565akm_disconnect(struct omap_dss_device *src,
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- r = src->ops->enable(src);
- if (r) {
- pr_err("%s sdi enable failed\n", __func__);
- return r;
- }
-
/*FIXME tweak me */
msleep(50);
@@ -562,7 +554,6 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "%s\n", __func__);
@@ -585,56 +576,32 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
/* FIXME need to tweak this delay */
msleep(100);
-
- src->ops->disable(src);
}
-static int acx565akm_enable(struct omap_dss_device *dssdev)
+static void acx565akm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- int r;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
mutex_lock(&ddata->mutex);
- r = acx565akm_panel_power_on(dssdev);
+ acx565akm_panel_power_on(dssdev);
mutex_unlock(&ddata->mutex);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void acx565akm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
mutex_lock(&ddata->mutex);
acx565akm_panel_power_off(dssdev);
mutex_unlock(&ddata->mutex);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void acx565akm_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int acx565akm_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops acx565akm_ops = {
@@ -644,7 +611,7 @@ static const struct omap_dss_device_ops acx565akm_ops = {
.enable = acx565akm_enable,
.disable = acx565akm_disable,
- .get_timings = acx565akm_get_timings,
+ .get_modes = acx565akm_get_modes,
};
static int acx565akm_probe(struct spi_device *spi)
@@ -739,10 +706,13 @@ static int acx565akm_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
- | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -766,7 +736,8 @@ static int acx565akm_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- acx565akm_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ acx565akm_disable(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index 7ddc8c574a61..2ad161e33106 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -35,6 +35,8 @@ struct panel_drv_data {
struct videomode vm;
+ struct backlight_device *backlight;
+
struct spi_device *spi_dev;
};
@@ -169,24 +171,12 @@ static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
{
}
-static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
+static void td028ttec1_panel_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- int r;
-
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
+ int r = 0;
- dev_dbg(dssdev->dev, "td028ttec1_panel_enable() - state %d\n",
- dssdev->state);
+ dev_dbg(dssdev->dev, "%s: state %d\n", __func__, dssdev->state);
/* three times command zero */
r |= jbt_ret_write_0(ddata, 0x00);
@@ -197,8 +187,8 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
usleep_range(1000, 2000);
if (r) {
- dev_warn(dssdev->dev, "transfer error\n");
- goto transfer_err;
+ dev_warn(dssdev->dev, "%s: transfer error\n", __func__);
+ return;
}
/* deep standby out */
@@ -268,20 +258,17 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
r |= jbt_ret_write_0(ddata, JBT_REG_DISPLAY_ON);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-transfer_err:
+ if (r)
+ dev_err(dssdev->dev, "%s: write error\n", __func__);
- return r ? -EIO : 0;
+ backlight_enable(ddata->backlight);
}
static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
- if (!omapdss_device_is_enabled(dssdev))
- return;
+ backlight_disable(ddata->backlight);
dev_dbg(dssdev->dev, "td028ttec1_panel_disable()\n");
@@ -289,18 +276,14 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
jbt_reg_write_2(ddata, JBT_REG_OUTPUT_CONTROL, 0x8002);
jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
-
- src->ops->disable(src);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int td028ttec1_panel_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops td028ttec1_ops = {
@@ -310,7 +293,7 @@ static const struct omap_dss_device_ops td028ttec1_ops = {
.enable = td028ttec1_panel_enable,
.disable = td028ttec1_panel_disable,
- .get_timings = td028ttec1_panel_get_timings,
+ .get_modes = td028ttec1_panel_get_modes,
};
static int td028ttec1_panel_probe(struct spi_device *spi)
@@ -334,6 +317,10 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
if (ddata == NULL)
return -ENOMEM;
+ ddata->backlight = devm_of_find_backlight(&spi->dev);
+ if (IS_ERR(ddata->backlight))
+ return PTR_ERR(ddata->backlight);
+
dev_set_drvdata(&spi->dev, ddata);
ddata->spi_dev = spi;
@@ -344,15 +331,18 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index 8440fcb744d9..0b692fc7e5ea 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -320,22 +320,11 @@ static void tpo_td043_disconnect(struct omap_dss_device *src,
{
}
-static int tpo_td043_enable(struct omap_dss_device *dssdev)
+static void tpo_td043_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
int r;
- if (!omapdss_device_is_connected(dssdev))
- return -ENODEV;
-
- if (omapdss_device_is_enabled(dssdev))
- return 0;
-
- r = src->ops->enable(src);
- if (r)
- return r;
-
/*
* If we are resuming from system suspend, SPI clocks might not be
* enabled yet, so we'll program the LCD from SPI PM resume callback.
@@ -343,38 +332,27 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (!ddata->spi_suspended) {
r = tpo_td043_power_on(ddata);
if (r) {
- src->ops->disable(src);
- return r;
+ dev_err(&ddata->spi->dev, "%s: power on failed (%d)\n",
+ __func__, r);
+ return;
}
}
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
}
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *src = dssdev->src;
-
- if (!omapdss_device_is_enabled(dssdev))
- return;
-
- src->ops->disable(src);
if (!ddata->spi_suspended)
tpo_td043_power_off(ddata);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int tpo_td043_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- *vm = ddata->vm;
+ return omapdss_display_get_modes(connector, &ddata->vm);
}
static const struct omap_dss_device_ops tpo_td043_ops = {
@@ -384,7 +362,7 @@ static const struct omap_dss_device_ops tpo_td043_ops = {
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .get_timings = tpo_td043_get_timings,
+ .get_modes = tpo_td043_get_modes,
};
static int tpo_td043_probe(struct spi_device *spi)
@@ -442,15 +420,18 @@ static int tpo_td043_probe(struct spi_device *spi)
dssdev->dev = &spi->dev;
dssdev->ops = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
+ dssdev->display = true;
dssdev->owner = THIS_MODULE;
dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
/*
* Note: According to the panel documentation:
* SYNC needs to be driven on the FALLING edge
*/
- dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
- | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
omapdss_display_init(dssdev);
omapdss_device_register(dssdev);
@@ -467,7 +448,8 @@ static int tpo_td043_remove(struct spi_device *spi)
omapdss_device_unregister(dssdev);
- tpo_td043_disable(dssdev);
+ if (omapdss_device_is_enabled(dssdev))
+ tpo_td043_disable(dssdev);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 472f56e3de70..f8dad99013e8 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -19,6 +19,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
+#include <linux/platform_device.h>
#include "dss.h"
#include "omapdss.h"
@@ -112,13 +113,12 @@ void omapdss_device_put(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL(omapdss_device_put);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port)
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node)
{
struct omap_dss_device *dssdev;
list_for_each_entry(dssdev, &omapdss_devices_list, list) {
- if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+ if (dssdev->dev->of_node == node)
return omapdss_device_get(dssdev);
}
@@ -126,13 +126,10 @@ struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
}
/*
- * Search for the next device starting at @from. The type argument specfies
- * which device types to consider when searching. Searching for multiple types
- * is supported by and'ing their type flags. Release the reference to the @from
- * device, and acquire a reference to the returned device if found.
+ * Search for the next output device starting at @from. Release the reference to
+ * the @from device, and acquire a reference to the returned device if found.
*/
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
{
struct omap_dss_device *dssdev;
struct list_head *list;
@@ -160,15 +157,8 @@ struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
goto done;
}
- /*
- * Accept display entities if the display type is requested,
- * and output entities if the output type is requested.
- */
- if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) &&
- !dssdev->output_type)
- goto done;
- if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id &&
- dssdev->next)
+ if (dssdev->id &&
+ (dssdev->next || dssdev->bridge || dssdev->panel))
goto done;
}
@@ -183,7 +173,12 @@ done:
mutex_unlock(&omapdss_devices_lock);
return dssdev;
}
-EXPORT_SYMBOL(omapdss_device_get_next);
+EXPORT_SYMBOL(omapdss_device_next_output);
+
+static bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
+{
+ return dssdev->dss;
+}
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
@@ -191,7 +186,19 @@ int omapdss_device_connect(struct dss_device *dss,
{
int ret;
- dev_dbg(dst->dev, "connect\n");
+ dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ /*
+ * The destination is NULL when the source is connected to a
+ * bridge or panel instead of a DSS device. Stop here, we will
+ * attach the bridge or panel later when we will have a DRM
+ * encoder.
+ */
+ return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ }
if (omapdss_device_is_connected(dst))
return -EBUSY;
@@ -204,12 +211,6 @@ int omapdss_device_connect(struct dss_device *dss,
return ret;
}
- if (src) {
- WARN_ON(src->dst);
- dst->src = src;
- src->dst = dst;
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(omapdss_device_connect);
@@ -217,19 +218,20 @@ EXPORT_SYMBOL_GPL(omapdss_device_connect);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dev_dbg(dst->dev, "disconnect\n");
+ struct dss_device *dss = src ? src->dss : dst->dss;
- if (!dst->id && !omapdss_device_is_connected(dst)) {
- WARN_ON(dst->output_type);
+ dev_dbg(&dss->pdev->dev, "disconnect(%s, %s)\n",
+ src ? dev_name(src->dev) : "NULL",
+ dst ? dev_name(dst->dev) : "NULL");
+
+ if (!dst) {
+ WARN_ON(!src->bridge && !src->panel);
return;
}
- if (src) {
- if (WARN_ON(dst != src->dst))
- return;
-
- dst->src = NULL;
- src->dst = NULL;
+ if (!dst->id && !omapdss_device_is_connected(dst)) {
+ WARN_ON(!dst->display);
+ return;
}
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
@@ -239,6 +241,58 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_pre_enable(dssdev->next);
+
+ if (dssdev->ops->pre_enable)
+ dssdev->ops->pre_enable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
+
+void omapdss_device_enable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->enable)
+ dssdev->ops->enable(dssdev);
+
+ omapdss_device_enable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_enable);
+
+void omapdss_device_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ omapdss_device_disable(dssdev->next);
+
+ if (dssdev->ops->disable)
+ dssdev->ops->disable(dssdev);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_disable);
+
+void omapdss_device_post_disable(struct omap_dss_device *dssdev)
+{
+ if (!dssdev)
+ return;
+
+ if (dssdev->ops->post_disable)
+ dssdev->ops->post_disable(dssdev);
+
+ omapdss_device_post_disable(dssdev->next);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
+
/* -----------------------------------------------------------------------------
* Components Handling
*/
@@ -249,6 +303,7 @@ struct omapdss_comp_node {
struct list_head list;
struct device_node *node;
bool dss_core_component;
+ const char *compat;
};
static bool omapdss_list_contains(const struct device_node *node)
@@ -266,13 +321,20 @@ static bool omapdss_list_contains(const struct device_node *node)
static void omapdss_walk_device(struct device *dev, struct device_node *node,
bool dss_core)
{
+ struct omapdss_comp_node *comp;
struct device_node *n;
- struct omapdss_comp_node *comp = devm_kzalloc(dev, sizeof(*comp),
- GFP_KERNEL);
+ const char *compat;
+ int ret;
+ ret = of_property_read_string(node, "compatible", &compat);
+ if (ret < 0)
+ return;
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
if (comp) {
comp->node = node;
comp->dss_core_component = dss_core;
+ comp->compat = compat;
list_add(&comp->list, &omapdss_comp_list);
}
@@ -312,12 +374,8 @@ void omapdss_gather_components(struct device *dev)
omapdss_walk_device(dev, dev->of_node, true);
- for_each_available_child_of_node(dev->of_node, child) {
- if (!of_find_property(child, "compatible", NULL))
- continue;
-
+ for_each_available_child_of_node(dev->of_node, child)
omapdss_walk_device(dev, child, true);
- }
}
EXPORT_SYMBOL(omapdss_gather_components);
@@ -325,6 +383,8 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
+ if (!strstarts(comp->compat, "omapdss,"))
+ return true;
if (omapdss_device_is_registered(comp->node))
return true;
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 34b2a4ef63a4..e93f61a567a8 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -23,6 +23,9 @@
#include <linux/kernel.h>
#include <linux/of.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
#include "omapdss.h"
static int disp_num_counter;
@@ -39,8 +42,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
if (id < 0)
id = disp_num_counter++;
- dssdev->alias_id = id;
-
/* Use 'label' property for name, if it exists */
of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
@@ -58,3 +59,22 @@ struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
return omapdss_device_get(output);
}
EXPORT_SYMBOL_GPL(omapdss_display_get);
+
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ drm_display_mode_from_videomode(vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(omapdss_display_get_modes);
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index ca4f3c4c6318..cc78dfa07f04 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -47,8 +47,8 @@ struct dpi_data {
struct mutex lock;
- struct videomode vm;
struct dss_lcd_mgr_config mgr_config;
+ unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
@@ -347,16 +347,15 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- const struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
int r = 0;
if (dpi->pll)
r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- vm->pixelclock, &fck, &lck_div, &pck_div);
+ dpi->pixelclock, &fck, &lck_div, &pck_div);
else
- r = dpi_set_dispc_clk(dpi, vm->pixelclock, &fck,
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
&lck_div, &pck_div);
if (r)
return r;
@@ -378,7 +377,7 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static int dpi_display_enable(struct omap_dss_device *dssdev)
+static void dpi_display_enable(struct omap_dss_device *dssdev)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
struct omap_dss_device *out = &dpi->output;
@@ -386,12 +385,6 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_lock(&dpi->lock);
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err_no_out_mgr;
- }
-
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
@@ -426,7 +419,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dpi->lock);
- return 0;
+ return;
err_mgr_enable:
err_set_mode:
@@ -439,9 +432,7 @@ err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
err_reg_enable:
-err_no_out_mgr:
mutex_unlock(&dpi->lock);
- return r;
}
static void dpi_display_disable(struct omap_dss_device *dssdev)
@@ -467,7 +458,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -475,13 +466,13 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_lock(&dpi->lock);
- dpi->vm = *vm;
+ dpi->pixelclock = mode->clock * 1000;
mutex_unlock(&dpi->lock);
}
static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
int lck_div, pck_div;
@@ -490,20 +481,20 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
struct dpi_clk_calc_ctx ctx;
bool ok;
- if (vm->hactive % 8 != 0)
+ if (mode->hdisplay % 8 != 0)
return -EINVAL;
- if (vm->pixelclock == 0)
+ if (mode->clock == 0)
return -EINVAL;
if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
} else {
- ok = dpi_dss_clk_calc(dpi, vm->pixelclock, &ctx);
+ ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
if (!ok)
return -EINVAL;
@@ -515,7 +506,7 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
pck = fck / lck_div / pck_div;
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
return 0;
}
@@ -596,23 +587,15 @@ static int dpi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
- int r;
dpi_init_pll(dpi);
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dpi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -651,25 +634,15 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
- out->output_type = OMAP_DISPLAY_TYPE_DPI;
+ out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
out->of_ports = BIT(port_num);
out->ops = &dpi_ops;
out->owner = THIS_MODULE;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -681,9 +654,8 @@ static void dpi_uninit_output_port(struct device_node *port)
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static const struct soc_device_attribute dpi_soc_devices[] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 64fb788b6647..5202862d89b5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -1342,12 +1342,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
*/
dsi_enable_scp_clk(dsi);
- if (!dsi->vdds_dsi_enabled) {
- r = regulator_enable(dsi->vdds_dsi_reg);
- if (r)
- goto err0;
- dsi->vdds_dsi_enabled = true;
- }
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err0;
/* XXX PLL does not come out of reset without this... */
dispc_pck_free_enable(dsi->dss->dispc, 1);
@@ -1372,36 +1369,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
return 0;
err1:
- if (dsi->vdds_dsi_enabled) {
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+ regulator_disable(dsi->vdds_dsi_reg);
err0:
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
return r;
}
-static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
+static void dsi_pll_disable(struct dss_pll *pll)
{
+ struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
+
dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
- if (disconnect_lanes) {
- WARN_ON(!dsi->vdds_dsi_enabled);
- regulator_disable(dsi->vdds_dsi_reg);
- dsi->vdds_dsi_enabled = false;
- }
+
+ regulator_disable(dsi->vdds_dsi_reg);
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
- DSSDBG("PLL uninit done\n");
-}
-
-static void dsi_pll_disable(struct dss_pll *pll)
-{
- struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
-
- dsi_pll_uninit(dsi, true);
+ DSSDBG("PLL disable done\n");
}
static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
@@ -3753,19 +3739,13 @@ static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct omap_dss_device *out = &dsi->output;
u8 data_type;
u16 word_count;
int r;
- if (!out->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = dsi_display_init_dispc(dsi);
if (r)
- goto err_init_dispc;
+ return r;
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
switch (dsi->pix_fmt) {
@@ -3814,7 +3794,6 @@ err_mgr_enable:
}
err_pix_fmt:
dsi_display_uninit_dispc(dsi);
-err_init_dispc:
return r;
}
@@ -4096,11 +4075,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
r = dss_pll_enable(&dsi->pll);
if (r)
- goto err0;
+ return r;
r = dsi_configure_dsi_clocks(dsi);
if (r)
- goto err1;
+ goto err0;
dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
dsi->module_id == 0 ?
@@ -4108,6 +4087,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
DSSDBG("PLL OK\n");
+ if (!dsi->vdds_dsi_enabled) {
+ r = regulator_enable(dsi->vdds_dsi_reg);
+ if (r)
+ goto err1;
+
+ dsi->vdds_dsi_enabled = true;
+ }
+
r = dsi_cio_init(dsi);
if (r)
goto err2;
@@ -4136,10 +4123,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
err3:
dsi_cio_uninit(dsi);
err2:
- dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
err1:
- dss_pll_disable(&dsi->pll);
+ dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
err0:
+ dss_pll_disable(&dsi->pll);
+
return r;
}
@@ -4158,13 +4148,18 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
dsi_cio_uninit(dsi);
- dsi_pll_uninit(dsi, disconnect_lanes);
+ dss_pll_disable(&dsi->pll);
+
+ if (disconnect_lanes) {
+ regulator_disable(dsi->vdds_dsi_reg);
+ dsi->vdds_dsi_enabled = false;
+ }
}
-static int dsi_display_enable(struct omap_dss_device *dssdev)
+static void dsi_display_enable(struct omap_dss_device *dssdev)
{
struct dsi_data *dsi = to_dsi_data(dssdev);
- int r = 0;
+ int r;
DSSDBG("dsi_display_enable\n");
@@ -4184,14 +4179,13 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
mutex_unlock(&dsi->lock);
- return 0;
+ return;
err_init_dsi:
dsi_runtime_put(dsi);
err_get_dsi:
mutex_unlock(&dsi->lock);
DSSDBG("dsi_display_enable FAILED\n");
- return r;
}
static void dsi_display_disable(struct omap_dss_device *dssdev,
@@ -4888,21 +4882,12 @@ static int dsi_get_clocks(struct dsi_data *dsi)
static int dsi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void dsi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -5138,29 +5123,19 @@ static int dsi_init_output(struct dsi_data *dsi)
out->id = dsi->module_id == 0 ?
OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
- out->output_type = OMAP_DISPLAY_TYPE_DSI;
+ out->type = OMAP_DISPLAY_TYPE_DSI;
out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
- | DRM_BUS_FLAG_SYNC_NEGEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -5171,9 +5146,8 @@ static void dsi_uninit_output(struct dsi_data *dsi)
{
struct omap_dss_device *out = &dsi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int dsi_probe_of(struct dsi_data *dsi)
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 0422597ac6b0..b2094055c5fc 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -12,71 +12,25 @@
* more details.
*/
-#include <linux/device.h>
#include <linux/err.h>
-#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/seq_file.h>
#include "omapdss.h"
-static struct device_node *
-dss_of_port_get_parent_device(struct device_node *port)
-{
- struct device_node *np;
- int i;
-
- if (!port)
- return NULL;
-
- np = of_get_parent(port);
-
- for (i = 0; i < 2 && np; ++i) {
- struct property *prop;
-
- prop = of_find_property(np, "compatible", NULL);
-
- if (prop)
- return np;
-
- np = of_get_next_parent(np);
- }
-
- return NULL;
-}
-
struct omap_dss_device *
omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
{
- struct device_node *src_node;
- struct device_node *src_port;
- struct device_node *ep;
- struct omap_dss_device *src;
- u32 port_number = 0;
+ struct device_node *remote_node;
+ struct omap_dss_device *dssdev;
- /* Get the endpoint... */
- ep = of_graph_get_endpoint_by_regs(node, port, 0);
- if (!ep)
+ remote_node = of_graph_get_remote_node(node, port, 0);
+ if (!remote_node)
return NULL;
- /* ... and its remote port... */
- src_port = of_graph_get_remote_port(ep);
- of_node_put(ep);
- if (!src_port)
- return NULL;
-
- /* ... and the remote port's number and parent... */
- of_property_read_u32(src_port, "reg", &port_number);
- src_node = dss_of_port_get_parent_device(src_port);
- of_node_put(src_port);
- if (!src_node)
- return ERR_PTR(-EINVAL);
-
- /* ... and finally the connected device. */
- src = omapdss_find_device_by_port(src_node, port_number);
- of_node_put(src_node);
+ dssdev = omapdss_find_device_by_node(remote_node);
+ of_node_put(remote_node);
- return src ? src : ERR_PTR(-EPROBE_DEFER);
+ return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 7553c7fc1c45..55e68863ef15 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1560,7 +1560,7 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
- for_each_dss_display(dssdev) {
+ for_each_dss_output(dssdev) {
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
dssdev->ops->disable(dssdev);
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index aabdda394c9c..6339e2756b34 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -249,15 +249,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -312,26 +312,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
hdmi_wp_audio_enable(&hd->wp, false);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -351,12 +345,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -417,21 +407,12 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -698,7 +679,7 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -706,19 +687,9 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -729,9 +700,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9e8556f67a29..2955bbad13bb 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -248,15 +248,15 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
}
static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
mutex_lock(&hdmi->lock);
- hdmi->cfg.vm = *vm;
+ drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
- dispc_set_tv_pclk(hdmi->dss->dispc, vm->pixelclock);
+ dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
mutex_unlock(&hdmi->lock);
}
@@ -320,26 +320,20 @@ static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
}
-static int hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
unsigned long flags;
- int r = 0;
+ int r;
DSSDBG("ENTER hdmi_display_enable\n");
mutex_lock(&hdmi->lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
r = hdmi_power_on_full(hdmi);
if (r) {
DSSERR("failed to power on device\n");
- goto err0;
+ goto done;
}
if (hdmi->audio_configured) {
@@ -359,12 +353,8 @@ static int hdmi_display_enable(struct omap_dss_device *dssdev)
hdmi->display_enabled = true;
spin_unlock_irqrestore(&hdmi->audio_playing_lock, flags);
+done:
mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
}
static void hdmi_display_disable(struct omap_dss_device *dssdev)
@@ -422,21 +412,12 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
static int hdmi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void hdmi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -682,7 +663,7 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &hdmi_ops;
@@ -690,19 +671,9 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
out->of_ports = BIT(0);
out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -713,9 +684,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
{
struct omap_dss_device *out = &hdmi->output;
- if (out->next)
- omapdss_device_put(out->next);
omapdss_device_unregister(out);
+ omapdss_device_cleanup_output(out);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 3bfb95d230e0..2b41c75ce988 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -184,6 +184,22 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
{},
};
+static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
+ { .compatible = "composite-video-connector" },
+ { .compatible = "hdmi-connector" },
+ { .compatible = "lgphilips,lb035q02" },
+ { .compatible = "nec,nl8048hl11" },
+ { .compatible = "panel-dsi-cm" },
+ { .compatible = "sharp,ls037v7dw01" },
+ { .compatible = "sony,acx565akm" },
+ { .compatible = "svideo-connector" },
+ { .compatible = "ti,opa362" },
+ { .compatible = "ti,tpd12s015" },
+ { .compatible = "toppoly,td028ttec1" },
+ { .compatible = "tpo,td028ttec1" },
+ { .compatible = "tpo,td043mtea1" },
+};
+
static int __init omapdss_boot_init(void)
{
struct device_node *dss, *child;
@@ -210,7 +226,7 @@ static int __init omapdss_boot_init(void)
n = list_first_entry(&dss_conv_list, struct dss_conv_node,
list);
- if (!n->root)
+ if (of_match_node(omapdss_of_fixups_whitelist, n->node))
omapdss_omapify_node(n->node);
list_del(&n->list);
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 33e15cb77efa..0c734d1f89e1 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -19,7 +19,6 @@
#define __OMAP_DRM_DSS_H
#include <linux/list.h>
-#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <video/videomode.h>
@@ -68,6 +67,7 @@ struct dss_lcd_mgr_config;
struct snd_aes_iec958;
struct snd_cea_861_aud_if;
struct hdmi_avi_infoframe;
+struct drm_connector;
enum omap_display_type {
OMAP_DISPLAY_TYPE_NONE = 0,
@@ -360,15 +360,15 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- int (*enable)(struct omap_dss_device *dssdev);
+ void (*pre_enable)(struct omap_dss_device *dssdev);
+ void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
+ void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
+ struct drm_display_mode *mode);
void (*set_timings)(struct omap_dss_device *dssdev,
- const struct videomode *vm);
+ const struct drm_display_mode *mode);
bool (*detect)(struct omap_dss_device *dssdev);
@@ -380,6 +380,9 @@ struct omap_dss_device_ops {
int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+ int (*get_modes)(struct omap_dss_device *dssdev,
+ struct drm_connector *connector);
+
union {
const struct omapdss_hdmi_ops hdmi;
const struct omapdss_dsi_ops dsi;
@@ -390,42 +393,40 @@ struct omap_dss_device_ops {
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
* @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
* @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports readind EDID
+ * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
+ * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
OMAP_DSS_DEVICE_OP_HPD = BIT(1),
OMAP_DSS_DEVICE_OP_EDID = BIT(2),
-};
-
-enum omap_dss_device_type {
- OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0),
- OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1),
+ OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
struct omap_dss_device {
- struct kobject kobj;
struct device *dev;
struct module *owner;
struct dss_device *dss;
- struct omap_dss_device *src;
- struct omap_dss_device *dst;
struct omap_dss_device *next;
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
struct list_head list;
- unsigned int alias_id;
-
+ /*
+ * DSS type that this device generates (for DSS internal devices) or
+ * requires (for external encoders, connectors and panels). Must be a
+ * non-zero (different from OMAP_DISPLAY_TYPE_NONE) value.
+ */
enum omap_display_type type;
+
/*
- * DSS output type that this device generates (for DSS internal devices)
- * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE
- * for display devices (connectors and panels) and to non-zero value for
- * all other devices.
+ * True if the device is a display (panel or connector) at the end of
+ * the pipeline, false otherwise.
*/
- enum omap_display_type output_type;
+ bool display;
const char *name;
@@ -434,9 +435,6 @@ struct omap_dss_device {
unsigned long ops_flags;
u32 bus_flags;
- /* helper variable for driver suspend/resume */
- bool activate_after_resume;
-
enum omap_display_caps caps;
enum omap_dss_display_state state;
@@ -445,7 +443,6 @@ struct omap_dss_device {
/* DISPC channel for this output */
enum omap_channel dispc_channel;
- bool dispc_channel_connected;
/* output instance */
enum omap_dss_output_id id;
@@ -465,9 +462,6 @@ struct omap_dss_driver {
int (*memory_read)(struct omap_dss_device *dssdev,
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
-
- void (*get_size)(struct omap_dss_device *dssdev,
- unsigned int *width, unsigned int *height);
};
struct dss_device *omapdss_get_dss(void);
@@ -477,32 +471,35 @@ static inline bool omapdss_is_initialized(void)
return !!omapdss_get_dss();
}
-#define for_each_dss_display(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL)
void omapdss_display_init(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
+int omapdss_display_get_modes(struct drm_connector *connector,
+ const struct videomode *vm);
void omapdss_device_register(struct omap_dss_device *dssdev);
void omapdss_device_unregister(struct omap_dss_device *dssdev);
struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
void omapdss_device_put(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
- unsigned int port);
-struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
- enum omap_dss_device_type type);
+struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node);
int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *src,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
+void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
+void omapdss_device_enable(struct omap_dss_device *dssdev);
+void omapdss_device_disable(struct omap_dss_device *dssdev);
+void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
- while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL)
-int omapdss_output_validate(struct omap_dss_device *out);
+ while ((d = omapdss_device_next_output(d)) != NULL)
+struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
+int omapdss_device_init_output(struct omap_dss_device *out);
+void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -511,11 +508,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
-static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
-{
- return dssdev->src;
-}
-
static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
{
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 18505bc70f7e..10a9ee5cdc61 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -20,20 +20,48 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/of_graph.h>
+
+#include <drm/drm_panel.h>
#include "dss.h"
#include "omapdss.h"
-int omapdss_output_validate(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out)
{
- if (out->next && out->output_type != out->next->type) {
+ struct device_node *remote_node;
+
+ remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0);
+ if (!remote_node) {
+ dev_dbg(out->dev, "failed to find video sink\n");
+ return 0;
+ }
+
+ out->next = omapdss_find_device_by_node(remote_node);
+ out->bridge = of_drm_find_bridge(remote_node);
+ out->panel = of_drm_find_panel(remote_node);
+ if (IS_ERR(out->panel))
+ out->panel = NULL;
+
+ of_node_put(remote_node);
+
+ if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
+ omapdss_device_put(out->next);
+ out->next = NULL;
return -EINVAL;
}
- return 0;
+ return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(omapdss_device_init_output);
+
+void omapdss_device_cleanup_output(struct omap_dss_device *out)
+{
+ if (out->next)
+ omapdss_device_put(out->next);
}
-EXPORT_SYMBOL(omapdss_output_validate);
+EXPORT_SYMBOL(omapdss_device_cleanup_output);
int dss_install_mgr_ops(struct dss_device *dss,
const struct dss_mgr_ops *mgr_ops,
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index b2fe2387037a..7aae52984fed 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -37,7 +37,7 @@ struct sdi_device {
struct regulator *vdds_sdi_reg;
struct dss_lcd_mgr_config mgr_config;
- struct videomode vm;
+ unsigned long pixelclock;
int datapairs;
struct omap_dss_device output;
@@ -129,27 +129,22 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static int sdi_display_enable(struct omap_dss_device *dssdev)
+static void sdi_display_enable(struct omap_dss_device *dssdev)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
- if (!sdi->output.dispc_channel_connected) {
- DSSERR("failed to enable display: no output/manager\n");
- return -ENODEV;
- }
-
r = regulator_enable(sdi->vdds_sdi_reg);
if (r)
- goto err_reg_enable;
+ return;
r = dispc_runtime_get(sdi->dss->dispc);
if (r)
goto err_get_dispc;
- r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, sdi->pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
@@ -185,7 +180,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- return 0;
+ return;
err_mgr_enable:
dss_sdi_disable(sdi->dss);
@@ -195,8 +190,6 @@ err_calc_clock_div:
dispc_runtime_put(sdi->dss->dispc);
err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
-err_reg_enable:
- return r;
}
static void sdi_display_disable(struct omap_dss_device *dssdev)
@@ -213,36 +206,37 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- sdi->vm = *vm;
+ sdi->pixelclock = mode->clock * 1000;
}
static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
struct dispc_clock_info dispc_cinfo;
+ unsigned long pixelclock = mode->clock * 1000;
unsigned long fck;
unsigned long pck;
int r;
- if (vm->pixelclock == 0)
+ if (pixelclock == 0)
return -EINVAL;
- r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
if (r)
return r;
pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (pck != vm->pixelclock) {
+ if (pck != pixelclock) {
DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- vm->pixelclock, pck);
+ pixelclock, pck);
- vm->pixelclock = pck;
+ mode->clock = pck / 1000;
}
return 0;
@@ -251,21 +245,12 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
static int sdi_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void sdi_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -287,29 +272,19 @@ static int sdi_init_output(struct sdi_device *sdi)
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
- out->output_type = OMAP_DISPLAY_TYPE_SDI;
+ out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
out->of_ports = BIT(1);
out->ops = &sdi_ops;
out->owner = THIS_MODULE;
- out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */
- | DRM_BUS_FLAG_SYNC_POSEDGE;
-
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 1);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
+ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -318,9 +293,8 @@ static int sdi_init_output(struct sdi_device *sdi)
static void sdi_uninit_output(struct sdi_device *sdi)
{
- if (sdi->output.next)
- omapdss_device_put(sdi->output.next);
omapdss_device_unregister(&sdi->output);
+ omapdss_device_cleanup_output(&sdi->output);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index b5f52727f8b1..da43b865d973 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -267,63 +267,40 @@ enum venc_videomode {
VENC_MODE_NTSC,
};
-static const struct videomode omap_dss_pal_vm = {
- .hactive = 720,
- .vactive = 574,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 12,
- .hback_porch = 68,
- .vsync_len = 5,
- .vfront_porch = 5,
- .vback_porch = 41,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_pal_mode = {
+ .hdisplay = 720,
+ .hsync_start = 732,
+ .hsync_end = 796,
+ .htotal = 864,
+ .vdisplay = 574,
+ .vsync_start = 579,
+ .vsync_end = 584,
+ .vtotal = 625,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static const struct videomode omap_dss_ntsc_vm = {
- .hactive = 720,
- .vactive = 482,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 16,
- .hback_porch = 58,
- .vsync_len = 6,
- .vfront_porch = 6,
- .vback_porch = 31,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE,
+static const struct drm_display_mode omap_dss_ntsc_mode = {
+ .hdisplay = 720,
+ .hsync_start = 736,
+ .hsync_end = 800,
+ .htotal = 858,
+ .vdisplay = 482,
+ .vsync_start = 488,
+ .vsync_end = 494,
+ .vtotal = 525,
+ .clock = 13500,
+
+ .flags = DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_NHSYNC |
+ DRM_MODE_FLAG_NVSYNC,
};
-static enum venc_videomode venc_get_videomode(const struct videomode *vm)
-{
- if (!(vm->flags & DISPLAY_FLAGS_INTERLACED))
- return VENC_MODE_UNKNOWN;
-
- if (vm->pixelclock == omap_dss_pal_vm.pixelclock &&
- vm->hactive == omap_dss_pal_vm.hactive &&
- vm->vactive == omap_dss_pal_vm.vactive)
- return VENC_MODE_PAL;
-
- if (vm->pixelclock == omap_dss_ntsc_vm.pixelclock &&
- vm->hactive == omap_dss_ntsc_vm.hactive &&
- vm->vactive == omap_dss_ntsc_vm.vactive)
- return VENC_MODE_NTSC;
-
- return VENC_MODE_UNKNOWN;
-}
-
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
struct mutex venc_lock;
- u32 wss_data;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -331,7 +308,7 @@ struct venc_device {
struct clk *tv_dac_clk;
- struct videomode vm;
+ const struct venc_config *config;
enum omap_dss_venc_type type;
bool invert_polarity;
bool requires_tv_dac_clk;
@@ -367,8 +344,7 @@ static void venc_write_config(struct venc_device *venc,
venc_write_reg(venc, VENC_BLACK_LEVEL, config->black_level);
venc_write_reg(venc, VENC_BLANK_LEVEL, config->blank_level);
venc_write_reg(venc, VENC_M_CONTROL, config->m_control);
- venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc->wss_data);
+ venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data);
venc_write_reg(venc, VENC_S_CARR, config->s_carr);
venc_write_reg(venc, VENC_L21__WC_CTL, config->l21__wc_ctl);
venc_write_reg(venc, VENC_SAVID__EAVID, config->savid__eavid);
@@ -452,18 +428,6 @@ static void venc_runtime_put(struct venc_device *venc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(const struct videomode *vm)
-{
- switch (venc_get_videomode(vm)) {
- default:
- WARN_ON_ONCE(1);
- case VENC_MODE_PAL:
- return &venc_config_pal_trm;
- case VENC_MODE_NTSC:
- return &venc_config_ntsc_trm;
- }
-}
-
static int venc_power_on(struct venc_device *venc)
{
u32 l;
@@ -474,7 +438,7 @@ static int venc_power_on(struct venc_device *venc)
goto err0;
venc_reset(venc);
- venc_write_config(venc, venc_timings_to_config(&venc->vm));
+ venc_write_config(venc, venc->config);
dss_set_venc_output(venc->dss, venc->type);
dss_set_dac_pwrdn_bgz(venc->dss, 1);
@@ -524,33 +488,17 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static int venc_display_enable(struct omap_dss_device *dssdev)
+static void venc_display_enable(struct omap_dss_device *dssdev)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
- int r;
DSSDBG("venc_display_enable\n");
mutex_lock(&venc->venc_lock);
- if (!dssdev->dispc_channel_connected) {
- DSSERR("Failed to enable display: no output/manager\n");
- r = -ENODEV;
- goto err0;
- }
-
- r = venc_power_on(venc);
- if (r)
- goto err0;
-
- venc->wss_data = 0;
+ venc_power_on(venc);
mutex_unlock(&venc->venc_lock);
-
- return 0;
-err0:
- mutex_unlock(&venc->venc_lock);
- return r;
}
static void venc_display_disable(struct omap_dss_device *dssdev)
@@ -566,30 +514,70 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc->venc_lock);
}
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static int venc_get_modes(struct omap_dss_device *dssdev,
+ struct drm_connector *connector)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- mutex_lock(&venc->venc_lock);
- *vm = venc->vm;
- mutex_unlock(&venc->venc_lock);
+ return ARRAY_SIZE(modes);
+}
+
+static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
+{
+ if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
+ return VENC_MODE_UNKNOWN;
+
+ if (mode->clock == omap_dss_pal_mode.clock &&
+ mode->hdisplay == omap_dss_pal_mode.hdisplay &&
+ mode->vdisplay == omap_dss_pal_mode.vdisplay)
+ return VENC_MODE_PAL;
+
+ if (mode->clock == omap_dss_ntsc_mode.clock &&
+ mode->hdisplay == omap_dss_ntsc_mode.hdisplay &&
+ mode->vdisplay == omap_dss_ntsc_mode.vdisplay)
+ return VENC_MODE_NTSC;
+
+ return VENC_MODE_UNKNOWN;
}
static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct videomode *vm)
+ const struct drm_display_mode *mode)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
+ enum venc_videomode venc_mode = venc_get_videomode(mode);
DSSDBG("venc_set_timings\n");
mutex_lock(&venc->venc_lock);
- /* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc->vm, vm, sizeof(*vm)))
- venc->wss_data = 0;
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
- venc->vm = *vm;
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
@@ -597,22 +585,26 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
}
static int venc_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ struct drm_display_mode *mode)
{
DSSDBG("venc_check_timings\n");
- switch (venc_get_videomode(vm)) {
+ switch (venc_get_videomode(mode)) {
case VENC_MODE_PAL:
- *vm = omap_dss_pal_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_pal_mode);
+ break;
case VENC_MODE_NTSC:
- *vm = omap_dss_ntsc_vm;
- return 0;
+ drm_mode_copy(mode, &omap_dss_ntsc_mode);
+ break;
default:
return -EINVAL;
}
+
+ drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(mode);
+ return 0;
}
static int venc_dump_regs(struct seq_file *s, void *p)
@@ -695,21 +687,12 @@ static int venc_get_clocks(struct venc_device *venc)
static int venc_connect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- dst->dispc_channel_connected = true;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
static void venc_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst)
{
- dst->dispc_channel_connected = false;
-
omapdss_device_disconnect(dst, dst->next);
}
@@ -721,8 +704,9 @@ static const struct omap_dss_device_ops venc_ops = {
.disable = venc_display_disable,
.check_timings = venc_check_timings,
- .get_timings = venc_get_timings,
.set_timings = venc_set_timings,
+
+ .get_modes = venc_get_modes,
};
/* -----------------------------------------------------------------------------
@@ -776,26 +760,17 @@ static int venc_init_output(struct venc_device *venc)
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
- out->output_type = OMAP_DISPLAY_TYPE_VENC;
+ out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
out->ops = &venc_ops;
out->owner = THIS_MODULE;
out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
- if (IS_ERR(out->next)) {
- if (PTR_ERR(out->next) != -EPROBE_DEFER)
- dev_err(out->dev, "failed to find video sink\n");
- return PTR_ERR(out->next);
- }
-
- r = omapdss_output_validate(out);
- if (r) {
- omapdss_device_put(out->next);
- out->next = NULL;
+ r = omapdss_device_init_output(out);
+ if (r < 0)
return r;
- }
omapdss_device_register(out);
@@ -804,9 +779,8 @@ static int venc_init_output(struct venc_device *venc)
static void venc_uninit_output(struct venc_device *venc)
{
- if (venc->output.next)
- omapdss_device_put(venc->output.next);
omapdss_device_unregister(&venc->output);
+ omapdss_device_cleanup_output(&venc->output);
}
static int venc_probe_of(struct venc_device *venc)
@@ -878,8 +852,7 @@ static int venc_probe(struct platform_device *pdev)
mutex_init(&venc->venc_lock);
- venc->wss_data = 0;
- venc->vm = omap_dss_pal_vm;
+ venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 9da94d10782a..5967283934e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -17,6 +17,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -30,24 +31,27 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
struct omap_dss_device *hpd;
bool hdmi_mode;
};
static void omap_connector_hpd_notify(struct drm_connector *connector,
- struct omap_dss_device *src,
enum drm_connector_status status)
{
- if (status == connector_status_disconnected) {
- /*
- * If the source is an HDMI encoder, notify it of disconnection.
- * This is required to let the HDMI encoder reset any internal
- * state related to connection status, such as the CEC address.
- */
- if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
- src->ops->hdmi.lost_hotplug)
- src->ops->hdmi.lost_hotplug(src);
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+
+ if (status != connector_status_disconnected)
+ return;
+
+ /*
+ * Notify all devices in the pipeline of disconnection. This is required
+ * to let the HDMI encoders reset their internal state related to
+ * connection status, such as the CEC address.
+ */
+ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
+ if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
+ dssdev->ops->hdmi.lost_hotplug(dssdev);
}
}
@@ -67,7 +71,7 @@ static void omap_connector_hpd_cb(void *cb_data,
if (old_status == status)
return;
- omap_connector_hpd_notify(connector, omap_connector->hpd, status);
+ omap_connector_hpd_notify(connector, status);
drm_kms_helper_hotplug_event(dev);
}
@@ -103,20 +107,20 @@ omap_connector_find_device(struct drm_connector *connector,
enum omap_dss_device_ops_flag op)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops_flags & op)
- return dssdev;
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & op)
+ dssdev = d;
}
- return NULL;
+ return dssdev;
}
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
enum drm_connector_status status;
@@ -128,13 +132,12 @@ static enum drm_connector_status omap_connector_detect(
? connector_status_connected
: connector_status_disconnected;
- omap_connector_hpd_notify(connector, dssdev->src, status);
+ omap_connector_hpd_notify(connector, status);
} else {
- switch (omap_connector->display->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_DSI:
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
status = connector_status_connected;
break;
default:
@@ -143,7 +146,7 @@ static enum drm_connector_status omap_connector_detect(
}
}
- VERB("%s: %d (force=%d)", omap_connector->display->name, status, force);
+ VERB("%s: %d (force=%d)", connector->name, status, force);
return status;
}
@@ -152,7 +155,7 @@ static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
if (omap_connector->hpd) {
struct omap_dss_device *hpd = omap_connector->hpd;
@@ -166,7 +169,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
omapdss_device_put(omap_connector->output);
- omapdss_device_put(omap_connector->display);
kfree(omap_connector);
}
@@ -212,10 +214,8 @@ static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
struct omap_dss_device *dssdev;
- struct drm_display_mode *mode;
- struct videomode vm = {0};
- DBG("%s", omap_connector->display->name);
+ DBG("%s", connector->name);
/*
* If display exposes EDID, then we parse that in the normal way to
@@ -227,89 +227,71 @@ static int omap_connector_get_modes(struct drm_connector *connector)
return omap_connector_get_modes_edid(connector, dssdev);
/*
- * Otherwise we have either a fixed resolution panel or an output that
- * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus
- * unconnected, or analog TV). Start by querying the size.
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
*/
- dssdev = omap_connector->display;
- if (dssdev->driver && dssdev->driver->get_size)
- dssdev->driver->get_size(dssdev,
- &connector->display_info.width_mm,
- &connector->display_info.height_mm);
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_MODES);
+ if (dssdev)
+ return dssdev->ops->get_modes(dssdev, connector);
/*
- * Iterate over the pipeline to find the first device that can provide
- * timing information. If we can't find any, we just let the KMS core
- * add the default modes.
+ * Otherwise if the display pipeline uses a drm_panel, we delegate the
+ * operation to the panel API.
*/
- for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
- if (dssdev->ops->get_timings)
- break;
- }
- if (!dssdev)
- return 0;
+ if (omap_connector->output->panel)
+ return drm_panel_get_modes(omap_connector->output->panel);
- /* Add a single mode corresponding to the fixed panel timings. */
- mode = drm_mode_create(connector->dev);
- if (!mode)
- return 0;
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
- dssdev->ops->get_timings(dssdev, &vm);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ int ret;
- drm_display_mode_from_videomode(&vm, mode);
+ drm_mode_copy(adjusted_mode, mode);
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
+ for (; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
+
+ ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
+ if (ret)
+ return MODE_BAD;
+ }
- return 1;
+ return MODE_OK;
}
-static int omap_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- enum omap_channel channel = omap_connector->output->dispc_channel;
- struct omap_drm_private *priv = connector->dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = {0};
- struct drm_device *dev = connector->dev;
- struct drm_display_mode *new_mode;
- int r, ret = MODE_BAD;
-
- drm_display_mode_to_videomode(mode, &vm);
- mode->vrefresh = drm_mode_vrefresh(mode);
+ struct drm_display_mode new_mode = { { 0 } };
+ enum drm_mode_status status;
- r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (r)
+ status = omap_connector_mode_fixup(omap_connector->output, mode,
+ &new_mode);
+ if (status != MODE_OK)
goto done;
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- r = dssdev->ops->check_timings(dssdev, &vm);
- if (r)
- goto done;
- }
-
- /* check if vrefresh is still valid */
- new_mode = drm_mode_duplicate(dev, mode);
- if (!new_mode)
- return MODE_BAD;
-
- new_mode->clock = vm.pixelclock / 1000;
- new_mode->vrefresh = 0;
- if (mode->vrefresh == drm_mode_vrefresh(new_mode))
- ret = MODE_OK;
- drm_mode_destroy(dev, new_mode);
+ /* Check if vrefresh is still valid. */
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode))
+ status = MODE_NOCLOCK;
done:
DBG("connector: mode %s: " DRM_MODE_FMT,
- (ret == MODE_OK) ? "valid" : "invalid",
+ (status == MODE_OK) ? "valid" : "invalid",
DRM_MODE_ARG(mode));
- return ret;
+ return status;
}
static const struct drm_connector_funcs omap_connector_funcs = {
@@ -326,9 +308,16 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *display)
+static int omap_connector_get_type(struct omap_dss_device *output)
{
- switch (display->type) {
+ struct omap_dss_device *display;
+ enum omap_display_type type;
+
+ display = omapdss_display_get(output);
+ type = display->type;
+ omapdss_device_put(display);
+
+ switch (type) {
case OMAP_DISPLAY_TYPE_HDMI:
return DRM_MODE_CONNECTOR_HDMIA;
case OMAP_DISPLAY_TYPE_DVI:
@@ -351,28 +340,26 @@ static int omap_connector_get_type(struct omap_dss_device *display)
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
struct omap_dss_device *dssdev;
- DBG("%s", display->name);
+ DBG("%s", output->name);
omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
omap_connector->output = omapdss_device_get(output);
- omap_connector->display = omapdss_device_get(display);
connector = &omap_connector->base;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(display));
+ omap_connector_get_type(output));
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
/*
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 854099801649..608085219336 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -22,6 +22,8 @@
#include <linux/types.h>
+enum drm_mode_status;
+
struct drm_connector;
struct drm_device;
struct drm_encoder;
@@ -29,12 +31,12 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
- struct omap_dss_device *display,
struct drm_encoder *encoder);
-struct drm_encoder *omap_connector_attached_encoder(
- struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
void omap_connector_enable_hpd(struct drm_connector *connector);
void omap_connector_disable_hpd(struct drm_connector *connector);
+enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index d99e24dcc0bf..5a29bf01c0e8 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
- if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
@@ -390,6 +390,15 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct videomode vm = {0};
+ int r;
+
+ drm_display_mode_to_videomode(mode, &vm);
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc, omap_crtc->channel,
+ &vm);
+ if (r)
+ return r;
/* Check for bandwidth limit */
if (priv->max_bandwidth) {
@@ -657,7 +666,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
- __func__, pipe->display->name);
+ __func__, pipe->output->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index f8292278f57d..1b9b6f5e48e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -23,6 +23,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_panel.h>
#include "omap_dmm_tiler.h"
#include "omap_drv.h"
@@ -137,12 +138,13 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
- omapdss_device_put(pipe->display);
pipe->output = NULL;
- pipe->display = NULL;
}
memset(&priv->channels, 0, sizeof(priv->channels));
@@ -150,33 +152,17 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
priv->num_pipes = 0;
}
-static int omap_compare_pipes(const void *a, const void *b)
-{
- const struct omap_drm_pipeline *pipe1 = a;
- const struct omap_drm_pipeline *pipe2 = b;
-
- if (pipe1->display->alias_id > pipe2->display->alias_id)
- return 1;
- else if (pipe1->display->alias_id < pipe2->display->alias_id)
- return -1;
- return 0;
-}
-
static int omap_connect_pipelines(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
struct omap_dss_device *output = NULL;
- unsigned int i;
int r;
- if (!omapdss_stack_is_ready())
- return -EPROBE_DEFER;
-
for_each_dss_output(output) {
r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
omapdss_device_put(output);
- goto cleanup;
+ return r;
} else if (r) {
dev_warn(output->dev, "could not connect output %s\n",
output->name);
@@ -185,7 +171,6 @@ static int omap_connect_pipelines(struct drm_device *ddev)
pipe = &priv->pipes[priv->num_pipes++];
pipe->output = omapdss_device_get(output);
- pipe->display = omapdss_display_get(output);
if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
/* To balance the 'for_each_dss_output' loop */
@@ -195,36 +180,19 @@ static int omap_connect_pipelines(struct drm_device *ddev)
}
}
- /* Sort the list by DT aliases */
- sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
- omap_compare_pipes, NULL);
-
- /*
- * Populate the pipeline lookup table by DISPC channel. Only one display
- * is allowed per channel.
- */
- for (i = 0; i < priv->num_pipes; ++i) {
- struct omap_drm_pipeline *pipe = &priv->pipes[i];
- enum omap_channel channel = pipe->output->dispc_channel;
-
- if (WARN_ON(priv->channels[channel] != NULL)) {
- r = -EINVAL;
- goto cleanup;
- }
-
- priv->channels[channel] = pipe;
- }
-
return 0;
+}
-cleanup:
- /*
- * if we are deferring probe, we disconnect the devices we previously
- * connected
- */
- omap_disconnect_pipelines(ddev);
+static int omap_compare_pipelines(const void *a, const void *b)
+{
+ const struct omap_drm_pipeline *pipe1 = a;
+ const struct omap_drm_pipeline *pipe2 = b;
- return r;
+ if (pipe1->alias_id > pipe2->alias_id)
+ return 1;
+ else if (pipe1->alias_id < pipe2->alias_id)
+ return -1;
+ return 0;
}
static int omap_modeset_init_properties(struct drm_device *dev)
@@ -240,6 +208,30 @@ static int omap_modeset_init_properties(struct drm_device *dev)
return 0;
}
+static int omap_display_id(struct omap_dss_device *output)
+{
+ struct device_node *node = NULL;
+
+ if (output->next) {
+ struct omap_dss_device *display;
+
+ display = omapdss_display_get(output);
+ node = display->dev->of_node;
+ omapdss_device_put(display);
+ } else if (output->bridge) {
+ struct drm_bridge *bridge = output->bridge;
+
+ while (bridge->next)
+ bridge = bridge->next;
+
+ node = bridge->of_node;
+ } else if (output->panel) {
+ node = output->panel->dev->of_node;
+ }
+
+ return node ? of_alias_get_id(node, "display") : -ENODEV;
+}
+
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
@@ -249,6 +241,9 @@ static int omap_modeset_init(struct drm_device *dev)
int ret;
u32 plane_crtc_mask;
+ if (!omapdss_stack_is_ready())
+ return -EPROBE_DEFER;
+
drm_mode_config_init(dev);
ret = omap_modeset_init_properties(dev);
@@ -263,6 +258,10 @@ static int omap_modeset_init(struct drm_device *dev)
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
+ ret = omap_connect_pipelines(dev);
+ if (ret < 0)
+ return ret;
+
if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
@@ -288,33 +287,75 @@ static int omap_modeset_init(struct drm_device *dev)
priv->planes[priv->num_planes++] = plane;
}
- /* Create the CRTCs, encoders and connectors. */
+ /*
+ * Create the encoders, attach the bridges and get the pipeline alias
+ * IDs.
+ */
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- struct omap_dss_device *display = pipe->display;
- struct drm_connector *connector;
- struct drm_encoder *encoder;
- struct drm_crtc *crtc;
+ int id;
- encoder = omap_encoder_init(dev, pipe->output, display);
- if (!encoder)
+ pipe->encoder = omap_encoder_init(dev, pipe->output);
+ if (!pipe->encoder)
return -ENOMEM;
- connector = omap_connector_init(dev, pipe->output, display,
- encoder);
- if (!connector)
- return -ENOMEM;
+ if (pipe->output->bridge) {
+ ret = drm_bridge_attach(pipe->encoder,
+ pipe->output->bridge, NULL);
+ if (ret < 0)
+ return ret;
+ }
+
+ id = omap_display_id(pipe->output);
+ pipe->alias_id = id >= 0 ? id : i;
+ }
+
+ /* Sort the pipelines by DT aliases. */
+ sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
+ omap_compare_pipelines, NULL);
+
+ /*
+ * Populate the pipeline lookup table by DISPC channel. Only one display
+ * is allowed per channel.
+ */
+ for (i = 0; i < priv->num_pipes; ++i) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ enum omap_channel channel = pipe->output->dispc_channel;
+
+ if (WARN_ON(priv->channels[channel] != NULL))
+ return -EINVAL;
+
+ priv->channels[channel] = pipe;
+ }
+
+ /* Create the connectors and CRTCs. */
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ struct drm_encoder *encoder = pipe->encoder;
+ struct drm_crtc *crtc;
+
+ if (!pipe->output->bridge) {
+ pipe->connector = omap_connector_init(dev, pipe->output,
+ encoder);
+ if (!pipe->connector)
+ return -ENOMEM;
+
+ drm_connector_attach_encoder(pipe->connector, encoder);
+
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
+ }
+ }
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
- drm_connector_attach_encoder(connector, encoder);
encoder->possible_crtcs = 1 << i;
-
pipe->crtc = crtc;
- pipe->encoder = encoder;
- pipe->connector = connector;
}
DBG("registered %u planes, %u crtcs/encoders/connectors\n",
@@ -351,10 +392,12 @@ static int omap_modeset_init(struct drm_device *dev)
static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_enable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -363,10 +406,12 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
{
struct omap_drm_private *priv = ddev->dev_private;
- int i;
+ unsigned int i;
- for (i = 0; i < priv->num_pipes; i++)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ for (i = 0; i < priv->num_pipes; i++) {
+ if (priv->pipes[i].connector)
+ omap_connector_disable_hpd(priv->pipes[i].connector);
+ }
}
/*
@@ -551,10 +596,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_crtc_pre_init(priv);
- ret = omap_connect_pipelines(ddev);
- if (ret)
- goto err_crtc_uninit;
-
soc = soc_device_match(omapdrm_soc_devices);
priv->omaprev = soc ? (unsigned int)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
@@ -612,7 +653,6 @@ err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
omap_disconnect_pipelines(ddev);
-err_crtc_uninit:
omap_crtc_pre_uninit(priv);
drm_dev_put(ddev);
return ret;
@@ -685,54 +725,12 @@ static int pdev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int omap_drm_suspend_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
- display->ops->disable(display);
- display->activate_after_resume = true;
- } else {
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
-static int omap_drm_resume_all_displays(struct drm_device *ddev)
-{
- struct omap_drm_private *priv = ddev->dev_private;
- int i;
-
- for (i = 0; i < priv->num_pipes; i++) {
- struct omap_dss_device *display = priv->pipes[i].display;
-
- if (display->activate_after_resume) {
- display->ops->enable(display);
- display->activate_after_resume = false;
- }
- }
-
- return 0;
-}
-
static int omap_drm_suspend(struct device *dev)
{
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_kms_helper_poll_disable(drm_dev);
-
- drm_modeset_lock_all(drm_dev);
- omap_drm_suspend_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int omap_drm_resume(struct device *dev)
@@ -740,11 +738,7 @@ static int omap_drm_resume(struct device *dev)
struct omap_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *drm_dev = priv->ddev;
- drm_modeset_lock_all(drm_dev);
- omap_drm_resume_all_displays(drm_dev);
- drm_modeset_unlock_all(drm_dev);
-
- drm_kms_helper_poll_enable(drm_dev);
+ drm_mode_config_helper_resume(drm_dev);
return omap_gem_resume(drm_dev);
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 0c57d2814c51..3cca45cb25f3 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -49,7 +49,7 @@ struct omap_drm_pipeline {
struct drm_encoder *encoder;
struct drm_connector *connector;
struct omap_dss_device *output;
- struct omap_dss_device *display;
+ unsigned int alias_id;
};
struct omap_drm_private {
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 0d85b3a35767..40512419642b 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -20,6 +20,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
+#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -37,7 +38,6 @@
struct omap_encoder {
struct drm_encoder base;
struct omap_dss_device *output;
- struct omap_dss_device *display;
};
static void omap_encoder_destroy(struct drm_encoder *encoder)
@@ -52,22 +52,43 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
-static void omap_encoder_hdmi_mode_set(struct drm_encoder *encoder,
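+/*
+ * Merge bus flags into the video mode flags, touching only the flag groups
+ * (data enable level, pixel data edge, sync edge) that the mode does not
+ * already specify.
+ */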
+static void omap_encoder_update_videomode_flags(struct videomode *vm,
+ u32 bus_flags)
+{
+ if (!(vm->flags & (DISPLAY_FLAGS_DE_LOW |
+ DISPLAY_FLAGS_DE_HIGH))) {
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ vm->flags |= DISPLAY_FLAGS_DE_LOW;
+ else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vm->flags |= DISPLAY_FLAGS_DE_HIGH;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ }
+
+ if (!(vm->flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE)
+ vm->flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+ }
+}
+
+static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
+ struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct omap_dss_device *dssdev = omap_encoder->output;
- struct drm_connector *connector;
bool hdmi_mode;
- hdmi_mode = false;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
- break;
- }
- }
+ hdmi_mode = omap_connector_get_hdmi_mode(connector);
if (dssdev->ops->hdmi.set_hdmi_mode)
dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
@@ -88,8 +109,18 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct omap_dss_device *output = omap_encoder->output;
struct omap_dss_device *dssdev;
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
struct videomode vm = { 0 };
+ u32 bus_flags;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ break;
+ }
drm_display_mode_to_videomode(adjusted_mode, &vm);
@@ -102,66 +133,102 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
*
* A better solution is to use DRM's bus-flags through the whole driver.
*/
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- unsigned long bus_flags = dssdev->bus_flags;
-
- if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW |
- DISPLAY_FLAGS_DE_HIGH))) {
- if (bus_flags & DRM_BUS_FLAG_DE_LOW)
- vm.flags |= DISPLAY_FLAGS_DE_LOW;
- else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
- vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- }
-
- if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_SYNC_NEGEDGE))) {
- if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
- else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE)
- vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
- }
+ for (dssdev = output; dssdev; dssdev = dssdev->next)
+ omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags);
+
+ for (bridge = output->bridge; bridge; bridge = bridge->next) {
+ if (!bridge->timings)
+ continue;
+
+ bus_flags = bridge->timings->input_bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
}
+ bus_flags = connector->display_info.bus_flags;
+ omap_encoder_update_videomode_flags(&vm, bus_flags);
+
/* Set timings for all devices in the display pipeline. */
- dss_mgr_set_timings(omap_encoder->output, &vm);
+ dss_mgr_set_timings(output, &vm);
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ for (dssdev = output; dssdev; dssdev = dssdev->next) {
if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, &vm);
+ dssdev->ops->set_timings(dssdev, adjusted_mode);
}
/* Set the HDMI mode and HDMI infoframe if applicable. */
- if (omap_encoder->output->output_type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(encoder, adjusted_mode);
+ if (output->type == OMAP_DISPLAY_TYPE_HDMI)
+ omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
+
+ /* Disable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_disable(dssdev->panel);
+ drm_panel_unprepare(dssdev->panel);
+ }
+
+ /*
+ * Disable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_disable(dssdev->next);
+
+ /*
+ * Disable the internal encoder. This will disable the DSS output. The
+ * DSI is treated as an exception as DSI pipelines still use the legacy
+ * flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->disable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ }
- dssdev->ops->disable(dssdev);
+ /*
+ * Perform the post-disable operations on the chain of external devices
+ * to complete the display pipeline disable.
+ */
+ omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->display;
- int r;
-
- r = dssdev->ops->enable(dssdev);
- if (r)
- dev_err(encoder->dev->dev,
- "Failed to enable display '%s': %d\n",
- dssdev->name, r);
+ struct omap_dss_device *dssdev = omap_encoder->output;
+ struct drm_device *dev = encoder->dev;
+
+ dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
+
+ /* Prepare the chain of external devices for pipeline enable. */
+ omapdss_device_pre_enable(dssdev->next);
+
+ /*
+ * Enable the internal encoder. This will enable the DSS output. The
+ * DSI is treated as an exception as DSI pipelines still use the legacy
+ * flow where the pipeline output controls the encoder.
+ */
+ if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
+ dssdev->ops->enable(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ }
+
+ /*
+ * Enable the chain of external devices, starting at the one at the
+ * internal encoder's output.
+ */
+ omapdss_device_enable(dssdev->next);
+
+ /* Enable the panel if present. */
+ if (dssdev->panel) {
+ drm_panel_prepare(dssdev->panel);
+ drm_panel_enable(dssdev->panel);
+ }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
@@ -169,35 +236,17 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- enum omap_channel channel = omap_encoder->output->dispc_channel;
- struct drm_device *dev = encoder->dev;
- struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev;
- struct videomode vm = { 0 };
- int ret;
-
- drm_display_mode_to_videomode(&crtc_state->mode, &vm);
-
- ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
- if (ret)
- goto done;
-
- for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
- continue;
-
- ret = dssdev->ops->check_timings(dssdev, &vm);
- if (ret)
- goto done;
+ enum drm_mode_status status;
+
+ status = omap_connector_mode_fixup(omap_encoder->output,
+ &crtc_state->mode,
+ &crtc_state->adjusted_mode);
+ if (status != MODE_OK) {
+ dev_err(encoder->dev->dev, "invalid timings: %d\n", status);
+ return -EINVAL;
}
- drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode);
-
-done:
- if (ret)
- dev_err(dev->dev, "invalid timings: %d\n", ret);
-
- return ret;
+ return 0;
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
@@ -209,8 +258,7 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display)
+ struct omap_dss_device *output)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
@@ -220,7 +268,6 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
goto fail;
omap_encoder->output = output;
- omap_encoder->display = display;
encoder = &omap_encoder->base;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
index a7b5dde63ecb..4aefb3142886 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.h
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -25,7 +25,6 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *output,
- struct omap_dss_device *display);
+ struct omap_dss_device *output);
#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 851c59f07eb1..50aabd854f4d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -183,13 +183,9 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
fbdev->fb = fb;
helper->fb = fb;
- fbi->par = helper;
fbi->fbops = &omap_fb_ops;
- strcpy(fbi->fix.id, MODULE_NAME);
-
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = dma_addr;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 3e070153ef21..e36dbb4df867 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -38,6 +38,15 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_FEIYANG_FY07024DI26A30D
+ tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the Feiyang
+ FY07024DI26A30-D MIPI-DSI LCD panel.
+
config DRM_PANEL_ILITEK_IL9322
tristate "Ilitek ILI9322 320x240 QVGA panels"
depends on OF && SPI
@@ -149,6 +158,28 @@ config DRM_PANEL_RAYDIUM_RM68200
Say Y here if you want to enable support for Raydium RM68200
720x1280 DSI video mode panel.
+config DRM_PANEL_ROCKTECH_JH057N00900
+ tristate "Rocktech JH057N00900 MIPI touchscreen panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Rocktech
+ JH057N00900 MIPI DSI panel, as used for example in the Librem 5
+ devkit. It has a resolution of 720x1440 pixels, a built-in
+ backlight and a touch controller.
+ Touch input support is provided by the goodix driver and needs to be
+ selected separately.
+
+config DRM_PANEL_RONBO_RB070D30
+ tristate "Ronbo Electronics RB070D30 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Ronbo Electronics
+ RB070D30 1024x600 DSI panel.
+
config DRM_PANEL_SAMSUNG_S6D16D0
tristate "Samsung S6D16D0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index e7ab71968bbf..78e3dc376bdd 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
@@ -13,6 +14,8 @@ obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
obj-$(CONFIG_DRM_PANEL_PANASONIC_VVX10F034N00) += panel-panasonic-vvx10f034n00.o
obj-$(CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN) += panel-raspberrypi-touchscreen.o
obj-$(CONFIG_DRM_PANEL_RAYDIUM_RM68200) += panel-raydium-rm68200.o
+obj-$(CONFIG_DRM_PANEL_ROCKTECH_JH057N00900) += panel-rocktech-jh057n00900.o
+obj-$(CONFIG_DRM_PANEL_RONBO_RB070D30) += panel-ronbo-rb070d30.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c
index b428c4678106..a79908dfa3c8 100644
--- a/drivers/gpu/drm/panel/panel-arm-versatile.c
+++ b/drivers/gpu/drm/panel/panel-arm-versatile.c
@@ -191,7 +191,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 390,
.flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
},
/*
* Sanyo ALR252RGT 240x320 portrait display found on the
@@ -215,7 +215,7 @@ static const struct versatile_panel_type versatile_panels[] = {
.vrefresh = 116,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
.ib2 = true,
},
};
@@ -264,8 +264,6 @@ static int versatile_panel_get_modes(struct drm_panel *panel)
struct versatile_panel *vpanel = to_versatile_panel(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, vpanel->panel_type->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = vpanel->panel_type->width_mm;
connector->display_info.height_mm = vpanel->panel_type->height_mm;
connector->display_info.bus_flags = vpanel->panel_type->bus_flags;
diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
new file mode 100644
index 000000000000..dabf59e0f56f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018 Amarula Solutions
+ * Author: Jagan Teki <jagan@amarulasolutions.com>
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#define FEIYANG_INIT_CMD_LEN 2
+
+struct feiyang {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct backlight_device *backlight;
+ struct regulator *dvdd;
+ struct regulator *avdd;
+ struct gpio_desc *reset;
+};
+
+static inline struct feiyang *panel_to_feiyang(struct drm_panel *panel)
+{
+ return container_of(panel, struct feiyang, panel);
+}
+
+struct feiyang_init_cmd {
+ u8 data[FEIYANG_INIT_CMD_LEN];
+};
+
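+/*
+ * Init sequence: each entry is sent as a two-byte DCS write in
+ * feiyang_prepare().
+ */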
+static const struct feiyang_init_cmd feiyang_init_cmds[] = {
+ { .data = { 0x80, 0x58 } },
+ { .data = { 0x81, 0x47 } },
+ { .data = { 0x82, 0xD4 } },
+ { .data = { 0x83, 0x88 } },
+ { .data = { 0x84, 0xA9 } },
+ { .data = { 0x85, 0xC3 } },
+ { .data = { 0x86, 0x82 } },
+};
+
+static int feiyang_prepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_enable(ctx->dvdd);
+ if (ret)
+ return ret;
+
+ /* T1 (dvdd start + dvdd rise) 0 < T1 <= 10ms */
+ msleep(10);
+
+ ret = regulator_enable(ctx->avdd);
+ if (ret)
+ return ret;
+
+ /* T3 (dvdd rise + avdd start + avdd rise) T3 >= 20ms */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ /*
+ * T5 + T6 (avdd rise + video & logic signal rise)
+ * T5 >= 10ms, 0 < T6 <= 10ms
+ */
+ msleep(20);
+
+ gpiod_set_value(ctx->reset, 1);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(feiyang_init_cmds); i++) {
+ const struct feiyang_init_cmd *cmd =
+ &feiyang_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data,
+ FEIYANG_INIT_CMD_LEN);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int feiyang_enable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ /* T12 (video & logic signal rise + backlight rise) T12 >= 200ms */
+ msleep(200);
+
+ mipi_dsi_dcs_set_display_on(ctx->dsi);
+ backlight_enable(ctx->backlight);
+
+ return 0;
+}
+
+static int feiyang_disable(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int feiyang_unprepare(struct drm_panel *panel)
+{
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ /* T13 (backlight fall + video & logic signal fall) T13 >= 200ms */
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+
+ regulator_disable(ctx->avdd);
+
+ /* T11 (dvdd rise to fall) 0 < T11 <= 10ms */
+ msleep(10);
+
+ regulator_disable(ctx->dvdd);
+
+ return 0;
+}
+
+static const struct drm_display_mode feiyang_default_mode = {
+ .clock = 55000,
+
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 310,
+ .hsync_end = 1024 + 310 + 20,
+ .htotal = 1024 + 310 + 20 + 90,
+
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 2,
+ .vtotal = 600 + 12 + 2 + 21,
+ .vrefresh = 60,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int feiyang_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct feiyang *ctx = panel_to_feiyang(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &feiyang_default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ feiyang_default_mode.hdisplay,
+ feiyang_default_mode.vdisplay,
+ feiyang_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs feiyang_funcs = {
+ .disable = feiyang_disable,
+ .unprepare = feiyang_unprepare,
+ .prepare = feiyang_prepare,
+ .enable = feiyang_enable,
+ .get_modes = feiyang_get_modes,
+};
+
+static int feiyang_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &feiyang_funcs;
+
+ ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd");
+ if (IS_ERR(ctx->dvdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get dvdd regulator\n");
+ return PTR_ERR(ctx->dvdd);
+ }
+
+ ctx->avdd = devm_regulator_get(&dsi->dev, "avdd");
+ if (IS_ERR(ctx->avdd)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get avdd regulator\n");
+ return PTR_ERR(ctx->avdd);
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int feiyang_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct feiyang *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id feiyang_of_match[] = {
+ { .compatible = "feiyang,fy07024di26a30d", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, feiyang_of_match);
+
+static struct mipi_dsi_driver feiyang_driver = {
+ .probe = feiyang_dsi_probe,
+ .remove = feiyang_dsi_remove,
+ .driver = {
+ .name = "feiyang-fy07024di26a30d",
+ .of_match_table = feiyang_of_match,
+ },
+};
+module_mipi_dsi_driver(feiyang_driver);
+
+MODULE_AUTHOR("Jagan Teki <jagan@amarulasolutions.com>");
+MODULE_DESCRIPTION("Feiyang FY07024DI26A30-D MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index bd38bf4f1ba6..a1c4cd2940fb 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -412,11 +412,11 @@ static int ili9322_init(struct drm_panel *panel, struct ili9322 *ili)
if (ili->conf->dclk_active_high) {
reg = ILI9322_POL_DCLK;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_POSEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
} else {
reg = 0;
connector->display_info.bus_flags |=
- DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
}
if (ili->conf->de_active_high) {
reg |= ILI9322_POL_DE;
@@ -662,8 +662,6 @@ static int ili9322_get_modes(struct drm_panel *panel)
struct ili9322 *ili = panel_to_ili9322(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "ILI9322 TFT LCD driver\0",
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = ili->conf->width_mm;
connector->display_info.height_mm = ili->conf->height_mm;
diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
index 5e8d4523e9ed..a1d8d92fac2b 100644
--- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
+++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
@@ -190,7 +190,6 @@ static int lcd_olinuxino_get_modes(struct drm_panel *panel)
num++;
}
- memcpy(connector->display_info.name, lcd_info->name, 32);
connector->display_info.width_mm = lcd_info->width_mm;
connector->display_info.height_mm = lcd_info->height_mm;
connector->display_info.bpc = lcd_info->bpc;
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index 87fa316e1d7b..f27a7e426574 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -67,15 +67,15 @@ struct otm8009a {
};
static const struct drm_display_mode default_mode = {
- .clock = 32729,
+ .clock = 29700,
.hdisplay = 480,
- .hsync_start = 480 + 120,
- .hsync_end = 480 + 120 + 63,
- .htotal = 480 + 120 + 63 + 120,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
.vdisplay = 800,
- .vsync_start = 800 + 12,
- .vsync_end = 800 + 12 + 12,
- .vtotal = 800 + 12 + 12 + 12,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
.vrefresh = 50,
.flags = 0,
.width_mm = 52,
@@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Send Command GRAM memory write (no parameters) */
dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
+ /* Wait a short while to let the panel be ready before the 1st frame */
+ mdelay(10);
+
return 0;
}
@@ -433,7 +436,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "failed to request regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to request regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
index 77593533abcd..14186827e591 100644
--- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c
+++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c
@@ -383,7 +383,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi)
ctx->supply = devm_regulator_get(dev, "power");
if (IS_ERR(ctx->supply)) {
ret = PTR_ERR(ctx->supply);
- dev_err(dev, "cannot get regulator: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot get regulator: %d\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
new file mode 100644
index 000000000000..d88ea8da2ec2
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
@@ -0,0 +1,387 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ *
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+#include <linux/backlight.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#define DRV_NAME "panel-rocktech-jh057n00900"
+
+/* Manufacturer-specific commands sent via DSI */
+#define ST7703_CMD_ALL_PIXEL_OFF 0x22
+#define ST7703_CMD_ALL_PIXEL_ON 0x23
+#define ST7703_CMD_SETDISP 0xB2
+#define ST7703_CMD_SETRGBIF 0xB3
+#define ST7703_CMD_SETCYC 0xB4
+#define ST7703_CMD_SETBGP 0xB5
+#define ST7703_CMD_SETVCOM 0xB6
+#define ST7703_CMD_SETOTP 0xB7
+#define ST7703_CMD_SETPOWER_EXT 0xB8
+#define ST7703_CMD_SETEXTC 0xB9
+#define ST7703_CMD_SETMIPI 0xBA
+#define ST7703_CMD_SETVDC 0xBC
+#define ST7703_CMD_SETSCR 0xC0
+#define ST7703_CMD_SETPOWER 0xC1
+#define ST7703_CMD_SETPANEL 0xCC
+#define ST7703_CMD_SETGAMMA 0xE0
+#define ST7703_CMD_SETEQ 0xE3
+#define ST7703_CMD_SETGIP1 0xE9
+#define ST7703_CMD_SETGIP2 0xEA
+
+struct jh057n {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct backlight_device *backlight;
+ bool prepared;
+
+ struct dentry *debugfs;
+};
+
+static inline struct jh057n *panel_to_jh057n(struct drm_panel *panel)
+{
+ return container_of(panel, struct jh057n, panel);
+}
+
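+/*
+ * Send a fixed byte sequence as a DSI generic write; on failure, return the
+ * error code from the enclosing function.
+ */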
+#define dsi_generic_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_generic_write(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static int jh057n_init_sequence(struct jh057n *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+ int ret;
+
+ /*
+ * Init sequence was supplied by the panel vendor. Most of the commands
+ * resemble the ST7703, but the number of parameters often doesn't match,
+ * so it's likely a clone.
+ */
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEXTC,
+ 0xF1, 0x12, 0x83);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETRGBIF,
+ 0x10, 0x10, 0x05, 0x05, 0x03, 0xFF, 0x00, 0x00,
+ 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETSCR,
+ 0x73, 0x73, 0x50, 0x50, 0x00, 0x00, 0x08, 0x70,
+ 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVDC, 0x4E);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETPANEL, 0x0B);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETCYC, 0x80);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETDISP, 0xF0, 0x12, 0x30);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETEQ,
+ 0x07, 0x07, 0x0B, 0x0B, 0x03, 0x0B, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xC0, 0x10);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETBGP, 0x08, 0x08);
+ msleep(20);
+
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETVCOM, 0x3F, 0x3F);
+ dsi_generic_write_seq(dsi, 0xBF, 0x02, 0x11, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP1,
+ 0x82, 0x10, 0x06, 0x05, 0x9E, 0x0A, 0xA5, 0x12,
+ 0x31, 0x23, 0x37, 0x83, 0x04, 0xBC, 0x27, 0x38,
+ 0x0C, 0x00, 0x03, 0x00, 0x00, 0x00, 0x0C, 0x00,
+ 0x03, 0x00, 0x00, 0x00, 0x75, 0x75, 0x31, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x13, 0x88, 0x64,
+ 0x64, 0x20, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x02, 0x88, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGIP2,
+ 0x02, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x02, 0x46, 0x02, 0x88,
+ 0x88, 0x88, 0x88, 0x88, 0x88, 0x64, 0x88, 0x13,
+ 0x57, 0x13, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
+ 0x75, 0x88, 0x23, 0x14, 0x00, 0x00, 0x02, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x0A,
+ 0xA5, 0x00, 0x00, 0x00, 0x00);
+ dsi_generic_write_seq(dsi, ST7703_CMD_SETGAMMA,
+ 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41, 0x37,
+ 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10, 0x11,
+ 0x18, 0x00, 0x09, 0x0E, 0x29, 0x2D, 0x3C, 0x41,
+ 0x37, 0x07, 0x0B, 0x0D, 0x10, 0x11, 0x0F, 0x10,
+ 0x11, 0x18);
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to exit sleep mode\n");
+ return ret;
+ }
+ /* Panel is operational 120 msec after reset */
+ msleep(60);
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret)
+ return ret;
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int jh057n_enable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_enable(ctx->backlight);
+}
+
+static int jh057n_disable(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+
+ return backlight_disable(ctx->backlight);
+}
+
+static int jh057n_unprepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ if (!ctx->prepared)
+ return 0;
+
+ mipi_dsi_dcs_set_display_off(dsi);
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int jh057n_prepare(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(20, 40);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ msleep(20);
+
+ ret = jh057n_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->prepared = true;
+
+ return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 720,
+ .hsync_start = 720 + 90,
+ .hsync_end = 720 + 90 + 20,
+ .htotal = 720 + 90 + 20 + 20,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 20,
+ .vsync_end = 1440 + 20 + 4,
+ .vtotal = 1440 + 20 + 4 + 12,
+ .vrefresh = 60,
+ .clock = 75276,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 65,
+ .height_mm = 130,
+};
+
+static int jh057n_get_modes(struct drm_panel *panel)
+{
+ struct jh057n *ctx = panel_to_jh057n(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(panel->connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs jh057n_drm_funcs = {
+ .disable = jh057n_disable,
+ .unprepare = jh057n_unprepare,
+ .prepare = jh057n_prepare,
+ .enable = jh057n_enable,
+ .get_modes = jh057n_get_modes,
+};
+
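+/*
+ * debugfs hook: switch the panel to its "all pixels on" test mode for 'val'
+ * seconds, then run the full disable/unprepare and prepare/enable cycle to
+ * restore video output.
+ */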
+static int allpixelson_set(void *data, u64 val)
+{
+ struct jh057n *ctx = data;
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Setting all pixels on\n");
+ dsi_generic_write_seq(dsi, ST7703_CMD_ALL_PIXEL_ON);
+ msleep(val * 1000);
+ /* Reset the panel to get video back */
+ drm_panel_disable(&ctx->panel);
+ drm_panel_unprepare(&ctx->panel);
+ drm_panel_prepare(&ctx->panel);
+ drm_panel_enable(&ctx->panel);
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(allpixelson_fops, NULL,
+ allpixelson_set, "%llu\n");
+
+static int jh057n_debugfs_init(struct jh057n *ctx)
+{
+ struct dentry *f;
+
+ ctx->debugfs = debugfs_create_dir(DRV_NAME, NULL);
+ if (!ctx->debugfs)
+ return -ENOMEM;
+
+ f = debugfs_create_file("allpixelson", 0600,
+ ctx->debugfs, ctx, &allpixelson_fops);
+ if (!f)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void jh057n_debugfs_remove(struct jh057n *ctx)
+{
+ debugfs_remove_recursive(ctx->debugfs);
+ ctx->debugfs = NULL;
+}
+
+static int jh057n_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct jh057n *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+
+ ctx->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(ctx->backlight))
+ return PTR_ERR(ctx->backlight);
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = dev;
+ ctx->panel.funcs = &jh057n_drm_funcs;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed. Is host ready?\n");
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ DRM_DEV_INFO(dev, "%ux%u@%u %ubpp dsi %udl - ready\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh,
+ mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes);
+
+ jh057n_debugfs_init(ctx);
+ return 0;
+}
+
+static void jh057n_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = jh057n_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = jh057n_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int jh057n_remove(struct mipi_dsi_device *dsi)
+{
+ struct jh057n *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ jh057n_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ jh057n_debugfs_remove(ctx);
+
+ return 0;
+}
+
+static const struct of_device_id jh057n_of_match[] = {
+ { .compatible = "rocktech,jh057n00900" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, jh057n_of_match);
+
+static struct mipi_dsi_driver jh057n_driver = {
+ .probe = jh057n_probe,
+ .remove = jh057n_remove,
+ .shutdown = jh057n_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = jh057n_of_match,
+ },
+};
+module_mipi_dsi_driver(jh057n_driver);
+
+MODULE_AUTHOR("Guido Günther <agx@sigxcpu.org>");
+MODULE_DESCRIPTION("DRM driver for Rocktech JH057N00900 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
new file mode 100644
index 000000000000..3c15764f0c03
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2018-2019, Bridge Systems BV
+ * Copyright (C) 2018-2019, Bootlin
+ * Copyright (C) 2017, Free Electrons
+ *
+ * This file is based on panel-ilitek-ili9881c.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+
+#include <linux/gpio/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+struct rb070d30_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct backlight_device *backlight;
+ struct regulator *supply;
+
+ struct {
+ struct gpio_desc *power;
+ struct gpio_desc *reset;
+ struct gpio_desc *updn;
+ struct gpio_desc *shlr;
+ } gpios;
+};
+
+static inline struct rb070d30_panel *panel_to_rb070d30_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct rb070d30_panel, panel);
+}
+
+static int rb070d30_panel_prepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&ctx->dsi->dev, "Failed to enable supply: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+ gpiod_set_value(ctx->gpios.power, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpios.reset, 1);
+ msleep(20);
+ return 0;
+}
+
+static int rb070d30_panel_unprepare(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ gpiod_set_value(ctx->gpios.reset, 0);
+ gpiod_set_value(ctx->gpios.power, 0);
+ regulator_disable(ctx->supply);
+
+ return 0;
+}
+
+static int rb070d30_panel_enable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(ctx->dsi);
+ if (ret)
+ return ret;
+
+ ret = backlight_enable(ctx->backlight);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ return ret;
+}
+
+static int rb070d30_panel_disable(struct drm_panel *panel)
+{
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+
+ backlight_disable(ctx->backlight);
+ return mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+}
+
+/* Default timings */
+static const struct drm_display_mode default_mode = {
+ .clock = 51206,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 80,
+ .htotal = 1024 + 160 + 80 + 80,
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 10,
+ .vtotal = 600 + 12 + 10 + 13,
+ .vrefresh = 60,
+
+ .width_mm = 154,
+ .height_mm = 85,
+};
+
+static int rb070d30_panel_get_modes(struct drm_panel *panel)
+{
+ struct drm_connector *connector = panel->connector;
+ struct rb070d30_panel *ctx = panel_to_rb070d30_panel(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(panel->drm, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(&ctx->dsi->dev,
+ "Failed to add mode " DRM_MODE_FMT "\n",
+ DRM_MODE_ARG(&default_mode));
+ return -EINVAL;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ panel->connector->display_info.bpc = 8;
+ panel->connector->display_info.width_mm = mode->width_mm;
+ panel->connector->display_info.height_mm = mode->height_mm;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs rb070d30_panel_funcs = {
+ .get_modes = rb070d30_panel_get_modes,
+ .prepare = rb070d30_panel_prepare,
+ .enable = rb070d30_panel_enable,
+ .disable = rb070d30_panel_disable,
+ .unprepare = rb070d30_panel_unprepare,
+};
+
+static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supply = devm_regulator_get(&dsi->dev, "vcc-lcd");
+ if (IS_ERR(ctx->supply))
+ return PTR_ERR(ctx->supply);
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ drm_panel_init(&ctx->panel);
+ ctx->panel.dev = &dsi->dev;
+ ctx->panel.funcs = &rb070d30_panel_funcs;
+
+ ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->gpios.reset);
+ }
+
+ ctx->gpios.power = devm_gpiod_get(&dsi->dev, "power", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.power)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our power GPIO\n");
+ return PTR_ERR(ctx->gpios.power);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.updn = devm_gpiod_get(&dsi->dev, "updn", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.updn)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our updn GPIO\n");
+ return PTR_ERR(ctx->gpios.updn);
+ }
+
+ /*
+ * We don't change the state of that GPIO later on but we need
+ * to force it into a low state.
+ */
+ ctx->gpios.shlr = devm_gpiod_get(&dsi->dev, "shlr", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpios.shlr)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our shlr GPIO\n");
+ return PTR_ERR(ctx->gpios.shlr);
+ }
+
+ ctx->backlight = devm_of_find_backlight(&dsi->dev);
+ if (IS_ERR(ctx->backlight)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our backlight\n");
+ return PTR_ERR(ctx->backlight);
+ }
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ return mipi_dsi_attach(dsi);
+}
+
+static int rb070d30_panel_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct rb070d30_panel *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id rb070d30_panel_of_match[] = {
+ { .compatible = "ronbo,rb070d30" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rb070d30_panel_of_match);
+
+static struct mipi_dsi_driver rb070d30_panel_driver = {
+ .probe = rb070d30_panel_dsi_probe,
+ .remove = rb070d30_panel_dsi_remove,
+ .driver = {
+ .name = "panel-ronbo-rb070d30",
+ .of_match_table = rb070d30_panel_of_match,
+ },
+};
+module_mipi_dsi_driver(rb070d30_panel_driver);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_AUTHOR("Konstantin Sudakov <k.sudakov@integrasources.com>");
+MODULE_DESCRIPTION("Ronbo RB070D30 Panel Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
index 33c22ee036f8..f75bef24e050 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c
@@ -148,9 +148,6 @@ static int s6d16d0_get_modes(struct drm_panel *panel)
struct drm_connector *connector = panel->connector;
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, "Samsung S6D16D0\0",
- DRM_DISPLAY_INFO_LEN);
-
mode = drm_mode_duplicate(panel->drm, &samsung_s6d16d0_mode);
if (!mode) {
DRM_ERROR("bad mode or failed to add mode\n");
diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
index 2d99e28ff117..bdcc5d80823d 100644
--- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
+++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c
@@ -328,7 +328,7 @@ static const struct seiko_panel_desc seiko_43wvf1g = {
.height = 57,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct of_device_id platform_of_match[] = {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e8218f6a3f2..569be4efd8d1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -914,7 +914,7 @@ static const struct panel_desc cdtech_s043wq26h_ct7 = {
.width = 95,
.height = 54,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode cdtech_s070wv95_ct16_mode = {
@@ -1034,7 +1034,7 @@ static const struct panel_desc dataimage_scf0700c48ggu18 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct display_timing dlc_dlc0700yzg_1_timing = {
@@ -1119,7 +1119,7 @@ static const struct panel_desc edt_et057090dhu = {
.height = 86,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
@@ -1145,7 +1145,7 @@ static const struct panel_desc edt_etm0700g0dh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE,
};
static const struct panel_desc edt_etm0700g0bdh6 = {
@@ -1157,7 +1157,7 @@ static const struct panel_desc edt_etm0700g0bdh6 = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
@@ -1311,7 +1311,7 @@ static const struct panel_desc innolux_at043tn24 = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode innolux_at070tn92_mode = {
@@ -1818,7 +1818,7 @@ static const struct panel_desc nec_nl4827hc19_05b = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode netron_dy_e231732_mode = {
@@ -1867,8 +1867,8 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.height = 54,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE |
- DRM_BUS_FLAG_SYNC_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
@@ -2029,7 +2029,33 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
.height = 93,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+};
+
+static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
+ .clock = 33000,
+ .hdisplay = 800,
+ .hsync_start = 800 + 210,
+ .hsync_end = 800 + 210 + 30,
+ .htotal = 800 + 210 + 30 + 16,
+ .vdisplay = 480,
+ .vsync_start = 480 + 22,
+ .vsync_end = 480 + 22 + 13,
+ .vtotal = 480 + 22 + 13 + 10,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc osddisplays_osd070t1718_19ts = {
+ .modes = &osddisplays_osd070t1718_19ts_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode pda_91_00156_a0_mode = {
@@ -2398,7 +2424,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.height = 116,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f07a_0102_mode = {
@@ -2421,7 +2447,7 @@ static const struct panel_desc tpk_f07a_0102 = {
.width = 152,
.height = 91,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
};
static const struct drm_display_mode tpk_f10a_0102_mode = {
@@ -2737,6 +2763,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "ortustech,com43h4m85ulc",
.data = &ortustech_com43h4m85ulc,
}, {
+ .compatible = "osddisplays,osd070t1718-19ts",
+ .data = &osddisplays_osd070t1718_19ts,
+ }, {
.compatible = "pda,91-00156-a0",
.data = &pda_91_00156_a0,
}, {
@@ -2996,6 +3025,34 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
.lanes = 4,
};
+static const struct drm_display_mode lg_acx467akm_7_mode = {
+ .clock = 150000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 2,
+ .hsync_end = 1080 + 2 + 2,
+ .htotal = 1080 + 2 + 2 + 2,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 2,
+ .vsync_end = 1920 + 2 + 2,
+ .vtotal = 1920 + 2 + 2 + 2,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc_dsi lg_acx467akm_7 = {
+ .desc = {
+ .modes = &lg_acx467akm_7_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 62,
+ .height = 110,
+ },
+ },
+ .flags = 0,
+ .format = MIPI_DSI_FMT_RGB888,
+ .lanes = 4,
+};
+
static const struct of_device_id dsi_of_match[] = {
{
.compatible = "auo,b080uan01",
@@ -3013,6 +3070,9 @@ static const struct of_device_id dsi_of_match[] = {
.compatible = "panasonic,vvx10f004b00",
.data = &panasonic_vvx10f004b00
}, {
+ .compatible = "lg,acx467akm-7",
+ .data = &lg_acx467akm_7
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
index 5a9f8f4d5d24..71591e5f5938 100644
--- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c
+++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c
@@ -118,7 +118,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 10 + 1 + 35,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "640x480 RGB",
@@ -135,7 +135,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 480 + 18 + 1 + 27,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x272 RGB",
@@ -152,7 +152,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 272 + 2 + 1 + 12,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "480x640 RGB",
@@ -169,7 +169,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 640 + 4 + 1 + 8,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
{
.name = "400x240 RGB",
@@ -186,7 +186,7 @@ static const struct tpg110_panel_mode tpg110_modes[] = {
.vtotal = 240 + 2 + 1 + 20,
.vrefresh = 60,
},
- .bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
},
};
@@ -390,8 +390,6 @@ static int tpg110_get_modes(struct drm_panel *panel)
struct tpg110 *tpg = to_tpg110(panel);
struct drm_display_mode *mode;
- strncpy(connector->display_info.name, tpg->panel_mode->name,
- DRM_DISPLAY_INFO_LEN);
connector->display_info.width_mm = tpg->width;
connector->display_info.height_mm = tpg->height;
connector->display_info.bus_flags = tpg->panel_mode->bus_flags;
diff --git a/drivers/gpu/drm/panfrost/Kconfig b/drivers/gpu/drm/panfrost/Kconfig
new file mode 100644
index 000000000000..591611dc4e34
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config DRM_PANFROST
+ tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
+ depends on DRM
+ depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+ depends on MMU
+ select DRM_SCHED
+ select IOMMU_SUPPORT
+ select IOMMU_IO_PGTABLE_LPAE
+ select DRM_GEM_SHMEM_HELPER
+ help
+ DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
+ Bifrost (G3x, G5x, G7x) GPUs.
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile
new file mode 100644
index 000000000000..6de72d13c58f
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+panfrost-y := \
+ panfrost_drv.o \
+ panfrost_device.o \
+ panfrost_devfreq.o \
+ panfrost_gem.o \
+ panfrost_gpu.o \
+ panfrost_job.o \
+ panfrost_mmu.o
+
+obj-$(CONFIG_DRM_PANFROST) += panfrost.o
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
new file mode 100644
index 000000000000..c2e44add37d8
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -0,0 +1,27 @@
+- Thermal support.
+
+- Bifrost support:
+ - DT bindings (Neil, WIP)
+ - MMU page table format and address space setup
+ - Bifrost specific feature and issue handling
+ - Coherent DMA support
+
+- Support for 2MB pages. The io-pgtable code already supports this. Finishing
+ support involves either copying or adapting the iommu API to handle passing
+ aligned addresses and sizes to the io-pgtable code.
+
+- Per FD address space support. The h/w supports multiple address spaces.
+ The hard part is handling the case where more address spaces are needed
+ than the h/w provides.
+
+- Support pinning pages on demand (GPU page faults).
+
+- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
+
+- Support for madvise and a shrinker.
+
+- Compute job support. So-called 'compute only' jobs need to be plumbed up to
+ userspace.
+
+- Performance counter support. (Boris)
+
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
new file mode 100644
index 000000000000..238bd1d89d43
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Collabora ltd. */
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+
+static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+ unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
+ unsigned long target_volt, target_rate;
+ int err;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ target_rate = dev_pm_opp_get_freq(opp);
+ target_volt = dev_pm_opp_get_voltage(opp);
+ dev_pm_opp_put(opp);
+
+ if (old_clk_rate == target_rate)
+ return 0;
+
+ /*
+ * If frequency scaling from low to high, adjust voltage first.
+ * If frequency scaling from high to low, adjust frequency first.
+ */
+ if (old_clk_rate < target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err) {
+ dev_err(dev, "Cannot set voltage %lu uV\n",
+ target_volt);
+ return err;
+ }
+ }
+
+ err = clk_set_rate(pfdev->clock, target_rate);
+ if (err) {
+ dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
+ err);
+ regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
+ pfdev->devfreq.cur_volt);
+ return err;
+ }
+
+ if (old_clk_rate > target_rate) {
+ err = regulator_set_voltage(pfdev->regulator, target_volt,
+ target_volt);
+ if (err)
+ dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
+ }
+
+ pfdev->devfreq.cur_freq = target_rate;
+ pfdev->devfreq.cur_volt = target_volt;
+
+ return 0;
+}
+
+static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
+{
+ ktime_t now = ktime_get();
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ pfdev->devfreq.slot[i].busy_time = 0;
+ pfdev->devfreq.slot[i].idle_time = 0;
+ pfdev->devfreq.slot[i].time_last_update = now;
+ }
+}
+
+static int panfrost_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ panfrost_devfreq_update_utilization(pfdev, i);
+ }
+
+ status->current_frequency = clk_get_rate(pfdev->clock);
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
+ pfdev->devfreq.slot[0].idle_time));
+
+ status->busy_time = 0;
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
+ }
+
+	/* We're scheduling only to one core at the moment, so don't divide for now */
+ /* status->busy_time /= NUM_JOB_SLOTS; */
+
+ panfrost_devfreq_reset(pfdev);
+
+ dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
+ status->total_time,
+ status->busy_time / (status->total_time / 100),
+ status->current_frequency / 1000 / 1000);
+
+ return 0;
+}
+
+static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = pfdev->devfreq.cur_freq;
+
+ return 0;
+}
+
+static struct devfreq_dev_profile panfrost_devfreq_profile = {
+ .polling_ms = 50, /* ~3 frames */
+ .target = panfrost_devfreq_target,
+ .get_dev_status = panfrost_devfreq_get_dev_status,
+ .get_cur_freq = panfrost_devfreq_get_cur_freq,
+};
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev)
+{
+ int ret;
+ struct dev_pm_opp *opp;
+
+ if (!pfdev->regulator)
+ return 0;
+
+ ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
+	if (ret == -ENODEV) /* Optional, continue without devfreq */
+		return 0;
+	else if (ret)
+		return ret;
+
+ panfrost_devfreq_reset(pfdev);
+
+ pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
+ panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ dev_pm_opp_put(opp);
+
+ pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
+ &panfrost_devfreq_profile, "simple_ondemand", NULL);
+ if (IS_ERR(pfdev->devfreq.devfreq)) {
+ DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ ret = PTR_ERR(pfdev->devfreq.devfreq);
+ pfdev->devfreq.devfreq = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev)
+{
+ int i;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ panfrost_devfreq_reset(pfdev);
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ pfdev->devfreq.slot[i].busy = false;
+
+ devfreq_resume_device(pfdev->devfreq.devfreq);
+}
+
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
+{
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ devfreq_suspend_device(pfdev->devfreq.devfreq);
+}
+
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ ktime_t now;
+ ktime_t last;
+
+ if (!pfdev->devfreq.devfreq)
+ return;
+
+ now = ktime_get();
+ last = pfdev->devfreq.slot[slot].time_last_update;
+
+	/* If we last recorded a transition to busy, we have been busy since then */
+ if (devfreq_slot->busy)
+ pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ else
+ pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+
+ pfdev->devfreq.slot[slot].time_last_update = now;
+}
+
+/* The job scheduler is expected to call this at every transition busy <-> idle */
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+{
+ struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+
+ panfrost_devfreq_update_utilization(pfdev, slot);
+ devfreq_slot->busy = !devfreq_slot->busy;
+}
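+
+/*
+ * Illustrative call pattern (a sketch; the actual callers live in
+ * panfrost_job.c): the scheduler brackets execution on a slot with
+ * paired calls:
+ *
+ *	panfrost_devfreq_record_transition(pfdev, slot);  // job starts: idle -> busy
+ *	  ... hardware executes the job ...
+ *	panfrost_devfreq_record_transition(pfdev, slot);  // job completes: busy -> idle
+ */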
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
new file mode 100644
index 000000000000..eb999531ed90
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_DEVFREQ_H__
+#define __PANFROST_DEVFREQ_H__
+
+int panfrost_devfreq_init(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_resume(struct panfrost_device *pfdev);
+void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
+
+void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+
+#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
new file mode 100644
index 000000000000..970f669c6d29
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_features.h"
+#include "panfrost_gpu.h"
+#include "panfrost_job.h"
+#include "panfrost_mmu.h"
+
+static int panfrost_reset_init(struct panfrost_device *pfdev)
+{
+ int err;
+
+ pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+ if (IS_ERR(pfdev->rstc)) {
+ dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
+ return PTR_ERR(pfdev->rstc);
+ }
+
+ err = reset_control_deassert(pfdev->rstc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_reset_fini(struct panfrost_device *pfdev)
+{
+ reset_control_assert(pfdev->rstc);
+}
+
+static int panfrost_clk_init(struct panfrost_device *pfdev)
+{
+ int err;
+ unsigned long rate;
+
+ pfdev->clock = devm_clk_get(pfdev->dev, NULL);
+ if (IS_ERR(pfdev->clock)) {
+ dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
+ return PTR_ERR(pfdev->clock);
+ }
+
+ rate = clk_get_rate(pfdev->clock);
+ dev_info(pfdev->dev, "clock rate = %lu\n", rate);
+
+ err = clk_prepare_enable(pfdev->clock);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static void panfrost_clk_fini(struct panfrost_device *pfdev)
+{
+ clk_disable_unprepare(pfdev->clock);
+}
+
+static int panfrost_regulator_init(struct panfrost_device *pfdev)
+{
+ int ret;
+
+ pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
+ if (IS_ERR(pfdev->regulator)) {
+ ret = PTR_ERR(pfdev->regulator);
+ pfdev->regulator = NULL;
+ if (ret == -ENODEV)
+ return 0;
+ dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(pfdev->regulator);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void panfrost_regulator_fini(struct panfrost_device *pfdev)
+{
+ if (pfdev->regulator)
+ regulator_disable(pfdev->regulator);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev)
+{
+ int err;
+ struct resource *res;
+
+ mutex_init(&pfdev->sched_lock);
+ mutex_init(&pfdev->reset_lock);
+ INIT_LIST_HEAD(&pfdev->scheduled_jobs);
+
+ spin_lock_init(&pfdev->hwaccess_lock);
+
+ err = panfrost_clk_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "clk init failed %d\n", err);
+ return err;
+ }
+
+ err = panfrost_regulator_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "regulator init failed %d\n", err);
+ goto err_out0;
+ }
+
+ err = panfrost_reset_init(pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "reset init failed %d\n", err);
+ goto err_out1;
+ }
+
+ res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
+ pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ if (IS_ERR(pfdev->iomem)) {
+ dev_err(pfdev->dev, "failed to ioremap iomem\n");
+ err = PTR_ERR(pfdev->iomem);
+ goto err_out2;
+ }
+
+ err = panfrost_gpu_init(pfdev);
+ if (err)
+ goto err_out2;
+
+ err = panfrost_mmu_init(pfdev);
+ if (err)
+ goto err_out3;
+
+ err = panfrost_job_init(pfdev);
+ if (err)
+ goto err_out4;
+
+ /* runtime PM will wake us up later */
+ panfrost_gpu_power_off(pfdev);
+
+ pm_runtime_set_active(pfdev->dev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+err_out4:
+ panfrost_mmu_fini(pfdev);
+err_out3:
+ panfrost_gpu_fini(pfdev);
+err_out2:
+ panfrost_reset_fini(pfdev);
+err_out1:
+ panfrost_regulator_fini(pfdev);
+err_out0:
+ panfrost_clk_fini(pfdev);
+ return err;
+}
+
+void panfrost_device_fini(struct panfrost_device *pfdev)
+{
+ panfrost_regulator_fini(pfdev);
+ panfrost_clk_fini(pfdev);
+}
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code)
+{
+ switch (exception_code) {
+ /* Non-Fault Status code */
+ case 0x00: return "NOT_STARTED/IDLE/OK";
+ case 0x01: return "DONE";
+ case 0x02: return "INTERRUPTED";
+ case 0x03: return "STOPPED";
+ case 0x04: return "TERMINATED";
+ case 0x08: return "ACTIVE";
+ /* Job exceptions */
+ case 0x40: return "JOB_CONFIG_FAULT";
+ case 0x41: return "JOB_POWER_FAULT";
+ case 0x42: return "JOB_READ_FAULT";
+ case 0x43: return "JOB_WRITE_FAULT";
+ case 0x44: return "JOB_AFFINITY_FAULT";
+ case 0x48: return "JOB_BUS_FAULT";
+ case 0x50: return "INSTR_INVALID_PC";
+ case 0x51: return "INSTR_INVALID_ENC";
+ case 0x52: return "INSTR_TYPE_MISMATCH";
+ case 0x53: return "INSTR_OPERAND_FAULT";
+ case 0x54: return "INSTR_TLS_FAULT";
+ case 0x55: return "INSTR_BARRIER_FAULT";
+ case 0x56: return "INSTR_ALIGN_FAULT";
+ case 0x58: return "DATA_INVALID_FAULT";
+ case 0x59: return "TILE_RANGE_FAULT";
+ case 0x5A: return "ADDR_RANGE_FAULT";
+ case 0x60: return "OUT_OF_MEMORY";
+ /* GPU exceptions */
+ case 0x80: return "DELAYED_BUS_FAULT";
+ case 0x88: return "SHAREABILITY_FAULT";
+ /* MMU exceptions */
+ case 0xC1: return "TRANSLATION_FAULT_LEVEL1";
+ case 0xC2: return "TRANSLATION_FAULT_LEVEL2";
+ case 0xC3: return "TRANSLATION_FAULT_LEVEL3";
+ case 0xC4: return "TRANSLATION_FAULT_LEVEL4";
+ case 0xC8: return "PERMISSION_FAULT";
+ case 0xC9 ... 0xCF: return "PERMISSION_FAULT";
+ case 0xD1: return "TRANSTAB_BUS_FAULT_LEVEL1";
+ case 0xD2: return "TRANSTAB_BUS_FAULT_LEVEL2";
+ case 0xD3: return "TRANSTAB_BUS_FAULT_LEVEL3";
+ case 0xD4: return "TRANSTAB_BUS_FAULT_LEVEL4";
+ case 0xD8: return "ACCESS_FLAG";
+ case 0xD9 ... 0xDF: return "ACCESS_FLAG";
+ case 0xE0 ... 0xE7: return "ADDRESS_SIZE_FAULT";
+ case 0xE8 ... 0xEF: return "MEMORY_ATTRIBUTES_FAULT";
+ }
+
+ return "UNKNOWN";
+}
+
+#ifdef CONFIG_PM
+int panfrost_device_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_gpu_power_on(pfdev);
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_job_enable_interrupts(pfdev);
+ panfrost_devfreq_resume(pfdev);
+
+ return 0;
+}
+
+int panfrost_device_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+
+ if (!panfrost_job_is_idle(pfdev))
+ return -EBUSY;
+
+ panfrost_devfreq_suspend(pfdev);
+ panfrost_gpu_power_off(pfdev);
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
new file mode 100644
index 000000000000..56f452dfb490
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_DEVICE_H__
+#define __PANFROST_DEVICE_H__
+
+#include <linux/spinlock.h>
+#include <drm/drm_device.h>
+#include <drm/drm_mm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_mmu;
+struct panfrost_job_slot;
+struct panfrost_job;
+
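+/* Job slots used by the driver: 0 = fragment, 1 = vertex/tiler, 2 = compute-only. */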
+#define NUM_JOB_SLOTS 3
+
+struct panfrost_features {
+ u16 id;
+ u16 revision;
+
+ u64 shader_present;
+ u64 tiler_present;
+ u64 l2_present;
+ u64 stack_present;
+ u32 as_present;
+ u32 js_present;
+
+ u32 l2_features;
+ u32 core_features;
+ u32 tiler_features;
+ u32 mem_features;
+ u32 mmu_features;
+ u32 thread_features;
+ u32 max_threads;
+ u32 thread_max_workgroup_sz;
+ u32 thread_max_barrier_sz;
+ u32 coherency_features;
+ u32 texture_features[4];
+ u32 js_features[16];
+
+ u32 nr_core_groups;
+
+ unsigned long hw_features[64 / BITS_PER_LONG];
+ unsigned long hw_issues[64 / BITS_PER_LONG];
+};
+
+struct panfrost_devfreq_slot {
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ bool busy;
+};
+
+struct panfrost_device {
+ struct device *dev;
+ struct drm_device *ddev;
+ struct platform_device *pdev;
+
+ spinlock_t hwaccess_lock;
+
+ struct drm_mm mm;
+ spinlock_t mm_lock;
+
+ void __iomem *iomem;
+ struct clk *clock;
+ struct regulator *regulator;
+ struct reset_control *rstc;
+
+ struct panfrost_features features;
+
+ struct panfrost_mmu *mmu;
+ struct panfrost_job_slot *js;
+
+ struct panfrost_job *jobs[NUM_JOB_SLOTS];
+ struct list_head scheduled_jobs;
+
+ struct mutex sched_lock;
+ struct mutex reset_lock;
+
+ struct {
+ struct devfreq *devfreq;
+ struct thermal_cooling_device *cooling;
+ unsigned long cur_freq;
+ unsigned long cur_volt;
+ struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ } devfreq;
+};
+
+struct panfrost_file_priv {
+ struct panfrost_device *pfdev;
+
+ struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
+};
+
+static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
+{
+ return ddev->dev_private;
+}
+
+static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
+{
+ s32 match_id = pfdev->features.id;
+
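+	/* Bifrost IDs set the top nibble; compare only the top and bottom nibbles of the product ID. */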
+ if (match_id & 0xf000)
+ match_id &= 0xf00f;
+ return match_id - id;
+}
+
+static inline bool panfrost_model_eq(struct panfrost_device *pfdev, s32 id)
+{
+ return !panfrost_model_cmp(pfdev, id);
+}
+
+int panfrost_device_init(struct panfrost_device *pfdev);
+void panfrost_device_fini(struct panfrost_device *pfdev);
+
+int panfrost_device_resume(struct device *dev);
+int panfrost_device_suspend(struct device *dev);
+
+const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
new file mode 100644
index 000000000000..94b0819ad50b
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -0,0 +1,474 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pagemap.h>
+#include <linux/pm_runtime.h>
+#include <drm/panfrost_drm.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_syncobj.h>
+#include <drm/drm_utils.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+#include "panfrost_job.h"
+#include "panfrost_gpu.h"
+
+static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct drm_file *file)
+{
+ struct drm_panfrost_get_param *param = data;
+ struct panfrost_device *pfdev = ddev->dev_private;
+
+ if (param->pad != 0)
+ return -EINVAL;
+
+ switch (param->param) {
+ case DRM_PANFROST_PARAM_GPU_PROD_ID:
+ param->value = pfdev->features.id;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ int ret;
+ struct drm_gem_shmem_object *shmem;
+ struct drm_panfrost_create_bo *args = data;
+
+ if (!args->size || args->flags || args->pad)
+ return -EINVAL;
+
+ shmem = drm_gem_shmem_create_with_handle(file, dev, args->size,
+ &args->handle);
+ if (IS_ERR(shmem))
+ return PTR_ERR(shmem);
+
+ ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base));
+ if (ret)
+ goto err_free;
+
+ args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT;
+
+ return 0;
+
+err_free:
+ drm_gem_object_put_unlocked(&shmem->base);
+ return ret;
+}
+
+/**
+ * panfrost_lookup_bos() - Sets up job->bo[] with the GEM objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve handles from userspace to BOs and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the BOs on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_lookup_bos(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ job->bo_count = args->bo_handle_count;
+
+ if (!job->bo_count)
+ return 0;
+
+ job->implicit_fences = kvmalloc_array(job->bo_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->implicit_fences)
+ return -ENOMEM;
+
+ return drm_gem_objects_lookup(file_priv,
+ (void __user *)(uintptr_t)args->bo_handles,
+ job->bo_count, &job->bos);
+}
+
+/**
+ * panfrost_copy_in_sync() - Sets up job->in_fences[] with the sync objects
+ * referenced by the job.
+ * @dev: DRM device
+ * @file_priv: DRM file for this fd
+ * @args: IOCTL args
+ * @job: job being set up
+ *
+ * Resolve syncobjs from userspace to fences and attach them to job.
+ *
+ * Note that this function doesn't need to unreference the fences on
+ * failure, because that will happen at panfrost_job_cleanup() time.
+ */
+static int
+panfrost_copy_in_sync(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_panfrost_submit *args,
+ struct panfrost_job *job)
+{
+ u32 *handles;
+ int ret = 0;
+ int i;
+
+ job->in_fence_count = args->in_sync_count;
+
+ if (!job->in_fence_count)
+ return 0;
+
+ job->in_fences = kvmalloc_array(job->in_fence_count,
+ sizeof(struct dma_fence *),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!job->in_fences) {
+ DRM_DEBUG("Failed to allocate job in fences\n");
+ return -ENOMEM;
+ }
+
+ handles = kvmalloc_array(job->in_fence_count, sizeof(u32), GFP_KERNEL);
+ if (!handles) {
+ ret = -ENOMEM;
+ DRM_DEBUG("Failed to allocate incoming syncobj handles\n");
+ goto fail;
+ }
+
+ if (copy_from_user(handles,
+ (void __user *)(uintptr_t)args->in_syncs,
+ job->in_fence_count * sizeof(u32))) {
+ ret = -EFAULT;
+ DRM_DEBUG("Failed to copy in syncobj handles\n");
+ goto fail;
+ }
+
+ for (i = 0; i < job->in_fence_count; i++) {
+ ret = drm_syncobj_find_fence(file_priv, handles[i], 0, 0,
+ &job->in_fences[i]);
+ if (ret == -EINVAL)
+ goto fail;
+ }
+
+fail:
+ kvfree(handles);
+ return ret;
+}
+
+static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct drm_panfrost_submit *args = data;
+ struct drm_syncobj *sync_out = NULL;
+ struct panfrost_job *job;
+ int ret = 0;
+
+ if (!args->jc)
+ return -EINVAL;
+
+ if (args->requirements && args->requirements != PANFROST_JD_REQ_FS)
+ return -EINVAL;
+
+ if (args->out_sync > 0) {
+ sync_out = drm_syncobj_find(file, args->out_sync);
+ if (!sync_out)
+ return -ENODEV;
+ }
+
+ job = kzalloc(sizeof(*job), GFP_KERNEL);
+ if (!job) {
+ ret = -ENOMEM;
+ goto fail_out_sync;
+ }
+
+ kref_init(&job->refcount);
+
+ job->pfdev = pfdev;
+ job->jc = args->jc;
+ job->requirements = args->requirements;
+ job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
+ job->file_priv = file->driver_priv;
+
+ ret = panfrost_copy_in_sync(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_lookup_bos(dev, file, args, job);
+ if (ret)
+ goto fail_job;
+
+ ret = panfrost_job_push(job);
+ if (ret)
+ goto fail_job;
+
+ /* Update the return sync object for the job */
+ if (sync_out)
+ drm_syncobj_replace_fence(sync_out, job->render_done_fence);
+
+fail_job:
+ panfrost_job_put(job);
+fail_out_sync:
+	if (sync_out)
+		drm_syncobj_put(sync_out);
+
+ return ret;
+}
+
+static int
+panfrost_ioctl_wait_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ long ret;
+ struct drm_panfrost_wait_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
+
+ if (args->pad)
+ return -EINVAL;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj)
+ return -ENOENT;
+
+ ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true,
+ true, timeout);
+ if (!ret)
+ ret = timeout ? -ETIMEDOUT : -EBUSY;
+
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_mmap_bo *args = data;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (args->flags != 0) {
+ DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
+ return -EINVAL;
+ }
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret == 0)
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ drm_gem_object_put_unlocked(gem_obj);
+
+ return ret;
+}
+
+static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_panfrost_get_bo_offset *args = data;
+ struct drm_gem_object *gem_obj;
+ struct panfrost_gem_object *bo;
+
+ gem_obj = drm_gem_object_lookup(file_priv, args->handle);
+ if (!gem_obj) {
+ DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
+ return -ENOENT;
+ }
+ bo = to_panfrost_bo(gem_obj);
+
+ args->offset = bo->node.start << PAGE_SHIFT;
+
+ drm_gem_object_put_unlocked(gem_obj);
+ return 0;
+}
+
+static int
+panfrost_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_file_priv *panfrost_priv;
+
+ panfrost_priv = kzalloc(sizeof(*panfrost_priv), GFP_KERNEL);
+ if (!panfrost_priv)
+ return -ENOMEM;
+
+ panfrost_priv->pfdev = pfdev;
+ file->driver_priv = panfrost_priv;
+
+ return panfrost_job_open(panfrost_priv);
+}
+
+static void
+panfrost_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct panfrost_file_priv *panfrost_priv = file->driver_priv;
+
+ panfrost_job_close(panfrost_priv);
+
+ kfree(panfrost_priv);
+}
+
+/* DRM_AUTH is required on SUBMIT for now, while all clients share a single
+ * address space. Note that render nodes would be able to submit jobs that
+ * could access BOs from clients authenticated with the master node.
+ */
+static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
+#define PANFROST_IOCTL(n, func, flags) \
+ DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags)
+
+ PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH),
+ PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_PARAM, get_param, DRM_RENDER_ALLOW),
+ PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW),
+};
+
+DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+
+static struct drm_driver panfrost_drm_driver = {
+ .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_SYNCOBJ,
+ .open = panfrost_open,
+ .postclose = panfrost_postclose,
+ .ioctls = panfrost_drm_driver_ioctls,
+ .num_ioctls = ARRAY_SIZE(panfrost_drm_driver_ioctls),
+ .fops = &panfrost_drm_driver_fops,
+ .name = "panfrost",
+ .desc = "panfrost DRM",
+ .date = "20180908",
+ .major = 1,
+ .minor = 0,
+
+ .gem_create_object = panfrost_gem_create_object,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = panfrost_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
+};
+
+static int panfrost_probe(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev;
+ struct drm_device *ddev;
+ int err;
+
+ pfdev = devm_kzalloc(&pdev->dev, sizeof(*pfdev), GFP_KERNEL);
+ if (!pfdev)
+ return -ENOMEM;
+
+ pfdev->pdev = pdev;
+ pfdev->dev = &pdev->dev;
+
+ platform_set_drvdata(pdev, pfdev);
+
+	/* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ ddev->dev_private = pfdev;
+ pfdev->ddev = ddev;
+
+ spin_lock_init(&pfdev->mm_lock);
+
+	/* 4GB of address space is enough for now; the hardware supports up to 48 bits */
+ drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
+
+ pm_runtime_use_autosuspend(pfdev->dev);
+ pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */
+ pm_runtime_enable(pfdev->dev);
+
+ err = panfrost_device_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during GPU init\n");
+ goto err_out0;
+ }
+
+ err = panfrost_devfreq_init(pfdev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Fatal error during devfreq init\n");
+ goto err_out1;
+ }
+
+ /*
+ * Register the DRM device with the core and the connectors with
+ * sysfs
+ */
+ err = drm_dev_register(ddev, 0);
+ if (err < 0)
+ goto err_out1;
+
+ return 0;
+
+err_out1:
+ panfrost_device_fini(pfdev);
+err_out0:
+ pm_runtime_disable(pfdev->dev);
+ drm_dev_put(ddev);
+ return err;
+}
+
+static int panfrost_remove(struct platform_device *pdev)
+{
+ struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct drm_device *ddev = pfdev->ddev;
+
+ drm_dev_unregister(ddev);
+ pm_runtime_get_sync(pfdev->dev);
+ pm_runtime_put_sync_autosuspend(pfdev->dev);
+ pm_runtime_disable(pfdev->dev);
+ panfrost_device_fini(pfdev);
+ drm_dev_put(ddev);
+ return 0;
+}
+
+static const struct of_device_id dt_match[] = {
+ { .compatible = "arm,mali-t604" },
+ { .compatible = "arm,mali-t624" },
+ { .compatible = "arm,mali-t628" },
+ { .compatible = "arm,mali-t720" },
+ { .compatible = "arm,mali-t760" },
+ { .compatible = "arm,mali-t820" },
+ { .compatible = "arm,mali-t830" },
+ { .compatible = "arm,mali-t860" },
+ { .compatible = "arm,mali-t880" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+static const struct dev_pm_ops panfrost_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(panfrost_device_suspend, panfrost_device_resume, NULL)
+};
+
+static struct platform_driver panfrost_driver = {
+ .probe = panfrost_probe,
+ .remove = panfrost_remove,
+ .driver = {
+ .name = "panfrost",
+ .pm = &panfrost_pm_ops,
+ .of_match_table = dt_match,
+ },
+};
+module_platform_driver(panfrost_driver);
+
+MODULE_AUTHOR("Panfrost Project Developers");
+MODULE_DESCRIPTION("Panfrost DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panfrost/panfrost_features.h b/drivers/gpu/drm/panfrost/panfrost_features.h
new file mode 100644
index 000000000000..5056777c7744
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_features.h
@@ -0,0 +1,309 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_FEATURES_H__
+#define __PANFROST_FEATURES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+enum panfrost_hw_feature {
+ HW_FEATURE_JOBCHAIN_DISAMBIGUATION,
+ HW_FEATURE_PWRON_DURING_PWROFF_TRANS,
+ HW_FEATURE_XAFFINITY,
+ HW_FEATURE_OUT_OF_ORDER_EXEC,
+ HW_FEATURE_MRT,
+ HW_FEATURE_BRNDOUT_CC,
+ HW_FEATURE_INTERPIPE_REG_ALIASING,
+ HW_FEATURE_LD_ST_TILEBUFFER,
+ HW_FEATURE_MSAA_16X,
+ HW_FEATURE_32_BIT_UNIFORM_ADDRESS,
+ HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL,
+ HW_FEATURE_OPTIMIZED_COVERAGE_MASK,
+ HW_FEATURE_T7XX_PAIRING_RULES,
+ HW_FEATURE_LD_ST_LEA_TEX,
+ HW_FEATURE_LINEAR_FILTER_FLOAT,
+ HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4,
+ HW_FEATURE_IMAGES_IN_FRAGMENT_SHADERS,
+ HW_FEATURE_TEST4_DATUM_MODE,
+ HW_FEATURE_NEXT_INSTRUCTION_TYPE,
+ HW_FEATURE_BRNDOUT_KILL,
+ HW_FEATURE_WARPING,
+ HW_FEATURE_V4,
+ HW_FEATURE_FLUSH_REDUCTION,
+ HW_FEATURE_PROTECTED_MODE,
+ HW_FEATURE_COHERENCY_REG,
+ HW_FEATURE_PROTECTED_DEBUG_MODE,
+ HW_FEATURE_AARCH64_MMU,
+ HW_FEATURE_TLS_HASHING,
+ HW_FEATURE_THREAD_GROUP_SPLIT,
+ HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG,
+};
+
+#define hw_features_t600 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t620 (\
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+#define hw_features_t720 (\
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_OPTIMIZED_COVERAGE_MASK) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_WORKGROUP_ROUND_MULTIPLE_OF_4) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_V4))
+
+
+#define hw_features_t760 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+// T860
+#define hw_features_t860 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t880 hw_features_t860
+
+#define hw_features_t830 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_t820 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT))
+
+#define hw_features_g71 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g72 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g51 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g52 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG))
+
+#define hw_features_g76 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+#define hw_features_g31 (\
+ BIT_ULL(HW_FEATURE_JOBCHAIN_DISAMBIGUATION) | \
+ BIT_ULL(HW_FEATURE_PWRON_DURING_PWROFF_TRANS) | \
+ BIT_ULL(HW_FEATURE_XAFFINITY) | \
+ BIT_ULL(HW_FEATURE_WARPING) | \
+ BIT_ULL(HW_FEATURE_INTERPIPE_REG_ALIASING) | \
+ BIT_ULL(HW_FEATURE_32_BIT_UNIFORM_ADDRESS) | \
+ BIT_ULL(HW_FEATURE_ATTR_AUTO_TYPE_INFERRAL) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_CC) | \
+ BIT_ULL(HW_FEATURE_BRNDOUT_KILL) | \
+ BIT_ULL(HW_FEATURE_LD_ST_LEA_TEX) | \
+ BIT_ULL(HW_FEATURE_LD_ST_TILEBUFFER) | \
+ BIT_ULL(HW_FEATURE_LINEAR_FILTER_FLOAT) | \
+ BIT_ULL(HW_FEATURE_MRT) | \
+ BIT_ULL(HW_FEATURE_MSAA_16X) | \
+ BIT_ULL(HW_FEATURE_NEXT_INSTRUCTION_TYPE) | \
+ BIT_ULL(HW_FEATURE_OUT_OF_ORDER_EXEC) | \
+ BIT_ULL(HW_FEATURE_T7XX_PAIRING_RULES) | \
+ BIT_ULL(HW_FEATURE_TEST4_DATUM_MODE) | \
+ BIT_ULL(HW_FEATURE_THREAD_GROUP_SPLIT) | \
+ BIT_ULL(HW_FEATURE_FLUSH_REDUCTION) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_MODE) | \
+ BIT_ULL(HW_FEATURE_PROTECTED_DEBUG_MODE) | \
+ BIT_ULL(HW_FEATURE_COHERENCY_REG) | \
+ BIT_ULL(HW_FEATURE_AARCH64_MMU) | \
+ BIT_ULL(HW_FEATURE_TLS_HASHING) | \
+ BIT_ULL(HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+
+static inline bool panfrost_has_hw_feature(struct panfrost_device *pfdev,
+ enum panfrost_hw_feature feat)
+{
+ return test_bit(feat, pfdev->features.hw_features);
+}
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
new file mode 100644
index 000000000000..a5528a360ef4
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+
+#include <drm/panfrost_drm.h>
+#include "panfrost_device.h"
+#include "panfrost_gem.h"
+#include "panfrost_mmu.h"
+
+/* Called by the DRM core on the last userspace/kernel unreference of the
+ * BO.
+ */
+static void panfrost_gem_free_object(struct drm_gem_object *obj)
+{
+ struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ struct panfrost_device *pfdev = obj->dev->dev_private;
+
+ panfrost_mmu_unmap(bo);
+
+ spin_lock(&pfdev->mm_lock);
+ drm_mm_remove_node(&bo->node);
+ spin_unlock(&pfdev->mm_lock);
+
+ drm_gem_shmem_free_object(obj);
+}
+
+static const struct drm_gem_object_funcs panfrost_gem_funcs = {
+ .free = panfrost_gem_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/**
+ * panfrost_gem_create_object - Implementation of driver->gem_create_object.
+ * @dev: DRM device
+ * @size: Size in bytes of the memory the object will reference
+ *
+ * This lets the GEM helpers allocate object structs for us, and keep
+ * our BO stats correct.
+ */
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
+{
+ int ret;
+ struct panfrost_device *pfdev = dev->dev_private;
+ struct panfrost_gem_object *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ obj->base.base.funcs = &panfrost_gem_funcs;
+
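+	/* Reserve a GPU virtual address range for this BO in the device-wide address space. */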
+ spin_lock(&pfdev->mm_lock);
+ ret = drm_mm_insert_node(&pfdev->mm, &obj->node,
+ roundup(size, PAGE_SIZE) >> PAGE_SHIFT);
+ spin_unlock(&pfdev->mm_lock);
+ if (ret)
+ goto free_obj;
+
+ return &obj->base.base;
+
+free_obj:
+ kfree(obj);
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct drm_gem_object *obj;
+ struct panfrost_gem_object *pobj;
+
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ pobj = to_panfrost_bo(obj);
+
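+	/* Share the exporter's reservation object so implicit fencing stays coherent across the import. */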
+ obj->resv = attach->dmabuf->resv;
+
+ panfrost_mmu_map(pobj);
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
new file mode 100644
index 000000000000..045000eb5fcf
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_GEM_H__
+#define __PANFROST_GEM_H__
+
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_mm.h>
+
+struct panfrost_gem_object {
+ struct drm_gem_shmem_object base;
+
+ struct drm_mm_node node;
+};
+
+static inline
+struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
+{
+ return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
+}
+
+struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
+
+struct drm_gem_object *
+panfrost_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
new file mode 100644
index 000000000000..58ef25573cda
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -0,0 +1,367 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/platform_device.h>
+
+#include "panfrost_device.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gpu.h"
+#include "panfrost_regs.h"
+
+#define gpu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define gpu_read(dev, reg) readl(dev->iomem + reg)
+
+static irqreturn_t panfrost_gpu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 state = gpu_read(pfdev, GPU_INT_STAT);
+ u32 fault_status = gpu_read(pfdev, GPU_FAULT_STATUS);
+
+ if (!state)
+ return IRQ_NONE;
+
+ if (state & GPU_IRQ_MASK_ERROR) {
+ u64 address = (u64) gpu_read(pfdev, GPU_FAULT_ADDRESS_HI) << 32;
+ address |= gpu_read(pfdev, GPU_FAULT_ADDRESS_LO);
+
+ dev_warn(pfdev->dev, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
+ fault_status & 0xFF, panfrost_exception_name(pfdev, fault_status),
+ address);
+
+ if (state & GPU_IRQ_MULTIPLE_FAULT)
+ dev_warn(pfdev->dev, "There were multiple GPU faults - some have not been reported\n");
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, state);
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ gpu_write(pfdev, GPU_INT_MASK, 0);
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_RESET_COMPLETED);
+ gpu_write(pfdev, GPU_CMD, GPU_CMD_SOFT_RESET);
+
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + GPU_INT_RAWSTAT,
+ val, val & GPU_IRQ_RESET_COMPLETED, 100, 10000);
+
+ if (ret) {
+ dev_err(pfdev->dev, "gpu soft reset timed out\n");
+ return ret;
+ }
+
+ gpu_write(pfdev, GPU_INT_CLEAR, GPU_IRQ_MASK_ALL);
+ gpu_write(pfdev, GPU_INT_MASK, GPU_IRQ_MASK_ALL);
+
+ return 0;
+}
+
+static void panfrost_gpu_init_quirks(struct panfrost_device *pfdev)
+{
+ u32 quirks = 0;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8443) ||
+ panfrost_has_hw_issue(pfdev, HW_ISSUE_11035))
+ quirks |= SC_LS_PAUSEBUFFER_DISABLE;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10327))
+ quirks |= SC_SDC_DISABLE_OQ_DISCARD;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10797))
+ quirks |= SC_ENABLE_TEXGRD_FLAGS;
+
+ if (!panfrost_has_hw_issue(pfdev, GPUCORE_1619)) {
+ if (panfrost_model_cmp(pfdev, 0x750) < 0) /* T60x, T62x, T72x */
+ quirks |= SC_LS_ATTR_CHECK_DISABLE;
+ else if (panfrost_model_cmp(pfdev, 0x880) <= 0) /* T76x, T8xx */
+ quirks |= SC_LS_ALLOW_ATTR_TYPES;
+ }
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_TLS_HASHING))
+ quirks |= SC_TLS_HASH_ENABLE;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_SHADER_CONFIG, quirks);
+
+
+ quirks = gpu_read(pfdev, GPU_TILER_CONFIG);
+
+ /* Set tiler clock gate override if required */
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_T76X_3953))
+ quirks |= TC_CLOCK_GATE_OVERRIDE;
+
+ gpu_write(pfdev, GPU_TILER_CONFIG, quirks);
+
+
+ quirks = gpu_read(pfdev, GPU_L2_MMU_CONFIG);
+
+ /* Limit read & write ID width for AXI */
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_3BIT_EXT_RW_L2_MMU_CONFIG))
+ quirks &= ~(L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES);
+ else
+ quirks &= ~(L2_MMU_CONFIG_LIMIT_EXTERNAL_READS |
+ L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES);
+
+ gpu_write(pfdev, GPU_L2_MMU_CONFIG, quirks);
+
+ quirks = 0;
+ if ((panfrost_model_eq(pfdev, 0x860) || panfrost_model_eq(pfdev, 0x880)) &&
+ pfdev->features.revision >= 0x2000)
+ quirks |= JM_MAX_JOB_THROTTLE_LIMIT << JM_JOB_THROTTLE_LIMIT_SHIFT;
+ else if (panfrost_model_eq(pfdev, 0x6000) &&
+ pfdev->features.coherency_features == COHERENCY_ACE)
+ quirks |= (COHERENCY_ACE_LITE | COHERENCY_ACE) <<
+ JM_FORCE_COHERENCY_FEATURES_SHIFT;
+
+ if (quirks)
+ gpu_write(pfdev, GPU_JM_CONFIG, quirks);
+}
+
+#define MAX_HW_REVS 6
+
+struct panfrost_model {
+ const char *name;
+ u32 id;
+ u32 id_mask;
+ u64 features;
+ u64 issues;
+ struct {
+ u32 revision;
+ u64 issues;
+ } revs[MAX_HW_REVS];
+};
+
+#define GPU_MODEL(_name, _id, ...) \
+{\
+ .name = __stringify(_name), \
+ .id = _id, \
+ .features = hw_features_##_name, \
+ .issues = hw_issues_##_name, \
+ .revs = { __VA_ARGS__ }, \
+}
+
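+/*
+ * GPU_REV_EXT() packs major/patch/status into a revision value and pairs it
+ * with the matching hw_issues_<name>_r<rev>p<p><stat> bitmask.
+ */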
+#define GPU_REV_EXT(name, _rev, _p, _s, stat) \
+{\
+ .revision = (_rev) << 12 | (_p) << 4 | (_s), \
+ .issues = hw_issues_##name##_r##_rev##p##_p##stat, \
+}
+#define GPU_REV(name, r, p) GPU_REV_EXT(name, r, p, 0, )
+
+static const struct panfrost_model gpu_models[] = {
+ /* T60x has an oddball version */
+ GPU_MODEL(t600, 0x600,
+ GPU_REV_EXT(t600, 0, 0, 1, _15dev0)),
+ GPU_MODEL(t620, 0x620,
+ GPU_REV(t620, 0, 1), GPU_REV(t620, 1, 0)),
+ GPU_MODEL(t720, 0x720),
+ GPU_MODEL(t760, 0x750,
+ GPU_REV(t760, 0, 0), GPU_REV(t760, 0, 1),
+ GPU_REV_EXT(t760, 0, 1, 0, _50rel0),
+ GPU_REV(t760, 0, 2), GPU_REV(t760, 0, 3)),
+ GPU_MODEL(t820, 0x820),
+ GPU_MODEL(t830, 0x830),
+ GPU_MODEL(t860, 0x860),
+ GPU_MODEL(t880, 0x880),
+
+ GPU_MODEL(g71, 0x6000,
+ GPU_REV_EXT(g71, 0, 0, 1, _05dev0)),
+ GPU_MODEL(g72, 0x6001),
+ GPU_MODEL(g51, 0x7000),
+ GPU_MODEL(g76, 0x7001),
+ GPU_MODEL(g52, 0x7002),
+ GPU_MODEL(g31, 0x7003,
+ GPU_REV(g31, 1, 0)),
+};
+
+static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
+{
+ u32 gpu_id, num_js, major, minor, status, rev;
+ const char *name = "unknown";
+ u64 hw_feat = 0;
+ u64 hw_issues = hw_issues_all;
+ const struct panfrost_model *model;
+ int i;
+
+ pfdev->features.l2_features = gpu_read(pfdev, GPU_L2_FEATURES);
+ pfdev->features.core_features = gpu_read(pfdev, GPU_CORE_FEATURES);
+ pfdev->features.tiler_features = gpu_read(pfdev, GPU_TILER_FEATURES);
+ pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
+ pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
+ pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+ pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
+ for (i = 0; i < 4; i++)
+ pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
+
+ pfdev->features.as_present = gpu_read(pfdev, GPU_AS_PRESENT);
+
+ pfdev->features.js_present = gpu_read(pfdev, GPU_JS_PRESENT);
+ num_js = hweight32(pfdev->features.js_present);
+ for (i = 0; i < num_js; i++)
+ pfdev->features.js_features[i] = gpu_read(pfdev, GPU_JS_FEATURES(i));
+
+ pfdev->features.shader_present = gpu_read(pfdev, GPU_SHADER_PRESENT_LO);
+ pfdev->features.shader_present |= (u64)gpu_read(pfdev, GPU_SHADER_PRESENT_HI) << 32;
+
+ pfdev->features.tiler_present = gpu_read(pfdev, GPU_TILER_PRESENT_LO);
+ pfdev->features.tiler_present |= (u64)gpu_read(pfdev, GPU_TILER_PRESENT_HI) << 32;
+
+ pfdev->features.l2_present = gpu_read(pfdev, GPU_L2_PRESENT_LO);
+ pfdev->features.l2_present |= (u64)gpu_read(pfdev, GPU_L2_PRESENT_HI) << 32;
+ pfdev->features.nr_core_groups = hweight64(pfdev->features.l2_present);
+
+ pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO);
+ pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32;
+
+ gpu_id = gpu_read(pfdev, GPU_ID);
+ pfdev->features.revision = gpu_id & 0xffff;
+ pfdev->features.id = gpu_id >> 16;
+
+ /* The T60x has an oddball ID value. Fix it up to the standard Midgard
+ * format so we (and userspace) don't have to special case it.
+ */
+ if (pfdev->features.id == 0x6956)
+ pfdev->features.id = 0x0600;
+
+ major = (pfdev->features.revision >> 12) & 0xf;
+ minor = (pfdev->features.revision >> 4) & 0xff;
+ status = pfdev->features.revision & 0xf;
+ rev = pfdev->features.revision;
+
+ gpu_id = pfdev->features.id;
+
+ for (model = gpu_models; model->name; model++) {
+ int best = -1;
+
+ if (!panfrost_model_eq(pfdev, model->id))
+ continue;
+
+ name = model->name;
+ hw_feat = model->features;
+ hw_issues |= model->issues;
+ for (i = 0; i < MAX_HW_REVS; i++) {
+ if (model->revs[i].revision == rev) {
+ best = i;
+ break;
+ } else if (model->revs[i].revision == (rev & ~0xf))
+ best = i;
+ }
+
+ if (best >= 0)
+ hw_issues |= model->revs[best].issues;
+
+ break;
+ }
+
+ bitmap_from_u64(pfdev->features.hw_features, hw_feat);
+ bitmap_from_u64(pfdev->features.hw_issues, hw_issues);
+
+ dev_info(pfdev->dev, "mali-%s id 0x%x major 0x%x minor 0x%x status 0x%x",
+ name, gpu_id, major, minor, status);
+ dev_info(pfdev->dev, "features: %64pb, issues: %64pb",
+ pfdev->features.hw_features,
+ pfdev->features.hw_issues);
+
+ dev_info(pfdev->dev, "Features: L2:0x%08x Shader:0x%08x Tiler:0x%08x Mem:0x%0x MMU:0x%08x AS:0x%x JS:0x%x",
+ pfdev->features.l2_features,
+ pfdev->features.core_features,
+ pfdev->features.tiler_features,
+ pfdev->features.mem_features,
+ pfdev->features.mmu_features,
+ pfdev->features.as_present,
+ pfdev->features.js_present);
+
+ dev_info(pfdev->dev, "shader_present=0x%0llx l2_present=0x%0llx",
+ pfdev->features.shader_present, pfdev->features.l2_present);
+}
+
+void panfrost_gpu_power_on(struct panfrost_device *pfdev)
+{
+ int ret;
+ u32 val;
+
+ /* Just turn on everything for now */
+ gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
+ val, val == pfdev->features.l2_present, 100, 1000);
+
+ gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
+ val, val == pfdev->features.stack_present, 100, 1000);
+
+ gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ val, val == pfdev->features.shader_present, 100, 1000);
+
+ gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
+ ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ val, val == pfdev->features.tiler_present, 100, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu");
+}
+
+void panfrost_gpu_power_off(struct panfrost_device *pfdev)
+{
+ gpu_write(pfdev, TILER_PWROFF_LO, 0);
+ gpu_write(pfdev, SHADER_PWROFF_LO, 0);
+ gpu_write(pfdev, STACK_PWROFF_LO, 0);
+ gpu_write(pfdev, L2_PWROFF_LO, 0);
+}
+
+int panfrost_gpu_init(struct panfrost_device *pfdev)
+{
+ int err, irq;
+
+ err = panfrost_gpu_soft_reset(pfdev);
+ if (err)
+ return err;
+
+ panfrost_gpu_init_features(pfdev);
+
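+	/* Size the DMA mask from the address-bits field (bits 15:8) of MMU_FEATURES. */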
+ dma_set_mask_and_coherent(pfdev->dev,
+ DMA_BIT_MASK(FIELD_GET(0xff00, pfdev->features.mmu_features)));
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "gpu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
+ IRQF_SHARED, "gpu", pfdev);
+ if (err) {
+ dev_err(pfdev->dev, "failed to request gpu irq");
+ return err;
+ }
+
+ panfrost_gpu_init_quirks(pfdev);
+ panfrost_gpu_power_on(pfdev);
+
+ return 0;
+}
+
+void panfrost_gpu_fini(struct panfrost_device *pfdev)
+{
+ panfrost_gpu_power_off(pfdev);
+}
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev)
+{
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ return gpu_read(pfdev, GPU_LATEST_FLUSH_ID);
+ return 0;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.h b/drivers/gpu/drm/panfrost/panfrost_gpu.h
new file mode 100644
index 000000000000..4112412087b2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_GPU_H__
+#define __PANFROST_GPU_H__
+
+struct panfrost_device;
+
+int panfrost_gpu_init(struct panfrost_device *pfdev);
+void panfrost_gpu_fini(struct panfrost_device *pfdev);
+
+u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
+
+int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
+void panfrost_gpu_power_on(struct panfrost_device *pfdev);
+void panfrost_gpu_power_off(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
new file mode 100644
index 000000000000..cec6dcdadb5c
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
+/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
+#ifndef __PANFROST_ISSUES_H__
+#define __PANFROST_ISSUES_H__
+
+#include <linux/bitops.h>
+
+#include "panfrost_device.h"
+
+/*
+ * This is not a complete list of issues, but only the ones the driver needs
+ * to care about.
+ */
+enum panfrost_hw_issue {
+ HW_ISSUE_6367,
+ HW_ISSUE_6787,
+ HW_ISSUE_8186,
+ HW_ISSUE_8245,
+ HW_ISSUE_8316,
+ HW_ISSUE_8394,
+ HW_ISSUE_8401,
+ HW_ISSUE_8408,
+ HW_ISSUE_8443,
+ HW_ISSUE_8987,
+ HW_ISSUE_9435,
+ HW_ISSUE_9510,
+ HW_ISSUE_9630,
+ HW_ISSUE_10327,
+ HW_ISSUE_10649,
+ HW_ISSUE_10676,
+ HW_ISSUE_10797,
+ HW_ISSUE_10817,
+ HW_ISSUE_10883,
+ HW_ISSUE_10959,
+ HW_ISSUE_10969,
+ HW_ISSUE_11020,
+ HW_ISSUE_11024,
+ HW_ISSUE_11035,
+ HW_ISSUE_11056,
+ HW_ISSUE_T76X_3542,
+ HW_ISSUE_T76X_3953,
+ HW_ISSUE_TMIX_8463,
+ GPUCORE_1619,
+ HW_ISSUE_TMIX_8438,
+ HW_ISSUE_TGOX_R1_1234,
+ HW_ISSUE_END
+};
+
+#define hw_issues_all (\
+ BIT_ULL(HW_ISSUE_9435))
+
+#define hw_issues_t600 (\
+ BIT_ULL(HW_ISSUE_6367) | \
+ BIT_ULL(HW_ISSUE_6787) | \
+ BIT_ULL(HW_ISSUE_8408) | \
+ BIT_ULL(HW_ISSUE_9510) | \
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11035) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t600_r0p0_15dev0 (\
+ BIT_ULL(HW_ISSUE_8186) | \
+ BIT_ULL(HW_ISSUE_8245) | \
+ BIT_ULL(HW_ISSUE_8316) | \
+ BIT_ULL(HW_ISSUE_8394) | \
+ BIT_ULL(HW_ISSUE_8401) | \
+ BIT_ULL(HW_ISSUE_8443) | \
+ BIT_ULL(HW_ISSUE_8987) | \
+ BIT_ULL(HW_ISSUE_9630) | \
+ BIT_ULL(HW_ISSUE_10969) | \
+ BIT_ULL(GPUCORE_1619))
+
+#define hw_issues_t620 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_10959) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t620_r0p1 (\
+ BIT_ULL(HW_ISSUE_10327) | \
+ BIT_ULL(HW_ISSUE_10676) | \
+ BIT_ULL(HW_ISSUE_10817) | \
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_11035))
+
+#define hw_issues_t620_r1p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024))
+
+#define hw_issues_t720 (\
+ BIT_ULL(HW_ISSUE_10649) | \
+ BIT_ULL(HW_ISSUE_10797) | \
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_11056) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t760_r0p0 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p1_50rel0 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p2 (\
+ BIT_ULL(HW_ISSUE_11020) | \
+ BIT_ULL(HW_ISSUE_11024) | \
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t760_r0p3 (\
+ BIT_ULL(HW_ISSUE_T76X_3542))
+
+#define hw_issues_t820 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t830 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t860 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_t880 (\
+ BIT_ULL(HW_ISSUE_10883) | \
+ BIT_ULL(HW_ISSUE_T76X_3953) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g31 0
+
+#define hw_issues_g31_r1p0 (\
+ BIT_ULL(HW_ISSUE_TGOX_R1_1234))
+
+#define hw_issues_g51 0
+
+#define hw_issues_g52 0
+
+#define hw_issues_g71 (\
+ BIT_ULL(HW_ISSUE_TMIX_8463) | \
+ BIT_ULL(HW_ISSUE_TMIX_8438))
+
+#define hw_issues_g71_r0p0_05dev0 (\
+ BIT_ULL(HW_ISSUE_T76X_3953))
+
+#define hw_issues_g72 0
+
+#define hw_issues_g76 0
+
+static inline bool panfrost_has_hw_issue(struct panfrost_device *pfdev,
+ enum panfrost_hw_issue issue)
+{
+ return test_bit(issue, pfdev->features.hw_issues);
+}
+
+#endif /* __PANFROST_ISSUES_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
new file mode 100644
index 000000000000..a5716c8fe8b3
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/* Copyright 2019 Collabora ltd. */
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reservation.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/panfrost_drm.h>
+
+#include "panfrost_device.h"
+#include "panfrost_devfreq.h"
+#include "panfrost_job.h"
+#include "panfrost_features.h"
+#include "panfrost_issues.h"
+#include "panfrost_gem.h"
+#include "panfrost_regs.h"
+#include "panfrost_gpu.h"
+#include "panfrost_mmu.h"
+
+#define job_write(dev, reg, data) writel(data, dev->iomem + (reg))
+#define job_read(dev, reg) readl(dev->iomem + (reg))
+
+struct panfrost_queue_state {
+ struct drm_gpu_scheduler sched;
+
+ u64 fence_context;
+ u64 emit_seqno;
+};
+
+struct panfrost_job_slot {
+ struct panfrost_queue_state queue[NUM_JOB_SLOTS];
+ spinlock_t job_lock;
+};
+
+static struct panfrost_job *
+to_panfrost_job(struct drm_sched_job *sched_job)
+{
+ return container_of(sched_job, struct panfrost_job, base);
+}
+
+struct panfrost_fence {
+ struct dma_fence base;
+ struct drm_device *dev;
+ /* panfrost seqno for signaled() test */
+ u64 seqno;
+ int queue;
+};
+
+static inline struct panfrost_fence *
+to_panfrost_fence(struct dma_fence *fence)
+{
+ return (struct panfrost_fence *)fence;
+}
+
+static const char *panfrost_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "panfrost";
+}
+
+static const char *panfrost_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct panfrost_fence *f = to_panfrost_fence(fence);
+
+ switch (f->queue) {
+ case 0:
+ return "panfrost-js-0";
+ case 1:
+ return "panfrost-js-1";
+ case 2:
+ return "panfrost-js-2";
+ default:
+ return NULL;
+ }
+}
+
+static const struct dma_fence_ops panfrost_fence_ops = {
+ .get_driver_name = panfrost_fence_get_driver_name,
+ .get_timeline_name = panfrost_fence_get_timeline_name,
+};
+
+static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, int js_num)
+{
+ struct panfrost_fence *fence;
+ struct panfrost_job_slot *js = pfdev->js;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return ERR_PTR(-ENOMEM);
+
+ fence->dev = pfdev->ddev;
+ fence->queue = js_num;
+ fence->seqno = ++js->queue[js_num].emit_seqno;
+ dma_fence_init(&fence->base, &panfrost_fence_ops, &js->job_lock,
+ js->queue[js_num].fence_context, fence->seqno);
+
+ return &fence->base;
+}
+
+static int panfrost_job_get_slot(struct panfrost_job *job)
+{
+ /* JS0: fragment jobs.
+ * JS1: vertex/tiler jobs
+ * JS2: compute jobs
+ */
+ if (job->requirements & PANFROST_JD_REQ_FS)
+ return 0;
+
+/* Not exposed to userspace yet */
+#if 0
+ if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) {
+ if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) &&
+ (job->pfdev->features.nr_core_groups == 2))
+ return 2;
+ if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987))
+ return 2;
+ }
+#endif
+ return 1;
+}
+
+static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
+ u32 requirements,
+ int js)
+{
+ u64 affinity;
+
+ /*
+ * Use all cores for now.
+ * Eventually we may need to support tiler-only jobs and h/w with
+ * multiple (2) coherent core groups.
+ */
+ affinity = pfdev->features.shader_present;
+
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+}
+
+static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ unsigned long flags;
+ u32 cfg;
+ u64 jc_head = job->jc;
+ int ret;
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js))))
+ goto end;
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+
+ panfrost_job_write_affinity(pfdev, job->requirements, js);
+
+ /* start MMU, medium priority, cache clean/flush on end, clean/flush on
+ * start */
+ /* TODO: different address spaces */
+ cfg = JS_CONFIG_THREAD_PRI(8) |
+ JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE |
+ JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;
+
+ if (panfrost_has_hw_issue(pfdev, HW_ISSUE_10649))
+ cfg |= JS_CONFIG_START_MMU;
+
+ job_write(pfdev, JS_CONFIG_NEXT(js), cfg);
+
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_FLUSH_REDUCTION))
+ job_write(pfdev, JS_FLUSH_ID_NEXT(js), job->flush_id);
+
+ /* GO ! */
+ dev_dbg(pfdev->dev, "JS: Submitting atom %p to js[%d] with head=0x%llx",
+ job, js, jc_head);
+
+ job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+end:
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
+static void panfrost_acquire_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence **implicit_fences)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv);
+}
+
+static void panfrost_attach_object_fences(struct drm_gem_object **bos,
+ int bo_count,
+ struct dma_fence *fence)
+{
+ int i;
+
+ for (i = 0; i < bo_count; i++)
+ reservation_object_add_excl_fence(bos[i]->resv, fence);
+}
+
+int panfrost_job_push(struct panfrost_job *job)
+{
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
+ struct ww_acquire_ctx acquire_ctx;
+ int ret = 0;
+
+ mutex_lock(&pfdev->sched_lock);
+
+ ret = drm_gem_lock_reservations(job->bos, job->bo_count,
+ &acquire_ctx);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ return ret;
+ }
+
+ ret = drm_sched_job_init(&job->base, entity, NULL);
+ if (ret) {
+ mutex_unlock(&pfdev->sched_lock);
+ goto unlock;
+ }
+
+ job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
+
+ kref_get(&job->refcount); /* put by scheduler job completion */
+
+ panfrost_acquire_object_fences(job->bos, job->bo_count,
+ job->implicit_fences);
+
+ drm_sched_entity_push_job(&job->base, entity);
+
+ mutex_unlock(&pfdev->sched_lock);
+
+ panfrost_attach_object_fences(job->bos, job->bo_count,
+ job->render_done_fence);
+
+unlock:
+ drm_gem_unlock_reservations(job->bos, job->bo_count, &acquire_ctx);
+
+ return ret;
+}
+
+static void panfrost_job_cleanup(struct kref *ref)
+{
+ struct panfrost_job *job = container_of(ref, struct panfrost_job,
+ refcount);
+ unsigned int i;
+
+ if (job->in_fences) {
+ for (i = 0; i < job->in_fence_count; i++)
+ dma_fence_put(job->in_fences[i]);
+ kvfree(job->in_fences);
+ }
+ if (job->implicit_fences) {
+ for (i = 0; i < job->bo_count; i++)
+ dma_fence_put(job->implicit_fences[i]);
+ kvfree(job->implicit_fences);
+ }
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->render_done_fence);
+
+ if (job->bos) {
+ for (i = 0; i < job->bo_count; i++)
+ drm_gem_object_put_unlocked(job->bos[i]);
+ kvfree(job->bos);
+ }
+
+ kfree(job);
+}
+
+void panfrost_job_put(struct panfrost_job *job)
+{
+ kref_put(&job->refcount, panfrost_job_cleanup);
+}
+
+static void panfrost_job_free(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+
+ drm_sched_job_cleanup(sched_job);
+
+ panfrost_job_put(job);
+}
+
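+/*
+ * Called repeatedly by drm_sched before running the job; each fence returned
+ * is waited on in turn. Hand back the explicit in-fences first, then the
+ * per-BO implicit fences, and NULL once every dependency has been consumed.
+ */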
+static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
+ struct drm_sched_entity *s_entity)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct dma_fence *fence;
+ unsigned int i;
+
+ /* Explicit fences */
+ for (i = 0; i < job->in_fence_count; i++) {
+ if (job->in_fences[i]) {
+ fence = job->in_fences[i];
+ job->in_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ /* Implicit fences, max. one per BO */
+ for (i = 0; i < job->bo_count; i++) {
+ if (job->implicit_fences[i]) {
+ fence = job->implicit_fences[i];
+ job->implicit_fences[i] = NULL;
+ return fence;
+ }
+ }
+
+ return NULL;
+}
+
+static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int slot = panfrost_job_get_slot(job);
+ struct dma_fence *fence = NULL;
+
+ if (unlikely(job->base.s_fence->finished.error))
+ return NULL;
+
+ pfdev->jobs[slot] = job;
+
+ fence = panfrost_fence_create(pfdev, slot);
+ if (IS_ERR(fence))
+ return NULL;
+
+ if (job->done_fence)
+ dma_fence_put(job->done_fence);
+ job->done_fence = dma_fence_get(fence);
+
+ panfrost_job_hw_submit(job, slot);
+
+ return fence;
+}
+
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev)
+{
+ int j;
+ u32 irq_mask = 0;
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ irq_mask |= MK_JS_MASK(j);
+ }
+
+ job_write(pfdev, JOB_INT_CLEAR, irq_mask);
+ job_write(pfdev, JOB_INT_MASK, irq_mask);
+}
+
+static void panfrost_job_timedout(struct drm_sched_job *sched_job)
+{
+ struct panfrost_job *job = to_panfrost_job(sched_job);
+ struct panfrost_device *pfdev = job->pfdev;
+ int js = panfrost_job_get_slot(job);
+ int i;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (dma_fence_is_signaled(job->done_fence))
+ return;
+
+ dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
+ js,
+ job_read(pfdev, JS_STATUS(js)),
+ job_read(pfdev, JS_HEAD_LO(js)),
+ job_read(pfdev, JS_TAIL_LO(js)),
+ sched_job);
+
+ mutex_lock(&pfdev->reset_lock);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_stop(&pfdev->js->queue[i].sched);
+
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
+
+ /* panfrost_core_dump(pfdev); */
+
+ panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_gpu_soft_reset(pfdev);
+
+ /* TODO: Re-enable all other address spaces */
+ panfrost_mmu_enable(pfdev, 0);
+ panfrost_gpu_power_on(pfdev);
+ panfrost_job_enable_interrupts(pfdev);
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched);
+
+ /* restart scheduler after GPU is usable again */
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_start(&pfdev->js->queue[i].sched, true);
+
+ mutex_unlock(&pfdev->reset_lock);
+}
+
+static const struct drm_sched_backend_ops panfrost_sched_ops = {
+ .dependency = panfrost_job_dependency,
+ .run_job = panfrost_job_run,
+ .timedout_job = panfrost_job_timedout,
+ .free_job = panfrost_job_free
+};
+
+static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = job_read(pfdev, JOB_INT_STAT);
+ int j;
+
+ dev_dbg(pfdev->dev, "jobslot irq status=%x\n", status);
+
+ if (!status)
+ return IRQ_NONE;
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+
+ for (j = 0; status; j++) {
+ u32 mask = MK_JS_MASK(j);
+
+ if (!(status & mask))
+ continue;
+
+ job_write(pfdev, JOB_INT_CLEAR, mask);
+
+ if (status & JOB_INT_MASK_ERR(j)) {
+ job_write(pfdev, JS_COMMAND_NEXT(j), JS_COMMAND_NOP);
+
+ dev_err(pfdev->dev, "js fault, js=%d, status=%s, head=0x%x, tail=0x%x",
+ j,
+ panfrost_exception_name(pfdev, job_read(pfdev, JS_STATUS(j))),
+ job_read(pfdev, JS_HEAD_LO(j)),
+ job_read(pfdev, JS_TAIL_LO(j)));
+
+ drm_sched_fault(&pfdev->js->queue[j].sched);
+ }
+
+ if (status & JOB_INT_MASK_DONE(j)) {
+ panfrost_devfreq_record_transition(pfdev, j);
+ dma_fence_signal(pfdev->jobs[j]->done_fence);
+ }
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_job_init(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js;
+ int ret, j, irq;
+
+ pfdev->js = js = devm_kzalloc(pfdev->dev, sizeof(*js), GFP_KERNEL);
+ if (!js)
+ return -ENOMEM;
+
+ spin_lock_init(&js->job_lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "job");
+ if (irq <= 0)
+ return -ENODEV;
+
+ ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
+ IRQF_SHARED, "job", pfdev);
+ if (ret) {
+ dev_err(pfdev->dev, "failed to request job irq\n");
+ return ret;
+ }
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++) {
+ js->queue[j].fence_context = dma_fence_context_alloc(1);
+
+ ret = drm_sched_init(&js->queue[j].sched,
+ &panfrost_sched_ops,
+ 1, 0, msecs_to_jiffies(500),
+ "pan_js");
+ if (ret) {
+ dev_err(pfdev->dev, "Failed to create scheduler: %d.", ret);
+ goto err_sched;
+ }
+ }
+
+ panfrost_job_enable_interrupts(pfdev);
+
+ return 0;
+
+err_sched:
+ for (j--; j >= 0; j--)
+ drm_sched_fini(&js->queue[j].sched);
+
+ return ret;
+}
+
+void panfrost_job_fini(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int j;
+
+ job_write(pfdev, JOB_INT_MASK, 0);
+
+ for (j = 0; j < NUM_JOB_SLOTS; j++)
+ drm_sched_fini(&js->queue[j].sched);
+
+}
+
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
+{
+ struct panfrost_device *pfdev = panfrost_priv->pfdev;
+ struct panfrost_job_slot *js = pfdev->js;
+ struct drm_sched_rq *rq;
+ int ret, i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL);
+ if (WARN_ON(ret))
+ return ret;
+ }
+ return 0;
+}
+
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
+{
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++)
+ drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
+}
+
+int panfrost_job_is_idle(struct panfrost_device *pfdev)
+{
+ struct panfrost_job_slot *js = pfdev->js;
+ int i;
+
+ for (i = 0; i < NUM_JOB_SLOTS; i++) {
+ /* If there are any jobs in the HW queue, we're not idle */
+ if (atomic_read(&js->queue[i].sched.hw_rq_count))
+ return false;
+
+ /* Check whether the hardware is idle */
+ if (pfdev->devfreq.slot[i].busy)
+ return false;
+ }
+
+ return true;
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
new file mode 100644
index 000000000000..62454128a792
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Collabora ltd. */
+
+#ifndef __PANFROST_JOB_H__
+#define __PANFROST_JOB_H__
+
+#include <uapi/drm/panfrost_drm.h>
+#include <drm/gpu_scheduler.h>
+
+struct panfrost_device;
+struct panfrost_gem_object;
+struct panfrost_file_priv;
+
+struct panfrost_job {
+ struct drm_sched_job base;
+
+ struct kref refcount;
+
+ struct panfrost_device *pfdev;
+ struct panfrost_file_priv *file_priv;
+
+ /* Optional fences userspace can pass in for the job to depend on. */
+ struct dma_fence **in_fences;
+ u32 in_fence_count;
+
+ /* Fence to be signaled by IRQ handler when the job is complete. */
+ struct dma_fence *done_fence;
+
+ __u64 jc;
+ __u32 requirements;
+ __u32 flush_id;
+
+ /* Exclusive fences we have taken from the BOs to wait for */
+ struct dma_fence **implicit_fences;
+ struct drm_gem_object **bos;
+ u32 bo_count;
+
+ /* Fence to be signaled by drm-sched once it's done with the job */
+ struct dma_fence *render_done_fence;
+};
+
+int panfrost_job_init(struct panfrost_device *pfdev);
+void panfrost_job_fini(struct panfrost_device *pfdev);
+int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
+void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_push(struct panfrost_job *job);
+void panfrost_job_put(struct panfrost_job *job);
+void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
+int panfrost_job_is_idle(struct panfrost_device *pfdev);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
new file mode 100644
index 000000000000..762b1bd2a8c2
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/io-pgtable.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sizes.h>
+
+#include "panfrost_device.h"
+#include "panfrost_mmu.h"
+#include "panfrost_gem.h"
+#include "panfrost_features.h"
+#include "panfrost_regs.h"
+
+#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
+#define mmu_read(dev, reg) readl(dev->iomem + reg)
+
+struct panfrost_mmu {
+ struct io_pgtable_cfg pgtbl_cfg;
+ struct io_pgtable_ops *pgtbl_ops;
+ struct mutex lock;
+};
+
+static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
+{
+ int ret;
+ u32 val;
+
+ /* Wait for the MMU status to indicate there is no active command, in
+ * case one is pending. */
+ ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
+ val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
+
+ if (ret)
+ dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
+
+ return ret;
+}
+
+static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
+{
+ int status;
+
+ /* write AS_COMMAND when MMU is ready to accept another command */
+ status = wait_ready(pfdev, as_nr);
+ if (!status)
+ mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
+
+ return status;
+}
+
+static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size)
+{
+ u8 region_width;
+ u64 region = iova & PAGE_MASK;
+ /*
+ * fls() returns a value in the range 1..32, so
+ * 10 + fls(num_pages) gives a region_width in the range 11..42.
+ */
+
+ size = round_up(size, PAGE_SIZE);
+
+ region_width = 10 + fls(size >> PAGE_SHIFT);
+ if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
+ /* not pow2, so must go up to the next pow2 */
+ region_width += 1;
+ }
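+ /*
+ * For example: a 12 KiB request covers 3 pages, fls(3) = 2 gives
+ * region_width = 12, and since 3 pages is not a power of two it is
+ * bumped to 13, the same value a 16 KiB (4 page) request gets
+ * directly.
+ */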
+ region |= region_width;
+
+ /* Lock the region that needs to be updated */
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
+}
+
+
+static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
+ u64 iova, size_t size, u32 op)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
+
+ if (op != AS_COMMAND_UNLOCK)
+ lock_region(pfdev, as_nr, iova, size);
+
+ /* Run the MMU operation */
+ write_cmd(pfdev, as_nr, op);
+
+ /* Wait for the flush to complete */
+ ret = wait_ready(pfdev, as_nr);
+
+ spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
+
+ return ret;
+}
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
+ u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
+ u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
+
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+
+ /* Need to revisit mem attrs.
+ * NC is the default, Mali driver is inner WT.
+ */
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
+static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+{
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
+
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
+
+ write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
+}
+
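+/*
+ * Pick the largest page size the io-pgtable can use for a chunk of this
+ * mapping: only ranges that are both 2 MiB aligned and at least 2 MiB long
+ * get SZ_2M entries, everything else falls back to SZ_4K (matching the
+ * pgsize_bitmap set up in panfrost_mmu_init()).
+ */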
+static size_t get_pgsize(u64 addr, size_t size)
+{
+ if (addr & (SZ_2M - 1) || size < SZ_2M)
+ return SZ_4K;
+
+ return SZ_2M;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ unsigned int count;
+ struct scatterlist *sgl;
+ struct sg_table *sgt;
+ int ret;
+
+ sgt = drm_gem_shmem_get_pages_sgt(obj);
+ if (WARN_ON(IS_ERR(sgt)))
+ return PTR_ERR(sgt);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
+ unsigned long paddr = sg_dma_address(sgl);
+ size_t len = sg_dma_len(sgl);
+
+ dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+
+ while (len) {
+ size_t pgsize = get_pgsize(iova | paddr, len);
+
+ ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+ iova += pgsize;
+ paddr += pgsize;
+ len -= pgsize;
+ }
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+
+ return 0;
+}
+
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+{
+ struct drm_gem_object *obj = &bo->base.base;
+ struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+ struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
+ u64 iova = bo->node.start << PAGE_SHIFT;
+ size_t len = bo->node.size << PAGE_SHIFT;
+ size_t unmapped_len = 0;
+ int ret;
+
+ dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
+
+ ret = pm_runtime_get_sync(pfdev->dev);
+ if (ret < 0)
+ return;
+
+ mutex_lock(&pfdev->mmu->lock);
+
+ while (unmapped_len < len) {
+ size_t unmapped_page;
+ size_t pgsize = get_pgsize(iova, len - unmapped_len);
+
+ unmapped_page = ops->unmap(ops, iova, pgsize);
+ if (!unmapped_page)
+ break;
+
+ iova += unmapped_page;
+ unmapped_len += unmapped_page;
+ }
+
+ mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
+ bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+
+ mutex_unlock(&pfdev->mmu->lock);
+
+ pm_runtime_mark_last_busy(pfdev->dev);
+ pm_runtime_put_autosuspend(pfdev->dev);
+}
+
+static void mmu_tlb_inv_context_s1(void *cookie)
+{
+ struct panfrost_device *pfdev = cookie;
+
+ mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+}
+
+static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
+ size_t granule, bool leaf, void *cookie)
+{}
+
+static void mmu_tlb_sync_context(void *cookie)
+{
+ //struct panfrost_device *pfdev = cookie;
+ // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
+}
+
+static const struct iommu_gather_ops mmu_tlb_ops = {
+ .tlb_flush_all = mmu_tlb_inv_context_s1,
+ .tlb_add_flush = mmu_tlb_inv_range_nosync,
+ .tlb_sync = mmu_tlb_sync_context,
+};
+
+static const char *access_type_name(struct panfrost_device *pfdev,
+ u32 fault_status)
+{
+ switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
+ case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
+ if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
+ return "ATOMIC";
+ else
+ return "UNKNOWN";
+ case AS_FAULTSTATUS_ACCESS_TYPE_READ:
+ return "READ";
+ case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
+ return "WRITE";
+ case AS_FAULTSTATUS_ACCESS_TYPE_EX:
+ return "EXECUTE";
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
+{
+ struct panfrost_device *pfdev = data;
+ u32 status = mmu_read(pfdev, MMU_INT_STAT);
+ int i;
+
+ if (!status)
+ return IRQ_NONE;
+
+ dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+
+ for (i = 0; status; i++) {
+ u32 mask = BIT(i) | BIT(i + 16);
+ u64 addr;
+ u32 fault_status;
+ u32 exception_type;
+ u32 access_type;
+ u32 source_id;
+
+ if (!(status & mask))
+ continue;
+
+ fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
+ addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
+ addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
+
+ /* decode the fault status */
+ exception_type = fault_status & 0xFF;
+ access_type = (fault_status >> 8) & 0x3;
+ source_id = (fault_status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
+
+ mmu_write(pfdev, MMU_INT_CLEAR, mask);
+
+ status &= ~mask;
+ }
+
+ return IRQ_HANDLED;
+}
+
+int panfrost_mmu_init(struct panfrost_device *pfdev)
+{
+ struct io_pgtable_ops *pgtbl_ops;
+ int err, irq;
+
+ pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
+ if (!pfdev->mmu)
+ return -ENOMEM;
+
+ mutex_init(&pfdev->mmu->lock);
+
+ irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
+ if (irq <= 0)
+ return -ENODEV;
+
+ err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ IRQF_SHARED, "mmu", pfdev);
+
+ if (err) {
+ dev_err(pfdev->dev, "failed to request mmu irq\n");
+ return err;
+ }
+ mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+ mmu_write(pfdev, MMU_INT_MASK, ~0);
+
+ pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+ .pgsize_bitmap = SZ_4K | SZ_2M,
+ .ias = FIELD_GET(0xff, pfdev->features.mmu_features),
+ .oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
+ .tlb = &mmu_tlb_ops,
+ .iommu_dev = pfdev->dev,
+ };
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
+ pfdev);
+ if (!pgtbl_ops)
+ return -ENOMEM;
+
+ pfdev->mmu->pgtbl_ops = pgtbl_ops;
+
+ panfrost_mmu_enable(pfdev, 0);
+
+ return 0;
+}
+
+void panfrost_mmu_fini(struct panfrost_device *pfdev)
+{
+ mmu_write(pfdev, MMU_INT_MASK, 0);
+ mmu_disable(pfdev, 0);
+
+ free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
+}
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
new file mode 100644
index 000000000000..f5878d86a5ce
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+
+#ifndef __PANFROST_MMU_H__
+#define __PANFROST_MMU_H__
+
+struct panfrost_gem_object;
+
+int panfrost_mmu_map(struct panfrost_gem_object *bo);
+void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+
+int panfrost_mmu_init(struct panfrost_device *pfdev);
+void panfrost_mmu_fini(struct panfrost_device *pfdev);
+
+void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+
+#endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_regs.h b/drivers/gpu/drm/panfrost/panfrost_regs.h
new file mode 100644
index 000000000000..578c5fc2188b
--- /dev/null
+++ b/drivers/gpu/drm/panfrost/panfrost_regs.h
@@ -0,0 +1,298 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
+/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
+/*
+ * Register definitions based on mali_midg_regmap.h
+ * (C) COPYRIGHT 2010-2018 ARM Limited. All rights reserved.
+ */
+#ifndef __PANFROST_REGS_H__
+#define __PANFROST_REGS_H__
+
+#define GPU_ID 0x00
+#define GPU_L2_FEATURES 0x004 /* (RO) Level 2 cache features */
+#define GPU_CORE_FEATURES 0x008 /* (RO) Shader Core Features */
+#define GPU_TILER_FEATURES 0x00C /* (RO) Tiler Features */
+#define GPU_MEM_FEATURES 0x010 /* (RO) Memory system features */
+#define GROUPS_L2_COHERENT BIT(0) /* Core groups are L2 coherent */
+
+#define GPU_MMU_FEATURES 0x014 /* (RO) MMU features */
+#define GPU_AS_PRESENT 0x018 /* (RO) Address space slots present */
+#define GPU_JS_PRESENT 0x01C /* (RO) Job slots present */
+
+#define GPU_INT_RAWSTAT 0x20
+#define GPU_INT_CLEAR 0x24
+#define GPU_INT_MASK 0x28
+#define GPU_INT_STAT 0x2c
+#define GPU_IRQ_FAULT BIT(0)
+#define GPU_IRQ_MULTIPLE_FAULT BIT(7)
+#define GPU_IRQ_RESET_COMPLETED BIT(8)
+#define GPU_IRQ_POWER_CHANGED BIT(9)
+#define GPU_IRQ_POWER_CHANGED_ALL BIT(10)
+#define GPU_IRQ_PERFCNT_SAMPLE_COMPLETED BIT(16)
+#define GPU_IRQ_CLEAN_CACHES_COMPLETED BIT(17)
+#define GPU_IRQ_MASK_ALL \
+ (GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT |\
+ GPU_IRQ_RESET_COMPLETED |\
+ GPU_IRQ_POWER_CHANGED |\
+ GPU_IRQ_POWER_CHANGED_ALL |\
+ GPU_IRQ_PERFCNT_SAMPLE_COMPLETED |\
+ GPU_IRQ_CLEAN_CACHES_COMPLETED)
+#define GPU_IRQ_MASK_ERROR \
+ ( \
+ GPU_IRQ_FAULT |\
+ GPU_IRQ_MULTIPLE_FAULT)
+#define GPU_CMD 0x30
+#define GPU_CMD_SOFT_RESET 0x01
+#define GPU_STATUS 0x34
+#define GPU_LATEST_FLUSH_ID 0x38
+#define GPU_FAULT_STATUS 0x3C
+#define GPU_FAULT_ADDRESS_LO 0x40
+#define GPU_FAULT_ADDRESS_HI 0x44
+
+#define GPU_THREAD_MAX_THREADS 0x0A0 /* (RO) Maximum number of threads per core */
+#define GPU_THREAD_MAX_WORKGROUP_SIZE 0x0A4 /* (RO) Maximum workgroup size */
+#define GPU_THREAD_MAX_BARRIER_SIZE 0x0A8 /* (RO) Maximum threads waiting at a barrier */
+#define GPU_THREAD_FEATURES 0x0AC /* (RO) Thread features */
+#define GPU_THREAD_TLS_ALLOC 0x310 /* (RO) Number of threads per core that
+ * TLS must be allocated for */
+
+#define GPU_TEXTURE_FEATURES(n) (0x0B0 + ((n) * 4))
+#define GPU_JS_FEATURES(n) (0x0C0 + ((n) * 4))
+
+#define GPU_SHADER_PRESENT_LO 0x100 /* (RO) Shader core present bitmap, low word */
+#define GPU_SHADER_PRESENT_HI 0x104 /* (RO) Shader core present bitmap, high word */
+#define GPU_TILER_PRESENT_LO 0x110 /* (RO) Tiler core present bitmap, low word */
+#define GPU_TILER_PRESENT_HI 0x114 /* (RO) Tiler core present bitmap, high word */
+
+#define GPU_L2_PRESENT_LO 0x120 /* (RO) Level 2 cache present bitmap, low word */
+#define GPU_L2_PRESENT_HI 0x124 /* (RO) Level 2 cache present bitmap, high word */
+
+#define GPU_COHERENCY_FEATURES 0x300 /* (RO) Coherency features present */
+#define COHERENCY_ACE_LITE BIT(0)
+#define COHERENCY_ACE BIT(1)
+
+#define GPU_STACK_PRESENT_LO 0xE00 /* (RO) Core stack present bitmap, low word */
+#define GPU_STACK_PRESENT_HI 0xE04 /* (RO) Core stack present bitmap, high word */
+
+#define SHADER_READY_LO 0x140 /* (RO) Shader core ready bitmap, low word */
+#define SHADER_READY_HI 0x144 /* (RO) Shader core ready bitmap, high word */
+
+#define TILER_READY_LO 0x150 /* (RO) Tiler core ready bitmap, low word */
+#define TILER_READY_HI 0x154 /* (RO) Tiler core ready bitmap, high word */
+
+#define L2_READY_LO 0x160 /* (RO) Level 2 cache ready bitmap, low word */
+#define L2_READY_HI 0x164 /* (RO) Level 2 cache ready bitmap, high word */
+
+#define STACK_READY_LO 0xE10 /* (RO) Core stack ready bitmap, low word */
+#define STACK_READY_HI 0xE14 /* (RO) Core stack ready bitmap, high word */
+
+
+#define SHADER_PWRON_LO 0x180 /* (WO) Shader core power on bitmap, low word */
+#define SHADER_PWRON_HI 0x184 /* (WO) Shader core power on bitmap, high word */
+
+#define TILER_PWRON_LO 0x190 /* (WO) Tiler core power on bitmap, low word */
+#define TILER_PWRON_HI 0x194 /* (WO) Tiler core power on bitmap, high word */
+
+#define L2_PWRON_LO 0x1A0 /* (WO) Level 2 cache power on bitmap, low word */
+#define L2_PWRON_HI 0x1A4 /* (WO) Level 2 cache power on bitmap, high word */
+
+#define STACK_PWRON_LO 0xE20 /* (RO) Core stack power on bitmap, low word */
+#define STACK_PWRON_HI 0xE24 /* (RO) Core stack power on bitmap, high word */
+
+
+#define SHADER_PWROFF_LO 0x1C0 /* (WO) Shader core power off bitmap, low word */
+#define SHADER_PWROFF_HI 0x1C4 /* (WO) Shader core power off bitmap, high word */
+
+#define TILER_PWROFF_LO 0x1D0 /* (WO) Tiler core power off bitmap, low word */
+#define TILER_PWROFF_HI 0x1D4 /* (WO) Tiler core power off bitmap, high word */
+
+#define L2_PWROFF_LO 0x1E0 /* (WO) Level 2 cache power off bitmap, low word */
+#define L2_PWROFF_HI 0x1E4 /* (WO) Level 2 cache power off bitmap, high word */
+
+#define STACK_PWROFF_LO 0xE30 /* (RO) Core stack power off bitmap, low word */
+#define STACK_PWROFF_HI 0xE34 /* (RO) Core stack power off bitmap, high word */
+
+
+#define SHADER_PWRTRANS_LO 0x200 /* (RO) Shader core power transition bitmap, low word */
+#define SHADER_PWRTRANS_HI 0x204 /* (RO) Shader core power transition bitmap, high word */
+
+#define TILER_PWRTRANS_LO 0x210 /* (RO) Tiler core power transition bitmap, low word */
+#define TILER_PWRTRANS_HI 0x214 /* (RO) Tiler core power transition bitmap, high word */
+
+#define L2_PWRTRANS_LO 0x220 /* (RO) Level 2 cache power transition bitmap, low word */
+#define L2_PWRTRANS_HI 0x224 /* (RO) Level 2 cache power transition bitmap, high word */
+
+#define STACK_PWRTRANS_LO 0xE40 /* (RO) Core stack power transition bitmap, low word */
+#define STACK_PWRTRANS_HI 0xE44 /* (RO) Core stack power transition bitmap, high word */
+
+
+#define SHADER_PWRACTIVE_LO 0x240 /* (RO) Shader core active bitmap, low word */
+#define SHADER_PWRACTIVE_HI 0x244 /* (RO) Shader core active bitmap, high word */
+
+#define TILER_PWRACTIVE_LO 0x250 /* (RO) Tiler core active bitmap, low word */
+#define TILER_PWRACTIVE_HI 0x254 /* (RO) Tiler core active bitmap, high word */
+
+#define L2_PWRACTIVE_LO 0x260 /* (RO) Level 2 cache active bitmap, low word */
+#define L2_PWRACTIVE_HI 0x264 /* (RO) Level 2 cache active bitmap, high word */
+
+#define GPU_JM_CONFIG 0xF00 /* (RW) Job Manager configuration register (Implementation specific register) */
+#define GPU_SHADER_CONFIG 0xF04 /* (RW) Shader core configuration settings (Implementation specific register) */
+#define GPU_TILER_CONFIG 0xF08 /* (RW) Tiler core configuration settings (Implementation specific register) */
+#define GPU_L2_MMU_CONFIG 0xF0C /* (RW) Configuration of the L2 cache and MMU (Implementation specific register) */
+
+/* L2_MMU_CONFIG register */
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT 23
+#define L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY (0x1 << L2_MMU_CONFIG_ALLOW_SNOOP_DISPARITY_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT 24
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT 26
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_OCTANT (0x1 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_QUARTER (0x2 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+#define L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_HALF (0x3 << L2_MMU_CONFIG_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT 12
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_READS_SHIFT)
+
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT 15
+#define L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES (0x7 << L2_MMU_CONFIG_3BIT_LIMIT_EXTERNAL_WRITES_SHIFT)
+
+/* SHADER_CONFIG register */
+#define SC_ALT_COUNTERS BIT(3)
+#define SC_OVERRIDE_FWD_PIXEL_KILL BIT(4)
+#define SC_SDC_DISABLE_OQ_DISCARD BIT(6)
+#define SC_LS_ALLOW_ATTR_TYPES BIT(16)
+#define SC_LS_PAUSEBUFFER_DISABLE BIT(16)
+#define SC_TLS_HASH_ENABLE BIT(17)
+#define SC_LS_ATTR_CHECK_DISABLE BIT(18)
+#define SC_ENABLE_TEXGRD_FLAGS BIT(25)
+/* End SHADER_CONFIG register */
+
+/* TILER_CONFIG register */
+#define TC_CLOCK_GATE_OVERRIDE BIT(0)
+
+/* JM_CONFIG register */
+#define JM_TIMESTAMP_OVERRIDE BIT(0)
+#define JM_CLOCK_GATE_OVERRIDE BIT(1)
+#define JM_JOB_THROTTLE_ENABLE BIT(2)
+#define JM_JOB_THROTTLE_LIMIT_SHIFT 3
+#define JM_MAX_JOB_THROTTLE_LIMIT 0x3F
+#define JM_FORCE_COHERENCY_FEATURES_SHIFT 2
+#define JM_IDVS_GROUP_SIZE_SHIFT 16
+#define JM_MAX_IDVS_GROUP_SIZE 0x3F
+
+
+/* Job Control regs */
+#define JOB_INT_RAWSTAT 0x1000
+#define JOB_INT_CLEAR 0x1004
+#define JOB_INT_MASK 0x1008
+#define JOB_INT_STAT 0x100c
+#define JOB_INT_JS_STATE 0x1010
+#define JOB_INT_THROTTLE 0x1014
+
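+/* Per-slot interrupt mask: bit (j) is the DONE interrupt for job slot j and
+ * bit (j + 16) the matching ERR interrupt, so MK_JS_MASK(j) covers both.
+ */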
+#define MK_JS_MASK(j) (0x10001 << (j))
+#define JOB_INT_MASK_ERR(j) BIT((j) + 16)
+#define JOB_INT_MASK_DONE(j) BIT(j)
+
+#define JS_BASE 0x1800
+#define JS_HEAD_LO(n) (JS_BASE + ((n) * 0x80) + 0x00)
+#define JS_HEAD_HI(n) (JS_BASE + ((n) * 0x80) + 0x04)
+#define JS_TAIL_LO(n) (JS_BASE + ((n) * 0x80) + 0x08)
+#define JS_TAIL_HI(n) (JS_BASE + ((n) * 0x80) + 0x0c)
+#define JS_AFFINITY_LO(n) (JS_BASE + ((n) * 0x80) + 0x10)
+#define JS_AFFINITY_HI(n) (JS_BASE + ((n) * 0x80) + 0x14)
+#define JS_CONFIG(n) (JS_BASE + ((n) * 0x80) + 0x18)
+#define JS_XAFFINITY(n) (JS_BASE + ((n) * 0x80) + 0x1c)
+#define JS_COMMAND(n) (JS_BASE + ((n) * 0x80) + 0x20)
+#define JS_STATUS(n) (JS_BASE + ((n) * 0x80) + 0x24)
+#define JS_HEAD_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x40)
+#define JS_HEAD_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x44)
+#define JS_AFFINITY_NEXT_LO(n) (JS_BASE + ((n) * 0x80) + 0x50)
+#define JS_AFFINITY_NEXT_HI(n) (JS_BASE + ((n) * 0x80) + 0x54)
+#define JS_CONFIG_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x58)
+#define JS_COMMAND_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x60)
+#define JS_FLUSH_ID_NEXT(n) (JS_BASE + ((n) * 0x80) + 0x70)
+
+/* Possible values of JS_CONFIG and JS_CONFIG_NEXT registers */
+#define JS_CONFIG_START_FLUSH_CLEAN BIT(8)
+#define JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE (3u << 8)
+#define JS_CONFIG_START_MMU BIT(10)
+#define JS_CONFIG_JOB_CHAIN_FLAG BIT(11)
+#define JS_CONFIG_END_FLUSH_CLEAN BIT(12)
+#define JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE (3u << 12)
+#define JS_CONFIG_ENABLE_FLUSH_REDUCTION BIT(14)
+#define JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK BIT(15)
+#define JS_CONFIG_THREAD_PRI(n) ((n) << 16)
+
+#define JS_COMMAND_NOP 0x00
+#define JS_COMMAND_START 0x01
+#define JS_COMMAND_SOFT_STOP 0x02 /* Gently stop processing a job chain */
+#define JS_COMMAND_HARD_STOP 0x03 /* Rudely stop processing a job chain */
+#define JS_COMMAND_SOFT_STOP_0 0x04 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_HARD_STOP_0 0x05 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 0 */
+#define JS_COMMAND_SOFT_STOP_1 0x06 /* Execute SOFT_STOP if JOB_CHAIN_FLAG is 1 */
+#define JS_COMMAND_HARD_STOP_1 0x07 /* Execute HARD_STOP if JOB_CHAIN_FLAG is 1 */
+
+#define JS_STATUS_EVENT_ACTIVE 0x08
+
+
+/* MMU regs */
+#define MMU_INT_RAWSTAT 0x2000
+#define MMU_INT_CLEAR 0x2004
+#define MMU_INT_MASK 0x2008
+#define MMU_INT_STAT 0x200c
+
+/* AS_COMMAND register commands */
+#define AS_COMMAND_NOP 0x00 /* NOP Operation */
+#define AS_COMMAND_UPDATE 0x01 /* Broadcasts the values in AS_TRANSTAB and ASn_MEMATTR to all MMUs */
+#define AS_COMMAND_LOCK 0x02 /* Issue a lock region command to all MMUs */
+#define AS_COMMAND_UNLOCK 0x03 /* Issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs
+ (deprecated - only for use with T60x) */
+#define AS_COMMAND_FLUSH_PT 0x04 /* Flush all L2 caches then issue a flush region command to all MMUs */
+#define AS_COMMAND_FLUSH_MEM 0x05 /* Wait for memory accesses to complete, flush all the L1 caches,
+ then flush all L2 caches, then issue a flush region command to all MMUs */
+
+#define MMU_AS(as) (0x2400 + ((as) << 6))
+
+#define AS_TRANSTAB_LO(as) (MMU_AS(as) + 0x00) /* (RW) Translation Table Base Address for address space n, low word */
+#define AS_TRANSTAB_HI(as) (MMU_AS(as) + 0x04) /* (RW) Translation Table Base Address for address space n, high word */
+#define AS_MEMATTR_LO(as) (MMU_AS(as) + 0x08) /* (RW) Memory attributes for address space n, low word. */
+#define AS_MEMATTR_HI(as) (MMU_AS(as) + 0x0C) /* (RW) Memory attributes for address space n, high word. */
+#define AS_LOCKADDR_LO(as) (MMU_AS(as) + 0x10) /* (RW) Lock region address for address space n, low word */
+#define AS_LOCKADDR_HI(as) (MMU_AS(as) + 0x14) /* (RW) Lock region address for address space n, high word */
+#define AS_COMMAND(as) (MMU_AS(as) + 0x18) /* (WO) MMU command register for address space n */
+#define AS_FAULTSTATUS(as) (MMU_AS(as) + 0x1C) /* (RO) MMU fault status register for address space n */
+#define AS_FAULTADDRESS_LO(as) (MMU_AS(as) + 0x20) /* (RO) Fault Address for address space n, low word */
+#define AS_FAULTADDRESS_HI(as) (MMU_AS(as) + 0x24) /* (RO) Fault Address for address space n, high word */
+#define AS_STATUS(as) (MMU_AS(as) + 0x28) /* (RO) Status flags for address space n */
+/* Additional Bifrost AS registers */
+#define AS_TRANSCFG_LO(as) (MMU_AS(as) + 0x30) /* (RW) Translation table configuration for address space n, low word */
+#define AS_TRANSCFG_HI(as) (MMU_AS(as) + 0x34) /* (RW) Translation table configuration for address space n, high word */
+#define AS_FAULTEXTRA_LO(as) (MMU_AS(as) + 0x38) /* (RO) Secondary fault address for address space n, low word */
+#define AS_FAULTEXTRA_HI(as) (MMU_AS(as) + 0x3C) /* (RO) Secondary fault address for address space n, high word */
+
+/*
+ * Begin LPAE MMU TRANSTAB register values
+ */
+#define AS_TRANSTAB_LPAE_ADDR_SPACE_MASK 0xfffffffffffff000
+#define AS_TRANSTAB_LPAE_ADRMODE_IDENTITY 0x2
+#define AS_TRANSTAB_LPAE_ADRMODE_TABLE 0x3
+#define AS_TRANSTAB_LPAE_ADRMODE_MASK 0x3
+#define AS_TRANSTAB_LPAE_READ_INNER BIT(2)
+#define AS_TRANSTAB_LPAE_SHARE_OUTER BIT(4)
+
+#define AS_STATUS_AS_ACTIVE 0x01
+
+#define AS_FAULTSTATUS_ACCESS_TYPE_MASK (0x3 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC (0x0 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_EX (0x1 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_READ (0x2 << 8)
+#define AS_FAULTSTATUS_ACCESS_TYPE_WRITE (0x3 << 8)
+
+#endif
diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c
index 754f6b25f265..0c5d391f0a8f 100644
--- a/drivers/gpu/drm/pl111/pl111_display.c
+++ b/drivers/gpu/drm/pl111/pl111_display.c
@@ -188,7 +188,7 @@ static void pl111_display_enable(struct drm_simple_display_pipe *pipe,
tim2 |= TIM2_IOE;
if (connector->display_info.bus_flags &
- DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
tim2 |= TIM2_IPC;
}
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index b9baefdba38a..1c318ad32a8c 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
ret = vexpress_muxfpga_init();
if (ret) {
dev_err(dev, "unable to initialize muxfpga driver\n");
+ of_node_put(np);
return ret;
}
@@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
pdev = of_find_device_by_node(np);
if (!pdev) {
dev_err(dev, "can't find the sysreg device, deferring\n");
+ of_node_put(np);
return -EPROBE_DEFER;
}
map = dev_get_drvdata(&pdev->dev);
if (!map) {
dev_err(dev, "sysreg has not yet probed\n");
platform_device_put(pdev);
+ of_node_put(np);
return -EPROBE_DEFER;
}
} else {
map = syscon_node_to_regmap(np);
}
+ of_node_put(np);
if (IS_ERR(map)) {
dev_err(dev, "no Versatile syscon regmap\n");
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 08c725544a2f..8b319ebbb0fb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -535,7 +535,7 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
{
struct qxl_device *qdev = plane->dev->dev_private;
struct qxl_bo *bo = gem_to_qxl_bo(plane->state->fb->obj[0]);
- struct qxl_bo *bo_old, *primary;
+ struct qxl_bo *primary;
struct drm_clip_rect norect = {
.x1 = 0,
.y1 = 0,
@@ -544,12 +544,6 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
};
uint32_t dumb_shadow_offset = 0;
- if (old_state->fb) {
- bo_old = gem_to_qxl_bo(old_state->fb->obj[0]);
- } else {
- bo_old = NULL;
- }
-
primary = bo->shadow ? bo->shadow : bo;
if (!primary->is_primary) {
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 4a0331b3ff7d..2896bb6fdbf4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -65,9 +65,6 @@
extern int qxl_num_crtc;
extern int qxl_max_ioctls;
-#define DRM_FILE_OFFSET 0x100000000ULL
-#define DRM_FILE_PAGE_OFFSET (DRM_FILE_OFFSET >> PAGE_SHIFT)
-
#define QXL_INTERRUPT_MASK (\
QXL_INTERRUPT_DISPLAY |\
QXL_INTERRUPT_CURSOR |\
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 92f5db5b296f..0234f8556ada 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -63,15 +63,10 @@ static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf)
int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct qxl_device *qdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct qxl_device *qdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
-
- file_priv = filp->private_data;
- qdev = file_priv->minor->dev->dev_private;
if (qdev == NULL) {
DRM_ERROR(
"filp->private_data->minor->dev->dev_private == NULL\n");
@@ -328,7 +323,7 @@ int qxl_ttm_init(struct qxl_device *qdev)
r = ttm_bo_device_init(&qdev->mman.bdev,
&qxl_bo_driver,
qdev->ddev.anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 53f29a115104..0a9312ea250a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1388,7 +1388,7 @@ int radeon_device_init(struct radeon_device *rdev,
pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
pr_warn("radeon: No coherent DMA available\n");
}
- rdev->need_swiotlb = drm_get_max_iomem() > ((u64)1 << dma_bits);
+ rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
/* Registers mapping */
/* TODO: block userspace mapping of io register */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index afef2d9fccd8..173deb463414 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -35,7 +35,6 @@
#include <linux/platform_device.h>
#include <drm/drm_legacy.h>
-#include <drm/ati_pcigart.h>
#include "radeon_family.h"
/* General customization:
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 1179034024ae..1298b84cb1c7 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -42,7 +42,7 @@
* the helper contains a pointer to radeon framebuffer baseclass.
*/
struct radeon_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct drm_framebuffer fb;
struct radeon_device *rdev;
};
@@ -244,7 +244,8 @@ static int radeonfb_create(struct drm_fb_helper *helper,
goto out;
}
- info->par = rfbdev;
+ /* radeon resume is fragile and needs a vt switch to help it along */
+ info->skip_vt_switch = false;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->fb, &mode_cmd, gobj);
if (ret) {
@@ -259,10 +260,6 @@ static int radeonfb_create(struct drm_fb_helper *helper,
memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo));
- strcpy(info->fix.id, "radeondrmfb");
-
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
-
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
@@ -271,7 +268,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
info->screen_base = rbo->kptr;
info->screen_size = radeon_bo_size(rbo);
- drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 9920a6fc11bf..5d42f8d8e68d 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -45,8 +45,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
@@ -253,14 +251,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_place placements;
struct ttm_placement placement;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -300,14 +296,12 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
{
struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
- struct radeon_device *rdev;
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg tmp_mem;
struct ttm_placement placement;
struct ttm_place placements;
int r;
- rdev = radeon_get_rdev(bo->bdev);
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
placement.num_placement = 1;
@@ -792,7 +786,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
r = ttm_bo_device_init(&rdev->mman.bdev,
&radeon_bo_driver,
rdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET,
rdev->need_dma32);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -901,16 +894,10 @@ static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf)
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct radeon_device *rdev;
int r;
+ struct drm_file *file_priv = filp->private_data;
+ struct radeon_device *rdev = file_priv->minor->dev->dev_private;
- if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- rdev = file_priv->minor->dev->dev_private;
if (rdev == NULL) {
return -EINVAL;
}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7c36e2777a15..1529849e217e 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -36,3 +36,7 @@ config DRM_RCAR_VSP
depends on VIDEO_RENESAS_VSP1=y || (VIDEO_RENESAS_VSP1 && DRM_RCAR_DU=m)
help
Enable support to expose the R-Car VSP Compositor as KMS planes.
+
+config DRM_RCAR_WRITEBACK
+ bool
+ default y if ARM64
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 2a3b8d7972b5..6c2ed9c46467 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -4,7 +4,7 @@ rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_encoder.o \
rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_plane.o
+ rcar_du_plane.o \
rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7790.dtb.o \
@@ -13,6 +13,7 @@ rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_of.o \
rcar_du_of_lvds_r8a7795.dtb.o \
rcar_du_of_lvds_r8a7796.dtb.o
rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_WRITEBACK) += rcar_du_writeback.o
obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
obj-$(CONFIG_DRM_RCAR_DW_HDMI) += rcar_dw_hdmi.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 4cdea14d552f..2da46e3dc4ae 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -32,21 +32,21 @@
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +54,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -62,7 +62,7 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
void rcar_du_crtc_dsysr_clr_set(struct rcar_du_crtc *rcrtc, u32 clr, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcrtc->dsysr = (rcrtc->dsysr & ~clr) | set;
rcar_du_write(rcdu, rcrtc->mmio_offset + DSYSR, rcrtc->dsysr);
@@ -157,10 +157,9 @@ static void rcar_du_dpll_divider(struct rcar_du_crtc *rcrtc,
}
done:
- dev_dbg(rcrtc->group->dev->dev,
+ dev_dbg(rcrtc->dev->dev,
"output:%u, fdpll:%u, n:%u, m:%u, diff:%lu\n",
- dpll->output, dpll->fdpll, dpll->n, dpll->m,
- best_diff);
+ dpll->output, dpll->fdpll, dpll->n, dpll->m, best_diff);
}
struct du_clk_params {
@@ -212,7 +211,7 @@ static const struct soc_device_attribute rcar_du_r8a7795_es1[] = {
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
const struct drm_display_mode *mode = &rcrtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned long mode_clock = mode->clock * 1000;
u32 dsmr;
u32 escr;
@@ -277,7 +276,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_escr_divider(rcrtc->extclock, mode_clock,
ESCR_DCLKSEL_DCLKIN, &params);
- dev_dbg(rcrtc->group->dev->dev, "mode clock %lu %s rate %lu\n",
+ dev_dbg(rcrtc->dev->dev, "mode clock %lu %s rate %lu\n",
mode_clock, params.clk == rcrtc->clock ? "cpg" : "ext",
params.rate);
@@ -285,7 +284,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
escr = params.escr;
}
- dev_dbg(rcrtc->group->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
+ dev_dbg(rcrtc->dev->dev, "%s: ESCR 0x%08x\n", __func__, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? ESCR13 : ESCR02, escr);
rcar_du_crtc_write(rcrtc, rcrtc->index % 2 ? OTAR13 : OTAR02, 0);
@@ -333,7 +332,7 @@ plane_format(struct rcar_du_plane *plane)
static void rcar_du_crtc_update_planes(struct rcar_du_crtc *rcrtc)
{
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
unsigned int num_planes = 0;
unsigned int dptsr_planes;
unsigned int hwplanes = 0;
@@ -463,7 +462,7 @@ static bool rcar_du_crtc_page_flip_pending(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_wait_page_flip(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
if (wait_event_timeout(rcrtc->flip_wait,
!rcar_du_crtc_page_flip_pending(rcrtc),
@@ -493,7 +492,7 @@ static void rcar_du_crtc_setup(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR, 0);
/* Enable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_enable(rcrtc);
/* Turn vertical blanking interrupt reporting on. */
@@ -564,7 +563,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_disable_planes(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
struct drm_crtc *crtc = &rcrtc->crtc;
u32 status;
@@ -617,7 +616,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
drm_crtc_vblank_off(crtc);
/* Disable the VSP compositor. */
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_disable(rcrtc);
/*
@@ -627,7 +626,7 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
* TODO: Find another way to stop the display for DUs that don't support
* TVM sync.
*/
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_TVM_SYNC))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_TVM_SYNC))
rcar_du_crtc_dsysr_clr_set(rcrtc, DSYSR_TVM_MASK,
DSYSR_TVM_SWITCH);
@@ -648,8 +647,13 @@ static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
rstate->outputs = 0;
drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ struct rcar_du_encoder *renc;
+ /* Skip the writeback encoder. */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL)
+ continue;
+
+ renc = to_rcar_encoder(encoder);
rstate->outputs |= BIT(renc->output);
}
@@ -661,7 +665,7 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc->state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_get(rcrtc);
@@ -689,7 +693,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(old_state);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
rcar_du_crtc_stop(rcrtc);
rcar_du_crtc_put(rcrtc);
@@ -735,7 +739,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
*/
rcar_du_crtc_get(rcrtc);
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_begin(rcrtc);
}
@@ -757,15 +761,16 @@ static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irqrestore(&dev->event_lock, flags);
}
- if (rcar_du_has(rcrtc->group->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
+ if (rcar_du_has(rcrtc->dev, RCAR_DU_FEATURE_VSP1_SOURCE))
rcar_du_vsp_atomic_flush(rcrtc);
}
-enum drm_mode_status rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
- const struct drm_display_mode *mode)
+static enum drm_mode_status
+rcar_du_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
unsigned int vbp;
@@ -797,7 +802,7 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
const char **sources;
unsigned int count;
int i = -1;
@@ -981,8 +986,8 @@ static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
return 0;
}
-const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
- size_t *count)
+static const char *const *
+rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc, size_t *count)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
@@ -1079,7 +1084,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
{
struct rcar_du_crtc *rcrtc = arg;
- struct rcar_du_device *rcdu = rcrtc->group->dev;
+ struct rcar_du_device *rcdu = rcrtc->dev;
irqreturn_t ret = IRQ_NONE;
u32 status;
@@ -1171,6 +1176,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
init_waitqueue_head(&rcrtc->vblank_wait);
spin_lock_init(&rcrtc->vblank_lock);
+ rcrtc->dev = rcdu;
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index bcb35b0b7612..3b7fc668996f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,6 +15,7 @@
#include <linux/wait.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_writeback.h>
#include <media/vsp1.h>
@@ -24,10 +25,11 @@ struct rcar_du_vsp;
/**
* struct rcar_du_crtc - the CRTC, representing a DU superposition processor
* @crtc: base DRM CRTC
+ * @dev: the DU device
* @clock: the CRTC functional clock
* @extclock: external pixel dot clock (optional)
* @mmio_offset: offset of the CRTC registers in the DU MMIO block
- * @index: CRTC software and hardware index
+ * @index: CRTC hardware index
* @initialized: whether the CRTC has been initialized and clocks enabled
* @dsysr: cached value of the DSYSR register
* @vblank_enable: whether vblank events are enabled on this CRTC
@@ -39,10 +41,12 @@ struct rcar_du_vsp;
* @group: CRTC group this CRTC belongs to
* @vsp: VSP feeding video to this CRTC
* @vsp_pipe: index of the VSP pipeline feeding video to this CRTC
+ * @writeback: the writeback connector
*/
struct rcar_du_crtc {
struct drm_crtc crtc;
+ struct rcar_du_device *dev;
struct clk *clock;
struct clk *extclock;
unsigned int mmio_offset;
@@ -65,9 +69,12 @@ struct rcar_du_crtc {
const char *const *sources;
unsigned int sources_count;
+
+ struct drm_writeback_connector writeback;
};
-#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+#define wb_to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, writeback)
/**
* struct rcar_du_crtc_state - Driver-specific CRTC state
@@ -97,8 +104,6 @@ enum rcar_du_output {
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex);
-void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
-void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 8ee4e762f4e5..6c91753af7bc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -28,13 +28,33 @@ static const struct drm_encoder_funcs encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static unsigned int rcar_du_encoder_count_ports(struct device_node *node)
+{
+ struct device_node *ports;
+ struct device_node *port;
+ unsigned int num_ports = 0;
+
+ ports = of_get_child_by_name(node, "ports");
+ if (!ports)
+ ports = of_node_get(node);
+
+ for_each_child_of_node(ports, port) {
+ if (of_node_name_eq(port, "port"))
+ num_ports++;
+ }
+
+ of_node_put(ports);
+
+ return num_ports;
+}
+
int rcar_du_encoder_init(struct rcar_du_device *rcdu,
enum rcar_du_output output,
struct device_node *enc_node)
{
struct rcar_du_encoder *renc;
struct drm_encoder *encoder;
- struct drm_bridge *bridge = NULL;
+ struct drm_bridge *bridge;
int ret;
renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
@@ -48,11 +68,33 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
enc_node, output);
- /* Locate the DRM bridge from the encoder DT node. */
- bridge = of_drm_find_bridge(enc_node);
- if (!bridge) {
- ret = -EPROBE_DEFER;
- goto done;
+ /*
+ * Locate the DRM bridge from the DT node. For the DPAD outputs, if the
+ * DT node has a single port, assume that it describes a panel and
+ * create a panel bridge.
+ */
+ if ((output == RCAR_DU_OUTPUT_DPAD0 ||
+ output == RCAR_DU_OUTPUT_DPAD1) &&
+ rcar_du_encoder_count_ports(enc_node) == 1) {
+ struct drm_panel *panel = of_drm_find_panel(enc_node);
+
+ if (IS_ERR(panel)) {
+ ret = PTR_ERR(panel);
+ goto done;
+ }
+
+ bridge = devm_drm_panel_bridge_add(rcdu->dev, panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto done;
+ }
+ } else {
+ bridge = of_drm_find_bridge(enc_node);
+ if (!bridge) {
+ ret = -EPROBE_DEFER;
+ goto done;
+ }
}
ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 3b7d50a8fb9b..f8f7fff34dff 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -26,6 +26,7 @@
#include "rcar_du_kms.h"
#include "rcar_du_regs.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -34,60 +35,70 @@
static const struct rcar_du_format_info rcar_du_format_infos[] = {
{
.fourcc = DRM_FORMAT_RGB565,
+ .v4l2 = V4L2_PIX_FMT_RGB565,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_ARGB1555,
+ .v4l2 = V4L2_PIX_FMT_ARGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB1555,
+ .v4l2 = V4L2_PIX_FMT_XRGB555,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_XRGB8888,
+ .v4l2 = V4L2_PIX_FMT_XBGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_RGB888,
}, {
.fourcc = DRM_FORMAT_ARGB8888,
+ .v4l2 = V4L2_PIX_FMT_ABGR32,
.bpp = 32,
.planes = 1,
.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
.edf = PnDDCR4_EDF_ARGB8888,
}, {
.fourcc = DRM_FORMAT_UYVY,
+ .v4l2 = V4L2_PIX_FMT_UYVY,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_YUYV,
+ .v4l2 = V4L2_PIX_FMT_YUYV,
.bpp = 16,
.planes = 1,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV12,
+ .v4l2 = V4L2_PIX_FMT_NV12M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV21,
+ .v4l2 = V4L2_PIX_FMT_NV21M,
.bpp = 12,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
.edf = PnDDCR4_EDF_NONE,
}, {
.fourcc = DRM_FORMAT_NV16,
+ .v4l2 = V4L2_PIX_FMT_NV16M,
.bpp = 16,
.planes = 2,
.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
@@ -99,62 +110,77 @@ static const struct rcar_du_format_info rcar_du_format_infos[] = {
*/
{
.fourcc = DRM_FORMAT_RGB332,
+ .v4l2 = V4L2_PIX_FMT_RGB332,
.bpp = 8,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_ARGB4444,
+ .v4l2 = V4L2_PIX_FMT_ARGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_XRGB4444,
+ .v4l2 = V4L2_PIX_FMT_XRGB444,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGR888,
+ .v4l2 = V4L2_PIX_FMT_RGB24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_RGB888,
+ .v4l2 = V4L2_PIX_FMT_BGR24,
.bpp = 24,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRA8888,
+ .v4l2 = V4L2_PIX_FMT_ARGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_BGRX8888,
+ .v4l2 = V4L2_PIX_FMT_XRGB32,
.bpp = 32,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_YVYU,
+ .v4l2 = V4L2_PIX_FMT_YVYU,
.bpp = 16,
.planes = 1,
}, {
.fourcc = DRM_FORMAT_NV61,
+ .v4l2 = V4L2_PIX_FMT_NV61M,
.bpp = 16,
.planes = 2,
}, {
.fourcc = DRM_FORMAT_YUV420,
+ .v4l2 = V4L2_PIX_FMT_YUV420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU420,
+ .v4l2 = V4L2_PIX_FMT_YVU420M,
.bpp = 12,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV422,
+ .v4l2 = V4L2_PIX_FMT_YUV422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU422,
+ .v4l2 = V4L2_PIX_FMT_YVU422M,
.bpp = 16,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YUV444,
+ .v4l2 = V4L2_PIX_FMT_YUV444M,
.bpp = 24,
.planes = 3,
}, {
.fourcc = DRM_FORMAT_YVU444,
+ .v4l2 = V4L2_PIX_FMT_YVU444M,
.bpp = 24,
.planes = 3,
},
@@ -639,6 +665,17 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
encoder->possible_clones = (1 << num_encoders) - 1;
}
+ /* Create the writeback connectors. */
+ if (rcdu->info->gen >= 3) {
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_crtc *rcrtc = &rcdu->crtcs[i];
+
+ ret = rcar_du_writeback_init(rcdu, rcrtc);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
/*
* Initialize the default DPAD0 source to the index of the first DU
* channel that can be connected to DPAD0. The exact value doesn't
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index e171527abdaa..0346504d8c59 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -19,6 +19,7 @@ struct rcar_du_device;
struct rcar_du_format_info {
u32 fourcc;
+ u32 v4l2;
unsigned int bpp;
unsigned int planes;
unsigned int pnmr;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 0878accbd134..5e4faf258c31 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
@@ -26,16 +27,19 @@
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
#include "rcar_du_vsp.h"
+#include "rcar_du_writeback.h"
-static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
+static void rcar_du_vsp_complete(void *private, unsigned int status, u32 crc)
{
struct rcar_du_crtc *crtc = private;
if (crtc->vblank_enable)
drm_crtc_handle_vblank(&crtc->crtc);
- if (completed)
+ if (status & VSP1_DU_STATUS_COMPLETE)
rcar_du_crtc_finish_page_flip(crtc);
+ if (status & VSP1_DU_STATUS_WRITEBACK)
+ rcar_du_writeback_complete(crtc);
drm_crtc_add_crc_entry(&crtc->crtc, false, 0, &crc);
}
@@ -43,7 +47,7 @@ static void rcar_du_vsp_complete(void *private, bool completed, u32 crc)
void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
- struct rcar_du_device *rcdu = crtc->group->dev;
+ struct rcar_du_device *rcdu = crtc->dev;
struct vsp1_du_lif_config cfg = {
.width = mode->hdisplay,
.height = mode->vdisplay,
@@ -107,11 +111,12 @@ void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc)
state = to_rcar_crtc_state(crtc->crtc.state);
cfg.crc = state->crc;
+ rcar_du_writeback_setup(crtc, &cfg.writeback);
+
vsp1_du_atomic_flush(crtc->vsp->vsp, crtc->vsp_pipe, &cfg);
}
-/* Keep the two tables in sync. */
-static const u32 formats_kms[] = {
+static const u32 rcar_du_vsp_formats[] = {
DRM_FORMAT_RGB332,
DRM_FORMAT_ARGB4444,
DRM_FORMAT_XRGB4444,
@@ -139,40 +144,13 @@ static const u32 formats_kms[] = {
DRM_FORMAT_YVU444,
};
-static const u32 formats_v4l2[] = {
- V4L2_PIX_FMT_RGB332,
- V4L2_PIX_FMT_ARGB444,
- V4L2_PIX_FMT_XRGB444,
- V4L2_PIX_FMT_ARGB555,
- V4L2_PIX_FMT_XRGB555,
- V4L2_PIX_FMT_RGB565,
- V4L2_PIX_FMT_RGB24,
- V4L2_PIX_FMT_BGR24,
- V4L2_PIX_FMT_ARGB32,
- V4L2_PIX_FMT_XRGB32,
- V4L2_PIX_FMT_ABGR32,
- V4L2_PIX_FMT_XBGR32,
- V4L2_PIX_FMT_UYVY,
- V4L2_PIX_FMT_YUYV,
- V4L2_PIX_FMT_YVYU,
- V4L2_PIX_FMT_NV12M,
- V4L2_PIX_FMT_NV21M,
- V4L2_PIX_FMT_NV16M,
- V4L2_PIX_FMT_NV61M,
- V4L2_PIX_FMT_YUV420M,
- V4L2_PIX_FMT_YVU420M,
- V4L2_PIX_FMT_YUV422M,
- V4L2_PIX_FMT_YVU422M,
- V4L2_PIX_FMT_YUV444M,
- V4L2_PIX_FMT_YVU444M,
-};
-
static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
{
struct rcar_du_vsp_plane_state *state =
to_rcar_vsp_plane_state(plane->plane.state);
struct rcar_du_crtc *crtc = to_rcar_crtc(state->state.crtc);
struct drm_framebuffer *fb = plane->plane.state->fb;
+ const struct rcar_du_format_info *format;
struct vsp1_du_atomic_config cfg = {
.pixelformat = 0,
.pitch = fb->pitches[0],
@@ -195,37 +173,23 @@ static void rcar_du_vsp_plane_setup(struct rcar_du_vsp_plane *plane)
cfg.mem[i] = sg_dma_address(state->sg_tables[i].sgl)
+ fb->offsets[i];
- for (i = 0; i < ARRAY_SIZE(formats_kms); ++i) {
- if (formats_kms[i] == state->format->fourcc) {
- cfg.pixelformat = formats_v4l2[i];
- break;
- }
- }
+ format = rcar_du_format_info(state->format->fourcc);
+ cfg.pixelformat = format->v4l2;
vsp1_du_atomic_update(plane->vsp->vsp, crtc->vsp_pipe,
plane->index, &cfg);
}
-static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
{
- struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
- struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
struct rcar_du_device *rcdu = vsp->dev;
unsigned int i;
int ret;
- /*
- * There's no need to prepare (and unprepare) the framebuffer when the
- * plane is not visible, as it will not be displayed.
- */
- if (!state->visible)
- return 0;
-
- for (i = 0; i < rstate->format->planes; ++i) {
- struct drm_gem_cma_object *gem =
- drm_fb_cma_get_gem_obj(state->fb, i);
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
+ struct sg_table *sgt = &sg_tables[i];
ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
gem->base.size);
@@ -240,15 +204,11 @@ static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
}
}
- ret = drm_gem_fb_prepare_fb(plane, state);
- if (ret)
- goto fail;
-
return 0;
fail:
while (i--) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
@@ -257,24 +217,52 @@ fail:
return ret;
}
-static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
- struct drm_plane_state *state)
+static int rcar_du_vsp_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
{
struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
- unsigned int i;
+ int ret;
+ /*
+ * There's no need to prepare (and unprepare) the framebuffer when the
+ * plane is not visible, as it will not be displayed.
+ */
if (!state->visible)
- return;
+ return 0;
+
+ ret = rcar_du_vsp_map_fb(vsp, state->fb, rstate->sg_tables);
+ if (ret < 0)
+ return ret;
+
+ return drm_gem_fb_prepare_fb(plane, state);
+}
+
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ unsigned int i;
- for (i = 0; i < rstate->format->planes; ++i) {
- struct sg_table *sgt = &rstate->sg_tables[i];
+ for (i = 0; i < fb->format->num_planes; ++i) {
+ struct sg_table *sgt = &sg_tables[i];
vsp1_du_unmap_sg(vsp->vsp, sgt);
sg_free_table(sgt);
}
}
+static void rcar_du_vsp_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct rcar_du_vsp_plane_state *rstate = to_rcar_vsp_plane_state(state);
+ struct rcar_du_vsp *vsp = to_rcar_vsp_plane(plane)->vsp;
+
+ if (!state->visible)
+ return;
+
+ rcar_du_vsp_unmap_fb(vsp, state->fb, rstate->sg_tables);
+}
+
static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
@@ -395,8 +383,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np,
ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_vsp_plane_funcs,
- formats_kms,
- ARRAY_SIZE(formats_kms),
+ rcar_du_vsp_formats,
+ ARRAY_SIZE(rcar_du_vsp_formats),
NULL, type, NULL);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
index db232037f24a..9b4724159378 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.h
@@ -12,8 +12,10 @@
#include <drm/drm_plane.h>
+struct drm_framebuffer;
struct rcar_du_format_info;
struct rcar_du_vsp;
+struct sg_table;
struct rcar_du_vsp_plane {
struct drm_plane plane;
@@ -60,6 +62,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc);
void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc);
+int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
+void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3]);
#else
static inline int rcar_du_vsp_init(struct rcar_du_vsp *vsp,
struct device_node *np,
@@ -71,6 +77,17 @@ static inline void rcar_du_vsp_enable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_disable(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc) { };
static inline void rcar_du_vsp_atomic_flush(struct rcar_du_crtc *crtc) { };
+static inline int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+ return -ENXIO;
+}
+static inline void rcar_du_vsp_unmap_fb(struct rcar_du_vsp *vsp,
+ struct drm_framebuffer *fb,
+ struct sg_table sg_tables[3])
+{
+}
#endif
#endif /* __RCAR_DU_VSP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
new file mode 100644
index 000000000000..989a0be94131
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * rcar_du_writeback.c -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_writeback.h>
+
+#include "rcar_du_crtc.h"
+#include "rcar_du_drv.h"
+#include "rcar_du_kms.h"
+
+/**
+ * struct rcar_du_wb_conn_state - Driver-specific writeback connector state
+ * @state: base DRM connector state
+ * @format: format of the writeback framebuffer
+ */
+struct rcar_du_wb_conn_state {
+ struct drm_connector_state state;
+ const struct rcar_du_format_info *format;
+};
+
+#define to_rcar_wb_conn_state(s) \
+ container_of(s, struct rcar_du_wb_conn_state, state)
+
+/**
+ * struct rcar_du_wb_job - Driver-private data for writeback jobs
+ * @sg_tables: scatter-gather tables for the framebuffer memory
+ */
+struct rcar_du_wb_job {
+ struct sg_table sg_tables[3];
+};
+
+static int rcar_du_wb_conn_get_modes(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+
+ return drm_add_modes_noedid(connector, dev->mode_config.max_width,
+ dev->mode_config.max_height);
+}
+
+static int rcar_du_wb_prepare_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob;
+ int ret;
+
+ if (!job->fb)
+ return 0;
+
+ rjob = kzalloc(sizeof(*rjob), GFP_KERNEL);
+ if (!rjob)
+ return -ENOMEM;
+
+ /* Map the framebuffer to the VSP. */
+ ret = rcar_du_vsp_map_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ if (ret < 0) {
+ kfree(rjob);
+ return ret;
+ }
+
+ job->priv = rjob;
+ return 0;
+}
+
+static void rcar_du_wb_cleanup_job(struct drm_writeback_connector *connector,
+ struct drm_writeback_job *job)
+{
+ struct rcar_du_crtc *rcrtc = wb_to_rcar_crtc(connector);
+ struct rcar_du_wb_job *rjob = job->priv;
+
+ if (!job->fb)
+ return;
+
+ rcar_du_vsp_unmap_fb(rcrtc->vsp, job->fb, rjob->sg_tables);
+ kfree(rjob);
+}
+
+static const struct drm_connector_helper_funcs rcar_du_wb_conn_helper_funcs = {
+ .get_modes = rcar_du_wb_conn_get_modes,
+ .prepare_writeback_job = rcar_du_wb_prepare_job,
+ .cleanup_writeback_job = rcar_du_wb_cleanup_job,
+};
+
+static struct drm_connector_state *
+rcar_du_wb_conn_duplicate_state(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *copy;
+
+ if (WARN_ON(!connector->state))
+ return NULL;
+
+ copy = kzalloc(sizeof(*copy), GFP_KERNEL);
+ if (!copy)
+ return NULL;
+
+ __drm_atomic_helper_connector_duplicate_state(connector, &copy->state);
+
+ return &copy->state;
+}
+
+static void rcar_du_wb_conn_destroy_state(struct drm_connector *connector,
+ struct drm_connector_state *state)
+{
+ __drm_atomic_helper_connector_destroy_state(state);
+ kfree(to_rcar_wb_conn_state(state));
+}
+
+static void rcar_du_wb_conn_reset(struct drm_connector *connector)
+{
+ struct rcar_du_wb_conn_state *state;
+
+ if (connector->state) {
+ rcar_du_wb_conn_destroy_state(connector, connector->state);
+ connector->state = NULL;
+ }
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state == NULL)
+ return;
+
+ __drm_atomic_helper_connector_reset(connector, &state->state);
+}
+
+static const struct drm_connector_funcs rcar_du_wb_conn_funcs = {
+ .reset = rcar_du_wb_conn_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = rcar_du_wb_conn_duplicate_state,
+ .atomic_destroy_state = rcar_du_wb_conn_destroy_state,
+};
+
+static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rcar_du_wb_conn_state *wb_state =
+ to_rcar_wb_conn_state(conn_state);
+ const struct drm_display_mode *mode = &crtc_state->mode;
+ struct drm_device *dev = encoder->dev;
+ struct drm_framebuffer *fb;
+
+ if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+ return 0;
+
+ fb = conn_state->writeback_job->fb;
+
+ /*
+ * Verify that the framebuffer format is supported and that its size
+ * matches the current mode.
+ */
+ if (fb->width != mode->hdisplay || fb->height != mode->vdisplay) {
+ dev_dbg(dev->dev, "%s: invalid framebuffer size %ux%u\n",
+ __func__, fb->width, fb->height);
+ return -EINVAL;
+ }
+
+ wb_state->format = rcar_du_format_info(fb->format->format);
+ if (wb_state->format == NULL) {
+ dev_dbg(dev->dev, "%s: unsupported format %08x\n", __func__,
+ fb->format->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs rcar_du_wb_enc_helper_funcs = {
+ .atomic_check = rcar_du_wb_enc_atomic_check,
+};
+
+/*
+ * Only RGB formats are currently supported as the VSP outputs RGB to the DU
+ * and can't convert to YUV separately for writeback.
+ */
+static const u32 writeback_formats[] = {
+ DRM_FORMAT_RGB332,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888,
+};
+
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ struct drm_writeback_connector *wb_conn = &rcrtc->writeback;
+
+ wb_conn->encoder.possible_crtcs = 1 << drm_crtc_index(&rcrtc->crtc);
+ drm_connector_helper_add(&wb_conn->base,
+ &rcar_du_wb_conn_helper_funcs);
+
+ return drm_writeback_connector_init(rcdu->ddev, wb_conn,
+ &rcar_du_wb_conn_funcs,
+ &rcar_du_wb_enc_helper_funcs,
+ writeback_formats,
+ ARRAY_SIZE(writeback_formats));
+}
+
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+ struct rcar_du_wb_conn_state *wb_state;
+ struct drm_connector_state *state;
+ struct rcar_du_wb_job *rjob;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+
+ state = rcrtc->writeback.base.state;
+ if (!state || !state->writeback_job || !state->writeback_job->fb)
+ return;
+
+ fb = state->writeback_job->fb;
+ rjob = state->writeback_job->priv;
+ wb_state = to_rcar_wb_conn_state(state);
+
+ cfg->pixelformat = wb_state->format->v4l2;
+ cfg->pitch = fb->pitches[0];
+
+ for (i = 0; i < wb_state->format->planes; ++i)
+ cfg->mem[i] = sg_dma_address(rjob->sg_tables[i].sgl)
+ + fb->offsets[i];
+
+ drm_writeback_queue_job(&rcrtc->writeback, state);
+}
+
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+ drm_writeback_signal_completion(&rcrtc->writeback, 0);
+}
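
For orientation only (not part of the patch): the writeback connector created above is exposed through the generic DRM writeback uAPI, so a userspace client binds it to a CRTC and attaches a destination framebuffer in an atomic commit roughly as sketched below. The connector, CRTC and framebuffer IDs are assumed to have been discovered already, the client is assumed to have enabled DRM_CLIENT_CAP_ATOMIC and DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, and lookup_prop_id() is a hypothetical helper that resolves a property name to its numeric ID.

/*
 * Illustrative libdrm sketch, not part of this series. Assumes the CRTC is
 * already active with a mode and that wb_conn_id/crtc_id/wb_fb_id were
 * discovered through drmModeGetResources(). lookup_prop_id() is a
 * hypothetical helper resolving a DRM property name to its ID.
 */
#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

extern uint32_t lookup_prop_id(int fd, uint32_t obj_id, uint32_t obj_type,
			       const char *name); /* hypothetical */

int queue_writeback(int fd, uint32_t wb_conn_id, uint32_t crtc_id,
		    uint32_t wb_fb_id)
{
	drmModeAtomicReqPtr req = drmModeAtomicAlloc();
	int out_fence = -1;
	int ret;

	/* Route the writeback connector to the CRTC being captured. */
	drmModeAtomicAddProperty(req, wb_conn_id,
				 lookup_prop_id(fd, wb_conn_id,
						DRM_MODE_OBJECT_CONNECTOR,
						"CRTC_ID"), crtc_id);
	/* Attach the destination framebuffer for this one-shot capture. */
	drmModeAtomicAddProperty(req, wb_conn_id,
				 lookup_prop_id(fd, wb_conn_id,
						DRM_MODE_OBJECT_CONNECTOR,
						"WRITEBACK_FB_ID"), wb_fb_id);
	/* Request a fence that signals once the VSP has finished writing. */
	drmModeAtomicAddProperty(req, wb_conn_id,
				 lookup_prop_id(fd, wb_conn_id,
						DRM_MODE_OBJECT_CONNECTOR,
						"WRITEBACK_OUT_FENCE_PTR"),
				 (uint64_t)(uintptr_t)&out_fence);

	ret = drmModeAtomicCommit(fd, req, 0, NULL);
	drmModeAtomicFree(req);
	return ret < 0 ? ret : out_fence;
}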
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.h b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
new file mode 100644
index 000000000000..fa87ebf8d21f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * rcar_du_writeback.h -- R-Car Display Unit Writeback Support
+ *
+ * Copyright (C) 2019 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __RCAR_DU_WRITEBACK_H__
+#define __RCAR_DU_WRITEBACK_H__
+
+#include <drm/drm_plane.h>
+
+struct rcar_du_crtc;
+struct rcar_du_device;
+struct vsp1_du_atomic_pipe_config;
+
+#ifdef CONFIG_DRM_RCAR_WRITEBACK
+int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc);
+void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg);
+void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc);
+#else
+static inline int rcar_du_writeback_init(struct rcar_du_device *rcdu,
+ struct rcar_du_crtc *rcrtc)
+{
+ return -ENXIO;
+}
+static inline void
+rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
+ struct vsp1_du_writeback_config *cfg)
+{
+}
+static inline void rcar_du_writeback_complete(struct rcar_du_crtc *rcrtc)
+{
+}
+#endif
+
+#endif /* __RCAR_DU_WRITEBACK_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 7ef97b2a6eda..620b51aab291 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -283,7 +283,7 @@ static void rcar_lvds_d3_e3_pll_calc(struct rcar_lvds *lvds, struct clk *clk,
* divider.
*/
fout = fvco / (1 << e) / div7;
- div = DIV_ROUND_CLOSEST(fout, target);
+ div = max(1UL, DIV_ROUND_CLOSEST(fout, target));
diff = abs(fout / div - target);
if (diff < pll->diff) {
@@ -485,9 +485,13 @@ static void rcar_lvds_enable(struct drm_bridge *bridge)
}
if (lvds->info->quirks & RCAR_LVDS_QUIRK_GEN3_LVEN) {
- /* Turn on the LVDS PHY. */
+ /*
+ * Turn on the LVDS PHY. On D3, the LVEN and LVRES bits must be
+ * set at the same time, so don't write the register yet.
+ */
lvdcr0 |= LVDCR0_LVEN;
- rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+ if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_PWD))
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
}
if (!(lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL)) {
@@ -531,11 +535,16 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
+ int min_freq;
+
/*
* The internal LVDS encoder has a restricted clock frequency operating
- * range (31MHz to 148.5MHz). Clamp the clock accordingly.
+ * range, from 5MHz to 148.5MHz on D3 and E3, and from 31MHz to
+ * 148.5MHz on all other platforms. Clamp the clock accordingly.
*/
- adjusted_mode->clock = clamp(adjusted_mode->clock, 31000, 148500);
+ min_freq = lvds->info->quirks & RCAR_LVDS_QUIRK_EXT_PLL ? 5000 : 31000;
+ adjusted_mode->clock = clamp(adjusted_mode->clock, min_freq, 148500);
return true;
}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 1e75196f9659..2cdf3b62d559 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -77,4 +77,12 @@ config ROCKCHIP_RGB
Some Rockchip CRTCs, like rv1108, can directly output parallel
and serial RGB format to panel or connect to a conversion chip.
say Y to enable its driver.
+
+config ROCKCHIP_RK3066_HDMI
+ bool "Rockchip specific extensions for RK3066 HDMI"
+ depends on DRM_ROCKCHIP
+ help
+ This selects support for Rockchip SoC specific extensions
+ for the RK3066 HDMI driver. If you want to enable
+ HDMI on RK3066-based SoCs, you should select this option.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index f6fc9d5dd0ad..524684ba7f6a 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -15,5 +15,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
+rockchipdrm-$(CONFIG_ROCKCHIP_RK3066_HDMI) += rk3066_hdmi.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
new file mode 100644
index 000000000000..85fc5f01f761
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c
@@ -0,0 +1,876 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#include <drm/drm_of.h>
+#include <drm/drm_probe_helper.h>
+
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include "rk3066_hdmi.h"
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define DEFAULT_PLLA_RATE 30000000
+
+struct hdmi_data_info {
+ int vic; /* The CEA Video ID (VIC) of the current drm display mode. */
+ bool sink_is_hdmi;
+ unsigned int enc_out_format;
+ unsigned int colorimetry;
+};
+
+struct rk3066_hdmi_i2c {
+ struct i2c_adapter adap;
+
+ u8 ddc_addr;
+ u8 segment_addr;
+ u8 stat;
+
+ struct mutex i2c_lock; /* For i2c operation. */
+ struct completion cmpltn;
+};
+
+struct rk3066_hdmi {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct regmap *grf_regmap;
+ int irq;
+ struct clk *hclk;
+ void __iomem *regs;
+
+ struct drm_connector connector;
+ struct drm_encoder encoder;
+
+ struct rk3066_hdmi_i2c *i2c;
+ struct i2c_adapter *ddc;
+
+ unsigned int tmdsclk;
+
+ struct hdmi_data_info hdmi_data;
+ struct drm_display_mode previous_mode;
+};
+
+#define to_rk3066_hdmi(x) container_of(x, struct rk3066_hdmi, x)
+
+static inline u8 hdmi_readb(struct rk3066_hdmi *hdmi, u16 offset)
+{
+ return readl_relaxed(hdmi->regs + offset);
+}
+
+static inline void hdmi_writeb(struct rk3066_hdmi *hdmi, u16 offset, u32 val)
+{
+ writel_relaxed(val, hdmi->regs + offset);
+}
+
+static inline void hdmi_modb(struct rk3066_hdmi *hdmi, u16 offset,
+ u32 msk, u32 val)
+{
+ u8 temp = hdmi_readb(hdmi, offset) & ~msk;
+
+ temp |= val & msk;
+ hdmi_writeb(hdmi, offset, temp);
+}
+
+static void rk3066_hdmi_i2c_init(struct rk3066_hdmi *hdmi)
+{
+ int ddc_bus_freq;
+
+ ddc_bus_freq = (hdmi->tmdsclk >> 2) / HDMI_SCL_RATE;
+
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_L, ddc_bus_freq & 0xFF);
+ hdmi_writeb(hdmi, HDMI_DDC_BUS_FREQ_H, (ddc_bus_freq >> 8) & 0xFF);
+
+ /* Clear the EDID interrupt flag and mute the interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, HDMI_INTR_EDID_MASK);
+}
+
+static inline u8 rk3066_hdmi_get_power_mode(struct rk3066_hdmi *hdmi)
+{
+ return hdmi_readb(hdmi, HDMI_SYS_CTRL) & HDMI_SYS_POWER_MODE_MASK;
+}
+
+static void rk3066_hdmi_set_power_mode(struct rk3066_hdmi *hdmi, int mode)
+{
+ u8 current_mode, next_mode;
+ u8 i = 0;
+
+ current_mode = rk3066_hdmi_get_power_mode(hdmi);
+
+ DRM_DEV_DEBUG(hdmi->dev, "mode :%d\n", mode);
+ DRM_DEV_DEBUG(hdmi->dev, "current_mode :%d\n", current_mode);
+
+ if (current_mode == mode)
+ return;
+
+ do {
+ if (current_mode > mode) {
+ next_mode = current_mode / 2;
+ } else {
+ if (current_mode < HDMI_SYS_POWER_MODE_A)
+ next_mode = HDMI_SYS_POWER_MODE_A;
+ else
+ next_mode = current_mode * 2;
+ }
+
+ DRM_DEV_DEBUG(hdmi->dev, "%d: next_mode :%d\n", i, next_mode);
+
+ if (next_mode != HDMI_SYS_POWER_MODE_D) {
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_MASK, next_mode);
+ } else {
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLL_RESET_MASK);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D |
+ HDMI_SYS_PLLB_RESET);
+ usleep_range(90, 100);
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_POWER_MODE_D);
+ }
+ current_mode = next_mode;
+ i = i + 1;
+ } while ((next_mode != mode) && (i < 5));
+
+ /*
+ * When the IP controller isn't configured with accurate video timing,
+ * DDC_CLK should be equal to the PLLA frequency, which is 30MHz,
+ * so we need to init the TMDS rate to the PCLK rate and reconfigure
+ * the DDC clock.
+ */
+ if (mode < HDMI_SYS_POWER_MODE_D)
+ hdmi->tmdsclk = DEFAULT_PLLA_RATE;
+}
+
+static int
+rk3066_hdmi_upload_frame(struct rk3066_hdmi *hdmi, int setup_rc,
+ union hdmi_infoframe *frame, u32 frame_index,
+ u32 mask, u32 disable, u32 enable)
+{
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, disable);
+
+ hdmi_writeb(hdmi, HDMI_CP_BUF_INDEX, frame_index);
+
+ if (setup_rc >= 0) {
+ u8 packed_frame[HDMI_MAXIMUM_INFO_FRAME_SIZE];
+ ssize_t rc, i;
+
+ rc = hdmi_infoframe_pack(frame, packed_frame,
+ sizeof(packed_frame));
+ if (rc < 0)
+ return rc;
+
+ for (i = 0; i < rc; i++)
+ hdmi_writeb(hdmi, HDMI_CP_BUF_ACC_HB0 + i * 4,
+ packed_frame[i]);
+
+ if (mask)
+ hdmi_modb(hdmi, HDMI_CP_AUTO_SEND_CTRL, mask, enable);
+ }
+
+ return setup_rc;
+}
+
+static int rk3066_hdmi_config_avi(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ union hdmi_infoframe frame;
+ int rc;
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ &hdmi->connector, mode);
+
+ if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV444)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+ else if (hdmi->hdmi_data.enc_out_format == HDMI_COLORSPACE_YUV422)
+ frame.avi.colorspace = HDMI_COLORSPACE_YUV422;
+ else
+ frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+
+ frame.avi.colorimetry = hdmi->hdmi_data.colorimetry;
+ frame.avi.scan_mode = HDMI_SCAN_MODE_NONE;
+
+ return rk3066_hdmi_upload_frame(hdmi, rc, &frame,
+ HDMI_INFOFRAME_AVI, 0, 0, 0);
+}
+
+static int rk3066_hdmi_config_video_timing(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ int value, vsync_offset;
+
+ /* Set the details for the external polarity and interlace mode. */
+ value = HDMI_EXT_VIDEO_SET_EN;
+ value |= mode->flags & DRM_MODE_FLAG_PHSYNC ?
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH : HDMI_VIDEO_HSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_PVSYNC ?
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH : HDMI_VIDEO_VSYNC_ACTIVE_LOW;
+ value |= mode->flags & DRM_MODE_FLAG_INTERLACE ?
+ HDMI_VIDEO_MODE_INTERLACE : HDMI_VIDEO_MODE_PROGRESSIVE;
+
+ if (hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3)
+ vsync_offset = 6;
+ else
+ vsync_offset = 0;
+
+ value |= vsync_offset << HDMI_VIDEO_VSYNC_OFFSET_SHIFT;
+ hdmi_writeb(hdmi, HDMI_EXT_VIDEO_PARA, value);
+
+ /* Set the details for the external video timing. */
+ value = mode->htotal;
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HBLANK_H, (value >> 8) & 0xFF);
+
+ value = mode->htotal - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDELAY_H, (value >> 8) & 0xFF);
+
+ value = mode->hsync_end - mode->hsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_HDURATION_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal;
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_L, value & 0xFF);
+ hdmi_writeb(hdmi, HDMI_EXT_VTOTAL_H, (value >> 8) & 0xFF);
+
+ value = mode->vtotal - mode->vdisplay;
+ hdmi_writeb(hdmi, HDMI_EXT_VBLANK_L, value & 0xFF);
+
+ value = mode->vtotal - mode->vsync_start + vsync_offset;
+ hdmi_writeb(hdmi, HDMI_EXT_VDELAY, value & 0xFF);
+
+ value = mode->vsync_end - mode->vsync_start;
+ hdmi_writeb(hdmi, HDMI_EXT_VDURATION, value & 0xFF);
+
+ return 0;
+}
+
+static void
+rk3066_hdmi_phy_write(struct rk3066_hdmi *hdmi, u16 offset, u8 value)
+{
+ hdmi_writeb(hdmi, offset, value);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL,
+ HDMI_SYS_PLL_RESET_MASK, HDMI_SYS_PLL_RESET);
+ usleep_range(90, 100);
+ hdmi_modb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_PLL_RESET_MASK, 0);
+ usleep_range(900, 1000);
+}
+
+static void rk3066_hdmi_config_phy(struct rk3066_hdmi *hdmi)
+{
+ /* TMDS uses the same frequency as dclk. */
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x22);
+
+ /*
+ * The semi-public documentation does not describe the hdmi registers
+ * used by the function rk3066_hdmi_phy_write(), so we keep using
+ * these magic values for now.
+ */
+ if (hdmi->tmdsclk > 100000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x0E);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xDA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA1);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x22);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else if (hdmi->tmdsclk > 50000000) {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x06);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xCA);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA3);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ } else {
+ rk3066_hdmi_phy_write(hdmi, 0x158, 0x02);
+ rk3066_hdmi_phy_write(hdmi, 0x15c, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x160, 0x60);
+ rk3066_hdmi_phy_write(hdmi, 0x164, 0x00);
+ rk3066_hdmi_phy_write(hdmi, 0x168, 0xC2);
+ rk3066_hdmi_phy_write(hdmi, 0x16c, 0xA2);
+ rk3066_hdmi_phy_write(hdmi, 0x170, 0x0e);
+ rk3066_hdmi_phy_write(hdmi, 0x174, 0x20);
+ rk3066_hdmi_phy_write(hdmi, 0x178, 0x00);
+ }
+}
+
+static int rk3066_hdmi_setup(struct rk3066_hdmi *hdmi,
+ struct drm_display_mode *mode)
+{
+ hdmi->hdmi_data.vic = drm_match_cea_mode(mode);
+ hdmi->hdmi_data.enc_out_format = HDMI_COLORSPACE_RGB;
+
+ if (hdmi->hdmi_data.vic == 6 || hdmi->hdmi_data.vic == 7 ||
+ hdmi->hdmi_data.vic == 21 || hdmi->hdmi_data.vic == 22 ||
+ hdmi->hdmi_data.vic == 2 || hdmi->hdmi_data.vic == 3 ||
+ hdmi->hdmi_data.vic == 17 || hdmi->hdmi_data.vic == 18)
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
+ else
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
+
+ hdmi->tmdsclk = mode->clock * 1000;
+
+ /* Mute video and audio output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2, HDMI_VIDEO_AUDIO_DISABLE_MASK,
+ HDMI_AUDIO_DISABLE | HDMI_VIDEO_DISABLE);
+
+ /* Set power state to mode B. */
+ if (rk3066_hdmi_get_power_mode(hdmi) != HDMI_SYS_POWER_MODE_B)
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+
+ /* Input video mode is RGB 24 bit. Use external data enable signal. */
+ hdmi_modb(hdmi, HDMI_AV_CTRL1,
+ HDMI_VIDEO_DE_MASK, HDMI_VIDEO_EXTERNAL_DE);
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL1,
+ HDMI_VIDEO_OUTPUT_RGB444 |
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT |
+ HDMI_VIDEO_INPUT_COLOR_RGB);
+ hdmi_writeb(hdmi, HDMI_DEEP_COLOR_MODE, 0x20);
+
+ rk3066_hdmi_config_video_timing(hdmi, mode);
+
+ if (hdmi->hdmi_data.sink_is_hdmi) {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK,
+ HDMI_VIDEO_MODE_HDMI);
+ rk3066_hdmi_config_avi(hdmi, mode);
+ } else {
+ hdmi_modb(hdmi, HDMI_HDCP_CTRL, HDMI_VIDEO_MODE_MASK, 0);
+ }
+
+ rk3066_hdmi_config_phy(hdmi);
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_E);
+
+ /*
+ * When the IP controller is configured with accurate video
+ * timing, the TMDS clock source should be switched to
+ * DCLK_LCDC, so we need to init the TMDS rate to the pixel mode
+ * clock rate and reconfigure the DDC clock.
+ */
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute video output. */
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK, HDMI_AUDIO_DISABLE);
+ return 0;
+}
+
+static void
+rk3066_hdmi_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ /* Store the display mode for plugin/DPMS poweron events. */
+ memcpy(&hdmi->previous_mode, adj_mode, sizeof(hdmi->previous_mode));
+}
+
+static void rk3066_hdmi_encoder_enable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+ int mux, val;
+
+ mux = drm_of_encoder_active_endpoint_id(hdmi->dev->of_node, encoder);
+ if (mux)
+ val = (HDMI_VIDEO_SEL << 16) | HDMI_VIDEO_SEL;
+ else
+ val = HDMI_VIDEO_SEL << 16;
+
+ regmap_write(hdmi->grf_regmap, GRF_SOC_CON0, val);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder enable select: vop%s\n",
+ (mux) ? "1" : "0");
+
+ rk3066_hdmi_setup(hdmi, &hdmi->previous_mode);
+}
+
+static void rk3066_hdmi_encoder_disable(struct drm_encoder *encoder)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(encoder);
+
+ DRM_DEV_DEBUG(hdmi->dev, "hdmi encoder disable\n");
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_E) {
+ hdmi_writeb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_VIDEO_AUDIO_DISABLE_MASK);
+ hdmi_modb(hdmi, HDMI_VIDEO_CTRL2,
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK,
+ HDMI_AUDIO_CP_LOGIC_RESET);
+ usleep_range(500, 510);
+ }
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+}
+
+static bool
+rk3066_hdmi_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ return true;
+}
+
+static int
+rk3066_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ s->output_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ return 0;
+}
+
+static const
+struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = {
+ .enable = rk3066_hdmi_encoder_enable,
+ .disable = rk3066_hdmi_encoder_disable,
+ .mode_fixup = rk3066_hdmi_encoder_mode_fixup,
+ .mode_set = rk3066_hdmi_encoder_mode_set,
+ .atomic_check = rk3066_hdmi_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rk3066_hdmi_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+static enum drm_connector_status
+rk3066_hdmi_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return (hdmi_readb(hdmi, HDMI_HPG_MENS_STA) & HDMI_HPG_IN_STATUS_HIGH) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static int rk3066_hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+ struct edid *edid;
+ int ret = 0;
+
+ if (!hdmi->ddc)
+ return 0;
+
+ edid = drm_get_edid(connector, hdmi->ddc);
+ if (edid) {
+ hdmi->hdmi_data.sink_is_hdmi = drm_detect_hdmi_monitor(edid);
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static enum drm_mode_status
+rk3066_hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ u32 vic = drm_match_cea_mode(mode);
+
+ if (vic > 1)
+ return MODE_OK;
+ else
+ return MODE_BAD;
+}
+
+static struct drm_encoder *
+rk3066_hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct rk3066_hdmi *hdmi = to_rk3066_hdmi(connector);
+
+ return &hdmi->encoder;
+}
+
+static int
+rk3066_hdmi_probe_single_connector_modes(struct drm_connector *connector,
+ uint32_t maxX, uint32_t maxY)
+{
+ if (maxX > 1920)
+ maxX = 1920;
+ if (maxY > 1080)
+ maxY = 1080;
+
+ return drm_helper_probe_single_connector_modes(connector, maxX, maxY);
+}
+
+static void rk3066_hdmi_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+}
+
+static const struct drm_connector_funcs rk3066_hdmi_connector_funcs = {
+ .fill_modes = rk3066_hdmi_probe_single_connector_modes,
+ .detect = rk3066_hdmi_connector_detect,
+ .destroy = rk3066_hdmi_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static const
+struct drm_connector_helper_funcs rk3066_hdmi_connector_helper_funcs = {
+ .get_modes = rk3066_hdmi_connector_get_modes,
+ .mode_valid = rk3066_hdmi_connector_mode_valid,
+ .best_encoder = rk3066_hdmi_connector_best_encoder,
+};
+
+static int
+rk3066_hdmi_register(struct drm_device *drm, struct rk3066_hdmi *hdmi)
+{
+ struct drm_encoder *encoder = &hdmi->encoder;
+ struct device *dev = hdmi->dev;
+
+ encoder->possible_crtcs =
+ drm_of_find_possible_crtcs(drm, dev->of_node);
+
+ /*
+ * If we failed to find the CRTC(s) which this encoder is
+ * supposed to be connected to, it's because the CRTC has
+ * not been registered yet. Defer probing, and hope that
+ * the required CRTC is added later.
+ */
+ if (encoder->possible_crtcs == 0)
+ return -EPROBE_DEFER;
+
+ drm_encoder_helper_add(encoder, &rk3066_hdmi_encoder_helper_funcs);
+ drm_encoder_init(drm, encoder, &rk3066_hdmi_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS, NULL);
+
+ hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
+
+ drm_connector_helper_add(&hdmi->connector,
+ &rk3066_hdmi_connector_helper_funcs);
+ drm_connector_init(drm, &hdmi->connector,
+ &rk3066_hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+
+ drm_connector_attach_encoder(&hdmi->connector, encoder);
+
+ return 0;
+}
+
+static irqreturn_t rk3066_hdmi_hardirq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+ u8 interrupt;
+
+ if (rk3066_hdmi_get_power_mode(hdmi) == HDMI_SYS_POWER_MODE_A)
+ hdmi_writeb(hdmi, HDMI_SYS_CTRL, HDMI_SYS_POWER_MODE_B);
+
+ interrupt = hdmi_readb(hdmi, HDMI_INTR_STATUS1);
+ if (interrupt)
+ hdmi_writeb(hdmi, HDMI_INTR_STATUS1, interrupt);
+
+ if (interrupt & HDMI_INTR_EDID_MASK) {
+ hdmi->i2c->stat = interrupt;
+ complete(&hdmi->i2c->cmpltn);
+ }
+
+ if (interrupt & (HDMI_INTR_HOTPLUG | HDMI_INTR_MSENS))
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
+
+static irqreturn_t rk3066_hdmi_irq(int irq, void *dev_id)
+{
+ struct rk3066_hdmi *hdmi = dev_id;
+
+ drm_helper_hpd_irq_event(hdmi->connector.dev);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3066_hdmi_i2c_read(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ int length = msgs->len;
+ u8 *buf = msgs->buf;
+ int ret;
+
+ ret = wait_for_completion_timeout(&hdmi->i2c->cmpltn, HZ / 10);
+ if (!ret || hdmi->i2c->stat & HDMI_INTR_EDID_ERR)
+ return -EAGAIN;
+
+ while (length--)
+ *buf++ = hdmi_readb(hdmi, HDMI_DDC_READ_FIFO_ADDR);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_write(struct rk3066_hdmi *hdmi, struct i2c_msg *msgs)
+{
+ /*
+ * The DDC module only supports reading the EDID message, so
+ * we assume that each write to this i2c adapter carries
+ * the offset of the EDID word address.
+ */
+ if (msgs->len != 1 ||
+ (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR))
+ return -EINVAL;
+
+ reinit_completion(&hdmi->i2c->cmpltn);
+
+ if (msgs->addr == DDC_SEGMENT_ADDR)
+ hdmi->i2c->segment_addr = msgs->buf[0];
+ if (msgs->addr == DDC_ADDR)
+ hdmi->i2c->ddc_addr = msgs->buf[0];
+
+ /* Set edid word address 0x00/0x80. */
+ hdmi_writeb(hdmi, HDMI_EDID_WORD_ADDR, hdmi->i2c->ddc_addr);
+
+ /* Set edid segment pointer. */
+ hdmi_writeb(hdmi, HDMI_EDID_SEGMENT_POINTER, hdmi->i2c->segment_addr);
+
+ return 0;
+}
+
+static int rk3066_hdmi_i2c_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct rk3066_hdmi *hdmi = i2c_get_adapdata(adap);
+ struct rk3066_hdmi_i2c *i2c = hdmi->i2c;
+ int i, ret = 0;
+
+ mutex_lock(&i2c->i2c_lock);
+
+ rk3066_hdmi_i2c_init(hdmi);
+
+ /* Unmute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1,
+ HDMI_INTR_EDID_MASK, HDMI_INTR_EDID_MASK);
+ i2c->stat = 0;
+
+ for (i = 0; i < num; i++) {
+ DRM_DEV_DEBUG(hdmi->dev,
+ "xfer: num: %d/%d, len: %d, flags: %#x\n",
+ i + 1, num, msgs[i].len, msgs[i].flags);
+
+ if (msgs[i].flags & I2C_M_RD)
+ ret = rk3066_hdmi_i2c_read(hdmi, &msgs[i]);
+ else
+ ret = rk3066_hdmi_i2c_write(hdmi, &msgs[i]);
+
+ if (ret < 0)
+ break;
+ }
+
+ if (!ret)
+ ret = num;
+
+ /* Mute HDMI EDID interrupt. */
+ hdmi_modb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_EDID_MASK, 0);
+
+ mutex_unlock(&i2c->i2c_lock);
+
+ return ret;
+}
+
+static u32 rk3066_hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm rk3066_hdmi_algorithm = {
+ .master_xfer = rk3066_hdmi_i2c_xfer,
+ .functionality = rk3066_hdmi_i2c_func,
+};
+
+static struct i2c_adapter *rk3066_hdmi_i2c_adapter(struct rk3066_hdmi *hdmi)
+{
+ struct i2c_adapter *adap;
+ struct rk3066_hdmi_i2c *i2c;
+ int ret;
+
+ i2c = devm_kzalloc(hdmi->dev, sizeof(*i2c), GFP_KERNEL);
+ if (!i2c)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&i2c->i2c_lock);
+ init_completion(&i2c->cmpltn);
+
+ adap = &i2c->adap;
+ adap->class = I2C_CLASS_DDC;
+ adap->owner = THIS_MODULE;
+ adap->dev.parent = hdmi->dev;
+ adap->dev.of_node = hdmi->dev->of_node;
+ adap->algo = &rk3066_hdmi_algorithm;
+ strlcpy(adap->name, "RK3066 HDMI", sizeof(adap->name));
+ i2c_set_adapdata(adap, hdmi);
+
+ ret = i2c_add_adapter(adap);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "cannot add %s I2C adapter\n",
+ adap->name);
+ devm_kfree(hdmi->dev, i2c);
+ return ERR_PTR(ret);
+ }
+
+ hdmi->i2c = i2c;
+
+ DRM_DEV_DEBUG(hdmi->dev, "registered %s I2C bus driver\n", adap->name);
+
+ return adap;
+}
+
+static int rk3066_hdmi_bind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct drm_device *drm = data;
+ struct rk3066_hdmi *hdmi;
+ struct resource *iores;
+ int irq;
+ int ret;
+
+ hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi)
+ return -ENOMEM;
+
+ hdmi->dev = dev;
+ hdmi->drm_dev = drm;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iores)
+ return -ENXIO;
+
+ hdmi->regs = devm_ioremap_resource(dev, iores);
+ if (IS_ERR(hdmi->regs))
+ return PTR_ERR(hdmi->regs);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ hdmi->hclk = devm_clk_get(dev, "hclk");
+ if (IS_ERR(hdmi->hclk)) {
+ DRM_DEV_ERROR(dev, "unable to get HDMI hclk clock\n");
+ return PTR_ERR(hdmi->hclk);
+ }
+
+ ret = clk_prepare_enable(hdmi->hclk);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "cannot enable HDMI hclk clock: %d\n", ret);
+ return ret;
+ }
+
+ hdmi->grf_regmap = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ if (IS_ERR(hdmi->grf_regmap)) {
+ DRM_DEV_ERROR(dev, "unable to get rockchip,grf\n");
+ ret = PTR_ERR(hdmi->grf_regmap);
+ goto err_disable_hclk;
+ }
+
+ /* internal hclk = hdmi_hclk / 25 */
+ hdmi_writeb(hdmi, HDMI_INTERNAL_CLK_DIVIDER, 25);
+
+ hdmi->ddc = rk3066_hdmi_i2c_adapter(hdmi);
+ if (IS_ERR(hdmi->ddc)) {
+ ret = PTR_ERR(hdmi->ddc);
+ hdmi->ddc = NULL;
+ goto err_disable_hclk;
+ }
+
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_B);
+ usleep_range(999, 1000);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK1, HDMI_INTR_HOTPLUG);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK2, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK3, 0);
+ hdmi_writeb(hdmi, HDMI_INTR_MASK4, 0);
+ rk3066_hdmi_set_power_mode(hdmi, HDMI_SYS_POWER_MODE_A);
+
+ ret = rk3066_hdmi_register(drm, hdmi);
+ if (ret)
+ goto err_disable_i2c;
+
+ dev_set_drvdata(dev, hdmi);
+
+ ret = devm_request_threaded_irq(dev, irq, rk3066_hdmi_hardirq,
+ rk3066_hdmi_irq, IRQF_SHARED,
+ dev_name(dev), hdmi);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to request hdmi irq: %d\n", ret);
+ goto err_cleanup_hdmi;
+ }
+
+ return 0;
+
+err_cleanup_hdmi:
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+err_disable_i2c:
+ i2c_put_adapter(hdmi->ddc);
+err_disable_hclk:
+ clk_disable_unprepare(hdmi->hclk);
+
+ return ret;
+}
+
+static void rk3066_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct rk3066_hdmi *hdmi = dev_get_drvdata(dev);
+
+ hdmi->connector.funcs->destroy(&hdmi->connector);
+ hdmi->encoder.funcs->destroy(&hdmi->encoder);
+
+ i2c_put_adapter(hdmi->ddc);
+ clk_disable_unprepare(hdmi->hclk);
+}
+
+static const struct component_ops rk3066_hdmi_ops = {
+ .bind = rk3066_hdmi_bind,
+ .unbind = rk3066_hdmi_unbind,
+};
+
+static int rk3066_hdmi_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &rk3066_hdmi_ops);
+}
+
+static int rk3066_hdmi_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &rk3066_hdmi_ops);
+
+ return 0;
+}
+
+static const struct of_device_id rk3066_hdmi_dt_ids[] = {
+ { .compatible = "rockchip,rk3066-hdmi" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, rk3066_hdmi_dt_ids);
+
+struct platform_driver rk3066_hdmi_driver = {
+ .probe = rk3066_hdmi_probe,
+ .remove = rk3066_hdmi_remove,
+ .driver = {
+ .name = "rockchip-rk3066-hdmi",
+ .of_match_table = rk3066_hdmi_dt_ids,
+ },
+};
diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.h b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
new file mode 100644
index 000000000000..39a31c62a428
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Zheng Yang <zhengyang@rock-chips.com>
+ */
+
+#ifndef __RK3066_HDMI_H__
+#define __RK3066_HDMI_H__
+
+#define GRF_SOC_CON0 0x150
+#define HDMI_VIDEO_SEL BIT(14)
+
+#define DDC_SEGMENT_ADDR 0x30
+#define HDMI_SCL_RATE (50 * 1000)
+#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11
+
+#define N_32K 0x1000
+#define N_441K 0x1880
+#define N_882K 0x3100
+#define N_1764K 0x6200
+#define N_48K 0x1800
+#define N_96K 0x3000
+#define N_192K 0x6000
+
+#define HDMI_SYS_CTRL 0x000
+#define HDMI_LR_SWAP_N3 0x004
+#define HDMI_N2 0x008
+#define HDMI_N1 0x00c
+#define HDMI_SPDIF_FS_CTS_INT3 0x010
+#define HDMI_CTS_INT2 0x014
+#define HDMI_CTS_INT1 0x018
+#define HDMI_CTS_EXT3 0x01c
+#define HDMI_CTS_EXT2 0x020
+#define HDMI_CTS_EXT1 0x024
+#define HDMI_AUDIO_CTRL1 0x028
+#define HDMI_AUDIO_CTRL2 0x02c
+#define HDMI_I2S_AUDIO_CTRL 0x030
+#define HDMI_I2S_SWAP 0x040
+#define HDMI_AUDIO_STA_BIT_CTRL1 0x044
+#define HDMI_AUDIO_STA_BIT_CTRL2 0x048
+#define HDMI_AUDIO_SRC_NUM_AND_LENGTH 0x050
+#define HDMI_AV_CTRL1 0x054
+#define HDMI_VIDEO_CTRL1 0x058
+#define HDMI_DEEP_COLOR_MODE 0x05c
+
+#define HDMI_EXT_VIDEO_PARA 0x0c0
+#define HDMI_EXT_HTOTAL_L 0x0c4
+#define HDMI_EXT_HTOTAL_H 0x0c8
+#define HDMI_EXT_HBLANK_L 0x0cc
+#define HDMI_EXT_HBLANK_H 0x0d0
+#define HDMI_EXT_HDELAY_L 0x0d4
+#define HDMI_EXT_HDELAY_H 0x0d8
+#define HDMI_EXT_HDURATION_L 0x0dc
+#define HDMI_EXT_HDURATION_H 0x0e0
+#define HDMI_EXT_VTOTAL_L 0x0e4
+#define HDMI_EXT_VTOTAL_H 0x0e8
+#define HDMI_AV_CTRL2 0x0ec
+#define HDMI_EXT_VBLANK_L 0x0f4
+#define HDMI_EXT_VBLANK_H 0x10c
+#define HDMI_EXT_VDELAY 0x0f8
+#define HDMI_EXT_VDURATION 0x0fc
+
+#define HDMI_CP_MANU_SEND_CTRL 0x100
+#define HDMI_CP_AUTO_SEND_CTRL 0x104
+#define HDMI_AUTO_CHECKSUM_OPT 0x108
+
+#define HDMI_VIDEO_CTRL2 0x114
+
+#define HDMI_PHY_OPTION 0x144
+
+#define HDMI_CP_BUF_INDEX 0x17c
+#define HDMI_CP_BUF_ACC_HB0 0x180
+#define HDMI_CP_BUF_ACC_HB1 0x184
+#define HDMI_CP_BUF_ACC_HB2 0x188
+#define HDMI_CP_BUF_ACC_PB0 0x18c
+
+#define HDMI_DDC_READ_FIFO_ADDR 0x200
+#define HDMI_DDC_BUS_FREQ_L 0x204
+#define HDMI_DDC_BUS_FREQ_H 0x208
+#define HDMI_DDC_BUS_CTRL 0x2dc
+#define HDMI_DDC_I2C_LEN 0x278
+#define HDMI_DDC_I2C_OFFSET 0x280
+#define HDMI_DDC_I2C_CTRL 0x284
+#define HDMI_DDC_I2C_READ_BUF0 0x288
+#define HDMI_DDC_I2C_READ_BUF1 0x28c
+#define HDMI_DDC_I2C_READ_BUF2 0x290
+#define HDMI_DDC_I2C_READ_BUF3 0x294
+#define HDMI_DDC_I2C_WRITE_BUF0 0x298
+#define HDMI_DDC_I2C_WRITE_BUF1 0x29c
+#define HDMI_DDC_I2C_WRITE_BUF2 0x2a0
+#define HDMI_DDC_I2C_WRITE_BUF3 0x2a4
+#define HDMI_DDC_I2C_WRITE_BUF4 0x2ac
+#define HDMI_DDC_I2C_WRITE_BUF5 0x2b0
+#define HDMI_DDC_I2C_WRITE_BUF6 0x2b4
+
+#define HDMI_INTR_MASK1 0x248
+#define HDMI_INTR_MASK2 0x24c
+#define HDMI_INTR_STATUS1 0x250
+#define HDMI_INTR_STATUS2 0x254
+#define HDMI_INTR_MASK3 0x258
+#define HDMI_INTR_MASK4 0x25c
+#define HDMI_INTR_STATUS3 0x260
+#define HDMI_INTR_STATUS4 0x264
+
+#define HDMI_HDCP_CTRL 0x2bc
+
+#define HDMI_EDID_SEGMENT_POINTER 0x310
+#define HDMI_EDID_WORD_ADDR 0x314
+#define HDMI_EDID_FIFO_ADDR 0x318
+
+#define HDMI_HPG_MENS_STA 0x37c
+
+#define HDMI_INTERNAL_CLK_DIVIDER 0x800
+
+enum {
+ /* HDMI_SYS_CTRL */
+ HDMI_SYS_POWER_MODE_MASK = 0xf0,
+ HDMI_SYS_POWER_MODE_A = 0x10,
+ HDMI_SYS_POWER_MODE_B = 0x20,
+ HDMI_SYS_POWER_MODE_D = 0x40,
+ HDMI_SYS_POWER_MODE_E = 0x80,
+ HDMI_SYS_PLL_RESET_MASK = 0x0c,
+ HDMI_SYS_PLL_RESET = 0x0c,
+ HDMI_SYS_PLLB_RESET = 0x08,
+
+ /* HDMI_LR_SWAP_N3 */
+ HDMI_AUDIO_LR_SWAP_MASK = 0xf0,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET0 = 0x10,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET1 = 0x20,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET2 = 0x40,
+ HDMI_AUDIO_LR_SWAP_SUBPACKET3 = 0x80,
+ HDMI_AUDIO_N_19_16_MASK = 0x0f,
+
+ /* HDMI_AUDIO_CTRL1 */
+ HDMI_AUDIO_EXTERNAL_CTS = BIT(7),
+ HDMI_AUDIO_INPUT_IIS = 0,
+ HDMI_AUDIO_INPUT_SPDIF = 0x08,
+ HDMI_AUDIO_INPUT_MCLK_ACTIVE = 0x04,
+ HDMI_AUDIO_INPUT_MCLK_DEACTIVE = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_128X = 0,
+ HDMI_AUDIO_INPUT_MCLK_RATE_256X = 1,
+ HDMI_AUDIO_INPUT_MCLK_RATE_384X = 2,
+ HDMI_AUDIO_INPUT_MCLK_RATE_512X = 3,
+
+ /* HDMI_I2S_AUDIO_CTRL */
+ HDMI_AUDIO_I2S_FORMAT_STANDARD = 0,
+ HDMI_AUDIO_I2S_CHANNEL_1_2 = 0x04,
+ HDMI_AUDIO_I2S_CHANNEL_3_4 = 0x0c,
+ HDMI_AUDIO_I2S_CHANNEL_5_6 = 0x1c,
+ HDMI_AUDIO_I2S_CHANNEL_7_8 = 0x3c,
+
+ /* HDMI_AV_CTRL1 */
+ HDMI_AUDIO_SAMPLE_FRE_MASK = 0xf0,
+ HDMI_AUDIO_SAMPLE_FRE_32000 = 0x30,
+ HDMI_AUDIO_SAMPLE_FRE_44100 = 0,
+ HDMI_AUDIO_SAMPLE_FRE_48000 = 0x20,
+ HDMI_AUDIO_SAMPLE_FRE_88200 = 0x80,
+ HDMI_AUDIO_SAMPLE_FRE_96000 = 0xa0,
+ HDMI_AUDIO_SAMPLE_FRE_176400 = 0xc0,
+ HDMI_AUDIO_SAMPLE_FRE_192000 = 0xe0,
+ HDMI_AUDIO_SAMPLE_FRE_768000 = 0x90,
+
+ HDMI_VIDEO_INPUT_FORMAT_MASK = 0x0e,
+ HDMI_VIDEO_INPUT_RGB_YCBCR444 = 0,
+ HDMI_VIDEO_INPUT_YCBCR422 = 0x02,
+ HDMI_VIDEO_DE_MASK = 0x1,
+ HDMI_VIDEO_INTERNAL_DE = 0,
+ HDMI_VIDEO_EXTERNAL_DE = 0x01,
+
+ /* HDMI_VIDEO_CTRL1 */
+ HDMI_VIDEO_OUTPUT_FORMAT_MASK = 0xc0,
+ HDMI_VIDEO_OUTPUT_RGB444 = 0,
+ HDMI_VIDEO_OUTPUT_YCBCR444 = 0x40,
+ HDMI_VIDEO_OUTPUT_YCBCR422 = 0x80,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_MASK = 0x30,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_12BIT = 0,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_10BIT = 0x10,
+ HDMI_VIDEO_INPUT_DATA_DEPTH_8BIT = 0x30,
+ HDMI_VIDEO_INPUT_COLOR_MASK = 1,
+ HDMI_VIDEO_INPUT_COLOR_RGB = 0,
+ HDMI_VIDEO_INPUT_COLOR_YCBCR = 1,
+
+ /* HDMI_EXT_VIDEO_PARA */
+ HDMI_VIDEO_VSYNC_OFFSET_SHIFT = 4,
+ HDMI_VIDEO_VSYNC_ACTIVE_HIGH = BIT(3),
+ HDMI_VIDEO_VSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_HSYNC_ACTIVE_HIGH = BIT(2),
+ HDMI_VIDEO_HSYNC_ACTIVE_LOW = 0,
+ HDMI_VIDEO_MODE_INTERLACE = BIT(1),
+ HDMI_VIDEO_MODE_PROGRESSIVE = 0,
+ HDMI_EXT_VIDEO_SET_EN = BIT(0),
+
+ /* HDMI_CP_AUTO_SEND_CTRL */
+
+ /* HDMI_VIDEO_CTRL2 */
+ HDMI_VIDEO_AV_MUTE_MASK = 0xc0,
+ HDMI_VIDEO_CLR_AV_MUTE = BIT(7),
+ HDMI_VIDEO_SET_AV_MUTE = BIT(6),
+ HDMI_AUDIO_CP_LOGIC_RESET_MASK = BIT(2),
+ HDMI_AUDIO_CP_LOGIC_RESET = BIT(2),
+ HDMI_VIDEO_AUDIO_DISABLE_MASK = 0x3,
+ HDMI_AUDIO_DISABLE = BIT(1),
+ HDMI_VIDEO_DISABLE = BIT(0),
+
+ /* HDMI_CP_BUF_INDEX */
+ HDMI_INFOFRAME_VSI = 0x05,
+ HDMI_INFOFRAME_AVI = 0x06,
+ HDMI_INFOFRAME_AAI = 0x08,
+
+ /* HDMI_INTR_MASK1 */
+ /* HDMI_INTR_STATUS1 */
+ HDMI_INTR_HOTPLUG = BIT(7),
+ HDMI_INTR_MSENS = BIT(6),
+ HDMI_INTR_VSYNC = BIT(5),
+ HDMI_INTR_AUDIO_FIFO_FULL = BIT(4),
+ HDMI_INTR_EDID_MASK = 0x6,
+ HDMI_INTR_EDID_READY = BIT(2),
+ HDMI_INTR_EDID_ERR = BIT(1),
+
+ /* HDMI_HDCP_CTRL */
+ HDMI_VIDEO_MODE_MASK = BIT(1),
+ HDMI_VIDEO_MODE_HDMI = BIT(1),
+
+ /* HDMI_HPG_MENS_STA */
+ HDMI_HPG_IN_STATUS_HIGH = BIT(7),
+ HDMI_MSENS_IN_STATUS_HIGH = BIT(6),
+};
+
+#endif /* __RK3066_HDMI_H__ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index d7fa17f12769..cb938d3cd3c2 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -448,6 +448,14 @@ static int rockchip_drm_platform_remove(struct platform_device *pdev)
return 0;
}
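+/* Disable all CRTCs on shutdown/reboot so the display hardware is left in a quiescent state. */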
+static void rockchip_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static const struct of_device_id rockchip_drm_dt_ids[] = {
{ .compatible = "rockchip,display-subsystem", },
{ /* sentinel */ },
@@ -457,6 +465,7 @@ MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
static struct platform_driver rockchip_drm_platform_driver = {
.probe = rockchip_drm_platform_probe,
.remove = rockchip_drm_platform_remove,
+ .shutdown = rockchip_drm_platform_shutdown,
.driver = {
.name = "rockchip-drm",
.of_match_table = rockchip_drm_dt_ids,
@@ -486,6 +495,8 @@ static int __init rockchip_drm_init(void)
ADD_ROCKCHIP_SUB_DRIVER(dw_mipi_dsi_rockchip_driver,
CONFIG_ROCKCHIP_DW_MIPI_DSI);
ADD_ROCKCHIP_SUB_DRIVER(inno_hdmi_driver, CONFIG_ROCKCHIP_INNO_HDMI);
+ ADD_ROCKCHIP_SUB_DRIVER(rk3066_hdmi_driver,
+ CONFIG_ROCKCHIP_RK3066_HDMI);
ret = platform_register_drivers(rockchip_sub_drivers,
num_rockchip_sub_drivers);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index ce48568ec8a0..e4bc4322bc3f 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -73,4 +73,5 @@ extern struct platform_driver inno_hdmi_driver;
extern struct platform_driver rockchip_dp_driver;
extern struct platform_driver rockchip_lvds_driver;
extern struct platform_driver vop_platform_driver;
+extern struct platform_driver rk3066_hdmi_driver;
#endif /* _ROCKCHIP_DRM_DRV_H_ */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 8ce68bd508be..30459de66b67 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -90,12 +90,10 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- fbi->par = helper;
fbi->fbops = &rockchip_drm_fbdev_ops;
fb = helper->fb;
- drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(fbi, helper, sizes);
offset = fbi->var.xoffset * bytes_per_pixel;
offset += fbi->var.yoffset * fb->pitches[0];
@@ -110,8 +108,6 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
rk_obj->kvaddr,
offset, size);
- fbi->skip_vt_switch = true;
-
return 0;
out:
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 0d4ade9d4722..20a9c296d027 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1041,6 +1041,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
u16 vact_end = vact_st + vdisplay;
uint32_t pin_pol, val;
+ int dither_bpc = s->output_bpc ? s->output_bpc : 10;
int ret;
mutex_lock(&vop->vop_lock);
@@ -1098,11 +1099,19 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc,
!(vop_data->feature & VOP_FEATURE_OUTPUT_RGB10))
s->output_mode = ROCKCHIP_OUT_MODE_P888;
- if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && s->output_bpc == 8)
+ if (s->output_mode == ROCKCHIP_OUT_MODE_AAAA && dither_bpc <= 8)
VOP_REG_SET(vop, common, pre_dither_down, 1);
else
VOP_REG_SET(vop, common, pre_dither_down, 0);
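+ /* For a 6 bpc sink, dither the RGB888 output further down to RGB666. */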
+ if (dither_bpc == 6) {
+ VOP_REG_SET(vop, common, dither_down_sel, DITHER_DOWN_ALLEGRO);
+ VOP_REG_SET(vop, common, dither_down_mode, RGB888_TO_RGB666);
+ VOP_REG_SET(vop, common, dither_down_en, 1);
+ } else {
+ VOP_REG_SET(vop, common, dither_down_en, 0);
+ }
+
VOP_REG_SET(vop, common, out_mode, s->output_mode);
VOP_REG_SET(vop, modeset, htotal_pw, (htotal << 16) | hsync_len);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 04ed401d2325..e64351dab610 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -71,7 +71,9 @@ struct vop_common {
struct vop_reg dsp_blank;
struct vop_reg data_blank;
struct vop_reg pre_dither_down;
- struct vop_reg dither_down;
+ struct vop_reg dither_down_sel;
+ struct vop_reg dither_down_mode;
+ struct vop_reg dither_down_en;
struct vop_reg dither_up;
struct vop_reg gate_en;
struct vop_reg mmu_en;
@@ -287,6 +289,16 @@ enum scale_down_mode {
SCALE_DOWN_AVG = 0x1
};
+enum dither_down_mode {
+ RGB888_TO_RGB565 = 0x0,
+ RGB888_TO_RGB666 = 0x1
+};
+
+enum dither_down_mode_sel {
+ DITHER_DOWN_ALLEGRO = 0x0,
+ DITHER_DOWN_FRC = 0x1
+};
+
enum vop_pol {
HSYNC_POSITIVE = 0,
VSYNC_POSITIVE = 1,
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index bd76328c0fdb..e732b73033c8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -137,6 +137,9 @@ static const struct vop_common rk3036_common = {
.standby = VOP_REG_SYNC(RK3036_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3036_DSP_CTRL0, 0xf, 0),
.dsp_blank = VOP_REG(RK3036_DSP_CTRL1, 0x1, 24),
+ .dither_down_sel = VOP_REG(RK3036_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3036_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3036_DSP_CTRL0, 0x1, 10),
.cfg_done = VOP_REG_SYNC(RK3036_REG_CFG_DONE, 0x1, 0),
};
@@ -200,6 +203,9 @@ static const struct vop_common px30_common = {
.standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
.out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
.dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
+ .dither_down_en = VOP_REG(PX30_DSP_CTRL2, 0x1, 8),
+ .dither_down_sel = VOP_REG(PX30_DSP_CTRL2, 0x1, 7),
+ .dither_down_mode = VOP_REG(PX30_DSP_CTRL2, 0x1, 6),
.cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
};
@@ -365,6 +371,8 @@ static const struct vop_common rk3066_common = {
.standby = VOP_REG(RK3066_SYS_CTRL0, 0x1, 1),
.out_mode = VOP_REG(RK3066_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3066_REG_CFG_DONE, 0x1, 0),
+ .dither_down_en = VOP_REG(RK3066_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3066_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3066_DSP_CTRL1, 0x1, 24),
};
@@ -458,6 +466,9 @@ static const struct vop_common rk3188_common = {
.standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
.out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
.cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
+ .dither_down_sel = VOP_REG(RK3188_DSP_CTRL0, 0x1, 27),
+ .dither_down_en = VOP_REG(RK3188_DSP_CTRL0, 0x1, 11),
+ .dither_down_mode = VOP_REG(RK3188_DSP_CTRL0, 0x1, 10),
.dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
};
@@ -585,8 +596,10 @@ static const struct vop_common rk3288_common = {
.standby = VOP_REG_SYNC(RK3288_SYS_CTRL, 0x1, 22),
.gate_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 23),
.mmu_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 20),
+ .dither_down_sel = VOP_REG(RK3288_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3288_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2),
.pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1),
- .dither_down = VOP_REG(RK3288_DSP_CTRL1, 0xf, 1),
.dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6),
.data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19),
.dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18),
@@ -878,7 +891,10 @@ static const struct vop_misc rk3328_misc = {
static const struct vop_common rk3328_common = {
.standby = VOP_REG_SYNC(RK3328_SYS_CTRL, 0x1, 22),
- .dither_down = VOP_REG(RK3328_DSP_CTRL1, 0xf, 1),
+ .dither_down_sel = VOP_REG(RK3328_DSP_CTRL1, 0x1, 4),
+ .dither_down_mode = VOP_REG(RK3328_DSP_CTRL1, 0x1, 3),
+ .dither_down_en = VOP_REG(RK3328_DSP_CTRL1, 0x1, 2),
+ .pre_dither_down = VOP_REG(RK3328_DSP_CTRL1, 0x1, 1),
.dither_up = VOP_REG(RK3328_DSP_CTRL1, 0x1, 6),
.dsp_blank = VOP_REG(RK3328_DSP_CTRL0, 0x3, 18),
.out_mode = VOP_REG(RK3328_DSP_CTRL0, 0xf, 0),
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index fbed2c90fd51..286a0eeefcb6 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -1615,7 +1615,7 @@ static int igt_topdown(void *ignored)
DRM_RND_STATE(prng, random_seed);
const unsigned int count = 8192;
unsigned int size;
- unsigned long *bitmap = NULL;
+ unsigned long *bitmap;
struct drm_mm mm;
struct drm_mm_node *nodes, *node, *next;
unsigned int *order, n, m, o = 0;
@@ -1631,8 +1631,7 @@ static int igt_topdown(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1717,7 +1716,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
@@ -1745,8 +1744,7 @@ static int igt_bottomup(void *ignored)
if (!nodes)
goto err;
- bitmap = kcalloc(count / BITS_PER_LONG, sizeof(unsigned long),
- GFP_KERNEL);
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
if (!bitmap)
goto err_nodes;
@@ -1818,7 +1816,7 @@ out:
drm_mm_takedown(&mm);
kfree(order);
err_bitmap:
- kfree(bitmap);
+ bitmap_free(bitmap);
err_nodes:
vfree(nodes);
err:
diff --git a/drivers/gpu/drm/stm/Kconfig b/drivers/gpu/drm/stm/Kconfig
index 35367ada3bc1..d15b10de1da6 100644
--- a/drivers/gpu/drm/stm/Kconfig
+++ b/drivers/gpu/drm/stm/Kconfig
@@ -6,7 +6,7 @@ config DRM_STM
select DRM_KMS_CMA_HELPER
select DRM_PANEL_BRIDGE
select VIDEOMODE_HELPERS
- select FB_PROVIDE_GET_FB_UNMAPPED_AREA
+ select FB_PROVIDE_GET_FB_UNMAPPED_AREA if FB
help
Enable support for the on-chip display controller on
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 0a7f933ab007..5834ef56fbaa 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -129,6 +129,40 @@ static void drv_unload(struct drm_device *ddev)
drm_mode_config_cleanup(ddev);
}
+static __maybe_unused int drv_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+ struct drm_atomic_state *state;
+
+ drm_kms_helper_poll_disable(ddev);
+ state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(state);
+ }
+ ldev->suspend_state = state;
+ ltdc_suspend(ddev);
+
+ return 0;
+}
+
+static __maybe_unused int drv_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ ltdc_resume(ddev);
+ drm_atomic_helper_resume(ddev, ldev->suspend_state);
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops drv_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(drv_suspend, drv_resume)
+};
+
static int stm_drm_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -186,6 +220,7 @@ static struct platform_driver stm_drm_platform_driver = {
.driver = {
.name = "stm32-display",
.of_match_table = drv_dt_ids,
+ .pm = &drv_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index a672b59a2226..1bef73e8c8fe 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -356,12 +356,40 @@ static int dw_mipi_dsi_stm_remove(struct platform_device *pdev)
return 0;
}
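+/* Suspend/resume reach the single DSI instance through the driver's static platform data. */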
+static int __maybe_unused dw_mipi_dsi_stm_suspend(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_disable_unprepare(dsi->pllref_clk);
+
+ return 0;
+}
+
+static int __maybe_unused dw_mipi_dsi_stm_resume(struct device *dev)
+{
+ struct dw_mipi_dsi_stm *dsi = dw_mipi_dsi_stm_plat_data.priv_data;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ clk_prepare_enable(dsi->pllref_clk);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw_mipi_dsi_stm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dw_mipi_dsi_stm_suspend,
+ dw_mipi_dsi_stm_resume)
+};
+
static struct platform_driver dw_mipi_dsi_stm_driver = {
.probe = dw_mipi_dsi_stm_probe,
.remove = dw_mipi_dsi_stm_remove,
.driver = {
.of_match_table = dw_mipi_dsi_stm_dt_ids,
.name = "stm32-display-dsi",
+ .pm = &dw_mipi_dsi_stm_pm_ops,
},
};
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index b1741a9d5be2..32fd6a3b37fb 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1062,6 +1062,30 @@ static int ltdc_get_caps(struct drm_device *ddev)
return 0;
}
+void ltdc_suspend(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+
+ DRM_DEBUG_DRIVER("\n");
+ clk_disable_unprepare(ldev->pixel_clk);
+}
+
+int ltdc_resume(struct drm_device *ddev)
+{
+ struct ltdc_device *ldev = ddev->dev_private;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+
+ ret = clk_prepare_enable(ldev->pixel_clk);
+ if (ret) {
+ DRM_ERROR("failed to enable pixel clock (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
int ltdc_load(struct drm_device *ddev)
{
struct platform_device *pdev = to_platform_device(ddev->dev);
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index e46f477a8494..a1ad0ae3b006 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -36,6 +36,7 @@ struct ltdc_device {
u32 error_status;
u32 irq_status;
struct fps_info plane_fpsi[LTDC_MAX_LAYER];
+ struct drm_atomic_state *suspend_state;
};
bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
@@ -45,5 +46,7 @@ bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
+void ltdc_suspend(struct drm_device *ddev);
+int ltdc_resume(struct drm_device *ddev);
#endif
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 4c0d51f73237..4e5922c89d7b 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -361,13 +361,6 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
- /*
- * backend DMA accesses DRAM directly, bypassing the system
- * bus. As such, the address range is different and the buffer
- * address needs to be corrected.
- */
- paddr -= PHYS_OFFSET;
-
if (fb->format->is_yuv)
return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
@@ -720,33 +713,22 @@ static int sun4i_backend_free_sat(struct device *dev) {
*/
static int sun4i_backend_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* input is port 0 */
- port = of_graph_get_port_by_id(node, 0);
- if (!port)
+ /* Input port is 0, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 0, -1);
+ if (!ep)
return -EINVAL;
- /* try finding an upstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (ret)
- continue;
-
- ret = reg;
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
/* TODO: This needs to take multiple pipelines into account */
@@ -814,6 +796,27 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
dev_set_drvdata(dev, backend);
spin_lock_init(&backend->frontend_lock);
+ if (of_find_property(dev->of_node, "interconnects", NULL)) {
+ /*
+ * This assumes we have the same DMA constraints for all the
+ * devices in our pipeline (all the backends, but also the
+ * frontends). This sounds bad, but it has always been the case
+ * for us, and DRM doesn't do per-device allocation either, so
+ * we would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If we don't have the interconnect property, most likely
+ * because of an old DT, we need to set the DMA offset by hand
+ * on our device since the RAM mapping is at 0 for the DMA bus,
+ * unlike the CPU.
+ */
+ drm->dev->dma_pfn_offset = PHYS_PFN_OFFSET;
+ }
+
backend->engine.node = dev->of_node;
backend->engine.ops = &sun4i_backend_engine_ops;
backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index b685ee11623d..b08c4453d47c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -269,6 +269,7 @@ struct sun4i_hdmi {
struct clk *tmds_clk;
struct i2c_adapter *i2c;
+ struct i2c_adapter *ddc_i2c;
/* Regmap fields for I2C adapter */
struct regmap_field *field_ddc_en;
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index d18862629301..8c122e637697 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -217,7 +217,7 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- edid = drm_get_edid(connector, hdmi->i2c);
+ edid = drm_get_edid(connector, hdmi->ddc_i2c ?: hdmi->i2c);
if (!edid)
return 0;
@@ -233,6 +233,28 @@ static int sun4i_hdmi_get_modes(struct drm_connector *connector)
return ret;
}
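+/* Look up the optional "ddc-i2c-bus" phandle on the remote node connected to port 1 of the HDMI controller. */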
+static struct i2c_adapter *sun4i_hdmi_get_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
static const struct drm_connector_helper_funcs sun4i_hdmi_connector_helper_funcs = {
.get_modes = sun4i_hdmi_get_modes,
};
@@ -580,6 +602,15 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
goto err_disable_mod_clk;
}
+ hdmi->ddc_i2c = sun4i_hdmi_get_ddc(dev);
+ if (IS_ERR(hdmi->ddc_i2c)) {
+ ret = PTR_ERR(hdmi->ddc_i2c);
+ if (ret == -ENODEV)
+ hdmi->ddc_i2c = NULL;
+ else
+ goto err_del_i2c_adapter;
+ }
+
drm_encoder_helper_add(&hdmi->encoder,
&sun4i_hdmi_helper_funcs);
ret = drm_encoder_init(drm,
@@ -589,14 +620,14 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
NULL);
if (ret) {
dev_err(dev, "Couldn't initialise the HDMI encoder\n");
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
hdmi->encoder.possible_crtcs = drm_of_find_possible_crtcs(drm,
dev->of_node);
if (!hdmi->encoder.possible_crtcs) {
ret = -EPROBE_DEFER;
- goto err_del_i2c_adapter;
+ goto err_put_ddc_i2c;
}
#ifdef CONFIG_DRM_SUN4I_HDMI_CEC
@@ -635,6 +666,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
err_cleanup_connector:
cec_delete_adapter(hdmi->cec_adap);
drm_encoder_cleanup(&hdmi->encoder);
+err_put_ddc_i2c:
+ i2c_put_adapter(hdmi->ddc_i2c);
err_del_i2c_adapter:
i2c_del_adapter(hdmi->i2c);
err_disable_mod_clk:
@@ -655,6 +688,7 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
drm_connector_cleanup(&hdmi->connector);
drm_encoder_cleanup(&hdmi->encoder);
i2c_del_adapter(hdmi->i2c);
+ i2c_put_adapter(hdmi->ddc_i2c);
clk_disable_unprepare(hdmi->mod_clk);
clk_disable_unprepare(hdmi->bus_clk);
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 147b97ed1a09..3a3ba99fed22 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -20,7 +20,7 @@ struct sun4i_lvds {
struct drm_connector connector;
struct drm_encoder encoder;
- struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
};
static inline struct sun4i_lvds *
@@ -41,9 +41,8 @@ static int sun4i_lvds_get_modes(struct drm_connector *connector)
{
struct sun4i_lvds *lvds =
drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(lvds->panel);
}
static struct drm_connector_helper_funcs sun4i_lvds_con_helper_funcs = {
@@ -54,9 +53,8 @@ static void
sun4i_lvds_connector_destroy(struct drm_connector *connector)
{
struct sun4i_lvds *lvds = drm_connector_to_sun4i_lvds(connector);
- struct sun4i_tcon *tcon = lvds->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(lvds->panel);
drm_connector_cleanup(connector);
}
@@ -71,26 +69,24 @@ static const struct drm_connector_funcs sun4i_lvds_con_funcs = {
static void sun4i_lvds_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_prepare(lvds->panel);
+ drm_panel_enable(lvds->panel);
}
}
static void sun4i_lvds_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_lvds *lvds = drm_encoder_to_sun4i_lvds(encoder);
- struct sun4i_tcon *tcon = lvds->tcon;
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (lvds->panel) {
+ drm_panel_disable(lvds->panel);
+ drm_panel_unprepare(lvds->panel);
}
}
@@ -113,11 +109,10 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
lvds = devm_kzalloc(drm->dev, sizeof(*lvds), GFP_KERNEL);
if (!lvds)
return -ENOMEM;
- lvds->tcon = tcon;
encoder = &lvds->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &lvds->panel, &bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... LVDS output disabled\n");
return 0;
@@ -138,7 +133,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The LVDS encoder can only work with the TCON channel 0 */
lvds->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (lvds->panel) {
drm_connector_helper_add(&lvds->connector,
&sun4i_lvds_con_helper_funcs);
ret = drm_connector_init(drm, &lvds->connector,
@@ -152,7 +147,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&lvds->connector,
&lvds->encoder);
- ret = drm_panel_attach(tcon->panel, &lvds->connector);
+ ret = drm_panel_attach(lvds->panel, &lvds->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index cae19e7bbeaa..d9e2502b49fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -27,6 +27,8 @@ struct sun4i_rgb {
struct drm_encoder encoder;
struct sun4i_tcon *tcon;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
};
static inline struct sun4i_rgb *
@@ -47,11 +49,18 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
{
struct sun4i_rgb *rgb =
drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- return drm_panel_get_modes(tcon->panel);
+ return drm_panel_get_modes(rgb->panel);
}
+/*
+ * VESA DMT defines a tolerance of 0.5% on the pixel clock, while the
+ * CVT spec reuses that tolerance in its examples, so it looks to be a
+ * good default tolerance for the EDID-based modes. Define it to 5 per
+ * mille to avoid floating point operations.
+ */
+#define SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE 5
+
static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
@@ -59,8 +68,9 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
struct sun4i_tcon *tcon = rgb->tcon;
u32 hsync = mode->hsync_end - mode->hsync_start;
u32 vsync = mode->vsync_end - mode->vsync_start;
- unsigned long rate = mode->clock * 1000;
- long rounded_rate;
+ unsigned long long rate = mode->clock * 1000;
+ unsigned long long lowest, highest;
+ unsigned long long rounded_rate;
DRM_DEBUG_DRIVER("Validating modes...\n");
@@ -92,15 +102,39 @@ static enum drm_mode_status sun4i_rgb_mode_valid(struct drm_encoder *crtc,
DRM_DEBUG_DRIVER("Vertical parameters OK\n");
+ /*
+ * TODO: We should use the struct display_timing if available
+ * and / or try to stretch the timings within that
+ * tolerance to take care of panels that we wouldn't be
+ * able to match exactly.
+ */
+ if (rgb->panel) {
+ DRM_DEBUG_DRIVER("RGB panel used, skipping clock rate checks");
+ goto out;
+ }
+
+ /*
+ * That shouldn't ever happen unless something is really wrong, but it
+ * doesn't hurt to check.
+ */
+ if (!rgb->bridge)
+ goto out;
+
tcon->dclk_min_div = 6;
tcon->dclk_max_div = 127;
rounded_rate = clk_round_rate(tcon->dclk, rate);
- if (rounded_rate < rate)
+
+ lowest = rate * (1000 - SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(lowest, 1000);
+ if (rounded_rate < lowest)
return MODE_CLOCK_LOW;
- if (rounded_rate > rate)
+ highest = rate * (1000 + SUN4I_RGB_DOTCLOCK_TOLERANCE_PER_MILLE);
+ do_div(highest, 1000);
+ if (rounded_rate > highest)
return MODE_CLOCK_HIGH;
+out:
DRM_DEBUG_DRIVER("Clock rate OK\n");
return MODE_OK;
@@ -114,9 +148,8 @@ static void
sun4i_rgb_connector_destroy(struct drm_connector *connector)
{
struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
- struct sun4i_tcon *tcon = rgb->tcon;
- drm_panel_detach(tcon->panel);
+ drm_panel_detach(rgb->panel);
drm_connector_cleanup(connector);
}
@@ -131,26 +164,24 @@ static const struct drm_connector_funcs sun4i_rgb_con_funcs = {
static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (tcon->panel) {
- drm_panel_prepare(tcon->panel);
- drm_panel_enable(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_prepare(rgb->panel);
+ drm_panel_enable(rgb->panel);
}
}
static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder)
{
struct sun4i_rgb *rgb = drm_encoder_to_sun4i_rgb(encoder);
- struct sun4i_tcon *tcon = rgb->tcon;
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (tcon->panel) {
- drm_panel_disable(tcon->panel);
- drm_panel_unprepare(tcon->panel);
+ if (rgb->panel) {
+ drm_panel_disable(rgb->panel);
+ drm_panel_unprepare(rgb->panel);
}
}
@@ -172,7 +203,6 @@ static struct drm_encoder_funcs sun4i_rgb_enc_funcs = {
int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
{
struct drm_encoder *encoder;
- struct drm_bridge *bridge;
struct sun4i_rgb *rgb;
int ret;
@@ -183,7 +213,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
encoder = &rgb->encoder;
ret = drm_of_find_panel_or_bridge(tcon->dev->of_node, 1, 0,
- &tcon->panel, &bridge);
+ &rgb->panel, &rgb->bridge);
if (ret) {
dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n");
return 0;
@@ -204,7 +234,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
/* The RGB encoder can only work with the TCON channel 0 */
rgb->encoder.possible_crtcs = drm_crtc_mask(&tcon->crtc->crtc);
- if (tcon->panel) {
+ if (rgb->panel) {
drm_connector_helper_add(&rgb->connector,
&sun4i_rgb_con_helper_funcs);
ret = drm_connector_init(drm, &rgb->connector,
@@ -218,15 +248,15 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
drm_connector_attach_encoder(&rgb->connector,
&rgb->encoder);
- ret = drm_panel_attach(tcon->panel, &rgb->connector);
+ ret = drm_panel_attach(rgb->panel, &rgb->connector);
if (ret) {
dev_err(drm->dev, "Couldn't attach our panel\n");
goto err_cleanup_connector;
}
}
- if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ if (rgb->bridge) {
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 7136fc91c603..9d8d8124b1f6 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -236,8 +236,8 @@ static struct sun4i_tcon *sun4i_get_tcon0(struct drm_device *drm)
return NULL;
}
-void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
- const struct drm_encoder *encoder)
+static void sun4i_tcon_set_mux(struct sun4i_tcon *tcon, int channel,
+ const struct drm_encoder *encoder)
{
int ret = -ENOTSUPP;
@@ -341,8 +341,8 @@ static void sun4i_tcon0_mode_set_cpu(struct sun4i_tcon *tcon,
u32 block_space, start_delay;
u32 tcon_div;
- tcon->dclk_min_div = 4;
- tcon->dclk_max_div = 127;
+ tcon->dclk_min_div = SUN6I_DSI_TCON_DIV;
+ tcon->dclk_max_div = SUN6I_DSI_TCON_DIV;
sun4i_tcon0_mode_set_common(tcon, mode);
@@ -561,10 +561,10 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
clk_set_phase(tcon->dclk, 240);
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
clk_set_phase(tcon->dclk, 0);
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index b5214d71610f..84cfb1952ff7 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -257,8 +257,6 @@ struct sun4i_tcon {
struct reset_control *lcd_rst;
struct reset_control *lvds_rst;
- struct drm_panel *panel;
-
/* Platform adjustments */
const struct sun4i_tcon_quirks *quirks;
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 318994cd1b85..6ff585055a07 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -24,7 +24,9 @@
#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
+#include "sun4i_crtc.h"
#include "sun4i_drv.h"
+#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
#include <video/mipi_display.h>
@@ -33,6 +35,8 @@
#define SUN6I_DSI_CTL_EN BIT(0)
#define SUN6I_DSI_BASIC_CTL_REG 0x00c
+#define SUN6I_DSI_BASIC_CTL_TRAIL_INV(n) (((n) & 0xf) << 4)
+#define SUN6I_DSI_BASIC_CTL_TRAIL_FILL BIT(3)
#define SUN6I_DSI_BASIC_CTL_HBP_DIS BIT(2)
#define SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS BIT(1)
#define SUN6I_DSI_BASIC_CTL_VIDEO_BURST BIT(0)
@@ -153,6 +157,8 @@
#define SUN6I_DSI_CMD_TX_REG(n) (0x300 + (n) * 0x04)
+#define SUN6I_DSI_SYNC_POINT 40
+
enum sun6i_dsi_start_inst {
DSI_START_LPRX,
DSI_START_LPTX,
@@ -358,7 +364,54 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
- return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
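+ /* Pick a start line inside the vertical blanking (clamped to 8..100), wrap the delay into one frame and keep it at least 1. */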
+ u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
+ u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
+
+ if (delay > mode->vtotal)
+ delay = delay % mode->vtotal;
+
+ return max_t(u16, delay, 1);
+}
+
+static u16 sun6i_dsi_get_line_num(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+
+ return mode->htotal * Bpp / device->lanes;
+}
+
+static u16 sun6i_dsi_get_drq_edge0(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num, u16 edge1)
+{
+ u16 edge0 = edge1;
+
+ edge0 += (mode->hdisplay + 40) * SUN6I_DSI_TCON_DIV / 8;
+
+ if (edge0 > line_num)
+ return edge0 - line_num;
+
+ return 1;
+}
+
+static u16 sun6i_dsi_get_drq_edge1(struct sun6i_dsi *dsi,
+ struct drm_display_mode *mode,
+ u16 line_num)
+{
+ struct mipi_dsi_device *device = dsi->device;
+ unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
+ unsigned int hbp = mode->htotal - mode->hsync_end;
+ u16 edge1;
+
+ edge1 = SUN6I_DSI_SYNC_POINT;
+ edge1 += (mode->hdisplay + hbp + 20) * Bpp / device->lanes;
+
+ if (edge1 > line_num)
+ return line_num;
+
+ return edge1;
}
static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
@@ -367,7 +420,23 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
struct mipi_dsi_device *device = dsi->device;
u32 val = 0;
- if ((mode->hsync_end - mode->hdisplay) > 20) {
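+ /* In burst mode, program the DRQ window edges and the per-line burst length together with the sync point. */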
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ u16 line_num = sun6i_dsi_get_line_num(dsi, mode);
+ u16 edge0, edge1;
+
+ edge1 = sun6i_dsi_get_drq_edge1(dsi, mode, line_num);
+ edge0 = sun6i_dsi_get_drq_edge0(dsi, mode, line_num, edge1);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_DRQ_REG,
+ SUN6I_DSI_BURST_DRQ_EDGE0(edge0) |
+ SUN6I_DSI_BURST_DRQ_EDGE1(edge1));
+
+ regmap_write(dsi->regs, SUN6I_DSI_BURST_LINE_REG,
+ SUN6I_DSI_BURST_LINE_NUM(line_num) |
+ SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT));
+
+ val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE;
+ } else if ((mode->hsync_end - mode->hdisplay) > 20) {
/* Maaaaaagic */
u16 drq = (mode->hsync_end - mode->hdisplay) - 20;
@@ -384,8 +453,19 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
static void sun6i_dsi_setup_inst_loop(struct sun6i_dsi *dsi,
struct drm_display_mode *mode)
{
+ struct mipi_dsi_device *device = dsi->device;
u16 delay = 50 - 1;
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ delay = (mode->htotal - mode->hdisplay) * 150;
+ delay /= (mode->clock / 1000) * 8;
+ delay -= 50;
+ }
+
+ regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_SEL_REG,
+ 2 << (4 * DSI_INST_ID_LP11) |
+ 3 << (4 * DSI_INST_ID_DLY));
+
regmap_write(dsi->regs, SUN6I_DSI_INST_LOOP_NUM_REG(0),
SUN6I_DSI_INST_LOOP_NUM_N0(50 - 1) |
SUN6I_DSI_INST_LOOP_NUM_N1(delay));
@@ -451,49 +531,68 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
{
struct mipi_dsi_device *device = dsi->device;
unsigned int Bpp = mipi_dsi_pixel_format_to_bpp(device->format) / 8;
- u16 hbp, hfp, hsa, hblk, vblk;
+ u16 hbp = 0, hfp = 0, hsa = 0, hblk = 0, vblk = 0;
+ u32 basic_ctl = 0;
size_t bytes;
u8 *buffer;
/* Do all timing calculations up front to allocate buffer space */
- /*
- * A sync period is composed of a blanking packet (4 bytes +
- * payload + 2 bytes) and a sync event packet (4 bytes). Its
- * minimal size is therefore 10 bytes
- */
-#define HSA_PACKET_OVERHEAD 10
- hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
- (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+ if (device->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hblk = mode->hdisplay * Bpp;
+ basic_ctl = SUN6I_DSI_BASIC_CTL_VIDEO_BURST |
+ SUN6I_DSI_BASIC_CTL_HSA_HSE_DIS |
+ SUN6I_DSI_BASIC_CTL_HBP_DIS;
- /*
- * The backporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ if (device->lanes == 4)
+ basic_ctl |= SUN6I_DSI_BASIC_CTL_TRAIL_FILL |
+ SUN6I_DSI_BASIC_CTL_TRAIL_INV(0xc);
+ } else {
+ /*
+ * A sync period is composed of a blanking packet (4
+ * bytes + payload + 2 bytes) and a sync event packet
+ * (4 bytes). Its minimal size is therefore 10 bytes
+ */
+#define HSA_PACKET_OVERHEAD 10
+ hsa = max((unsigned int)HSA_PACKET_OVERHEAD,
+ (mode->hsync_end - mode->hsync_start) * Bpp - HSA_PACKET_OVERHEAD);
+
+ /*
+ * The backporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HBP_PACKET_OVERHEAD 6
- hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
- (mode->hsync_start - mode->hdisplay) * Bpp - HBP_PACKET_OVERHEAD);
-
- /*
- * The frontporch is set using a blanking packet (4 bytes +
- * payload + 2 bytes). Its minimal size is therefore 6 bytes
- */
+ hbp = max((unsigned int)HBP_PACKET_OVERHEAD,
+ (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD);
+
+ /*
+ * The frontporch is set using a blanking packet (4
+ * bytes + payload + 2 bytes). Its minimal size is
+ * therefore 6 bytes
+ */
#define HFP_PACKET_OVERHEAD 6
- hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
- (mode->htotal - mode->hsync_end) * Bpp - HFP_PACKET_OVERHEAD);
-
- /*
- * hblk seems to be the line + porches length.
- */
- hblk = mode->htotal * Bpp - hsa;
-
- /*
- * And I'm not entirely sure what vblk is about. The driver in
- * Allwinner BSP is using a rather convoluted calculation
- * there only for 4 lanes. However, using 0 (the !4 lanes
- * case) even with a 4 lanes screen seems to work...
- */
- vblk = 0;
+ hfp = max((unsigned int)HFP_PACKET_OVERHEAD,
+ (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD);
+
+ /*
+ * The blanking is set using a sync event (4 bytes)
+ * and a blanking packet (4 bytes + payload + 2
+ * bytes). Its minimal size is therefore 10 bytes.
+ */
+#define HBLK_PACKET_OVERHEAD 10
+ hblk = max((unsigned int)HBLK_PACKET_OVERHEAD,
+ (mode->htotal - (mode->hsync_end - mode->hsync_start)) * Bpp -
+ HBLK_PACKET_OVERHEAD);
+
+ /*
+ * And I'm not entirely sure what vblk is about. The driver in
+ * Allwinner BSP is using a rather convoluted calculation
+ * there only for 4 lanes. However, using 0 (the !4 lanes
+ * case) even with a 4 lanes screen seems to work...
+ */
+ vblk = 0;
+ }
/* How many bytes do we need to send all payloads? */
bytes = max_t(size_t, max(max(hfp, hblk), max(hsa, hbp)), vblk);
@@ -501,7 +600,7 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
if (WARN_ON(!buffer))
return;
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, 0);
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL_REG, basic_ctl);
regmap_write(dsi->regs, SUN6I_DSI_SYNC_HSS_REG,
sun6i_dsi_build_sync_pkt(MIPI_DSI_H_SYNC_START,
@@ -526,8 +625,8 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi,
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE0_REG,
SUN6I_DSI_BASIC_SIZE0_VSA(mode->vsync_end -
mode->vsync_start) |
- SUN6I_DSI_BASIC_SIZE0_VBP(mode->vsync_start -
- mode->vdisplay));
+ SUN6I_DSI_BASIC_SIZE0_VBP(mode->vtotal -
+ mode->vsync_end));
regmap_write(dsi->regs, SUN6I_DSI_BASIC_SIZE1_REG,
SUN6I_DSI_BASIC_SIZE1_VACT(mode->vdisplay) |
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index a07090579f84..5c3ad5be0690 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -13,6 +13,8 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mipi_dsi.h>
+#define SUN6I_DSI_TCON_DIV 4
+
struct sun6i_dsi {
struct drm_connector connector;
struct drm_encoder encoder;
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 30a2eff55687..fd20a928cf4d 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -325,38 +325,22 @@ static struct regmap_config sun8i_mixer_regmap_config = {
static int sun8i_mixer_of_get_id(struct device_node *node)
{
- struct device_node *port, *ep;
- int ret = -EINVAL;
+ struct device_node *ep, *remote;
+ struct of_endpoint of_ep;
- /* output is port 1 */
- port = of_graph_get_port_by_id(node, 1);
- if (!port)
+ /* Output port is 1, and we want the first endpoint. */
+ ep = of_graph_get_endpoint_by_regs(node, 1, -1);
+ if (!ep)
return -EINVAL;
- /* try to find downstream endpoint */
- for_each_available_child_of_node(port, ep) {
- struct device_node *remote;
- u32 reg;
-
- remote = of_graph_get_remote_endpoint(ep);
- if (!remote)
- continue;
-
- ret = of_property_read_u32(remote, "reg", &reg);
- if (!ret) {
- of_node_put(remote);
- of_node_put(ep);
- of_node_put(port);
-
- return reg;
- }
-
- of_node_put(remote);
- }
-
- of_node_put(port);
+ remote = of_graph_get_remote_endpoint(ep);
+ of_node_put(ep);
+ if (!remote)
+ return -EINVAL;
- return ret;
+ of_graph_parse_endpoint(remote, &of_ep);
+ of_node_put(remote);
+ return of_ep.id;
}
static int sun8i_mixer_bind(struct device *dev, struct device *master,
@@ -554,6 +538,7 @@ static int sun8i_mixer_remove(struct platform_device *pdev)
static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
.ccsc = 0,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -561,6 +546,7 @@ static const struct sun8i_mixer_cfg sun8i_a83t_mixer0_cfg = {
static const struct sun8i_mixer_cfg sun8i_a83t_mixer1_cfg = {
.ccsc = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -569,6 +555,7 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 432000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -577,6 +564,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 2048,
.ui_num = 3,
.vi_num = 1,
};
@@ -585,6 +573,7 @@ static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -593,6 +582,7 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.vi_num = 2,
.ui_num = 1,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ccsc = 0,
.mod_rate = 150000000,
};
@@ -601,6 +591,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
.ccsc = 0,
.mod_rate = 297000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
@@ -609,6 +600,7 @@ static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
.ccsc = 1,
.mod_rate = 297000000,
.scaler_mask = 0x3,
+ .scanline_yuv = 2048,
.ui_num = 1,
.vi_num = 1,
};
@@ -618,6 +610,7 @@ static const struct sun8i_mixer_cfg sun50i_h6_mixer0_cfg = {
.is_de3 = true,
.mod_rate = 600000000,
.scaler_mask = 0xf,
+ .scanline_yuv = 4096,
.ui_num = 3,
.vi_num = 1,
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index 913d14ce68b0..80e084caa084 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -159,6 +159,7 @@ struct de2_fmt_info {
* @mod_rate: module clock rate that needs to be set in order to have
* a functional block.
* @is_de3: true, if this is next gen display engine 3.0, false otherwise.
+ * @scanline_yuv: maximum scanline width (in pixels) handled by the VI scaler for YUV formats.
*/
struct sun8i_mixer_cfg {
int vi_num;
@@ -167,6 +168,7 @@ struct sun8i_mixer_cfg {
int ccsc;
unsigned long mod_rate;
unsigned int is_de3 : 1;
+ unsigned int scanline_yuv;
};
struct sun8i_mixer {
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index b1e7c76e9c17..3267d0f9b9b2 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -269,12 +269,12 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
return 0;
}
-const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun8i_r40_tcon_top_quirks = {
.has_tcon_tv1 = true,
.has_dsi = true,
};
-const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
+static const struct sun8i_tcon_top_quirks sun50i_h6_tcon_top_quirks = {
/* Nothing special */
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 8a0616238467..bb8e026d6405 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -80,6 +80,8 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
u32 bld_base, ch_base;
u32 outsize, insize;
u32 hphase, vphase;
+ u32 hn = 0, hm = 0;
+ u32 vn = 0, vm = 0;
bool subsampled;
DRM_DEBUG_DRIVER("Updating VI channel %d overlay %d\n",
@@ -137,12 +139,41 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
subsampled = format->hsub > 1 || format->vsub > 1;
if (insize != outsize || subsampled || hphase || vphase) {
- u32 hscale, vscale;
+ unsigned int scanline, required;
+ struct drm_display_mode *mode;
+ u32 hscale, vscale, fps;
+ u64 ability;
DRM_DEBUG_DRIVER("HW scaling is enabled\n");
- hscale = state->src_w / state->crtc_w;
- vscale = state->src_h / state->crtc_h;
+ mode = &plane->state->crtc->state->mode;
+ fps = (mode->clock * 1000) / (mode->vtotal * mode->htotal);
+ ability = clk_get_rate(mixer->mod_clk);
+ /* BSP algorithm assumes 80% efficiency of VI scaler unit */
+ ability *= 80;
+ do_div(ability, mode->vdisplay * fps * max(src_w, dst_w));
+
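+ /* Both "ability" and "required" are input-lines-per-output-line ratios scaled by 100. */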
+ required = src_h * 100 / dst_h;
+
+ if (ability < required) {
+ DRM_DEBUG_DRIVER("Using vertical coarse scaling\n");
+ vm = src_h;
+ vn = (u32)ability * dst_h / 100;
+ src_h = vn;
+ }
+
+ /* It seems that every RGB scaler has a line buffer for 2048 pixels. */
+ scanline = subsampled ? mixer->cfg->scanline_yuv : 2048;
+
+ if (src_w > scanline) {
+ DRM_DEBUG_DRIVER("Using horizontal coarse scaling\n");
+ hm = src_w;
+ hn = scanline;
+ src_w = hn;
+ }
+
+ hscale = (src_w << 16) / dst_w;
+ vscale = (src_h << 16) / dst_h;
sun8i_vi_scaler_setup(mixer, channel, src_w, src_h, dst_w,
dst_h, hscale, vscale, hphase, vphase,
@@ -153,6 +184,23 @@ static int sun8i_vi_layer_update_coord(struct sun8i_mixer *mixer, int channel,
sun8i_vi_scaler_enable(mixer, channel, false);
}
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_HDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(hn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(hm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_Y(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+ regmap_write(mixer->engine.regs,
+ SUN8I_MIXER_CHAN_VI_VDS_UV(ch_base),
+ SUN8I_MIXER_CHAN_VI_DS_N(vn) |
+ SUN8I_MIXER_CHAN_VI_DS_M(vm));
+
/* Set base coordinates */
DRM_DEBUG_DRIVER("Layer destination coordinates X: %d Y: %d\n",
state->dst.x1, state->dst.y1);
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
index 8a5e6d01c85d..a223a4839f45 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.h
@@ -24,6 +24,14 @@
((base) + 0x30 * (layer) + 0x18 + 4 * (plane))
#define SUN8I_MIXER_CHAN_VI_OVL_SIZE(base) \
((base) + 0xe8)
+#define SUN8I_MIXER_CHAN_VI_HDS_Y(base) \
+ ((base) + 0xf0)
+#define SUN8I_MIXER_CHAN_VI_HDS_UV(base) \
+ ((base) + 0xf4)
+#define SUN8I_MIXER_CHAN_VI_VDS_Y(base) \
+ ((base) + 0xf8)
+#define SUN8I_MIXER_CHAN_VI_VDS_UV(base) \
+ ((base) + 0xfc)
#define SUN8I_MIXER_CHAN_VI_LAYER_ATTR_EN BIT(0)
/* RGB mode should be set for RGB formats and cleared for YCbCr */
@@ -33,6 +41,9 @@
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA_MASK GENMASK(31, 24)
#define SUN50I_MIXER_CHAN_VI_LAYER_ATTR_ALPHA(x) ((x) << 24)
+#define SUN8I_MIXER_CHAN_VI_DS_N(x) ((x) << 16)
+#define SUN8I_MIXER_CHAN_VI_DS_M(x) ((x) << 0)
+
struct sun8i_mixer;
struct sun8i_vi_layer {
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 0a4ce05e00ab..1dd83a757dba 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -255,11 +255,9 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
helper->fb = fb;
helper->fbdev = info;
- info->par = helper;
info->fbops = &tegra_fb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, helper, fb->width, fb->height);
+ drm_fb_helper_fill_info(info, helper, sizes);
offset = info->var.xoffset * bytes_per_pixel +
info->var.yoffset * fb->pitches[0];
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index 4f80100ff5f3..4cce11fd8836 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -204,7 +204,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
{
if (bo->pages) {
dma_unmap_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
sg_free_table(bo->sgt);
kfree(bo->sgt);
@@ -230,7 +230,7 @@ static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
}
err = dma_map_sg(drm->dev, bo->sgt->sgl, bo->sgt->nents,
- DMA_BIDIRECTIONAL);
+ DMA_FROM_DEVICE);
if (err == 0) {
err = -EFAULT;
goto free_sgt;
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 40057106f5f3..5be5a0817dfe 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -2871,6 +2871,13 @@ static int tegra_sor_init(struct host1x_client *client)
* kernel is possible.
*/
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to acquire SOR reset: %d\n",
+ err);
+ return err;
+ }
+
err = reset_control_assert(sor->rst);
if (err < 0) {
dev_err(sor->dev, "failed to assert SOR reset: %d\n",
@@ -2894,6 +2901,8 @@ static int tegra_sor_init(struct host1x_client *client)
err);
return err;
}
+
+ reset_control_release(sor->rst);
}
err = clk_prepare_enable(sor->clk_safe);
@@ -3331,7 +3340,7 @@ static int tegra_sor_probe(struct platform_device *pdev)
goto remove;
}
- sor->rst = devm_reset_control_get(&pdev->dev, "sor");
+ sor->rst = devm_reset_control_get_exclusive_released(&pdev->dev, "sor");
if (IS_ERR(sor->rst)) {
err = PTR_ERR(sor->rst);
@@ -3519,6 +3528,8 @@ static int tegra_sor_suspend(struct device *dev)
dev_err(dev, "failed to assert reset: %d\n", err);
return err;
}
+
+ reset_control_release(sor->rst);
}
usleep_range(1000, 2000);
@@ -3542,9 +3553,17 @@ static int tegra_sor_resume(struct device *dev)
usleep_range(1000, 2000);
if (sor->rst) {
+ err = reset_control_acquire(sor->rst);
+ if (err < 0) {
+ dev_err(dev, "failed to acquire reset: %d\n", err);
+ clk_disable_unprepare(sor->clk);
+ return err;
+ }
+
err = reset_control_deassert(sor->rst);
if (err < 0) {
dev_err(dev, "failed to deassert reset: %d\n", err);
+ reset_control_release(sor->rst);
clk_disable_unprepare(sor->clk);
return err;
}
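
The sor.c changes above switch to devm_reset_control_get_exclusive_released() so the SOR reset line is owned only while the driver actually needs it and is handed back afterwards. A minimal sketch of the acquire/use/release pattern, assuming a reset control obtained the same way; the demo_* name is illustrative:

    #include <linux/delay.h>
    #include <linux/reset.h>

    /* Illustrative helper: temporarily own a released reset line, pulse it,
     * then hand it back so other users of the line can acquire it again. */
    static int demo_pulse_reset(struct reset_control *rst)
    {
    	int err;

    	err = reset_control_acquire(rst);
    	if (err < 0)
    		return err;

    	err = reset_control_assert(rst);
    	if (err < 0)
    		goto release;

    	usleep_range(1000, 2000);

    	err = reset_control_deassert(rst);

    release:
    	reset_control_release(rst);
    	return err;
    }
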
diff --git a/drivers/gpu/drm/tinydrm/core/Makefile b/drivers/gpu/drm/tinydrm/core/Makefile
index fb221e6f8885..6f8f764560e0 100644
--- a/drivers/gpu/drm/tinydrm/core/Makefile
+++ b/drivers/gpu/drm/tinydrm/core/Makefile
@@ -1,3 +1,3 @@
-tinydrm-y := tinydrm-core.o tinydrm-pipe.o tinydrm-helpers.o
+tinydrm-y := tinydrm-pipe.o tinydrm-helpers.o
obj-$(CONFIG_DRM_TINYDRM) += tinydrm.o
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
deleted file mode 100644
index 554abd5d3b53..000000000000
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * Copyright (C) 2016 Noralf Trønnes
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/module.h>
-
-/**
- * DOC: overview
- *
- * This library provides driver helpers for very simple display hardware.
- *
- * It is based on &drm_simple_display_pipe coupled with a &drm_connector which
- * has only one fixed &drm_display_mode. The framebuffers are backed by the
- * cma helper and have support for framebuffer flushing (dirty).
- * fbdev support is also included.
- *
- */
-
-/**
- * DOC: core
- *
- * The driver allocates &tinydrm_device, initializes it using
- * devm_tinydrm_init(), sets up the pipeline using tinydrm_display_pipe_init()
- * and registers the DRM device using devm_tinydrm_register().
- */
-
-static const struct drm_mode_config_funcs tinydrm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create_with_dirty,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- struct drm_device *drm;
-
- /*
- * We don't embed drm_device, because that prevent us from using
- * devm_kzalloc() to allocate tinydrm_device in the driver since
- * drm_dev_put() frees the structure. The devm_ functions provide
- * for easy error handling.
- */
- drm = drm_dev_alloc(driver, parent);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- tdev->drm = drm;
- drm->dev_private = tdev;
- drm_mode_config_init(drm);
- drm->mode_config.funcs = &tinydrm_mode_config_funcs;
- drm->mode_config.allow_fb_modifiers = true;
-
- return 0;
-}
-
-static void tinydrm_fini(struct tinydrm_device *tdev)
-{
- drm_mode_config_cleanup(tdev->drm);
- tdev->drm->dev_private = NULL;
- drm_dev_put(tdev->drm);
-}
-
-static void devm_tinydrm_release(void *data)
-{
- tinydrm_fini(data);
-}
-
-/**
- * devm_tinydrm_init - Initialize tinydrm device
- * @parent: Parent device object
- * @tdev: tinydrm device
- * @driver: DRM driver
- *
- * This function initializes @tdev, the underlying DRM device and it's
- * mode_config. Resources will be automatically freed on driver detach (devres)
- * using drm_mode_config_cleanup() and drm_dev_put().
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
- struct drm_driver *driver)
-{
- int ret;
-
- ret = tinydrm_init(parent, tdev, driver);
- if (ret)
- return ret;
-
- ret = devm_add_action(parent, devm_tinydrm_release, tdev);
- if (ret)
- tinydrm_fini(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_init);
-
-static int tinydrm_register(struct tinydrm_device *tdev)
-{
- struct drm_device *drm = tdev->drm;
- int ret;
-
- ret = drm_dev_register(tdev->drm, 0);
- if (ret)
- return ret;
-
- ret = drm_fbdev_generic_setup(drm, 0);
- if (ret)
- DRM_ERROR("Failed to initialize fbdev: %d\n", ret);
-
- return 0;
-}
-
-static void tinydrm_unregister(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
- drm_dev_unregister(tdev->drm);
-}
-
-static void devm_tinydrm_register_release(void *data)
-{
- tinydrm_unregister(data);
-}
-
-/**
- * devm_tinydrm_register - Register tinydrm device
- * @tdev: tinydrm device
- *
- * This function registers the underlying DRM device and fbdev.
- * These resources will be automatically unregistered on driver detach (devres)
- * and the display pipeline will be disabled.
- *
- * Returns:
- * Zero on success, negative error code on failure.
- */
-int devm_tinydrm_register(struct tinydrm_device *tdev)
-{
- struct device *dev = tdev->drm->dev;
- int ret;
-
- ret = tinydrm_register(tdev);
- if (ret)
- return ret;
-
- ret = devm_add_action(dev, devm_tinydrm_register_release, tdev);
- if (ret)
- tinydrm_unregister(tdev);
-
- return ret;
-}
-EXPORT_SYMBOL(devm_tinydrm_register);
-
-/**
- * tinydrm_shutdown - Shutdown tinydrm
- * @tdev: tinydrm device
- *
- * This function makes sure that the display pipeline is disabled.
- * Used by drivers in their shutdown callback to turn off the display
- * on machine shutdown and reboot.
- */
-void tinydrm_shutdown(struct tinydrm_device *tdev)
-{
- drm_atomic_helper_shutdown(tdev->drm);
-}
-EXPORT_SYMBOL(tinydrm_shutdown);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
index 2737b6fdadc8..6d540d93758f 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-helpers.c
@@ -26,164 +26,6 @@ static unsigned int spi_max;
module_param(spi_max, uint, 0400);
MODULE_PARM_DESC(spi_max, "Set a lower SPI max transfer size");
-/**
- * tinydrm_memcpy - Copy clip buffer
- * @dst: Destination buffer
- * @vaddr: Source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int cpp = drm_format_plane_cpp(fb->format->format, 0);
- unsigned int pitch = fb->pitches[0];
- void *src = vaddr + (clip->y1 * pitch) + (clip->x1 * cpp);
- size_t len = (clip->x2 - clip->x1) * cpp;
- unsigned int y;
-
- for (y = clip->y1; y < clip->y2; y++) {
- memcpy(dst, src, len);
- src += pitch;
- dst += len;
- }
-}
-EXPORT_SYMBOL(tinydrm_memcpy);
-
-/**
- * tinydrm_swab16 - Swap bytes into clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: RGB565 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- */
-void tinydrm_swab16(u16 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u16);
- unsigned int x, y;
- u16 *src, *buf;
-
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++)
- *dst++ = swab16(*src++);
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_swab16);
-
-/**
- * tinydrm_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer
- * @dst: RGB565 destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- * @swap: Swap bytes
- *
- * Drivers can use this function for RGB565 devices that don't natively
- * support XRGB8888.
- */
-void tinydrm_xrgb8888_to_rgb565(u16 *dst, void *vaddr,
- struct drm_framebuffer *fb,
- struct drm_rect *clip, bool swap)
-{
- size_t len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- u32 *src, *buf;
- u16 val16;
-
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- val16 = ((*src & 0x00F80000) >> 8) |
- ((*src & 0x0000FC00) >> 5) |
- ((*src & 0x000000F8) >> 3);
- src++;
- if (swap)
- *dst++ = swab16(val16);
- else
- *dst++ = val16;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_rgb565);
-
-/**
- * tinydrm_xrgb8888_to_gray8 - Convert XRGB8888 to grayscale
- * @dst: 8-bit grayscale destination buffer
- * @vaddr: XRGB8888 source buffer
- * @fb: DRM framebuffer
- * @clip: Clip rectangle area to copy
- *
- * Drm doesn't have native monochrome or grayscale support.
- * Such drivers can announce the commonly supported XR24 format to userspace
- * and use this function to convert to the native format.
- *
- * Monochrome drivers will use the most significant bit,
- * where 1 means foreground color and 0 background color.
- *
- * ITU BT.601 is used for the RGB -> luma (brightness) conversion.
- */
-void tinydrm_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
- struct drm_rect *clip)
-{
- unsigned int len = (clip->x2 - clip->x1) * sizeof(u32);
- unsigned int x, y;
- void *buf;
- u32 *src;
-
- if (WARN_ON(fb->format->format != DRM_FORMAT_XRGB8888))
- return;
- /*
- * The cma memory is write-combined so reads are uncached.
- * Speed up by fetching one line at a time.
- */
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return;
-
- for (y = clip->y1; y < clip->y2; y++) {
- src = vaddr + (y * fb->pitches[0]);
- src += clip->x1;
- memcpy(buf, src, len);
- src = buf;
- for (x = clip->x1; x < clip->x2; x++) {
- u8 r = (*src & 0x00ff0000) >> 16;
- u8 g = (*src & 0x0000ff00) >> 8;
- u8 b = *src & 0x000000ff;
-
- /* ITU BT.601: Y = 0.299 R + 0.587 G + 0.114 B */
- *dst++ = (3 * r + 6 * g + b) / 10;
- src++;
- }
- }
-
- kfree(buf);
-}
-EXPORT_SYMBOL(tinydrm_xrgb8888_to_gray8);
-
#if IS_ENABLED(CONFIG_SPI)
/**
@@ -365,3 +207,5 @@ int tinydrm_spi_transfer(struct spi_device *spi, u32 speed_hz,
EXPORT_SYMBOL(tinydrm_spi_transfer);
#endif /* CONFIG_SPI */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
index bb5b1c1e21ba..bb8a7ed8ddf6 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-pipe.c
@@ -13,7 +13,7 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
struct tinydrm_connector {
struct drm_connector base;
@@ -129,7 +129,8 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
/**
* tinydrm_display_pipe_init - Initialize display pipe
- * @tdev: tinydrm device
+ * @drm: DRM device
+ * @pipe: Display pipe
* @funcs: Display pipe functions
* @connector_type: Connector type
* @formats: Array of supported formats (DRM_FORMAT\_\*)
@@ -143,16 +144,15 @@ static int tinydrm_rotate_mode(struct drm_display_mode *mode,
* Returns:
* Zero on success, negative error code on failure.
*/
-int
-tinydrm_display_pipe_init(struct tinydrm_device *tdev,
- const struct drm_simple_display_pipe_funcs *funcs,
- int connector_type,
- const uint32_t *formats,
- unsigned int format_count,
- const struct drm_display_mode *mode,
- unsigned int rotation)
+int tinydrm_display_pipe_init(struct drm_device *drm,
+ struct drm_simple_display_pipe *pipe,
+ const struct drm_simple_display_pipe_funcs *funcs,
+ int connector_type,
+ const uint32_t *formats,
+ unsigned int format_count,
+ const struct drm_display_mode *mode,
+ unsigned int rotation)
{
- struct drm_device *drm = tdev->drm;
struct drm_display_mode mode_copy;
struct drm_connector *connector;
int ret;
@@ -177,7 +177,7 @@ tinydrm_display_pipe_init(struct tinydrm_device *tdev,
if (IS_ERR(connector))
return PTR_ERR(connector);
- return drm_simple_display_pipe_init(drm, &tdev->pipe, funcs, formats,
+ return drm_simple_display_pipe_init(drm, pipe, funcs, formats,
format_count, modifiers, connector);
}
EXPORT_SYMBOL(tinydrm_display_pipe_init);
diff --git a/drivers/gpu/drm/tinydrm/hx8357d.c b/drivers/gpu/drm/tinydrm/hx8357d.c
index 8bbd0beafc6a..5773d0fb6ca1 100644
--- a/drivers/gpu/drm/tinydrm/hx8357d.c
+++ b/drivers/gpu/drm/tinydrm/hx8357d.c
@@ -16,7 +16,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -46,16 +48,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -171,6 +175,8 @@ out_enable:
}
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
@@ -181,7 +187,7 @@ static const struct drm_simple_display_pipe_funcs hx8357d_pipe_funcs = {
};
static const struct drm_display_mode yx350hv15_mode = {
- TINYDRM_MODE(320, 480, 60, 75),
+ DRM_SIMPLE_MODE(320, 480, 60, 75),
};
DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
@@ -189,6 +195,7 @@ DEFINE_DRM_GEM_CMA_FOPS(hx8357d_fops);
static struct drm_driver hx8357d_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &hx8357d_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "hx8357d",
@@ -213,15 +220,25 @@ MODULE_DEVICE_TABLE(spi, hx8357d_id);
static int hx8357d_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &hx8357d_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
if (IS_ERR(dc)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
@@ -238,21 +255,36 @@ static int hx8357d_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &hx8357d_pipe_funcs,
- &hx8357d_driver, &yx350hv15_mode, rotation);
+ ret = mipi_dbi_init(mipi, &hx8357d_pipe_funcs, &yx350hv15_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void hx8357d_shutdown(struct spi_device *spi)
+static int hx8357d_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void hx8357d_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver hx8357d_spi_driver = {
@@ -262,6 +294,7 @@ static struct spi_driver hx8357d_spi_driver = {
},
.id_table = hx8357d_id,
.probe = hx8357d_probe,
+ .remove = hx8357d_remove,
.shutdown = hx8357d_shutdown,
};
module_spi_driver(hx8357d_spi_driver);
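
The converted pipe and flush callbacks above wrap their hardware access in drm_dev_enter()/drm_dev_exit(), which starts failing once drm_dev_unplug() has run in the new ->remove handler. A bare-bones sketch of the guard; the demo_* names are hypothetical:

    #include <drm/drm_drv.h>
    #include <drm/drm_simple_kms_helper.h>

    static void demo_pipe_enable(struct drm_simple_display_pipe *pipe)
    {
    	struct drm_device *drm = pipe->crtc.dev;
    	int idx;

    	/* Returns false after drm_dev_unplug(); skip hardware access then. */
    	if (!drm_dev_enter(drm, &idx))
    		return;

    	/* ... program the controller only inside the enter/exit section ... */

    	drm_dev_exit(idx);
    }
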
diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
index 43a3b68d90a2..4b1a587c0134 100644
--- a/drivers/gpu/drm/tinydrm/ili9225.c
+++ b/drivers/gpu/drm/tinydrm/ili9225.c
@@ -20,9 +20,11 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -81,20 +83,22 @@ static inline int ili9225_command(struct mipi_dbi *mipi, u8 cmd, u16 data)
static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
u16 x_start, y_start;
u16 x1, x2, y1, y2;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -157,6 +161,8 @@ static void ili9225_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -181,19 +187,21 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
- struct device *dev = tdev->drm->dev;
+ struct device *dev = pipe->crtc.dev->dev;
struct drm_rect rect = {
.x1 = 0,
.x2 = fb->width,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int ret, idx;
u8 am_id;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
mipi_dbi_hw_reset(mipi);
@@ -207,7 +215,7 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
ret = ili9225_command(mipi, ILI9225_POWER_CONTROL_1, 0x0000);
if (ret) {
DRM_DEV_ERROR(dev, "Error sending command %d\n", ret);
- return;
+ goto out_exit;
}
ili9225_command(mipi, ILI9225_POWER_CONTROL_2, 0x0000);
ili9225_command(mipi, ILI9225_POWER_CONTROL_3, 0x0000);
@@ -280,15 +288,23 @@ static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
mipi->enabled = true;
ili9225_fb_dirty(fb, &rect);
+out_exit:
+ drm_dev_exit(idx);
}
static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
DRM_DEBUG_KMS("\n");
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
if (!mipi->enabled)
return;
@@ -301,7 +317,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
mipi->enabled = false;
}
-static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
+static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -311,11 +327,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
+ if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -332,7 +348,7 @@ static const struct drm_simple_display_pipe_funcs ili9225_pipe_funcs = {
};
static const struct drm_display_mode ili9225_mode = {
- TINYDRM_MODE(176, 220, 35, 44),
+ DRM_SIMPLE_MODE(176, 220, 35, 44),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9225_fops);
@@ -341,6 +357,7 @@ static struct drm_driver ili9225_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &ili9225_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "ili9225",
.desc = "Ilitek ILI9225",
@@ -364,15 +381,25 @@ MODULE_DEVICE_TABLE(spi, ili9225_id);
static int ili9225_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *rs;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9225_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -394,21 +421,36 @@ static int ili9225_probe(struct spi_device *spi)
/* override the command function set in mipi_dbi_spi_init() */
mipi->command = ili9225_dbi_command;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9225_pipe_funcs,
- &ili9225_driver, &ili9225_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9225_pipe_funcs, &ili9225_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
- return devm_tinydrm_register(&mipi->tinydrm);
+ return 0;
}
-static void ili9225_shutdown(struct spi_device *spi)
+static int ili9225_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void ili9225_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9225_spi_driver = {
@@ -419,6 +461,7 @@ static struct spi_driver ili9225_spi_driver = {
},
.id_table = ili9225_id,
.probe = ili9225_probe,
+ .remove = ili9225_remove,
.shutdown = ili9225_shutdown,
};
module_spi_driver(ili9225_spi_driver);
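
The probe paths above drop devres for the private structure: struct mipi_dbi now embeds struct drm_device, devm_drm_dev_init() ties the DRM device's lifetime to the SPI device, and the memory is only freed from the driver's ->release callback once the last reference is dropped. A condensed, hypothetical sketch of that ownership model (the demo_* names are not from the patch):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <drm/drm_drv.h>
    #include <drm/drm_mode_config.h>

    struct demo_priv {
    	struct drm_device drm;		/* embedded, not a pointer */
    	/* driver-specific state follows */
    };

    /* Called when the last drm_dev_put() reference goes away. */
    static void demo_release(struct drm_device *drm)
    {
    	struct demo_priv *priv = container_of(drm, struct demo_priv, drm);

    	drm_mode_config_cleanup(drm);
    	drm_dev_fini(drm);
    	kfree(priv);
    }
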
diff --git a/drivers/gpu/drm/tinydrm/ili9341.c b/drivers/gpu/drm/tinydrm/ili9341.c
index 713bb2dd7e04..4ade9e4b924f 100644
--- a/drivers/gpu/drm/tinydrm/ili9341.c
+++ b/drivers/gpu/drm/tinydrm/ili9341.c
@@ -15,7 +15,9 @@
#include <linux/property.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -52,16 +54,18 @@ static void yx240qv29_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -127,6 +131,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
@@ -137,7 +143,7 @@ static const struct drm_simple_display_pipe_funcs ili9341_pipe_funcs = {
};
static const struct drm_display_mode yx240qv29_mode = {
- TINYDRM_MODE(240, 320, 37, 49),
+ DRM_SIMPLE_MODE(240, 320, 37, 49),
};
DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
@@ -145,6 +151,7 @@ DEFINE_DRM_GEM_CMA_FOPS(ili9341_fops);
static struct drm_driver ili9341_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
.fops = &ili9341_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "ili9341",
@@ -169,15 +176,25 @@ MODULE_DEVICE_TABLE(spi, ili9341_id);
static int ili9341_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9341_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -200,21 +217,36 @@ static int ili9341_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &ili9341_pipe_funcs,
- &ili9341_driver, &yx240qv29_mode, rotation);
+ ret = mipi_dbi_init(mipi, &ili9341_pipe_funcs, &yx240qv29_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void ili9341_shutdown(struct spi_device *spi)
+static int ili9341_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void ili9341_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver ili9341_spi_driver = {
@@ -224,6 +256,7 @@ static struct spi_driver ili9341_spi_driver = {
},
.id_table = ili9341_id,
.probe = ili9341_probe,
+ .remove = ili9341_remove,
.shutdown = ili9341_shutdown,
};
module_spi_driver(ili9341_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mi0283qt.c b/drivers/gpu/drm/tinydrm/mi0283qt.c
index 82a92ec9ae3c..8e169846fbd8 100644
--- a/drivers/gpu/drm/tinydrm/mi0283qt.c
+++ b/drivers/gpu/drm/tinydrm/mi0283qt.c
@@ -17,7 +17,9 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
@@ -54,16 +56,18 @@ static void mi0283qt_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
u8 addr_mode;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_conditional_reset(mipi);
if (ret < 0)
- return;
+ goto out_exit;
if (ret == 1)
goto out_enable;
@@ -135,6 +139,8 @@ out_enable:
addr_mode |= ILI9341_MADCTL_BGR;
mipi_dbi_command(mipi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
@@ -145,7 +151,7 @@ static const struct drm_simple_display_pipe_funcs mi0283qt_pipe_funcs = {
};
static const struct drm_display_mode mi0283qt_mode = {
- TINYDRM_MODE(320, 240, 58, 43),
+ DRM_SIMPLE_MODE(320, 240, 58, 43),
};
DEFINE_DRM_GEM_CMA_FOPS(mi0283qt_fops);
@@ -154,6 +160,7 @@ static struct drm_driver mi0283qt_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &mi0283qt_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "mi0283qt",
@@ -178,15 +185,25 @@ MODULE_DEVICE_TABLE(spi, mi0283qt_id);
static int mi0283qt_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &mi0283qt_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -213,35 +230,46 @@ static int mi0283qt_probe(struct spi_device *spi)
if (ret)
return ret;
- ret = mipi_dbi_init(&spi->dev, mipi, &mi0283qt_pipe_funcs,
- &mi0283qt_driver, &mi0283qt_mode, rotation);
+ ret = mipi_dbi_init(mipi, &mi0283qt_pipe_funcs, &mi0283qt_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ spi_set_drvdata(spi, drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void mi0283qt_shutdown(struct spi_device *spi)
+static int mi0283qt_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
- tinydrm_shutdown(&mipi->tinydrm);
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
}
-static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+static void mi0283qt_shutdown(struct spi_device *spi)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
- return drm_mode_config_helper_suspend(mipi->tinydrm.drm);
+static int __maybe_unused mi0283qt_pm_suspend(struct device *dev)
+{
+ return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
}
static int __maybe_unused mi0283qt_pm_resume(struct device *dev)
{
- struct mipi_dbi *mipi = dev_get_drvdata(dev);
-
- drm_mode_config_helper_resume(mipi->tinydrm.drm);
+ drm_mode_config_helper_resume(dev_get_drvdata(dev));
return 0;
}
@@ -259,6 +287,7 @@ static struct spi_driver mi0283qt_spi_driver = {
},
.id_table = mi0283qt_id,
.probe = mi0283qt_probe,
+ .remove = mi0283qt_remove,
.shutdown = mi0283qt_shutdown,
};
module_spi_driver(mi0283qt_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
index 918f77c7de34..85761b4abb83 100644
--- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
+++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
@@ -21,6 +21,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>
@@ -153,16 +154,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
*/
int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
{
+ u8 *cmdbuf;
int ret;
+ /* SPI requires dma-safe buffers */
+ cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
+ if (!cmdbuf)
+ return -ENOMEM;
+
mutex_lock(&mipi->cmdlock);
- ret = mipi->command(mipi, cmd, data, len);
+ ret = mipi->command(mipi, cmdbuf, data, len);
mutex_unlock(&mipi->cmdlock);
+ kfree(cmdbuf);
+
return ret;
}
EXPORT_SYMBOL(mipi_dbi_command_buf);
+/* This should only be used by mipi_dbi_command() */
+int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kmemdup(data, len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
+
+ kfree(buf);
+
+ return ret;
+}
+EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
+
/**
* mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
* @dst: The destination buffer
@@ -192,12 +219,12 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
if (swap)
- tinydrm_swab16(dst, src, fb, clip);
+ drm_fb_swab16(dst, src, fb, clip);
else
- tinydrm_memcpy(dst, src, fb, clip);
+ drm_fb_memcpy(dst, src, fb, clip);
break;
case DRM_FORMAT_XRGB8888:
- tinydrm_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
+ drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
dev_err_once(fb->dev->dev, "Format is not supported: %s\n",
@@ -216,18 +243,20 @@ EXPORT_SYMBOL(mipi_dbi_buf_copy);
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
unsigned int height = rect->y2 - rect->y1;
unsigned int width = rect->x2 - rect->x1;
bool swap = mipi->swap_bytes;
- int ret = 0;
+ int idx, ret = 0;
bool full;
void *tr;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
full = width == fb->width && height == fb->height;
DRM_DEBUG_KMS("Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));
@@ -254,6 +283,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
/**
@@ -308,19 +339,29 @@ void mipi_dbi_enable_flush(struct mipi_dbi *mipi,
.y1 = 0,
.y2 = fb->height,
};
+ int idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return;
mipi->enabled = true;
mipi_dbi_fb_dirty(fb, &rect);
backlight_enable(mipi->backlight);
+
+ drm_dev_exit(idx);
}
EXPORT_SYMBOL(mipi_dbi_enable_flush);
static void mipi_dbi_blank(struct mipi_dbi *mipi)
{
- struct drm_device *drm = mipi->tinydrm.drm;
+ struct drm_device *drm = &mipi->drm;
u16 height = drm->mode_config.min_height;
u16 width = drm->mode_config.min_width;
size_t len = width * height * 2;
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
memset(mipi->tx_buf, 0, len);
@@ -330,6 +371,8 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
(height >> 8) & 0xFF, (height - 1) & 0xFF);
mipi_dbi_command_buf(mipi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)mipi->tx_buf, len);
+
+ drm_dev_exit(idx);
}
/**
@@ -342,8 +385,10 @@ static void mipi_dbi_blank(struct mipi_dbi *mipi)
*/
void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ if (!mipi->enabled)
+ return;
DRM_DEBUG_KMS("\n");
@@ -359,6 +404,12 @@ void mipi_dbi_pipe_disable(struct drm_simple_display_pipe *pipe)
}
EXPORT_SYMBOL(mipi_dbi_pipe_disable);
+static const struct drm_mode_config_funcs mipi_dbi_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const uint32_t mipi_dbi_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
@@ -366,31 +417,27 @@ static const uint32_t mipi_dbi_formats[] = {
/**
* mipi_dbi_init - MIPI DBI initialization
- * @dev: Parent device
* @mipi: &mipi_dbi structure to initialize
- * @pipe_funcs: Display pipe functions
- * @driver: DRM driver
+ * @funcs: Display pipe functions
* @mode: Display mode
* @rotation: Initial rotation in degrees Counter Clock Wise
*
- * This function initializes a &mipi_dbi structure and it's underlying
- * @tinydrm_device. It also sets up the display pipeline.
+ * This function sets up a &drm_simple_display_pipe with a &drm_connector that
+ * has one fixed &drm_display_mode which is rotated according to @rotation.
+ * This mode is used to set the mode config min/max width/height properties.
+ * Additionally &mipi_dbi.tx_buf is allocated.
*
* Supported formats: Native RGB565 and emulated XRGB8888.
*
- * Objects created by this function will be automatically freed on driver
- * detach (devres).
- *
* Returns:
* Zero on success, negative error code on failure.
*/
-int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver,
+int mipi_dbi_init(struct mipi_dbi *mipi,
+ const struct drm_simple_display_pipe_funcs *funcs,
const struct drm_display_mode *mode, unsigned int rotation)
{
size_t bufsize = mode->vdisplay * mode->hdisplay * sizeof(u16);
- struct tinydrm_device *tdev = &mipi->tinydrm;
+ struct drm_device *drm = &mipi->drm;
int ret;
if (!mipi->command)
@@ -398,16 +445,12 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
mutex_init(&mipi->cmdlock);
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ mipi->tx_buf = devm_kmalloc(drm->dev, bufsize, GFP_KERNEL);
if (!mipi->tx_buf)
return -ENOMEM;
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
/* TODO: Maybe add DRM_MODE_CONNECTOR_SPI */
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
mipi_dbi_formats,
ARRAY_SIZE(mipi_dbi_formats), mode,
@@ -415,21 +458,40 @@ int mipi_dbi_init(struct device *dev, struct mipi_dbi *mipi,
if (ret)
return ret;
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- tdev->drm->mode_config.preferred_depth = 16;
+ drm->mode_config.funcs = &mipi_dbi_mode_config_funcs;
+ drm->mode_config.preferred_depth = 16;
mipi->rotation = rotation;
- drm_mode_config_reset(tdev->drm);
-
DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
+ drm->mode_config.preferred_depth, rotation);
return 0;
}
EXPORT_SYMBOL(mipi_dbi_init);
/**
+ * mipi_dbi_release - DRM driver release helper
+ * @drm: DRM device
+ *
+ * This function finalizes and frees &mipi_dbi.
+ *
+ * Drivers can use this as their &drm_driver->release callback.
+ */
+void mipi_dbi_release(struct drm_device *drm)
+{
+ struct mipi_dbi *dbi = drm_to_mipi_dbi(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(dbi);
+}
+EXPORT_SYMBOL(mipi_dbi_release);
+
+/**
* mipi_dbi_hw_reset - Hardware reset of controller
* @mipi: MIPI DBI structure
*
@@ -481,7 +543,7 @@ EXPORT_SYMBOL(mipi_dbi_display_is_on);
static int mipi_dbi_poweron_reset_conditional(struct mipi_dbi *mipi, bool cond)
{
- struct device *dev = mipi->tinydrm.drm->dev;
+ struct device *dev = mipi->drm.dev;
int ret;
if (mipi->regulator) {
@@ -774,18 +836,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
return 0;
}
-static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *parameters, size_t num)
{
- unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
+ unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return -ENOTSUPP;
- MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
- ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
+ ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
if (ret || !num)
return ret;
@@ -794,7 +856,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
/* MIPI DBI Type C Option 3 */
-static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
u8 *data, size_t len)
{
struct spi_device *spi = mipi->spi;
@@ -803,7 +865,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
struct spi_transfer tr[2] = {
{
.speed_hz = speed_hz,
- .tx_buf = &cmd,
+ .tx_buf = cmd,
.len = 1,
}, {
.speed_hz = speed_hz,
@@ -821,8 +883,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
* Support non-standard 24-bit and 32-bit Nokia read commands which
* start with a dummy clock, so we need to read an extra byte.
*/
- if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
- cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
+ if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
+ *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
if (!(len == 3 || len == 4))
return -EINVAL;
@@ -852,7 +914,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
}
- MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
err_free:
kfree(buf);
@@ -860,7 +922,7 @@ err_free:
return ret;
}
-static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
+static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
u8 *par, size_t num)
{
struct spi_device *spi = mipi->spi;
@@ -868,18 +930,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
u32 speed_hz;
int ret;
- if (mipi_dbi_command_is_read(mipi, cmd))
+ if (mipi_dbi_command_is_read(mipi, *cmd))
return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
- MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
+ MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
gpiod_set_value_cansleep(mipi->dc, 0);
speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
- ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
+ ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
if (ret || !num)
return ret;
- if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
+ if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
bpw = 16;
gpiod_set_value_cansleep(mipi->dc, 1);
@@ -926,7 +988,7 @@ int mipi_dbi_spi_init(struct spi_device *spi, struct mipi_dbi *mipi,
* Even though it's not the SPI device that does DMA (the master does),
* the dma mask is necessary for the dma_alloc_wc() in
* drm_gem_cma_create(). The dma_addr returned will be a physical
- * adddress which might be different from the bus address, but this is
+ * address which might be different from the bus address, but this is
* not a problem since the address will not be used.
* The virtual address is used in the transfer and the SPI core
* re-maps it on the SPI master device using the DMA streaming API
@@ -976,11 +1038,16 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
u8 val, cmd = 0, parameters[64];
char *buf, *pos, *token;
unsigned int i;
- int ret;
+ int ret, idx;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
buf = memdup_user_nul(ubuf, count);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto err_exit;
+ }
/* strip trailing whitespace */
for (i = count - 1; i > 0; i--)
@@ -1016,6 +1083,8 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file,
err_free:
kfree(buf);
+err_exit:
+ drm_dev_exit(idx);
return ret < 0 ? ret : count;
}
@@ -1024,8 +1093,11 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
{
struct mipi_dbi *mipi = m->private;
u8 cmd, val[4];
+ int ret, idx;
size_t len;
- int ret;
+
+ if (!drm_dev_enter(&mipi->drm, &idx))
+ return -ENODEV;
for (cmd = 0; cmd < 255; cmd++) {
if (!mipi_dbi_command_is_read(mipi, cmd))
@@ -1056,6 +1128,8 @@ static int mipi_dbi_debugfs_command_show(struct seq_file *m, void *unused)
seq_printf(m, "%*phN\n", (int)len, val);
}
+ drm_dev_exit(idx);
+
return 0;
}
@@ -1088,8 +1162,7 @@ static const struct file_operations mipi_dbi_debugfs_command_fops = {
*/
int mipi_dbi_debugfs_init(struct drm_minor *minor)
{
- struct tinydrm_device *tdev = minor->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(minor->dev);
umode_t mode = S_IFREG | S_IWUSR;
if (mipi->read_commands)
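
mipi_dbi_command_buf() above now duplicates the command byte with kmemdup() before handing it to the SPI layer, because SPI transfer buffers must be DMA-safe and a byte living on the stack is not. A small, hypothetical sketch of the same precaution around spi_write():

    #include <linux/slab.h>
    #include <linux/spi/spi.h>

    /* Illustrative only: send one command byte from a DMA-safe copy. */
    static int demo_send_cmd(struct spi_device *spi, u8 cmd)
    {
    	u8 *buf;
    	int ret;

    	buf = kmemdup(&cmd, 1, GFP_KERNEL);	/* kmalloc'ed, DMA-safe */
    	if (!buf)
    		return -ENOMEM;

    	ret = spi_write(spi, buf, 1);
    	kfree(buf);

    	return ret;
    }
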
diff --git a/drivers/gpu/drm/tinydrm/repaper.c b/drivers/gpu/drm/tinydrm/repaper.c
index b037c6540cf3..370629e2de94 100644
--- a/drivers/gpu/drm/tinydrm/repaper.c
+++ b/drivers/gpu/drm/tinydrm/repaper.c
@@ -26,14 +26,17 @@
#include <linux/spi/spi.h>
#include <linux/thermal.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/tinydrm/tinydrm.h>
+#include <drm/drm_simple_kms_helper.h>
#include <drm/tinydrm/tinydrm-helpers.h>
#define REPAPER_RID_G2_COG_ID 0x12
@@ -59,7 +62,8 @@ enum repaper_epd_border_byte {
};
struct repaper_epd {
- struct tinydrm_device tinydrm;
+ struct drm_device drm;
+ struct drm_simple_display_pipe pipe;
struct spi_device *spi;
struct gpio_desc *panel_on;
@@ -88,10 +92,9 @@ struct repaper_epd {
bool partial;
};
-static inline struct repaper_epd *
-epd_from_tinydrm(struct tinydrm_device *tdev)
+static inline struct repaper_epd *drm_to_epd(struct drm_device *drm)
{
- return container_of(tdev, struct repaper_epd, tinydrm);
+ return container_of(drm, struct repaper_epd, drm);
}
static int repaper_spi_transfer(struct spi_device *spi, u8 header,
@@ -529,11 +532,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
{
struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
struct dma_buf_attachment *import_attach = cma_obj->base.import_attach;
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(fb->dev);
struct drm_rect clip;
+ int idx, ret = 0;
u8 *buf = NULL;
- int ret = 0;
+
+ if (!epd->enabled)
+ return 0;
+
+ if (!drm_dev_enter(fb->dev, &idx))
+ return -ENODEV;
/* repaper can't do partial updates */
clip.x1 = 0;
@@ -541,17 +549,16 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
clip.y1 = 0;
clip.y2 = fb->height;
- if (!epd->enabled)
- return 0;
-
repaper_get_temperature(epd);
DRM_DEBUG("Flushing [FB:%d] st=%ums\n", fb->base.id,
epd->factored_stage_time);
buf = kmalloc_array(fb->width, fb->height, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto out_exit;
+ }
if (import_attach) {
ret = dma_buf_begin_cpu_access(import_attach->dmabuf,
@@ -560,7 +567,7 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
goto out_free;
}
- tinydrm_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
+ drm_fb_xrgb8888_to_gray8(buf, cma_obj->vaddr, fb, &clip);
if (import_attach) {
ret = dma_buf_end_cpu_access(import_attach->dmabuf,
@@ -620,6 +627,8 @@ static int repaper_fb_dirty(struct drm_framebuffer *fb)
out_free:
kfree(buf);
+out_exit:
+ drm_dev_exit(idx);
return ret;
}
@@ -645,12 +654,14 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
struct device *dev = &spi->dev;
bool dc_ok = false;
- int i, ret;
+ int i, ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
DRM_DEBUG_DRIVER("\n");
@@ -689,7 +700,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!i) {
DRM_DEV_ERROR(dev, "timeout waiting for panel to become ready.\n");
power_off(epd);
- return;
+ goto out_exit;
}
repaper_read_id(spi);
@@ -700,7 +711,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
dev_err(dev, "wrong COG ID 0x%02x\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
/* Disable OE */
@@ -713,7 +724,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
else
DRM_DEV_ERROR(dev, "panel is reported broken\n");
power_off(epd);
- return;
+ goto out_exit;
}
/* Power saving mode */
@@ -753,7 +764,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (ret < 0) {
DRM_DEV_ERROR(dev, "failed to read chip (%d)\n", ret);
power_off(epd);
- return;
+ goto out_exit;
}
if (ret & 0x40) {
@@ -765,7 +776,7 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
if (!dc_ok) {
DRM_DEV_ERROR(dev, "dc/dc failed\n");
power_off(epd);
- return;
+ goto out_exit;
}
/*
@@ -776,15 +787,26 @@ static void repaper_pipe_enable(struct drm_simple_display_pipe *pipe,
epd->enabled = true;
epd->partial = false;
+out_exit:
+ drm_dev_exit(idx);
}
static void repaper_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct repaper_epd *epd = epd_from_tinydrm(tdev);
+ struct repaper_epd *epd = drm_to_epd(pipe->crtc.dev);
struct spi_device *spi = epd->spi;
unsigned int line;
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
+
+ if (!epd->enabled)
+ return;
+
DRM_DEBUG_DRIVER("\n");
epd->enabled = false;
@@ -855,33 +877,50 @@ static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs repaper_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void repaper_release(struct drm_device *drm)
+{
+ struct repaper_epd *epd = drm_to_epd(drm);
+
+ DRM_DEBUG_DRIVER("\n");
+
+ drm_mode_config_cleanup(drm);
+ drm_dev_fini(drm);
+ kfree(epd);
+}
+
static const uint32_t repaper_formats[] = {
DRM_FORMAT_XRGB8888,
};
static const struct drm_display_mode repaper_e1144cs021_mode = {
- TINYDRM_MODE(128, 96, 29, 22),
+ DRM_SIMPLE_MODE(128, 96, 29, 22),
};
static const u8 repaper_e1144cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x0f, 0xff, 0x00 };
static const struct drm_display_mode repaper_e1190cs021_mode = {
- TINYDRM_MODE(144, 128, 36, 32),
+ DRM_SIMPLE_MODE(144, 128, 36, 32),
};
static const u8 repaper_e1190cs021_cs[] = { 0x00, 0x00, 0x00, 0x03,
0xfc, 0x00, 0x00, 0xff };
static const struct drm_display_mode repaper_e2200cs021_mode = {
- TINYDRM_MODE(200, 96, 46, 22),
+ DRM_SIMPLE_MODE(200, 96, 46, 22),
};
static const u8 repaper_e2200cs021_cs[] = { 0x00, 0x00, 0x00, 0x00,
0x01, 0xff, 0xe0, 0x00 };
static const struct drm_display_mode repaper_e2271cs021_mode = {
- TINYDRM_MODE(264, 176, 57, 38),
+ DRM_SIMPLE_MODE(264, 176, 57, 38),
};
static const u8 repaper_e2271cs021_cs[] = { 0x00, 0x00, 0x00, 0x7f,
@@ -893,6 +932,7 @@ static struct drm_driver repaper_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &repaper_fops,
+ .release = repaper_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.name = "repaper",
.desc = "Pervasive Displays RePaper e-ink panels",
@@ -925,11 +965,11 @@ static int repaper_probe(struct spi_device *spi)
const struct spi_device_id *spi_id;
const struct of_device_id *match;
struct device *dev = &spi->dev;
- struct tinydrm_device *tdev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
+ struct drm_device *drm;
int ret;
match = of_match_device(repaper_of_match, dev);
@@ -949,10 +989,21 @@ static int repaper_probe(struct spi_device *spi)
}
}
- epd = devm_kzalloc(dev, sizeof(*epd), GFP_KERNEL);
+ epd = kzalloc(sizeof(*epd), GFP_KERNEL);
if (!epd)
return -ENOMEM;
+ drm = &epd->drm;
+
+ ret = devm_drm_dev_init(dev, drm, &repaper_driver);
+ if (ret) {
+ kfree(epd);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.funcs = &repaper_mode_config_funcs;
+
epd->spi = spi;
epd->panel_on = devm_gpiod_get(dev, "panel-on", GPIOD_OUT_LOW);
@@ -1063,32 +1114,41 @@ static int repaper_probe(struct spi_device *spi)
if (!epd->current_frame)
return -ENOMEM;
- tdev = &epd->tinydrm;
-
- ret = devm_tinydrm_init(dev, tdev, &repaper_driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, &repaper_pipe_funcs,
+ ret = tinydrm_display_pipe_init(drm, &epd->pipe, &repaper_pipe_funcs,
DRM_MODE_CONNECTOR_VIRTUAL,
repaper_formats,
ARRAY_SIZE(repaper_formats), mode, 0);
if (ret)
return ret;
- drm_mode_config_reset(tdev->drm);
- spi_set_drvdata(spi, tdev);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
DRM_DEBUG_DRIVER("SPI speed: %uMHz\n", spi->max_speed_hz / 1000000);
- return devm_tinydrm_register(tdev);
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void repaper_shutdown(struct spi_device *spi)
+static int repaper_remove(struct spi_device *spi)
{
- struct tinydrm_device *tdev = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(tdev);
+static void repaper_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver repaper_spi_driver = {
@@ -1099,6 +1159,7 @@ static struct spi_driver repaper_spi_driver = {
},
.id_table = repaper_id,
.probe = repaper_probe,
+ .remove = repaper_remove,
.shutdown = repaper_shutdown,
};
module_spi_driver(repaper_spi_driver);
diff --git a/drivers/gpu/drm/tinydrm/st7586.c b/drivers/gpu/drm/tinydrm/st7586.c
index 01a8077954b3..36bb16a15f7e 100644
--- a/drivers/gpu/drm/tinydrm/st7586.c
+++ b/drivers/gpu/drm/tinydrm/st7586.c
@@ -17,9 +17,12 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_rect.h>
@@ -75,7 +78,7 @@ static void st7586_xrgb8888_to_gray332(u8 *dst, void *vaddr,
if (!buf)
return;
- tinydrm_xrgb8888_to_gray8(buf, vaddr, fb, clip);
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, clip);
src = buf;
for (y = clip->y1; y < clip->y2; y++) {
@@ -116,14 +119,15 @@ static int st7586_buf_copy(void *dst, struct drm_framebuffer *fb,
static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
- struct tinydrm_device *tdev = fb->dev->dev_private;
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int start, end;
- int ret = 0;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(fb->dev);
+ int start, end, idx, ret = 0;
if (!mipi->enabled)
return;
+ if (!drm_dev_enter(fb->dev, &idx))
+ return;
+
/* 3 pixels per byte, so grow clip to nearest multiple of 3 */
rect->x1 = rounddown(rect->x1, 3);
rect->x2 = roundup(rect->x2, 3);
@@ -151,6 +155,8 @@ static void st7586_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
err_msg:
if (ret)
dev_err_once(fb->dev->dev, "Failed to update display %d\n", ret);
+
+ drm_dev_exit(idx);
}
static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
@@ -175,8 +181,7 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_rect rect = {
.x1 = 0,
@@ -184,14 +189,17 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
.y1 = 0,
.y2 = fb->height,
};
- int ret;
+ int idx, ret;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
mipi_dbi_command(mipi, ST7586_AUTO_READ_CTRL, 0x9f);
mipi_dbi_command(mipi, ST7586_OTP_RW_CTRL, 0x00);
@@ -244,12 +252,20 @@ static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
st7586_fb_dirty(fb, &rect);
mipi_dbi_command(mipi, MIPI_DCS_SET_DISPLAY_ON);
+out_exit:
+ drm_dev_exit(idx);
}
static void st7586_pipe_disable(struct drm_simple_display_pipe *pipe)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+
+ /*
+ * This callback is not protected by drm_dev_enter/exit since we want to
+ * turn off the display on regular driver unload. It's highly unlikely
+ * that the underlying SPI controller is gone should this be called after
+ * unplug.
+ */
DRM_DEBUG_KMS("\n");
@@ -264,46 +280,6 @@ static const u32 st7586_formats[] = {
DRM_FORMAT_XRGB8888,
};
-static int st7586_init(struct device *dev, struct mipi_dbi *mipi,
- const struct drm_simple_display_pipe_funcs *pipe_funcs,
- struct drm_driver *driver, const struct drm_display_mode *mode,
- unsigned int rotation)
-{
- size_t bufsize = (mode->vdisplay + 2) / 3 * mode->hdisplay;
- struct tinydrm_device *tdev = &mipi->tinydrm;
- int ret;
-
- mutex_init(&mipi->cmdlock);
-
- mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
- if (!mipi->tx_buf)
- return -ENOMEM;
-
- ret = devm_tinydrm_init(dev, tdev, driver);
- if (ret)
- return ret;
-
- ret = tinydrm_display_pipe_init(tdev, pipe_funcs,
- DRM_MODE_CONNECTOR_VIRTUAL,
- st7586_formats,
- ARRAY_SIZE(st7586_formats),
- mode, rotation);
- if (ret)
- return ret;
-
- drm_plane_enable_fb_damage_clips(&tdev->pipe.plane);
-
- tdev->drm->mode_config.preferred_depth = 32;
- mipi->rotation = rotation;
-
- drm_mode_config_reset(tdev->drm);
-
- DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
- tdev->drm->mode_config.preferred_depth, rotation);
-
- return 0;
-}
-
static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.enable = st7586_pipe_enable,
.disable = st7586_pipe_disable,
@@ -311,8 +287,14 @@ static const struct drm_simple_display_pipe_funcs st7586_pipe_funcs = {
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
+static const struct drm_mode_config_funcs st7586_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static const struct drm_display_mode st7586_mode = {
- TINYDRM_MODE(178, 128, 37, 27),
+ DRM_SIMPLE_MODE(178, 128, 37, 27),
};
DEFINE_DRM_GEM_CMA_FOPS(st7586_fops);
@@ -321,6 +303,7 @@ static struct drm_driver st7586_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7586_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7586",
@@ -345,15 +328,35 @@ MODULE_DEVICE_TABLE(spi, st7586_id);
static int st7586_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *a0;
u32 rotation = 0;
+ size_t bufsize;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7586_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+ drm->mode_config.preferred_depth = 32;
+ drm->mode_config.funcs = &st7586_mode_config_funcs;
+
+ mutex_init(&mipi->cmdlock);
+
+ bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
+ mipi->tx_buf = devm_kmalloc(dev, bufsize, GFP_KERNEL);
+ if (!mipi->tx_buf)
+ return -ENOMEM;
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -367,6 +370,7 @@ static int st7586_probe(struct spi_device *spi)
}
device_property_read_u32(dev, "rotation", &rotation);
+ mipi->rotation = rotation;
ret = mipi_dbi_spi_init(spi, mipi, a0);
if (ret)
@@ -384,21 +388,44 @@ static int st7586_probe(struct spi_device *spi)
*/
mipi->swap_bytes = true;
- ret = st7586_init(&spi->dev, mipi, &st7586_pipe_funcs, &st7586_driver,
- &st7586_mode, rotation);
+ ret = tinydrm_display_pipe_init(drm, &mipi->pipe, &st7586_pipe_funcs,
+ DRM_MODE_CONNECTOR_VIRTUAL,
+ st7586_formats, ARRAY_SIZE(st7586_formats),
+ &st7586_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_plane_enable_fb_damage_clips(&mipi->pipe.plane);
- return devm_tinydrm_register(&mipi->tinydrm);
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ DRM_DEBUG_KMS("preferred_depth=%u, rotation = %u\n",
+ drm->mode_config.preferred_depth, rotation);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7586_shutdown(struct spi_device *spi)
+static int st7586_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
- tinydrm_shutdown(&mipi->tinydrm);
+static void st7586_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7586_spi_driver = {
@@ -409,6 +436,7 @@ static struct spi_driver st7586_spi_driver = {
},
.id_table = st7586_id,
.probe = st7586_probe,
+ .remove = st7586_remove,
.shutdown = st7586_shutdown,
};
module_spi_driver(st7586_spi_driver);
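
The st7586 hunks above bracket every hardware-access path with drm_dev_enter()/drm_dev_exit() so that register and SPI traffic stops once drm_dev_unplug() has run from remove(). A minimal sketch of that guard pattern follows; the mydrv_fb_dirty() callback is a hypothetical example, not code from this patch:

#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_rect.h>

static void mydrv_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
	int idx;

	/* Returns false once drm_dev_unplug() has been called; skip the I/O then. */
	if (!drm_dev_enter(fb->dev, &idx))
		return;

	/* ... push the damaged rectangle out to the panel here ... */

	drm_dev_exit(idx);
}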
diff --git a/drivers/gpu/drm/tinydrm/st7735r.c b/drivers/gpu/drm/tinydrm/st7735r.c
index 3bab9a9569a6..ce9109e613e0 100644
--- a/drivers/gpu/drm/tinydrm/st7735r.c
+++ b/drivers/gpu/drm/tinydrm/st7735r.c
@@ -14,7 +14,9 @@
#include <linux/spi/spi.h>
#include <video/mipi_display.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/tinydrm/mipi-dbi.h>
@@ -41,16 +43,18 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
{
- struct tinydrm_device *tdev = pipe_to_tinydrm(pipe);
- struct mipi_dbi *mipi = mipi_dbi_from_tinydrm(tdev);
- int ret;
+ struct mipi_dbi *mipi = drm_to_mipi_dbi(pipe->crtc.dev);
+ int ret, idx;
u8 addr_mode;
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
DRM_DEBUG_KMS("\n");
ret = mipi_dbi_poweron_reset(mipi);
if (ret)
- return;
+ goto out_exit;
msleep(150);
@@ -101,6 +105,8 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
msleep(20);
mipi_dbi_enable_flush(mipi, crtc_state, plane_state);
+out_exit:
+ drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
@@ -111,7 +117,7 @@ static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
};
static const struct drm_display_mode jd_t18003_t01_mode = {
- TINYDRM_MODE(128, 160, 28, 35),
+ DRM_SIMPLE_MODE(128, 160, 28, 35),
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -120,6 +126,7 @@ static struct drm_driver st7735r_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
.fops = &st7735r_fops,
+ .release = mipi_dbi_release,
DRM_GEM_CMA_VMAP_DRIVER_OPS,
.debugfs_init = mipi_dbi_debugfs_init,
.name = "st7735r",
@@ -144,15 +151,25 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ struct drm_device *drm;
struct mipi_dbi *mipi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- mipi = devm_kzalloc(dev, sizeof(*mipi), GFP_KERNEL);
+ mipi = kzalloc(sizeof(*mipi), GFP_KERNEL);
if (!mipi)
return -ENOMEM;
+ drm = &mipi->drm;
+ ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
+ if (ret) {
+ kfree(mipi);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
mipi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(mipi->reset)) {
DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
@@ -178,21 +195,36 @@ static int st7735r_probe(struct spi_device *spi)
/* Cannot read from Adafruit 1.8" display via SPI */
mipi->read_commands = NULL;
- ret = mipi_dbi_init(&spi->dev, mipi, &jd_t18003_t01_pipe_funcs,
- &st7735r_driver, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_init(mipi, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
if (ret)
return ret;
- spi_set_drvdata(spi, mipi);
+ drm_mode_config_reset(drm);
- return devm_tinydrm_register(&mipi->tinydrm);
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
}
-static void st7735r_shutdown(struct spi_device *spi)
+static int st7735r_remove(struct spi_device *spi)
{
- struct mipi_dbi *mipi = spi_get_drvdata(spi);
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
- tinydrm_shutdown(&mipi->tinydrm);
+ return 0;
+}
+
+static void st7735r_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
}
static struct spi_driver st7735r_spi_driver = {
@@ -203,6 +235,7 @@ static struct spi_driver st7735r_spi_driver = {
},
.id_table = st7735r_id,
.probe = st7735r_probe,
+ .remove = st7735r_remove,
.shutdown = st7735r_shutdown,
};
module_spi_driver(st7735r_spi_driver);
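
Both SPI drivers above follow the same probe-side shape: the driver structure embeds the drm_device, devm_drm_dev_init() ties its lifetime to the SPI device, and the drm_driver .release callback frees the containing allocation. A condensed sketch of that pattern, assuming placeholder names (mydrv_device, mydrv_driver, mydrv_probe) that are not part of this series:

#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <drm/drm_drv.h>
#include <drm/drm_mode_config.h>

struct mydrv_device {
	struct drm_device drm;		/* embedded, not a pointer */
	/* ... driver private state ... */
};

static void mydrv_release(struct drm_device *drm)
{
	struct mydrv_device *priv = container_of(drm, struct mydrv_device, drm);

	drm_mode_config_cleanup(drm);
	drm_dev_fini(drm);
	kfree(priv);
}

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.release = mydrv_release,
	.name = "mydrv",
};

static int mydrv_probe(struct spi_device *spi)
{
	struct mydrv_device *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ret = devm_drm_dev_init(&spi->dev, &priv->drm, &mydrv_driver);
	if (ret) {
		kfree(priv);	/* .release is not called if init fails */
		return ret;
	}

	drm_mode_config_init(&priv->drm);
	/* ... pipe setup, drm_mode_config_reset(), fbdev setup ... */

	return drm_dev_register(&priv->drm, 0);
}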
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 1a01669b159a..2845fceb2fbd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1626,7 +1626,6 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
- uint64_t file_page_offset,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
@@ -1648,8 +1647,9 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
- 0x10000000);
+ drm_vma_offset_manager_init(&bdev->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index e86a29a1e51f..6dacff49c1cc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -432,6 +432,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
+ if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START))
+ return -EINVAL;
+
bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
if (unlikely(!bo))
return -EINVAL;
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 93860346c426..0075eb9a0b52 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -188,13 +188,11 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
struct ttm_validate_buffer *entry;
struct ttm_buffer_object *bo;
struct ttm_bo_global *glob;
- struct ttm_bo_device *bdev;
if (list_empty(list))
return;
bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
- bdev = bo->bdev;
glob = bo->bdev->glob;
spin_lock(&glob->lru_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 9a0909decb36..8617958b7ae6 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -81,7 +81,7 @@ static void ttm_mem_zone_kobj_release(struct kobject *kobj)
struct ttm_mem_zone *zone =
container_of(kobj, struct ttm_mem_zone, kobj);
- pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+ pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
zone->name, (unsigned long long)zone->used_mem >> 10);
kfree(zone);
}
@@ -448,7 +448,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
#endif
for (i = 0; i < glob->num_zones; ++i) {
zone = glob->zones[i];
- pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+ pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
zone->name, (unsigned long long)zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
@@ -523,7 +523,7 @@ static void ttm_mem_global_free_zone(struct ttm_mem_global *glob,
void ttm_mem_global_free(struct ttm_mem_global *glob,
uint64_t amount)
{
- return ttm_mem_global_free_zone(glob, NULL, amount);
+ return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
}
EXPORT_SYMBOL(ttm_mem_global_free);
@@ -622,10 +622,10 @@ int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
{
/**
* Normal allocations of kernel memory are registered in
- * all zones.
+ * the kernel zone.
*/
- return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
+ return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
}
EXPORT_SYMBOL(ttm_mem_global_alloc);
diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c
index e8723a2412a6..d775d10dbe6a 100644
--- a/drivers/gpu/drm/tve200/tve200_display.c
+++ b/drivers/gpu/drm/tve200/tve200_display.c
@@ -149,7 +149,8 @@ static void tve200_display_enable(struct drm_simple_display_pipe *pipe,
/* Vsync IRQ at start of Vsync at first */
ctrl1 |= TVE200_VSTSTYPE_VSYNC;
- if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ if (connector->display_info.bus_flags &
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
ctrl1 |= TVE200_CTRL_TVCLKP;
if ((mode->hdisplay == 352 && mode->vdisplay == 240) || /* SIF(525) */
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index ff47f890e6ad..312bf324841a 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -48,10 +48,16 @@ static const struct file_operations udl_driver_fops = {
.llseek = noop_llseek,
};
+static void udl_driver_release(struct drm_device *dev)
+{
+ udl_fini(dev);
+ udl_modeset_cleanup(dev);
+ drm_dev_fini(dev);
+ kfree(dev);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
- .load = udl_driver_load,
- .unload = udl_driver_unload,
.release = udl_driver_release,
/* gem hooks */
@@ -75,28 +81,56 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
+static struct udl_device *udl_driver_create(struct usb_interface *interface)
+{
+ struct usb_device *udev = interface_to_usbdev(interface);
+ struct udl_device *udl;
+ int r;
+
+ udl = kzalloc(sizeof(*udl), GFP_KERNEL);
+ if (!udl)
+ return ERR_PTR(-ENOMEM);
+
+ r = drm_dev_init(&udl->drm, &driver, &interface->dev);
+ if (r) {
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ udl->udev = udev;
+ udl->drm.dev_private = udl;
+
+ r = udl_init(udl);
+ if (r) {
+ drm_dev_fini(&udl->drm);
+ kfree(udl);
+ return ERR_PTR(r);
+ }
+
+ usb_set_intfdata(interface, udl);
+ return udl;
+}
+
static int udl_usb_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct drm_device *dev;
int r;
+ struct udl_device *udl;
- dev = drm_dev_alloc(&driver, &interface->dev);
- if (IS_ERR(dev))
- return PTR_ERR(dev);
+ udl = udl_driver_create(interface);
+ if (IS_ERR(udl))
+ return PTR_ERR(udl);
- r = drm_dev_register(dev, (unsigned long)udev);
+ r = drm_dev_register(&udl->drm, 0);
if (r)
goto err_free;
- usb_set_intfdata(interface, dev);
- DRM_INFO("Initialized udl on minor %d\n", dev->primary->index);
+ DRM_INFO("Initialized udl on minor %d\n", udl->drm.primary->index);
return 0;
err_free:
- drm_dev_put(dev);
+ drm_dev_put(&udl->drm);
return r;
}
@@ -108,6 +142,7 @@ static void udl_usb_disconnect(struct usb_interface *interface)
udl_fbdev_unplug(dev);
udl_drop_usb(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
}
/*
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 4ae67d882eae..35c1f33fbc1a 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -50,8 +50,8 @@ struct urb_list {
struct udl_fbdev;
struct udl_device {
+ struct drm_device drm;
struct device *dev;
- struct drm_device *ddev;
struct usb_device *udev;
struct drm_crtc *crtc;
@@ -71,6 +71,8 @@ struct udl_device {
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
};
+#define to_udl(x) container_of(x, struct udl_device, drm)
+
struct udl_gem_object {
struct drm_gem_object base;
struct page **pages;
@@ -102,9 +104,8 @@ struct urb *udl_get_urb(struct drm_device *dev);
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len);
void udl_urb_completion(struct urb *urb);
-int udl_driver_load(struct drm_device *dev, unsigned long flags);
-void udl_driver_unload(struct drm_device *dev);
-void udl_driver_release(struct drm_device *dev);
+int udl_init(struct udl_device *udl);
+void udl_fini(struct drm_device *dev);
int udl_fbdev_init(struct drm_device *dev);
void udl_fbdev_cleanup(struct drm_device *dev);
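
With the drm_device embedded in struct udl_device, code that previously went through dev->dev_private can instead upcast with container_of(), which is what the to_udl() macro above wraps. A trivial usage sketch; the helper name is hypothetical:

static inline struct usb_device *mydrv_to_usb_dev(struct drm_device *dev)
{
	struct udl_device *udl = to_udl(dev);	/* container_of(dev, struct udl_device, drm) */

	return udl->udev;
}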
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dd9ffded223b..b9b67a546d4c 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -32,7 +32,7 @@ module_param(fb_bpp, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
module_param(fb_defio, int, S_IWUSR | S_IRUSR | S_IWGRP | S_IRGRP);
struct udl_fbdev {
- struct drm_fb_helper helper;
+ struct drm_fb_helper helper; /* must be first */
struct udl_framebuffer ufb;
int fb_count;
};
@@ -82,7 +82,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int width, int height)
{
struct drm_device *dev = fb->base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int i, ret;
char *cmd;
cycles_t start_cycles, end_cycles;
@@ -210,10 +210,10 @@ static int udl_fb_open(struct fb_info *info, int user)
{
struct udl_fbdev *ufbdev = info->par;
struct drm_device *dev = ufbdev->ufb.base.dev;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
/* If the USB device is gone, we don't accept new opens */
- if (drm_dev_is_unplugged(udl->ddev))
+ if (drm_dev_is_unplugged(&udl->drm))
return -ENODEV;
ufbdev->fb_count++;
@@ -392,7 +392,6 @@ static int udlfb_create(struct drm_fb_helper *helper,
ret = PTR_ERR(info);
goto out_gfree;
}
- info->par = ufbdev;
ret = udl_framebuffer_init(dev, &ufbdev->ufb, &mode_cmd, obj);
if (ret)
@@ -402,15 +401,12 @@ static int udlfb_create(struct drm_fb_helper *helper,
ufbdev->helper.fb = fb;
- strcpy(info->fix.id, "udldrmfb");
-
info->screen_base = ufbdev->ufb.obj->vmapping;
info->fix.smem_len = size;
info->fix.smem_start = (unsigned long)ufbdev->ufb.obj->vmapping;
info->fbops = &udlfb_ops;
- drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth);
- drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height);
+ drm_fb_helper_fill_info(info, &ufbdev->helper, sizes);
DRM_DEBUG_KMS("allocated %dx%d vmal %p\n",
fb->width, fb->height,
@@ -441,7 +437,7 @@ static void udl_fbdev_destroy(struct drm_device *dev,
int udl_fbdev_init(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int bpp_sel = fb_bpp;
struct udl_fbdev *ufbdev;
int ret;
@@ -480,7 +476,7 @@ free:
void udl_fbdev_cleanup(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
if (!udl->fbdev)
return;
@@ -491,7 +487,7 @@ void udl_fbdev_cleanup(struct drm_device *dev)
void udl_fbdev_unplug(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct udl_fbdev *ufbdev;
if (!udl->fbdev)
return;
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index bb7b58407039..3b3e17652bb2 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -203,7 +203,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
{
struct udl_gem_object *gobj;
struct drm_gem_object *obj;
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
mutex_lock(&udl->gem_lock);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 1f8ef34ade24..6743eaef4594 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -30,7 +30,7 @@
static int udl_parse_vendor_descriptor(struct drm_device *dev,
struct usb_device *usbdev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
char *desc;
char *buf;
char *desc_end;
@@ -166,7 +166,7 @@ void udl_urb_completion(struct urb *urb)
static void udl_free_urb_list(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int count = udl->urbs.count;
struct list_head *node;
struct urb_node *unode;
@@ -199,7 +199,7 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
struct urb *urb;
struct urb_node *unode;
char *buf;
@@ -263,7 +263,7 @@ retry:
struct urb *udl_get_urb(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret = 0;
struct list_head *entry;
struct urb_node *unode;
@@ -296,7 +296,7 @@ error:
int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
int ret;
BUG_ON(len > udl->urbs.size);
@@ -311,20 +311,12 @@ int udl_submit_urb(struct drm_device *dev, struct urb *urb, size_t len)
return ret;
}
-int udl_driver_load(struct drm_device *dev, unsigned long flags)
+int udl_init(struct udl_device *udl)
{
- struct usb_device *udev = (void*)flags;
- struct udl_device *udl;
+ struct drm_device *dev = &udl->drm;
int ret = -ENOMEM;
DRM_DEBUG("\n");
- udl = kzalloc(sizeof(struct udl_device), GFP_KERNEL);
- if (!udl)
- return -ENOMEM;
-
- udl->udev = udev;
- udl->ddev = dev;
- dev->dev_private = udl;
mutex_init(&udl->gem_lock);
@@ -358,7 +350,6 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
- kfree(udl);
DRM_ERROR("%d\n", ret);
return ret;
}
@@ -369,9 +360,9 @@ int udl_drop_usb(struct drm_device *dev)
return 0;
}
-void udl_driver_unload(struct drm_device *dev)
+void udl_fini(struct drm_device *dev)
{
- struct udl_device *udl = dev->dev_private;
+ struct udl_device *udl = to_udl(dev);
drm_kms_helper_poll_fini(dev);
@@ -379,12 +370,4 @@ void udl_driver_unload(struct drm_device *dev)
udl_free_urb_list(dev);
udl_fbdev_cleanup(dev);
- kfree(udl);
-}
-
-void udl_driver_release(struct drm_device *dev)
-{
- udl_modeset_cleanup(dev);
- drm_dev_fini(dev);
- kfree(dev);
}
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 1552bf552c94..75a74c45f109 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -5,6 +5,7 @@ config DRM_V3D
depends on COMMON_CLK
depends on MMU
select DRM_SCHED
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option if you have a system that has a Broadcom
V3D 3.x or newer GPU, such as BCM7268.
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index a08766d39eab..a22b75a3a533 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -25,162 +25,6 @@
#include "v3d_drv.h"
#include "uapi/drm/v3d_drm.h"
-/* Pins the shmem pages, fills in the .pages and .sgt fields of the BO, and maps
- * it for DMA.
- */
-static int
-v3d_bo_get_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
- struct drm_device *dev = obj->dev;
- int npages = obj->size >> PAGE_SHIFT;
- int ret = 0;
-
- mutex_lock(&bo->lock);
- if (bo->pages_refcount++ != 0)
- goto unlock;
-
- if (!obj->import_attach) {
- bo->pages = drm_gem_get_pages(obj);
- if (IS_ERR(bo->pages)) {
- ret = PTR_ERR(bo->pages);
- goto unlock;
- }
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, npages);
- if (IS_ERR(bo->sgt)) {
- ret = PTR_ERR(bo->sgt);
- goto put_pages;
- }
-
- /* Map the pages for use by the GPU. */
- dma_map_sg(dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- } else {
- bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
- if (!bo->pages)
- goto put_pages;
-
- drm_prime_sg_to_page_addr_arrays(bo->sgt, bo->pages,
- NULL, npages);
-
- /* Note that dma-bufs come in mapped. */
- }
-
- mutex_unlock(&bo->lock);
-
- return 0;
-
-put_pages:
- drm_gem_put_pages(obj, bo->pages, true, true);
- bo->pages = NULL;
-unlock:
- bo->pages_refcount--;
- mutex_unlock(&bo->lock);
- return ret;
-}
-
-static void
-v3d_bo_put_pages(struct v3d_bo *bo)
-{
- struct drm_gem_object *obj = &bo->base;
-
- mutex_lock(&bo->lock);
- if (--bo->pages_refcount == 0) {
- if (!obj->import_attach) {
- dma_unmap_sg(obj->dev->dev, bo->sgt->sgl,
- bo->sgt->nents, DMA_BIDIRECTIONAL);
- sg_free_table(bo->sgt);
- kfree(bo->sgt);
- drm_gem_put_pages(obj, bo->pages, true, true);
- } else {
- kfree(bo->pages);
- }
- }
- mutex_unlock(&bo->lock);
-}
-
-static struct v3d_bo *v3d_bo_create_struct(struct drm_device *dev,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- size_t size = roundup(unaligned_size, PAGE_SIZE);
- int ret;
-
- if (size == 0)
- return ERR_PTR(-EINVAL);
-
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
- return ERR_PTR(-ENOMEM);
- obj = &bo->base;
-
- INIT_LIST_HEAD(&bo->vmas);
- INIT_LIST_HEAD(&bo->unref_head);
- mutex_init(&bo->lock);
-
- ret = drm_gem_object_init(dev, obj, size);
- if (ret)
- goto free_bo;
-
- spin_lock(&v3d->mm_lock);
- ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
- obj->size >> PAGE_SHIFT,
- GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
- spin_unlock(&v3d->mm_lock);
- if (ret)
- goto free_obj;
-
- return bo;
-
-free_obj:
- drm_gem_object_release(obj);
-free_bo:
- kfree(bo);
- return ERR_PTR(ret);
-}
-
-struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
- size_t unaligned_size)
-{
- struct v3d_dev *v3d = to_v3d_dev(dev);
- struct drm_gem_object *obj;
- struct v3d_bo *bo;
- int ret;
-
- bo = v3d_bo_create_struct(dev, unaligned_size);
- if (IS_ERR(bo))
- return bo;
- obj = &bo->base;
-
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
-
- ret = v3d_bo_get_pages(bo);
- if (ret)
- goto free_mm;
-
- v3d_mmu_insert_ptes(bo);
-
- mutex_lock(&v3d->bo_lock);
- v3d->bo_stats.num_allocated++;
- v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
- mutex_unlock(&v3d->bo_lock);
-
- return bo;
-
-free_mm:
- spin_lock(&v3d->mm_lock);
- drm_mm_remove_node(&bo->node);
- spin_unlock(&v3d->mm_lock);
-
- drm_gem_object_release(obj);
- kfree(bo);
- return ERR_PTR(ret);
-}
-
 /* Called by the DRM core on the last userspace/kernel unreference of the
* BO.
*/
@@ -189,92 +33,116 @@ void v3d_free_object(struct drm_gem_object *obj)
struct v3d_dev *v3d = to_v3d_dev(obj->dev);
struct v3d_bo *bo = to_v3d_bo(obj);
+ v3d_mmu_remove_ptes(bo);
+
mutex_lock(&v3d->bo_lock);
v3d->bo_stats.num_allocated--;
v3d->bo_stats.pages_allocated -= obj->size >> PAGE_SHIFT;
mutex_unlock(&v3d->bo_lock);
- reservation_object_fini(&bo->_resv);
-
- v3d_bo_put_pages(bo);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, bo->sgt);
-
- v3d_mmu_remove_ptes(bo);
spin_lock(&v3d->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&v3d->mm_lock);
- mutex_destroy(&bo->lock);
+ /* GPU execution may have dirtied any pages in the BO. */
+ bo->base.pages_mark_dirty_on_put = true;
- drm_gem_object_release(obj);
- kfree(bo);
+ drm_gem_shmem_free_object(obj);
}
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj)
+static const struct drm_gem_object_funcs v3d_gem_funcs = {
+ .free = v3d_free_object,
+ .print_info = drm_gem_shmem_print_info,
+ .pin = drm_gem_shmem_pin,
+ .unpin = drm_gem_shmem_unpin,
+ .get_sg_table = drm_gem_shmem_get_sg_table,
+ .vmap = drm_gem_shmem_vmap,
+ .vunmap = drm_gem_shmem_vunmap,
+ .vm_ops = &drm_gem_shmem_vm_ops,
+};
+
+/* gem_create_object function for allocating a BO struct and doing
+ * early setup.
+ */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
{
- struct v3d_bo *bo = to_v3d_bo(obj);
+ struct v3d_bo *bo;
+ struct drm_gem_object *obj;
- return bo->resv;
-}
+ if (size == 0)
+ return NULL;
-static void
-v3d_set_mmap_vma_flags(struct vm_area_struct *vma)
-{
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-}
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return NULL;
+ obj = &bo->base.base;
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_gem_object *obj = vma->vm_private_data;
- struct v3d_bo *bo = to_v3d_bo(obj);
- pfn_t pfn;
- pgoff_t pgoff;
+ obj->funcs = &v3d_gem_funcs;
- /* We don't use vmf->pgoff since that has the fake offset: */
- pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
- pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
+ INIT_LIST_HEAD(&bo->unref_head);
- return vmf_insert_mixed(vma, vmf->address, pfn);
+ return &bo->base.base;
}
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma)
+static int
+v3d_bo_create_finish(struct drm_gem_object *obj)
{
+ struct v3d_dev *v3d = to_v3d_dev(obj->dev);
+ struct v3d_bo *bo = to_v3d_bo(obj);
+ struct sg_table *sgt;
int ret;
- ret = drm_gem_mmap(filp, vma);
+ /* So far we pin the BO in the MMU for its lifetime, so use
+ * shmem's helper for getting a lifetime sgt.
+ */
+ sgt = drm_gem_shmem_get_pages_sgt(&bo->base.base);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ spin_lock(&v3d->mm_lock);
+ /* Allocate the object's space in the GPU's page tables.
+ * Inserting PTEs will happen later, but the offset is for the
+ * lifetime of the BO.
+ */
+ ret = drm_mm_insert_node_generic(&v3d->mm, &bo->node,
+ obj->size >> PAGE_SHIFT,
+ GMP_GRANULARITY >> PAGE_SHIFT, 0, 0);
+ spin_unlock(&v3d->mm_lock);
if (ret)
return ret;
- v3d_set_mmap_vma_flags(vma);
+ /* Track stats for /debug/dri/n/bo_stats. */
+ mutex_lock(&v3d->bo_lock);
+ v3d->bo_stats.num_allocated++;
+ v3d->bo_stats.pages_allocated += obj->size >> PAGE_SHIFT;
+ mutex_unlock(&v3d->bo_lock);
- return ret;
+ v3d_mmu_insert_ptes(bo);
+
+ return 0;
}
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
+ size_t unaligned_size)
{
+ struct drm_gem_shmem_object *shmem_obj;
+ struct v3d_bo *bo;
int ret;
- ret = drm_gem_mmap_obj(obj, obj->size, vma);
- if (ret < 0)
- return ret;
-
- v3d_set_mmap_vma_flags(vma);
+ shmem_obj = drm_gem_shmem_create(dev, unaligned_size);
+ if (IS_ERR(shmem_obj))
+ return ERR_CAST(shmem_obj);
+ bo = to_v3d_bo(&shmem_obj->base);
- return 0;
-}
+ ret = v3d_bo_create_finish(&shmem_obj->base);
+ if (ret)
+ goto free_obj;
-struct sg_table *
-v3d_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct v3d_bo *bo = to_v3d_bo(obj);
- int npages = obj->size >> PAGE_SHIFT;
+ return bo;
- return drm_prime_pages_to_sg(bo->pages, npages);
+free_obj:
+ drm_gem_shmem_free_object(&shmem_obj->base);
+ return ERR_PTR(ret);
}
struct drm_gem_object *
@@ -283,20 +151,17 @@ v3d_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct v3d_bo *bo;
-
- bo = v3d_bo_create_struct(dev, attach->dmabuf->size);
- if (IS_ERR(bo))
- return ERR_CAST(bo);
- obj = &bo->base;
-
- bo->resv = attach->dmabuf->resv;
+ int ret;
- bo->sgt = sgt;
- obj->import_attach = attach;
- v3d_bo_get_pages(bo);
+ obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
+ if (IS_ERR(obj))
+ return obj;
- v3d_mmu_insert_ptes(bo);
+ ret = v3d_bo_create_finish(obj);
+ if (ret) {
+ drm_gem_shmem_free_object(obj);
+ return ERR_PTR(ret);
+ }
return obj;
}
@@ -319,8 +184,8 @@ int v3d_create_bo_ioctl(struct drm_device *dev, void *data,
args->offset = bo->node.start << PAGE_SHIFT;
- ret = drm_gem_handle_create(file_priv, &bo->base, &args->handle);
- drm_gem_object_put_unlocked(&bo->base);
+ ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
+ drm_gem_object_put_unlocked(&bo->base.base);
return ret;
}
@@ -330,7 +195,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
{
struct drm_v3d_mmap_bo *args = data;
struct drm_gem_object *gem_obj;
- int ret;
if (args->flags != 0) {
DRM_INFO("unknown mmap_bo flags: %d\n", args->flags);
@@ -343,12 +207,10 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
- ret = drm_gem_create_mmap_offset(gem_obj);
- if (ret == 0)
- args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
+ args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_put_unlocked(gem_obj);
- return ret;
+ return 0;
}
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
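
After this rewrite the driver's BO is a thin wrapper around drm_gem_shmem_object: page pinning, sg-table setup and mmap all come from the shmem helpers, and only the drm_mm node for the GPU virtual address range stays driver-specific. A skeleton of that layout for a hypothetical driver (mybo, mydev_create_object and mydev_gem_funcs are placeholder names, not part of this patch):

#include <linux/slab.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>

static const struct drm_gem_object_funcs mydev_gem_funcs;	/* would mirror v3d_gem_funcs above */

struct mybo {
	struct drm_gem_shmem_object base;	/* pages/sgt managed by the shmem helper */
	struct drm_mm_node node;		/* driver-owned GPU VA allocation */
};

/* .gem_create_object hook: drm_gem_shmem_create() calls this so the driver
 * can allocate its larger wrapper struct before the helper takes over.
 */
static struct drm_gem_object *mydev_create_object(struct drm_device *dev,
						  size_t size)
{
	struct mybo *bo = kzalloc(sizeof(*bo), GFP_KERNEL);

	if (!bo)
		return NULL;

	bo->base.base.funcs = &mydev_gem_funcs;

	return &bo->base.base;
}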
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index eb2b2d2f8553..a24af2d2f574 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -187,6 +187,11 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
uint32_t cycles;
int core = 0;
int measure_ms = 1000;
+ int ret;
+
+ ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (v3d->ver >= 40) {
V3D_CORE_WRITE(core, V3D_V4_PCTR_0_SRC_0_3,
@@ -210,6 +215,9 @@ static int v3d_measure_clock(struct seq_file *m, void *unused)
cycles / (measure_ms * 1000),
(cycles / (measure_ms * 100)) % 10);
+ pm_runtime_mark_last_busy(v3d->dev);
+ pm_runtime_put_autosuspend(v3d->dev);
+
return 0;
}
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index f0afcec72c34..a06b05f714a5 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -7,9 +7,9 @@
* This driver supports the Broadcom V3D 3.3 and 4.1 OpenGL ES GPUs.
* For V3D 2.x support, see the VC4 driver.
*
- * Currently only single-core rendering using the binner and renderer
- * is supported. The TFU (texture formatting unit) and V3D 4.x's CSD
- * (compute shader dispatch) are not yet supported.
+ * Currently only single-core rendering using the binner and renderer,
+ * along with TFU (texture formatting unit) rendering, is supported.
+ * V3D 4.x's CSD (compute shader dispatch) is not yet supported.
*/
#include <linux/clk.h>
@@ -19,6 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
@@ -101,6 +102,8 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = pm_runtime_get_sync(v3d->dev);
+ if (ret < 0)
+ return ret;
if (args->param >= DRM_V3D_PARAM_V3D_CORE0_IDENT0 &&
args->param <= DRM_V3D_PARAM_V3D_CORE0_IDENT2) {
args->value = V3D_CORE_READ(0, offset);
@@ -160,17 +163,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
kfree(v3d_priv);
}
-static const struct file_operations v3d_drm_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = v3d_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .compat_ioctl = drm_compat_ioctl,
- .llseek = noop_llseek,
-};
+DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
* protection between clients. Note that render nodes would be be
@@ -188,12 +181,6 @@ static const struct drm_ioctl_desc v3d_drm_ioctls[] = {
DRM_IOCTL_DEF_DRV(V3D_SUBMIT_TFU, v3d_submit_tfu_ioctl, DRM_RENDER_ALLOW | DRM_AUTH),
};
-static const struct vm_operations_struct v3d_vm_ops = {
- .fault = v3d_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static struct drm_driver v3d_drm_driver = {
.driver_features = (DRIVER_GEM |
DRIVER_RENDER |
@@ -207,17 +194,11 @@ static struct drm_driver v3d_drm_driver = {
.debugfs_init = v3d_debugfs_init,
#endif
- .gem_free_object_unlocked = v3d_free_object,
- .gem_vm_ops = &v3d_vm_ops,
-
+ .gem_create_object = v3d_create_object,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = drm_gem_prime_import,
- .gem_prime_export = drm_gem_prime_export,
- .gem_prime_res_obj = v3d_prime_res_obj,
- .gem_prime_get_sg_table = v3d_prime_get_sg_table,
.gem_prime_import_sg_table = v3d_prime_import_sg_table,
- .gem_prime_mmap = v3d_prime_mmap,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.ioctls = v3d_drm_ioctls,
.num_ioctls = ARRAY_SIZE(v3d_drm_ioctls),
@@ -265,10 +246,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->pdev = pdev;
drm = &v3d->drm;
- ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
- if (ret)
- goto dev_free;
-
ret = map_regs(v3d, &v3d->hub_regs, "hub");
if (ret)
goto dev_free;
@@ -283,6 +260,22 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES);
WARN_ON(v3d->cores > 1); /* multicore not yet implemented */
+ v3d->reset = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(v3d->reset)) {
+ ret = PTR_ERR(v3d->reset);
+
+ if (ret == -EPROBE_DEFER)
+ goto dev_free;
+
+ v3d->reset = NULL;
+ ret = map_regs(v3d, &v3d->bridge_regs, "bridge");
+ if (ret) {
+ dev_err(dev,
+ "Failed to get reset control or bridge regs\n");
+ goto dev_free;
+ }
+ }
+
if (v3d->ver < 41) {
ret = map_regs(v3d, &v3d->gca_regs, "gca");
if (ret)
@@ -312,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
if (ret)
goto dev_destroy;
- v3d_irq_init(v3d);
+ ret = v3d_irq_init(v3d);
+ if (ret)
+ goto gem_destroy;
ret = drm_dev_register(drm, 0);
if (ret)
- goto gem_destroy;
+ goto irq_disable;
return 0;
+irq_disable:
+ v3d_irq_disable(v3d);
gem_destroy:
v3d_gem_destroy(drm);
dev_destroy:
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index fdda3037f7af..e9d4a2fdcf44 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2015-2018 Broadcom */
-#include <linux/reservation.h>
#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/gpu_scheduler.h>
#include "uapi/drm/v3d_drm.h"
@@ -34,6 +34,7 @@ struct v3d_dev {
* and revision.
*/
int ver;
+ bool single_irq_line;
struct device *dev;
struct platform_device *pdev;
@@ -42,6 +43,7 @@ struct v3d_dev {
void __iomem *bridge_regs;
void __iomem *gca_regs;
struct clk *clk;
+ struct reset_control *reset;
/* Virtual and DMA addresses of the single shared page table. */
volatile u32 *pt;
@@ -109,34 +111,15 @@ struct v3d_file_priv {
struct drm_sched_entity sched_entity[V3D_MAX_QUEUES];
};
-/* Tracks a mapping of a BO into a per-fd address space */
-struct v3d_vma {
- struct v3d_page_table *pt;
- struct list_head list; /* entry in v3d_bo.vmas */
-};
-
struct v3d_bo {
- struct drm_gem_object base;
-
- struct mutex lock;
+ struct drm_gem_shmem_object base;
struct drm_mm_node node;
- u32 pages_refcount;
- struct page **pages;
- struct sg_table *sgt;
- void *vaddr;
-
- struct list_head vmas; /* list of v3d_vma */
-
/* List entry for the BO's position in
* v3d_exec_info->unref_list
*/
struct list_head unref_head;
-
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
};
static inline struct v3d_bo *
@@ -180,7 +163,7 @@ struct v3d_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
/* GPU virtual addresses of the start/end of the CL job. */
u32 start, end;
@@ -227,7 +210,7 @@ struct v3d_tfu_job {
struct dma_fence *in_fence;
/* v3d fence to be signaled by IRQ handler when the job is complete. */
- struct dma_fence *done_fence;
+ struct dma_fence *irq_fence;
struct v3d_dev *v3d;
@@ -270,6 +253,7 @@ static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
}
/* v3d_bo.c */
+struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size);
void v3d_free_object(struct drm_gem_object *gem_obj);
struct v3d_bo *v3d_bo_create(struct drm_device *dev, struct drm_file *file_priv,
size_t size);
@@ -279,11 +263,6 @@ int v3d_mmap_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_get_bo_offset_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-vm_fault_t v3d_gem_fault(struct vm_fault *vmf);
-int v3d_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *v3d_prime_res_obj(struct drm_gem_object *obj);
-int v3d_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
-struct sg_table *v3d_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *v3d_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
@@ -310,7 +289,7 @@ void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
/* v3d_irq.c */
-void v3d_irq_init(struct v3d_dev *v3d);
+int v3d_irq_init(struct v3d_dev *v3d);
void v3d_irq_enable(struct v3d_dev *v3d);
void v3d_irq_disable(struct v3d_dev *v3d);
void v3d_irq_reset(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 803f31467ec1..93ff8fcbe475 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/sched/signal.h>
@@ -24,7 +25,8 @@ v3d_init_core(struct v3d_dev *v3d, int core)
* type. If you want the default behavior, you can still put
* "2" in the indirect texture state's output_type field.
*/
- V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
+ if (v3d->ver < 40)
+ V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);
/* Whenever we flush the L2T cache, we always want to flush
* the whole thing.
@@ -69,7 +71,7 @@ v3d_idle_gca(struct v3d_dev *v3d)
}
static void
-v3d_reset_v3d(struct v3d_dev *v3d)
+v3d_reset_by_bridge(struct v3d_dev *v3d)
{
int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);
@@ -89,6 +91,15 @@ v3d_reset_v3d(struct v3d_dev *v3d)
V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
}
+}
+
+static void
+v3d_reset_v3d(struct v3d_dev *v3d)
+{
+ if (v3d->reset)
+ reset_control_reset(v3d->reset);
+ else
+ v3d_reset_by_bridge(v3d);
v3d_init_hw_state(v3d);
}
@@ -190,7 +201,8 @@ v3d_attach_object_fences(struct v3d_bo **bos, int bo_count,
for (i = 0; i < bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
- reservation_object_add_excl_fence(bos[i]->resv, fence);
+ reservation_object_add_excl_fence(bos[i]->base.base.resv,
+ fence);
}
}
@@ -199,12 +211,8 @@ v3d_unlock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int i;
-
- for (i = 0; i < bo_count; i++)
- ww_mutex_unlock(&bos[i]->resv->lock);
-
- ww_acquire_fini(acquire_ctx);
+ drm_gem_unlock_reservations((struct drm_gem_object **)bos, bo_count,
+ acquire_ctx);
}
/* Takes the reservation lock on all the BOs being referenced, so that
@@ -219,58 +227,19 @@ v3d_lock_bo_reservations(struct v3d_bo **bos,
int bo_count,
struct ww_acquire_ctx *acquire_ctx)
{
- int contended_lock = -1;
int i, ret;
- ww_acquire_init(acquire_ctx, &reservation_ww_class);
-
-retry:
- if (contended_lock != -1) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
- acquire_ctx);
- if (ret) {
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- for (i = 0; i < bo_count; i++) {
- if (i == contended_lock)
- continue;
-
- ret = ww_mutex_lock_interruptible(&bos[i]->resv->lock,
- acquire_ctx);
- if (ret) {
- int j;
-
- for (j = 0; j < i; j++)
- ww_mutex_unlock(&bos[j]->resv->lock);
-
- if (contended_lock != -1 && contended_lock >= i) {
- struct v3d_bo *bo = bos[contended_lock];
-
- ww_mutex_unlock(&bo->resv->lock);
- }
-
- if (ret == -EDEADLK) {
- contended_lock = i;
- goto retry;
- }
-
- ww_acquire_done(acquire_ctx);
- return ret;
- }
- }
-
- ww_acquire_done(acquire_ctx);
+ ret = drm_gem_lock_reservations((struct drm_gem_object **)bos,
+ bo_count, acquire_ctx);
+ if (ret)
+ return ret;
/* Reserve space for our shared (read-only) fence references,
* before we commit the CL to the hardware.
*/
for (i = 0; i < bo_count; i++) {
- ret = reservation_object_reserve_shared(bos[i]->resv, 1);
+ ret = reservation_object_reserve_shared(bos[i]->base.base.resv,
+ 1);
if (ret) {
v3d_unlock_bo_reservations(bos, bo_count,
acquire_ctx);
@@ -371,18 +340,18 @@ v3d_exec_cleanup(struct kref *ref)
dma_fence_put(exec->bin.in_fence);
dma_fence_put(exec->render.in_fence);
- dma_fence_put(exec->bin.done_fence);
- dma_fence_put(exec->render.done_fence);
+ dma_fence_put(exec->bin.irq_fence);
+ dma_fence_put(exec->render.irq_fence);
dma_fence_put(exec->bin_done_fence);
dma_fence_put(exec->render_done_fence);
for (i = 0; i < exec->bo_count; i++)
- drm_gem_object_put_unlocked(&exec->bo[i]->base);
+ drm_gem_object_put_unlocked(&exec->bo[i]->base.base);
kvfree(exec->bo);
list_for_each_entry_safe(bo, save, &exec->unref_list, unref_head) {
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(&bo->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -405,11 +374,11 @@ v3d_tfu_job_cleanup(struct kref *ref)
unsigned int i;
dma_fence_put(job->in_fence);
- dma_fence_put(job->done_fence);
+ dma_fence_put(job->irq_fence);
for (i = 0; i < ARRAY_SIZE(job->bo); i++) {
if (job->bo[i])
- drm_gem_object_put_unlocked(&job->bo[i]->base);
+ drm_gem_object_put_unlocked(&job->bo[i]->base.base);
}
pm_runtime_mark_last_busy(v3d->dev);
@@ -429,8 +398,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
{
int ret;
struct drm_v3d_wait_bo *args = data;
- struct drm_gem_object *gem_obj;
- struct v3d_bo *bo;
ktime_t start = ktime_get();
u64 delta_ns;
unsigned long timeout_jiffies =
@@ -439,21 +406,8 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
- gem_obj = drm_gem_object_lookup(file_priv, args->handle);
- if (!gem_obj) {
- DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle);
- return -EINVAL;
- }
- bo = to_v3d_bo(gem_obj);
-
- ret = reservation_object_wait_timeout_rcu(bo->resv,
- true, true,
- timeout_jiffies);
-
- if (ret == 0)
- ret = -ETIME;
- else if (ret > 0)
- ret = 0;
+ ret = drm_gem_reservation_object_wait(file_priv, args->handle,
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -468,8 +422,6 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
if (ret == -ETIME && args->timeout_ns)
ret = -EAGAIN;
- drm_gem_object_put_unlocked(gem_obj);
-
return ret;
}
diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
index 69338da70ddc..aa0a180ae700 100644
--- a/drivers/gpu/drm/v3d/v3d_irq.c
+++ b/drivers/gpu/drm/v3d/v3d_irq.c
@@ -27,6 +27,9 @@
V3D_HUB_INT_MMU_CAP | \
V3D_HUB_INT_TFUC))
+static irqreturn_t
+v3d_hub_irq(int irq, void *arg);
+
static void
v3d_overflow_mem_work(struct work_struct *work)
{
@@ -34,12 +37,14 @@ v3d_overflow_mem_work(struct work_struct *work)
container_of(work, struct v3d_dev, overflow_mem_work);
struct drm_device *dev = &v3d->drm;
struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024);
+ struct drm_gem_object *obj;
unsigned long irqflags;
if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
+ obj = &bo->base.base;
/* We lost a race, and our work task came in after the bin job
* completed and exited. This can happen because the HW
@@ -56,15 +61,15 @@ v3d_overflow_mem_work(struct work_struct *work)
goto out;
}
- drm_gem_object_get(&bo->base);
+ drm_gem_object_get(obj);
list_add_tail(&bo->unref_head, &v3d->bin_job->unref_list);
spin_unlock_irqrestore(&v3d->job_lock, irqflags);
V3D_CORE_WRITE(0, V3D_PTB_BPOA, bo->node.start << PAGE_SHIFT);
- V3D_CORE_WRITE(0, V3D_PTB_BPOS, bo->base.size);
+ V3D_CORE_WRITE(0, V3D_PTB_BPOS, obj->size);
out:
- drm_gem_object_put_unlocked(&bo->base);
+ drm_gem_object_put_unlocked(obj);
}
static irqreturn_t
@@ -82,7 +87,8 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_OUTOMEM) {
/* Note that the OOM status is edge signaled, so the
 * interrupt won't happen again until we actually
- * add more memory.
+ * add more memory. Also, as of V3D 4.1, FLDONE won't
+ * be reported until any OOM state has been cleared.
*/
schedule_work(&v3d->overflow_mem_work);
status = IRQ_HANDLED;
@@ -90,7 +96,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FLDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->bin_job->bin.done_fence);
+ to_v3d_fence(v3d->bin_job->bin.irq_fence);
trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -99,7 +105,7 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_FRDONE) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->render_job->render.done_fence);
+ to_v3d_fence(v3d->render_job->render.irq_fence);
trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -112,6 +118,12 @@ v3d_irq(int irq, void *arg)
if (intsts & V3D_INT_GMPV)
dev_err(v3d->dev, "GMP violation\n");
+ /* V3D 4.2 wires the hub and core IRQs together, so if we
+ * didn't see the common one then check the hub for MMU IRQs.
+ */
+ if (v3d->single_irq_line && status == IRQ_NONE)
+ return v3d_hub_irq(irq, arg);
+
return status;
}
@@ -129,7 +141,7 @@ v3d_hub_irq(int irq, void *arg)
if (intsts & V3D_HUB_INT_TFUC) {
struct v3d_fence *fence =
- to_v3d_fence(v3d->tfu_job->done_fence);
+ to_v3d_fence(v3d->tfu_job->irq_fence);
trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
dma_fence_signal(&fence->base);
@@ -156,10 +168,10 @@ v3d_hub_irq(int irq, void *arg)
return status;
}
-void
+int
v3d_irq_init(struct v3d_dev *v3d)
{
- int ret, core;
+ int irq1, ret, core;
INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work);
@@ -170,16 +182,37 @@ v3d_irq_init(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS);
V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
- v3d_hub_irq, IRQF_SHARED,
- "v3d_hub", v3d);
- ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
- v3d_irq, IRQF_SHARED,
- "v3d_core0", v3d);
- if (ret)
- dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ irq1 = platform_get_irq(v3d->pdev, 1);
+ if (irq1 == -EPROBE_DEFER)
+ return irq1;
+ if (irq1 > 0) {
+ ret = devm_request_irq(v3d->dev, irq1,
+ v3d_irq, IRQF_SHARED,
+ "v3d_core0", v3d);
+ if (ret)
+ goto fail;
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_hub_irq, IRQF_SHARED,
+ "v3d_hub", v3d);
+ if (ret)
+ goto fail;
+ } else {
+ v3d->single_irq_line = true;
+
+ ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
+ v3d_irq, IRQF_SHARED,
+ "v3d", v3d);
+ if (ret)
+ goto fail;
+ }
v3d_irq_enable(v3d);
+ return 0;
+
+fail:
+ if (ret != -EPROBE_DEFER)
+ dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
+ return ret;
}
void
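
The reworked v3d_irq_init() above treats the second interrupt as optional: -EPROBE_DEFER is propagated, a present IRQ gets its own core handler, and a missing one switches to a single combined handler that also chains into the hub handler. A condensed sketch of that decision; mydev_irq_init() and the two handlers are assumed names, not part of the patch:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t mydev_core_irq(int irq, void *arg);		/* placeholder handlers */
static irqreturn_t mydev_combined_irq(int irq, void *arg);

static int mydev_irq_init(struct platform_device *pdev, void *priv)
{
	int irq1 = platform_get_irq(pdev, 1);

	if (irq1 == -EPROBE_DEFER)
		return irq1;		/* irqchip not ready yet, retry the probe */

	if (irq1 > 0)			/* dedicated core interrupt line */
		return devm_request_irq(&pdev->dev, irq1, mydev_core_irq,
					IRQF_SHARED, "mydev_core", priv);

	/* only one line wired up: the combined handler must also poll the hub */
	return devm_request_irq(&pdev->dev, platform_get_irq(pdev, 0),
				mydev_combined_irq, IRQF_SHARED, "mydev", priv);
}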
diff --git a/drivers/gpu/drm/v3d/v3d_mmu.c b/drivers/gpu/drm/v3d/v3d_mmu.c
index b00f97c31b70..7a21f1787ab1 100644
--- a/drivers/gpu/drm/v3d/v3d_mmu.c
+++ b/drivers/gpu/drm/v3d/v3d_mmu.c
@@ -83,13 +83,14 @@ int v3d_mmu_set_page_table(struct v3d_dev *v3d)
void v3d_mmu_insert_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
+ struct drm_gem_shmem_object *shmem_obj = &bo->base;
+ struct v3d_dev *v3d = to_v3d_dev(shmem_obj->base.dev);
u32 page = bo->node.start;
u32 page_prot = V3D_PTE_WRITEABLE | V3D_PTE_VALID;
unsigned int count;
struct scatterlist *sgl;
- for_each_sg(bo->sgt->sgl, sgl, bo->sgt->nents, count) {
+ for_each_sg(shmem_obj->sgt->sgl, sgl, shmem_obj->sgt->nents, count) {
u32 page_address = sg_dma_address(sgl) >> V3D_MMU_PAGE_SHIFT;
u32 pte = page_prot | page_address;
u32 i;
@@ -102,7 +103,7 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
}
WARN_ON_ONCE(page - bo->node.start !=
- bo->base.size >> V3D_MMU_PAGE_SHIFT);
+ shmem_obj->base.size >> V3D_MMU_PAGE_SHIFT);
if (v3d_mmu_flush_all(v3d))
dev_err(v3d->dev, "MMU flush timeout\n");
@@ -110,8 +111,8 @@ void v3d_mmu_insert_ptes(struct v3d_bo *bo)
void v3d_mmu_remove_ptes(struct v3d_bo *bo)
{
- struct v3d_dev *v3d = to_v3d_dev(bo->base.dev);
- u32 npages = bo->base.size >> V3D_MMU_PAGE_SHIFT;
+ struct v3d_dev *v3d = to_v3d_dev(bo->base.base.dev);
+ u32 npages = bo->base.base.size >> V3D_MMU_PAGE_SHIFT;
u32 page;
for (page = bo->node.start; page < bo->node.start + npages; page++)
diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h
index 6ccdee9d47bd..8e88af237610 100644
--- a/drivers/gpu/drm/v3d/v3d_regs.h
+++ b/drivers/gpu/drm/v3d/v3d_regs.h
@@ -216,6 +216,8 @@
# define V3D_IDENT2_BCG_INT BIT(28)
#define V3D_CTL_MISCCFG 0x00018
+# define V3D_CTL_MISCCFG_QRMAXCNT_MASK V3D_MASK(3, 1)
+# define V3D_CTL_MISCCFG_QRMAXCNT_SHIFT 1
# define V3D_MISCCFG_OVRTMUOUT BIT(0)
#define V3D_CTL_L2CACTL 0x00020
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 4704b2df3688..e740f3b99aa5 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -156,9 +156,9 @@ static struct dma_fence *v3d_job_run(struct drm_sched_job *sched_job)
if (IS_ERR(fence))
return NULL;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_cl(dev, q == V3D_RENDER, to_v3d_fence(fence)->seqno,
job->start, job->end);
@@ -199,9 +199,9 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job)
return NULL;
v3d->tfu_job = job;
- if (job->done_fence)
- dma_fence_put(job->done_fence);
- job->done_fence = dma_fence_get(fence);
+ if (job->irq_fence)
+ dma_fence_put(job->irq_fence);
+ job->irq_fence = dma_fence_get(fence);
trace_v3d_submit_tfu(dev, to_v3d_fence(fence)->seqno);
@@ -231,20 +231,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
mutex_lock(&v3d->reset_lock);
/* block scheduler */
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
- struct drm_gpu_scheduler *sched = &v3d->queue[q].sched;
-
- drm_sched_stop(sched);
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
+ drm_sched_stop(&v3d->queue[q].sched);
- if(sched_job)
- drm_sched_increase_karma(sched_job);
- }
+ if (sched_job)
+ drm_sched_increase_karma(sched_job);
/* get the GPU back into the init state */
v3d_reset(v3d);
for (q = 0; q < V3D_MAX_QUEUES; q++)
- drm_sched_resubmit_jobs(sched_job->sched);
+ drm_sched_resubmit_jobs(&v3d->queue[q].sched);
/* Unblock schedulers and restart their jobs. */
for (q = 0; q < V3D_MAX_QUEUES; q++) {
diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig
new file mode 100644
index 000000000000..d6ab955c0768
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+config DRM_VBOXVIDEO
+ tristate "Virtual Box Graphics Card"
+ depends on DRM && X86 && PCI
+ select DRM_KMS_HELPER
+ select DRM_TTM
+ select GENERIC_ALLOCATOR
+ help
+ This is a KMS driver for the virtual Graphics Card used in
+ Virtual Box virtual machines.
+
+ Although it is possible to build this driver into the kernel,
+ it is advised to build it as a module, so that it can
+ be updated independently of the kernel. Select M to build this
+ driver as a module and add support for these devices via drm/kms
+ interfaces.
diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile
new file mode 100644
index 000000000000..1224f313af0c
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \
+ vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \
+ vbox_mode.o vbox_prime.o vbox_ttm.o
+
+obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
new file mode 100644
index 000000000000..361d3193258e
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
@@ -0,0 +1,207 @@
+// SPDX-License-Identifier: MIT
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#include <linux/vbox_err.h>
+#include "vbox_drv.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+#include "hgsmi_ch_setup.h"
+
+/**
+ * Inform the host of the location of the host flags in VRAM via an HGSMI cmd.
+ * Return: 0 or negative errno value.
+ * @ctx: The context of the guest heap to use.
+ * @location: The offset chosen for the flags within guest VRAM.
+ */
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location)
+{
+ struct hgsmi_buffer_location *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI,
+ HGSMI_CC_HOST_FLAGS_LOCATION);
+ if (!p)
+ return -ENOMEM;
+
+ p->buf_location = location;
+ p->buf_len = sizeof(struct hgsmi_host_flags);
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
+ * Return: 0 or negative errno value.
+ * @ctx: The context of the guest heap to use.
+ * @caps: The capabilities to report, see vbva_caps.
+ */
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps)
+{
+ struct vbva_caps *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS);
+ if (!p)
+ return -ENOMEM;
+
+ p->rc = VERR_NOT_IMPLEMENTED;
+ p->caps = caps;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ WARN_ON_ONCE(p->rc < 0);
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+int hgsmi_test_query_conf(struct gen_pool *ctx)
+{
+ u32 value = 0;
+ int ret;
+
+ ret = hgsmi_query_conf(ctx, U32_MAX, &value);
+ if (ret)
+ return ret;
+
+ return value == U32_MAX ? 0 : -EIO;
+}
+
+/**
+ * Query the host for an HGSMI configuration parameter via an HGSMI command.
+ * Return: 0 or negative errno value.
+ * @ctx: The context containing the heap used.
+ * @index: The index of the parameter to query.
+ * @value_ret: Where to store the value of the parameter on success.
+ */
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret)
+{
+ struct vbva_conf32 *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_QUERY_CONF32);
+ if (!p)
+ return -ENOMEM;
+
+ p->index = index;
+ p->value = U32_MAX;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *value_ret = p->value;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Pass the host a new mouse pointer shape via an HGSMI command.
+ * Return: 0 or negative errno value.
+ * @ctx: The context containing the heap to be used.
+ * @flags: Cursor flags.
+ * @hot_x: Horizontal position of the hot spot.
+ * @hot_y: Vertical position of the hot spot.
+ * @width: Width in pixels of the cursor.
+ * @height: Height in pixels of the cursor.
+ * @pixels: Pixel data, see VMMDevReqMousePointer for the format.
+ * @len: Size in bytes of the pixel data.
+ */
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len)
+{
+ struct vbva_mouse_pointer_shape *p;
+ u32 pixel_len = 0;
+ int rc;
+
+ if (flags & VBOX_MOUSE_POINTER_SHAPE) {
+ /*
+ * Size of the pointer data:
+ * sizeof(AND mask) + sizeof(XOR mask). The AND mask is a 1bpp
+ * bitmap padded to a 4 byte boundary, the XOR mask a 32bpp
+ * ARGB image.
+ */
+ pixel_len = ((((width + 7) / 8) * height + 3) & ~3) +
+ width * 4 * height;
+ if (pixel_len > len)
+ return -EINVAL;
+
+ /*
+ * If shape is supplied, then always create the pointer visible.
+ * See comments in 'vboxUpdatePointerShape'
+ */
+ flags |= VBOX_MOUSE_POINTER_VISIBLE;
+ }
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
+ VBVA_MOUSE_POINTER_SHAPE);
+ if (!p)
+ return -ENOMEM;
+
+ p->result = VINF_SUCCESS;
+ p->flags = flags;
+ p->hot_x = hot_x;
+ p->hot_y = hot_y;
+ p->width = width;
+ p->height = height;
+ if (pixel_len)
+ memcpy(p->data, pixels, pixel_len);
+
+ hgsmi_buffer_submit(ctx, p);
+
+ switch (p->result) {
+ case VINF_SUCCESS:
+ rc = 0;
+ break;
+ case VERR_NO_MEMORY:
+ rc = -ENOMEM;
+ break;
+ case VERR_NOT_SUPPORTED:
+ rc = -EBUSY;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ hgsmi_buffer_free(ctx, p);
+
+ return rc;
+}
+
+/**
+ * Report the guest cursor position. The host may wish to use this information
+ * to re-position its own cursor (though this is currently unlikely). The
+ * current host cursor position is returned.
+ * Return: 0 or negative errno value.
+ * @ctx: The context containing the heap used.
+ * @report_position: Are we reporting a position?
+ * @x: Guest cursor X position.
+ * @y: Guest cursor Y position.
+ * @x_host: Host cursor X position is stored here. Optional.
+ * @y_host: Host cursor Y position is stored here. Optional.
+ */
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host)
+{
+ struct vbva_cursor_position *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_CURSOR_POSITION);
+ if (!p)
+ return -ENOMEM;
+
+ p->report_position = report_position;
+ p->x = x;
+ p->y = y;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ *x_host = p->x;
+ *y_host = p->y;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
new file mode 100644
index 000000000000..4e93418d6a13
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_ch_setup.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#ifndef __HGSMI_CH_SETUP_H__
+#define __HGSMI_CH_SETUP_H__
+
+/*
+ * Tell the host the location of hgsmi_host_flags structure, where the host
+ * can write information about pending buffers, etc, and which can be quickly
+ * polled by the guest without needing to use port I/O.
+ */
+#define HGSMI_CC_HOST_FLAGS_LOCATION 0
+
+struct hgsmi_buffer_location {
+ u32 buf_location;
+ u32 buf_len;
+} __packed;
+
+/* HGSMI setup and configuration data structures. */
+
+#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u
+#define HGSMIHOSTFLAGS_IRQ 0x02u
+#define HGSMIHOSTFLAGS_VSYNC 0x10u
+#define HGSMIHOSTFLAGS_HOTPLUG 0x20u
+#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u
+
+struct hgsmi_host_flags {
+ u32 host_flags;
+ u32 reserved[3];
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_channels.h b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
new file mode 100644
index 000000000000..9b83f4ff3faf
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_channels.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#ifndef __HGSMI_CHANNELS_H__
+#define __HGSMI_CHANNELS_H__
+
+/*
+ * Each channel has an 8 bit identifier. There are a number of predefined
+ * (hardcoded) channels.
+ *
+ * The HGSMI_CH_HGSMI channel can be used to map a string channel identifier
+ * to a free 16 bit numerical value. Values are allocated in the range
+ * [HGSMI_CH_STRING_FIRST, HGSMI_CH_STRING_LAST].
+ */
+
+/* A reserved channel value */
+#define HGSMI_CH_RESERVED 0x00
+/* HGSMI: setup and configuration */
+#define HGSMI_CH_HGSMI 0x01
+/* Graphics: VBVA */
+#define HGSMI_CH_VBVA 0x02
+/* Graphics: Seamless with a single guest region */
+#define HGSMI_CH_SEAMLESS 0x03
+/* Graphics: Seamless with separate host windows */
+#define HGSMI_CH_SEAMLESS2 0x04
+/* Graphics: OpenGL HW acceleration */
+#define HGSMI_CH_OPENGL 0x05
+
+/* The first channel index to be used for string mappings (inclusive) */
+#define HGSMI_CH_STRING_FIRST 0x20
+/* The last channel index for string mappings (inclusive) */
+#define HGSMI_CH_STRING_LAST 0xff
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_defs.h b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
new file mode 100644
index 000000000000..6c8df1cdb087
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/hgsmi_defs.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#ifndef __HGSMI_DEFS_H__
+#define __HGSMI_DEFS_H__
+
+/* Buffer sequence type mask. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03
+/* Single buffer, not a part of a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00
+/* The first buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01
+/* A middle buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02
+/* The last buffer in a sequence. */
+#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03
+
+/* 16 bytes buffer header. */
+struct hgsmi_buffer_header {
+ u32 data_size; /* Size of data that follows the header. */
+ u8 flags; /* HGSMI_BUFFER_HEADER_F_* */
+ u8 channel; /* The channel the data must be routed to. */
+ u16 channel_info; /* Opaque to the HGSMI, used by the channel. */
+
+ union {
+ /* Opaque placeholder to make the union 8 bytes. */
+ u8 header_data[8];
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
+ struct {
+ u32 reserved1; /* A reserved field, initialize to 0. */
+ u32 reserved2; /* A reserved field, initialize to 0. */
+ } buffer;
+
+ /* HGSMI_BUFFER_HEADER_F_SEQ_START */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* The total size of the sequence. */
+ u32 sequence_size;
+ } sequence_start;
+
+ /*
+ * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and
+ * HGSMI_BUFFER_HEADER_F_SEQ_END
+ */
+ struct {
+ /* Must be the same for all buffers in the sequence. */
+ u32 sequence_number;
+ /* Data offset in the entire sequence. */
+ u32 sequence_offset;
+ } sequence_continue;
+ } u;
+} __packed;
+
+/* 8 bytes buffer tail. */
+struct hgsmi_buffer_tail {
+ /* Reserved, must be initialized to 0. */
+ u32 reserved;
+ /*
+ * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
+ * Over the header, offset and for first 4 bytes of the tail.
+ */
+ u32 checksum;
+} __packed;
+
+/*
+ * The size of the array of channels. Array indexes are u8.
+ * Note: the value must not be changed.
+ */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/modesetting.c b/drivers/gpu/drm/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7580b9002379
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/modesetting.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: MIT
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#include <linux/vbox_err.h>
+#include "vbox_drv.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+
+/**
+ * Set a video mode via an HGSMI request. The views must have been
+ * initialised first using VBoxHGSMISendViewInfo and if the mode is being
+ * set on the first display then it must be set first using registers.
+ * @ctx: The context containing the heap to use.
+ * @display: The screen number.
+ * @origin_x: The horizontal displacement relative to the first screen.
+ * @origin_y: The vertical displacement relative to the first screen.
+ * @start_offset: The offset of the visible area of the framebuffer
+ * relative to the framebuffer start.
+ * @pitch: The offset in bytes between the starts of two adjacent
+ * scan lines in video RAM.
+ * @width: The mode width.
+ * @height: The mode height.
+ * @bpp: The colour depth of the mode.
+ * @flags: Flags.
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags)
+{
+ struct vbva_infoscreen *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_INFO_SCREEN);
+ if (!p)
+ return;
+
+ p->view_index = display;
+ p->origin_x = origin_x;
+ p->origin_y = origin_y;
+ p->start_offset = start_offset;
+ p->line_size = pitch;
+ p->width = width;
+ p->height = height;
+ p->bits_per_pixel = bpp;
+ p->flags = flags;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens.
+ * Return: 0 or negative errno value.
+ * @ctx: The context containing the heap to use.
+ * @origin_x: Upper left X co-ordinate relative to the first screen.
+ * @origin_y: Upper left Y co-ordinate relative to the first screen.
+ * @width: Rectangle width.
+ * @height: Rectangle height.
+ */
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height)
+{
+ struct vbva_report_input_mapping *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+ VBVA_REPORT_INPUT_MAPPING);
+ if (!p)
+ return -ENOMEM;
+
+ p->x = origin_x;
+ p->y = origin_y;
+ p->cx = width;
+ p->cy = height;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
+
+/**
+ * Get most recent video mode hints.
+ * Return: 0 or negative errno value.
+ * @ctx: The context containing the heap to use.
+ * @screens: The number of screens to query hints for, starting at 0.
+ * @hints: Array of vbva_modehint structures for receiving the hints.
+ */
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints)
+{
+ struct vbva_query_mode_hints *p;
+ size_t size;
+
+ if (WARN_ON(!hints))
+ return -EINVAL;
+
+ size = screens * sizeof(struct vbva_modehint);
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA,
+ VBVA_QUERY_MODE_HINTS);
+ if (!p)
+ return -ENOMEM;
+
+ p->hints_queried_count = screens;
+ p->hint_structure_guest_size = sizeof(struct vbva_modehint);
+ p->rc = VERR_NOT_SUPPORTED;
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (p->rc < 0) {
+ hgsmi_buffer_free(ctx, p);
+ return -EIO;
+ }
+
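+ /* The host places the returned hints directly after the query structure. */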
+ memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size);
+ hgsmi_buffer_free(ctx, p);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
new file mode 100644
index 000000000000..fb6a0f0b8167
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.c
+ * Copyright 2012 Red Hat Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vt_kern.h>
+
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_file.h>
+#include <drm/drm_ioctl.h>
+
+#include "vbox_drv.h"
+
+static int vbox_modeset = -1;
+
+MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
+module_param_named(modeset, vbox_modeset, int, 0400);
+
+static struct drm_driver driver;
+
+static const struct pci_device_id pciidlist[] = {
+ { PCI_DEVICE(0x80ee, 0xbeef) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
+ .fb_probe = vboxfb_create,
+};
+
+static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct vbox_private *vbox;
+ int ret = 0;
+
+ if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
+ return -ENODEV;
+
+ vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
+ if (!vbox)
+ return -ENOMEM;
+
+ ret = drm_dev_init(&vbox->ddev, &driver, &pdev->dev);
+ if (ret) {
+ kfree(vbox);
+ return ret;
+ }
+
+ vbox->ddev.pdev = pdev;
+ vbox->ddev.dev_private = vbox;
+ pci_set_drvdata(pdev, vbox);
+ mutex_init(&vbox->hw_mutex);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_dev_put;
+
+ ret = vbox_hw_init(vbox);
+ if (ret)
+ goto err_pci_disable;
+
+ ret = vbox_mm_init(vbox);
+ if (ret)
+ goto err_hw_fini;
+
+ ret = vbox_mode_init(vbox);
+ if (ret)
+ goto err_mm_fini;
+
+ ret = vbox_irq_init(vbox);
+ if (ret)
+ goto err_mode_fini;
+
+ ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper,
+ &vbox_fb_helper_funcs, 32,
+ vbox->num_crtcs);
+ if (ret)
+ goto err_irq_fini;
+
+ ret = drm_dev_register(&vbox->ddev, 0);
+ if (ret)
+ goto err_fbdev_fini;
+
+ return 0;
+
+err_fbdev_fini:
+ vbox_fbdev_fini(vbox);
+err_irq_fini:
+ vbox_irq_fini(vbox);
+err_mode_fini:
+ vbox_mode_fini(vbox);
+err_mm_fini:
+ vbox_mm_fini(vbox);
+err_hw_fini:
+ vbox_hw_fini(vbox);
+err_pci_disable:
+ pci_disable_device(pdev);
+err_dev_put:
+ drm_dev_put(&vbox->ddev);
+ return ret;
+}
+
+static void vbox_pci_remove(struct pci_dev *pdev)
+{
+ struct vbox_private *vbox = pci_get_drvdata(pdev);
+
+ drm_dev_unregister(&vbox->ddev);
+ vbox_fbdev_fini(vbox);
+ vbox_irq_fini(vbox);
+ vbox_mode_fini(vbox);
+ vbox_mm_fini(vbox);
+ vbox_hw_fini(vbox);
+ drm_dev_put(&vbox->ddev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vbox_pm_suspend(struct device *dev)
+{
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+ int error;
+
+ error = drm_mode_config_helper_suspend(&vbox->ddev);
+ if (error)
+ return error;
+
+ pci_save_state(vbox->ddev.pdev);
+ pci_disable_device(vbox->ddev.pdev);
+ pci_set_power_state(vbox->ddev.pdev, PCI_D3hot);
+
+ return 0;
+}
+
+static int vbox_pm_resume(struct device *dev)
+{
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+
+ if (pci_enable_device(vbox->ddev.pdev))
+ return -EIO;
+
+ return drm_mode_config_helper_resume(&vbox->ddev);
+}
+
+static int vbox_pm_freeze(struct device *dev)
+{
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(&vbox->ddev);
+}
+
+static int vbox_pm_thaw(struct device *dev)
+{
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_resume(&vbox->ddev);
+}
+
+static int vbox_pm_poweroff(struct device *dev)
+{
+ struct vbox_private *vbox = dev_get_drvdata(dev);
+
+ return drm_mode_config_helper_suspend(&vbox->ddev);
+}
+
+static const struct dev_pm_ops vbox_pm_ops = {
+ .suspend = vbox_pm_suspend,
+ .resume = vbox_pm_resume,
+ .freeze = vbox_pm_freeze,
+ .thaw = vbox_pm_thaw,
+ .poweroff = vbox_pm_poweroff,
+ .restore = vbox_pm_resume,
+};
+#endif
+
+static struct pci_driver vbox_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = vbox_pci_probe,
+ .remove = vbox_pci_remove,
+#ifdef CONFIG_PM_SLEEP
+ .driver.pm = &vbox_pm_ops,
+#endif
+};
+
+static const struct file_operations vbox_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .mmap = vbox_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ .lastclose = drm_fb_helper_lastclose,
+
+ .fops = &vbox_fops,
+ .irq_handler = vbox_irq_handler,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+ .gem_free_object_unlocked = vbox_gem_free_object,
+ .dumb_create = vbox_dumb_create,
+ .dumb_map_offset = vbox_dumb_mmap_offset,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_pin = vbox_gem_prime_pin,
+ .gem_prime_unpin = vbox_gem_prime_unpin,
+ .gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+ .gem_prime_vmap = vbox_gem_prime_vmap,
+ .gem_prime_vunmap = vbox_gem_prime_vunmap,
+ .gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+ if (vgacon_text_force() && vbox_modeset == -1)
+ return -EINVAL;
+#endif
+
+ if (vbox_modeset == 0)
+ return -EINVAL;
+
+ return pci_register_driver(&vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+ pci_unregister_driver(&vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..ece31f395540
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -0,0 +1,271 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#ifndef __VBOX_DRV_H__
+#define __VBOX_DRV_H__
+
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/irqreturn.h>
+#include <linux/string.h>
+
+#include <drm/drm_encoder.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem.h>
+
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_memory.h>
+#include <drm/ttm/ttm_module.h>
+
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_ch_setup.h"
+
+#define DRIVER_NAME "vboxvideo"
+#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card"
+#define DRIVER_DATE "20130823"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+#define VBOX_MAX_CURSOR_WIDTH 64
+#define VBOX_MAX_CURSOR_HEIGHT 64
+#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
+#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
+
+#define VBOX_MAX_SCREENS 32
+
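+/*
+ * The last VBVA_ADAPTER_INFORMATION_SIZE bytes of VRAM are used as the
+ * guest heap for HGSMI buffers; the hgsmi_host_flags structure sits at
+ * the very end of that heap.
+ */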
+#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \
+ VBVA_ADAPTER_INFORMATION_SIZE)
+#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE
+#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \
+ sizeof(struct hgsmi_host_flags))
+#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE
+
+struct vbox_framebuffer {
+ struct drm_framebuffer base;
+ struct drm_gem_object *obj;
+};
+
+struct vbox_private {
+ /* Must be first; or we must define our own release callback */
+ struct drm_device ddev;
+ struct drm_fb_helper fb_helper;
+ struct vbox_framebuffer afb;
+
+ u8 __iomem *guest_heap;
+ u8 __iomem *vbva_buffers;
+ struct gen_pool *guest_pool;
+ struct vbva_buf_ctx *vbva_info;
+ bool any_pitch;
+ u32 num_crtcs;
+ /* Amount of available VRAM, including space used for buffers. */
+ u32 full_vram_size;
+ /* Amount of available VRAM, not including space used for buffers. */
+ u32 available_vram_size;
+ /* Array of structures for receiving mode hints. */
+ struct vbva_modehint *last_mode_hints;
+
+ int fb_mtrr;
+
+ struct {
+ struct ttm_bo_device bdev;
+ } ttm;
+
+ struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */
+ struct work_struct hotplug_work;
+ u32 input_mapping_width;
+ u32 input_mapping_height;
+ /*
+ * Is user-space using an X.Org-style layout of one large frame-buffer
+ * encompassing all screens, or is the fbdev console active?
+ */
+ bool single_framebuffer;
+ u8 cursor_data[CURSOR_DATA_SIZE];
+};
+
+#undef CURSOR_PIXEL_COUNT
+#undef CURSOR_DATA_SIZE
+
+struct vbox_gem_object;
+
+struct vbox_connector {
+ struct drm_connector base;
+ char name[32];
+ struct vbox_crtc *vbox_crtc;
+ struct {
+ u32 width;
+ u32 height;
+ bool disconnected;
+ } mode_hint;
+};
+
+struct vbox_crtc {
+ struct drm_crtc base;
+ bool disconnected;
+ unsigned int crtc_id;
+ u32 fb_offset;
+ bool cursor_enabled;
+ u32 x_hint;
+ u32 y_hint;
+ /*
+ * When setting a mode we not only pass the mode to the hypervisor,
+ * but also information on how to map / translate input coordinates
+ * for the emulated USB tablet. This input-mapping may change when
+ * the mode on *another* crtc changes.
+ *
+ * This means that sometimes we must do a modeset on crtc-s other than
+ * the one being changed to update the input-mapping, including crtc-s
+ * which may be disabled inside the guest (shown as a black window
+ * on the host unless closed by the user).
+ *
+ * With atomic modesetting the mode-info of disabled crtcs gets zeroed
+ * yet we need it when updating the input-map to avoid resizing the
+ * window as a side effect of a mode_set on another crtc. Therefore we
+ * cache the info of the last mode below.
+ */
+ u32 width;
+ u32 height;
+ u32 x;
+ u32 y;
+};
+
+struct vbox_encoder {
+ struct drm_encoder base;
+};
+
+#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
+#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
+#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
+#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
+
+bool vbox_check_supported(u16 id);
+int vbox_hw_init(struct vbox_private *vbox);
+void vbox_hw_fini(struct vbox_private *vbox);
+
+int vbox_mode_init(struct vbox_private *vbox);
+void vbox_mode_fini(struct vbox_private *vbox);
+
+void vbox_report_caps(struct vbox_private *vbox);
+
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects);
+
+int vbox_framebuffer_init(struct vbox_private *vbox,
+ struct vbox_framebuffer *vbox_fb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+void vbox_fbdev_fini(struct vbox_private *vbox);
+
+struct vbox_bo {
+ struct ttm_buffer_object bo;
+ struct ttm_placement placement;
+ struct ttm_bo_kmap_obj kmap;
+ struct drm_gem_object gem;
+ struct ttm_place placements[3];
+ int pin_count;
+};
+
+#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)
+
+static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo)
+{
+ return container_of(bo, struct vbox_bo, bo);
+}
+
+#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
+
+static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
+{
+ return bo->bo.offset;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+
+void vbox_gem_free_object(struct drm_gem_object *obj);
+int vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset);
+
+int vbox_mm_init(struct vbox_private *vbox);
+void vbox_mm_fini(struct vbox_private *vbox);
+
+int vbox_bo_create(struct vbox_private *vbox, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo);
+
+int vbox_gem_create(struct vbox_private *vbox,
+ u32 size, bool iskernel, struct drm_gem_object **obj);
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag);
+int vbox_bo_unpin(struct vbox_bo *bo);
+
+static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
+{
+ int ret;
+
+ ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL);
+ if (ret) {
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p\n", bo);
+ return ret;
+ }
+ return 0;
+}
+
+static inline void vbox_bo_unreserve(struct vbox_bo *bo)
+{
+ ttm_bo_unreserve(&bo->bo);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain);
+int vbox_bo_push_sysram(struct vbox_bo *bo);
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
+void *vbox_bo_kmap(struct vbox_bo *bo);
+void vbox_bo_kunmap(struct vbox_bo *bo);
+
+/* vbox_prime.c */
+int vbox_gem_prime_pin(struct drm_gem_object *obj);
+void vbox_gem_prime_unpin(struct drm_gem_object *obj);
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj);
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table);
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj);
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int vbox_gem_prime_mmap(struct drm_gem_object *obj,
+ struct vm_area_struct *area);
+
+/* vbox_irq.c */
+int vbox_irq_init(struct vbox_private *vbox);
+void vbox_irq_fini(struct vbox_private *vbox);
+void vbox_report_hotplug(struct vbox_private *vbox);
+irqreturn_t vbox_irq_handler(int irq, void *arg);
+
+/* vbox_hgsmi.c */
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info);
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf);
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf);
+
+static inline void vbox_write_ioport(u16 index, u16 data)
+{
+ outw(index, VBE_DISPI_IOPORT_INDEX);
+ outw(data, VBE_DISPI_IOPORT_DATA);
+}
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c
new file mode 100644
index 000000000000..b724fe7c0c30
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_fb.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_fb.c
+ * Copyright 2012 Red Hat Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/string.h>
+#include <linux/sysrq.h>
+#include <linux/tty.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_fourcc.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+#ifdef CONFIG_DRM_KMS_FB_HELPER
+static struct fb_deferred_io vbox_defio = {
+ .delay = HZ / 30,
+ .deferred_io = drm_fb_helper_deferred_io,
+};
+#endif
+
+static struct fb_ops vboxfb_ops = {
+ .owner = THIS_MODULE,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ .fb_fillrect = drm_fb_helper_sys_fillrect,
+ .fb_copyarea = drm_fb_helper_sys_copyarea,
+ .fb_imageblit = drm_fb_helper_sys_imageblit,
+};
+
+int vboxfb_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct vbox_private *vbox =
+ container_of(helper, struct vbox_private, fb_helper);
+ struct pci_dev *pdev = vbox->ddev.pdev;
+ struct drm_mode_fb_cmd2 mode_cmd;
+ struct drm_framebuffer *fb;
+ struct fb_info *info;
+ struct drm_gem_object *gobj;
+ struct vbox_bo *bo;
+ int size, ret;
+ u64 gpu_addr;
+ u32 pitch;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+ pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+ mode_cmd.pitches[0] = pitch;
+
+ size = pitch * mode_cmd.height;
+
+ ret = vbox_gem_create(vbox, size, true, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon backing object %d\n", ret);
+ return ret;
+ }
+
+ ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj);
+ if (ret)
+ return ret;
+
+ bo = gem_to_vbox_bo(gobj);
+
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ return ret;
+
+ info = drm_fb_helper_alloc_fbi(helper);
+ if (IS_ERR(info))
+ return PTR_ERR(info);
+
+ info->screen_size = size;
+ info->screen_base = (char __iomem *)vbox_bo_kmap(bo);
+ if (IS_ERR(info->screen_base))
+ return PTR_ERR(info->screen_base);
+
+ fb = &vbox->afb.base;
+ helper->fb = fb;
+
+ info->fbops = &vboxfb_ops;
+
+ /*
+ * This is apparently done as a safety check, so that the framebuffer
+ * is not registered twice by different drivers.
+ */
+ info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+ info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
+
+ drm_fb_helper_fill_info(info, helper, sizes);
+
+ gpu_addr = vbox_bo_gpu_offset(bo);
+ info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr;
+ info->fix.smem_len = vbox->available_vram_size - gpu_addr;
+
+#ifdef CONFIG_DRM_KMS_FB_HELPER
+ info->fbdefio = &vbox_defio;
+ fb_deferred_io_init(info);
+#endif
+
+ info->pixmap.flags = FB_PIXMAP_SYSTEM;
+
+ DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height);
+
+ return 0;
+}
+
+void vbox_fbdev_fini(struct vbox_private *vbox)
+{
+ struct vbox_framebuffer *afb = &vbox->afb;
+
+#ifdef CONFIG_DRM_KMS_FB_HELPER
+ if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio)
+ fb_deferred_io_cleanup(vbox->fb_helper.fbdev);
+#endif
+
+ drm_fb_helper_unregister_fbi(&vbox->fb_helper);
+
+ if (afb->obj) {
+ struct vbox_bo *bo = gem_to_vbox_bo(afb->obj);
+
+ vbox_bo_kunmap(bo);
+
+ if (bo->pin_count)
+ vbox_bo_unpin(bo);
+
+ drm_gem_object_put_unlocked(afb->obj);
+ afb->obj = NULL;
+ }
+ drm_fb_helper_fini(&vbox->fb_helper);
+
+ drm_framebuffer_unregister_private(&afb->base);
+ drm_framebuffer_cleanup(&afb->base);
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
new file mode 100644
index 000000000000..94b60654a012
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_hgsmi.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Authors: Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include "vbox_drv.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_defs.h"
+
+/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */
+static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size)
+{
+ while (size--) {
+ hash += *data++;
+ hash += (hash << 10);
+ hash ^= (hash >> 6);
+ }
+
+ return hash;
+}
+
+static u32 hgsmi_hash_end(u32 hash)
+{
+ hash += (hash << 3);
+ hash ^= (hash >> 11);
+ hash += (hash << 15);
+
+ return hash;
+}
+
+/* Not really a checksum but that is the naming used in all vbox code */
+static u32 hgsmi_checksum(u32 offset,
+ const struct hgsmi_buffer_header *header,
+ const struct hgsmi_buffer_tail *tail)
+{
+ u32 checksum;
+
+ checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset));
+ checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header));
+ /* 4 -> Do not checksum the checksum itself */
+ checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4);
+
+ return hgsmi_hash_end(checksum);
+}
+
+void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size,
+ u8 channel, u16 channel_info)
+{
+ struct hgsmi_buffer_header *h;
+ struct hgsmi_buffer_tail *t;
+ size_t total_size;
+ dma_addr_t offset;
+
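+ /*
+ * Buffers passed to the host are laid out as header | data | tail;
+ * the caller gets a pointer to the data area just after the header.
+ */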
+ total_size = size + sizeof(*h) + sizeof(*t);
+ h = gen_pool_dma_alloc(guest_pool, total_size, &offset);
+ if (!h)
+ return NULL;
+
+ t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size);
+
+ h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
+ h->data_size = size;
+ h->channel = channel;
+ h->channel_info = channel_info;
+ memset(&h->u.header_data, 0, sizeof(h->u.header_data));
+
+ t->reserved = 0;
+ t->checksum = hgsmi_checksum(offset, h, t);
+
+ return (u8 *)h + sizeof(*h);
+}
+
+void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf)
+{
+ struct hgsmi_buffer_header *h =
+ (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h));
+ size_t total_size = h->data_size + sizeof(*h) +
+ sizeof(struct hgsmi_buffer_tail);
+
+ gen_pool_free(guest_pool, (unsigned long)h, total_size);
+}
+
+int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf)
+{
+ phys_addr_t offset;
+
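+ /*
+ * The guest pool was registered (in vbox_hw_init()) with the heap's
+ * offset within VRAM as its "physical" base, so the value written to
+ * the HGSMI guest port below is the buffer's offset within VRAM.
+ */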
+ offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf -
+ sizeof(struct hgsmi_buffer_header));
+ outl(offset, VGA_PORT_HGSMI_GUEST);
+ /* Make the compiler aware that the host has changed memory. */
+ mb();
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c
new file mode 100644
index 000000000000..16a1e29f5292
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2016-2017 Oracle Corporation
+ * This file is based on qxl_irq.c
+ * Copyright 2013 Red Hat Inc.
+ * Authors: Dave Airlie
+ * Alon Levy
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/pci.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+static void vbox_clear_irq(void)
+{
+ outl((u32)~0, VGA_PORT_HGSMI_HOST);
+}
+
+static u32 vbox_get_flags(struct vbox_private *vbox)
+{
+ return readl(vbox->guest_heap + HOST_FLAGS_OFFSET);
+}
+
+void vbox_report_hotplug(struct vbox_private *vbox)
+{
+ schedule_work(&vbox->hotplug_work);
+}
+
+irqreturn_t vbox_irq_handler(int irq, void *arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
+ u32 host_flags = vbox_get_flags(vbox);
+
+ if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
+ return IRQ_NONE;
+
+ /*
+ * Due to a bug in the initial host implementation of hot-plug irqs,
+ * the hot-plug and cursor capability flags were never cleared.
+ * Fortunately we can tell when they would have been set by checking
+ * that the VSYNC flag is not set.
+ */
+ if (host_flags &
+ (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) &&
+ !(host_flags & HGSMIHOSTFLAGS_VSYNC))
+ vbox_report_hotplug(vbox);
+
+ vbox_clear_irq();
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Check that the position hints provided by the host are suitable for GNOME
+ * shell (i.e. all screens disjoint and hints for all enabled screens) and if
+ * not replace them with default ones. Providing valid hints improves the
+ * chances that we will get a known screen layout for pointer mapping.
+ */
+static void validate_or_set_position_hints(struct vbox_private *vbox)
+{
+ struct vbva_modehint *hintsi, *hintsj;
+ bool valid = true;
+ u16 currentx = 0;
+ int i, j;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ for (j = 0; j < i; ++j) {
+ hintsi = &vbox->last_mode_hints[i];
+ hintsj = &vbox->last_mode_hints[j];
+
+ if (hintsi->enabled && hintsj->enabled) {
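+ /*
+ * Treat the whole set of hints as invalid if any coordinate is
+ * out of range or if two enabled screens overlap.
+ */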
+ if (hintsi->dx >= 0xffff ||
+ hintsi->dy >= 0xffff ||
+ hintsj->dx >= 0xffff ||
+ hintsj->dy >= 0xffff ||
+ (hintsi->dx <
+ hintsj->dx + (hintsj->cx & 0x8fff) &&
+ hintsi->dx + (hintsi->cx & 0x8fff) >
+ hintsj->dx) ||
+ (hintsi->dy <
+ hintsj->dy + (hintsj->cy & 0x8fff) &&
+ hintsi->dy + (hintsi->cy & 0x8fff) >
+ hintsj->dy))
+ valid = false;
+ }
+ }
+ }
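+ /* Fall back to placing the enabled screens left to right at y = 0. */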
+ if (!valid)
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ if (vbox->last_mode_hints[i].enabled) {
+ vbox->last_mode_hints[i].dx = currentx;
+ vbox->last_mode_hints[i].dy = 0;
+ currentx +=
+ vbox->last_mode_hints[i].cx & 0x8fff;
+ }
+ }
+}
+
+/* Query the host for the most recent video mode hints. */
+static void vbox_update_mode_hints(struct vbox_private *vbox)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *dev = &vbox->ddev;
+ struct drm_connector *connector;
+ struct vbox_connector *vbox_conn;
+ struct vbva_modehint *hints;
+ u16 flags;
+ bool disconnected;
+ unsigned int crtc_id;
+ int ret;
+
+ ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs,
+ vbox->last_mode_hints);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret);
+ return;
+ }
+
+ validate_or_set_position_hints(vbox);
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ vbox_conn = to_vbox_connector(connector);
+
+ hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id];
+ if (hints->magic != VBVAMODEHINT_MAGIC)
+ continue;
+
+ disconnected = !(hints->enabled);
+ crtc_id = vbox_conn->vbox_crtc->crtc_id;
+ vbox_conn->mode_hint.width = hints->cx;
+ vbox_conn->mode_hint.height = hints->cy;
+ vbox_conn->vbox_crtc->x_hint = hints->dx;
+ vbox_conn->vbox_crtc->y_hint = hints->dy;
+ vbox_conn->mode_hint.disconnected = disconnected;
+
+ if (vbox_conn->vbox_crtc->disconnected == disconnected)
+ continue;
+
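+ /*
+ * Report a disconnected screen as disabled and a (re)connected
+ * one as active but blank.
+ */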
+ if (disconnected)
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED;
+ else
+ flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK;
+
+ hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0,
+ hints->cx * 4, hints->cx,
+ hints->cy, 0, flags);
+
+ vbox_conn->vbox_crtc->disconnected = disconnected;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+static void vbox_hotplug_worker(struct work_struct *work)
+{
+ struct vbox_private *vbox = container_of(work, struct vbox_private,
+ hotplug_work);
+
+ vbox_update_mode_hints(vbox);
+ drm_kms_helper_hotplug_event(&vbox->ddev);
+}
+
+int vbox_irq_init(struct vbox_private *vbox)
+{
+ INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
+ vbox_update_mode_hints(vbox);
+
+ return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq);
+}
+
+void vbox_irq_fini(struct vbox_private *vbox)
+{
+ drm_irq_uninstall(&vbox->ddev);
+ flush_work(&vbox->hotplug_work);
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c
new file mode 100644
index 000000000000..f4d02de5518a
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_main.c
@@ -0,0 +1,361 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_main.c
+ * Copyright 2012 Red Hat Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>,
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+
+#include <linux/vbox_err.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "vbox_drv.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+
+static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb);
+
+ if (vbox_fb->obj)
+ drm_gem_object_put_unlocked(vbox_fb->obj);
+
+ drm_framebuffer_cleanup(fb);
+ kfree(fb);
+}
+
+void vbox_report_caps(struct vbox_private *vbox)
+{
+ u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION |
+ VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY;
+
+ /* The host only accepts VIDEO_MODE_HINTS if it is sent separately. */
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+ caps |= VBVACAPS_VIDEO_MODE_HINTS;
+ hgsmi_send_caps_info(vbox->guest_pool, caps);
+}
+
+/* Send information about dirty rectangles to VBVA. */
+void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ struct vbox_private *vbox = fb->dev->dev_private;
+ struct drm_display_mode *mode;
+ struct drm_crtc *crtc;
+ int crtc_x, crtc_y;
+ unsigned int i;
+
+ mutex_lock(&vbox->hw_mutex);
+ list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) {
+ if (crtc->primary->state->fb != fb)
+ continue;
+
+ mode = &crtc->state->mode;
+ crtc_x = crtc->primary->state->src_x >> 16;
+ crtc_y = crtc->primary->state->src_y >> 16;
+
+ for (i = 0; i < num_rects; ++i) {
+ struct vbva_cmd_hdr cmd_hdr;
+ unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id;
+
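+ /* Skip rectangles which do not intersect this crtc's viewport. */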
+ if (rects[i].x1 > crtc_x + mode->hdisplay ||
+ rects[i].y1 > crtc_y + mode->vdisplay ||
+ rects[i].x2 < crtc_x ||
+ rects[i].y2 < crtc_y)
+ continue;
+
+ cmd_hdr.x = (s16)rects[i].x1;
+ cmd_hdr.y = (s16)rects[i].y1;
+ cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1;
+ cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1;
+
+ if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id],
+ vbox->guest_pool))
+ continue;
+
+ vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool,
+ &cmd_hdr, sizeof(cmd_hdr));
+ vbva_buffer_end_update(&vbox->vbva_info[crtc_id]);
+ }
+ }
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *rects,
+ unsigned int num_rects)
+{
+ vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs vbox_fb_funcs = {
+ .destroy = vbox_user_framebuffer_destroy,
+ .dirty = vbox_user_framebuffer_dirty,
+};
+
+int vbox_framebuffer_init(struct vbox_private *vbox,
+ struct vbox_framebuffer *vbox_fb,
+ const struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
+{
+ int ret;
+
+ drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd);
+ vbox_fb->obj = obj;
+ ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs);
+ if (ret) {
+ DRM_ERROR("framebuffer init failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vbox_accel_init(struct vbox_private *vbox)
+{
+ struct vbva_buffer *vbva;
+ unsigned int i;
+
+ vbox->vbva_info = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
+ sizeof(*vbox->vbva_info), GFP_KERNEL);
+ if (!vbox->vbva_info)
+ return -ENOMEM;
+
+ /* Take a command buffer for each screen from the end of usable VRAM. */
+ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
+
+ vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0,
+ vbox->available_vram_size,
+ vbox->num_crtcs *
+ VBVA_MIN_BUFFER_SIZE);
+ if (!vbox->vbva_buffers)
+ return -ENOMEM;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbva_setup_buffer_context(&vbox->vbva_info[i],
+ vbox->available_vram_size +
+ i * VBVA_MIN_BUFFER_SIZE,
+ VBVA_MIN_BUFFER_SIZE);
+ vbva = (void __force *)vbox->vbva_buffers +
+ i * VBVA_MIN_BUFFER_SIZE;
+ if (!vbva_enable(&vbox->vbva_info[i],
+ vbox->guest_pool, vbva, i)) {
+ /* very old host or driver error. */
+ DRM_ERROR("vboxvideo: vbva_enable failed\n");
+ }
+ }
+
+ return 0;
+}
+
+static void vbox_accel_fini(struct vbox_private *vbox)
+{
+ unsigned int i;
+
+ for (i = 0; i < vbox->num_crtcs; ++i)
+ vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i);
+
+ pci_iounmap(vbox->ddev.pdev, vbox->vbva_buffers);
+}
+
+/* Do we support the 4.3 plus mode hint reporting interface? */
+static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
+{
+ u32 have_hints, have_cursor;
+ int ret;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_MODE_HINT_REPORTING,
+ &have_hints);
+ if (ret)
+ return false;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING,
+ &have_cursor);
+ if (ret)
+ return false;
+
+ return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS;
+}
+
+bool vbox_check_supported(u16 id)
+{
+ u16 dispi_id;
+
+ vbox_write_ioport(VBE_DISPI_INDEX_ID, id);
+ dispi_id = inw(VBE_DISPI_IOPORT_DATA);
+
+ return dispi_id == id;
+}
+
+int vbox_hw_init(struct vbox_private *vbox)
+{
+ int ret = -ENOMEM;
+
+ vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA);
+ vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX);
+
+ DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
+
+ /* Map guest-heap at end of vram */
+ vbox->guest_heap =
+ pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_SIZE);
+ if (!vbox->guest_heap)
+ return -ENOMEM;
+
+ /* Create the guest-heap memory pool using 2^4 = 16 byte chunks. */
+ vbox->guest_pool = gen_pool_create(4, -1);
+ if (!vbox->guest_pool)
+ goto err_unmap_guest_heap;
+
+ ret = gen_pool_add_virt(vbox->guest_pool,
+ (unsigned long)vbox->guest_heap,
+ GUEST_HEAP_OFFSET(vbox),
+ GUEST_HEAP_USABLE_SIZE, -1);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ ret = hgsmi_test_query_conf(vbox->guest_pool);
+ if (ret) {
+ DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n");
+ goto err_destroy_guest_pool;
+ }
+
+ /* Reduce available VRAM size to reflect the guest heap. */
+ vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox);
+ /* Linux drm represents monitors as a 32-bit array. */
+ hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT,
+ &vbox->num_crtcs);
+ vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS);
+
+ if (!have_hgsmi_mode_hints(vbox)) {
+ ret = -ENOTSUPP;
+ goto err_destroy_guest_pool;
+ }
+
+ vbox->last_mode_hints = devm_kcalloc(vbox->ddev.dev, vbox->num_crtcs,
+ sizeof(struct vbva_modehint),
+ GFP_KERNEL);
+ if (!vbox->last_mode_hints) {
+ ret = -ENOMEM;
+ goto err_destroy_guest_pool;
+ }
+
+ ret = vbox_accel_init(vbox);
+ if (ret)
+ goto err_destroy_guest_pool;
+
+ return 0;
+
+err_destroy_guest_pool:
+ gen_pool_destroy(vbox->guest_pool);
+err_unmap_guest_heap:
+ pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
+ return ret;
+}
+
+void vbox_hw_fini(struct vbox_private *vbox)
+{
+ vbox_accel_fini(vbox);
+ gen_pool_destroy(vbox->guest_pool);
+ pci_iounmap(vbox->ddev.pdev, vbox->guest_heap);
+}
+
+int vbox_gem_create(struct vbox_private *vbox,
+ u32 size, bool iskernel, struct drm_gem_object **obj)
+{
+ struct vbox_bo *vboxbo;
+ int ret;
+
+ *obj = NULL;
+
+ size = roundup(size, PAGE_SIZE);
+ if (size == 0)
+ return -EINVAL;
+
+ ret = vbox_bo_create(vbox, size, 0, 0, &vboxbo);
+ if (ret) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("failed to allocate GEM object\n");
+ return ret;
+ }
+
+ *obj = &vboxbo->gem;
+
+ return 0;
+}
+
+int vbox_dumb_create(struct drm_file *file,
+ struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
+ struct drm_gem_object *gobj;
+ u32 handle;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = vbox_gem_create(vbox, args->size, false, &gobj);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file, gobj, &handle);
+ drm_gem_object_put_unlocked(gobj);
+ if (ret)
+ return ret;
+
+ args->handle = handle;
+
+ return 0;
+}
+
+void vbox_gem_free_object(struct drm_gem_object *obj)
+{
+ struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj);
+
+ ttm_bo_put(&vbox_bo->bo);
+}
+
+static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
+{
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
+}
+
+int
+vbox_dumb_mmap_offset(struct drm_file *file,
+ struct drm_device *dev,
+ u32 handle, u64 *offset)
+{
+ struct drm_gem_object *obj;
+ int ret;
+ struct vbox_bo *bo;
+
+ mutex_lock(&dev->struct_mutex);
+ obj = drm_gem_object_lookup(file, handle);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
+
+ bo = gem_to_vbox_bo(obj);
+ *offset = vbox_bo_mmap_offset(bo);
+
+ drm_gem_object_put(obj);
+ ret = 0;
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
new file mode 100644
index 000000000000..58cea131470e
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -0,0 +1,939 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_mode.c
+ * Copyright 2012 Red Hat Inc.
+ * Parts based on xf86-video-ast
+ * Copyright (c) 2005 ASPEED Technology Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>,
+ * Hans de Goede <hdegoede@redhat.com>
+ */
+#include <linux/export.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "hgsmi_channels.h"
+#include "vbox_drv.h"
+#include "vboxvideo.h"
+
+/*
+ * Set a graphics mode. Poke any required values into registers, do an HGSMI
+ * mode set and tell the host we support advanced graphics functions.
+ */
+static void vbox_do_modeset(struct drm_crtc *crtc)
+{
+ struct drm_framebuffer *fb = crtc->primary->state->fb;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox;
+ int width, height, bpp, pitch;
+ u16 flags;
+ s32 x_offset, y_offset;
+
+ vbox = crtc->dev->dev_private;
+ width = vbox_crtc->width ? vbox_crtc->width : 640;
+ height = vbox_crtc->height ? vbox_crtc->height : 480;
+ bpp = fb ? fb->format->cpp[0] * 8 : 32;
+ pitch = fb ? fb->pitches[0] : width * bpp / 8;
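+ /*
+ * With a single large framebuffer covering all screens the crtc x/y
+ * offsets describe the layout; otherwise use the host position hints.
+ */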
+ x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint;
+ y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint;
+
+ /*
+ * This is the old way of setting graphics modes. It assumed one screen
+ * and a frame-buffer at the start of video RAM. On older versions of
+ * VirtualBox, certain parts of the code still assume that the first
+ * screen is programmed this way, so try to fake it.
+ */
+ if (vbox_crtc->crtc_id == 0 && fb &&
+ vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+ vbox_crtc->fb_offset % (bpp / 8) == 0) {
+ vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
+ vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
+ vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
+ vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
+ vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
+ vbox_write_ioport(VBE_DISPI_INDEX_X_OFFSET,
+ vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
+ vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
+ vbox_crtc->fb_offset / pitch + vbox_crtc->y);
+ }
+
+ flags = VBVA_SCREEN_F_ACTIVE;
+ flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK;
+ flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+ hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
+ x_offset, y_offset,
+ vbox_crtc->x * bpp / 8 +
+ vbox_crtc->y * pitch,
+ pitch, width, height, bpp, flags);
+}
+
+static int vbox_set_view(struct drm_crtc *crtc)
+{
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbva_infoview *p;
+
+ /*
+ * Tell the host about the view. This design originally targeted the
+ * Windows XP driver architecture and assumed that each screen would
+ * have a dedicated frame buffer with the command buffer following it,
+ * the whole being a "view". The host works out which screen a command
+ * buffer belongs to by checking whether it is in the first view, then
+ * whether it is in the second and so on. The first match wins. We
+ * cheat around this by making the first view be the managed memory
+ * plus the first command buffer, the second the same plus the second
+ * buffer and so on.
+ */
+ p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
+ HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+ if (!p)
+ return -ENOMEM;
+
+ p->view_index = vbox_crtc->crtc_id;
+ p->view_offset = vbox_crtc->fb_offset;
+ p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
+ vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+ p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
+
+ hgsmi_buffer_submit(vbox->guest_pool, p);
+ hgsmi_buffer_free(vbox->guest_pool, p);
+
+ return 0;
+}
+
+/*
+ * Try to map the layout of virtual screens to the range of the input device.
+ * Return true if we need to re-set the crtc modes due to screen offset
+ * changes.
+ */
+static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+{
+ struct drm_crtc *crtci;
+ struct drm_connector *connectori;
+ struct drm_framebuffer *fb, *fb1 = NULL;
+ bool single_framebuffer = true;
+ bool old_single_framebuffer = vbox->single_framebuffer;
+ u16 width = 0, height = 0;
+
+ /*
+ * Are we using an X.Org-style single large frame-buffer for all crtcs?
+ * If so then screen layout can be deduced from the crtc offsets.
+ * Same fall-back if this is the fbdev frame-buffer.
+ */
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+ fb = crtci->primary->state->fb;
+ if (!fb)
+ continue;
+
+ if (!fb1) {
+ fb1 = fb;
+ if (to_vbox_framebuffer(fb1) == &vbox->afb)
+ break;
+ } else if (fb != fb1) {
+ single_framebuffer = false;
+ }
+ }
+ if (!fb1)
+ return false;
+
+ if (single_framebuffer) {
+ vbox->single_framebuffer = true;
+ vbox->input_mapping_width = fb1->width;
+ vbox->input_mapping_height = fb1->height;
+ return old_single_framebuffer != vbox->single_framebuffer;
+ }
+ /* Otherwise calculate the total span of all screens. */
+ list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list,
+ head) {
+ struct vbox_connector *vbox_connector =
+ to_vbox_connector(connectori);
+ struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+
+ width = max_t(u16, width, vbox_crtc->x_hint +
+ vbox_connector->mode_hint.width);
+ height = max_t(u16, height, vbox_crtc->y_hint +
+ vbox_connector->mode_hint.height);
+ }
+
+ vbox->single_framebuffer = false;
+ vbox->input_mapping_width = width;
+ vbox->input_mapping_height = height;
+
+ return old_single_framebuffer != vbox->single_framebuffer;
+}
+
+static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+ struct vbox_private *vbox = crtc->dev->dev_private;
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
+
+ mutex_lock(&vbox->hw_mutex);
+
+ if (crtc->state->enable) {
+ vbox_crtc->width = crtc->state->mode.hdisplay;
+ vbox_crtc->height = crtc->state->mode.vdisplay;
+ }
+
+ vbox_crtc->x = x;
+ vbox_crtc->y = y;
+ vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo);
+
+ /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */
+ if (needs_modeset && vbox_set_up_input_mapping(vbox)) {
+ struct drm_crtc *crtci;
+
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list,
+ head) {
+ if (crtci == crtc)
+ continue;
+ vbox_do_modeset(crtci);
+ }
+ }
+
+ vbox_set_view(crtc);
+ vbox_do_modeset(crtc);
+
+ if (needs_modeset)
+ hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+ vbox->input_mapping_width,
+ vbox->input_mapping_height);
+
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+}
+
+static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ if (crtc->state && crtc->state->event) {
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+ .atomic_enable = vbox_crtc_atomic_enable,
+ .atomic_disable = vbox_crtc_atomic_disable,
+ .atomic_flush = vbox_crtc_atomic_flush,
+};
+
+static void vbox_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static const struct drm_crtc_funcs vbox_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ /* .gamma_set = vbox_crtc_gamma_set, */
+ .destroy = vbox_crtc_destroy,
+ .reset = drm_atomic_helper_crtc_reset,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static int vbox_primary_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_crtc_state *crtc_state = NULL;
+
+ if (new_state->crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(
+ new_state->state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+ }
+
+ return drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+}
+
+static void vbox_primary_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_crtc *crtc = plane->state->crtc;
+ struct drm_framebuffer *fb = plane->state->fb;
+
+ vbox_crtc_set_base_and_mode(crtc, fb,
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16);
+}
+
+static void vbox_primary_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_crtc *crtc = old_state->crtc;
+
+ /* vbox_do_modeset checks plane->state->fb and will disable if NULL */
+ vbox_crtc_set_base_and_mode(crtc, old_state->fb,
+ old_state->src_x >> 16,
+ old_state->src_y >> 16);
+}
+
+static int vbox_primary_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct vbox_bo *bo;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+ ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
+
+ return ret;
+}
+
+static void vbox_primary_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vbox_bo *bo;
+
+ if (!old_state->fb)
+ return;
+
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
+ vbox_bo_unpin(bo);
+}
+
+static int vbox_cursor_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_crtc_state *crtc_state = NULL;
+ u32 width = new_state->crtc_w;
+ u32 height = new_state->crtc_h;
+ int ret;
+
+ if (new_state->crtc) {
+ crtc_state = drm_atomic_get_existing_crtc_state(
+ new_state->state, new_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return -EINVAL;
+ }
+
+ ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ true, true);
+ if (ret)
+ return ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+ width == 0 || height == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * Copy the ARGB image and generate the mask, which is needed in case the host
+ * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
+ * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+ */
+static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+ size_t mask_size)
+{
+ size_t line_size = (width + 7) / 8;
+ u32 i, j;
+
+ memcpy(dst + mask_size, src, width * height * 4);
+ for (i = 0; i < height; ++i)
+ for (j = 0; j < width; ++j)
+ if (((u32 *)src)[i * width + j] > 0xf0000000)
+ dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+}
+
+static void vbox_cursor_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vbox_private *vbox =
+ container_of(plane->dev, struct vbox_private, ddev);
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
+ struct drm_framebuffer *fb = plane->state->fb;
+ struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+ u32 width = plane->state->crtc_w;
+ u32 height = plane->state->crtc_h;
+ size_t data_size, mask_size;
+ u32 flags;
+ u8 *src;
+
+ /*
+	 * VirtualBox uses the host windowing system to draw the cursor, so
+	 * cursor moves are a no-op; we only need to upload new cursor sprites.
+ */
+ if (fb == old_state->fb)
+ return;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ vbox_crtc->cursor_enabled = true;
+
+ /* pinning is done in prepare/cleanup framebuffer */
+ src = vbox_bo_kmap(bo);
+ if (IS_ERR(src)) {
+ mutex_unlock(&vbox->hw_mutex);
+ DRM_WARN("Could not kmap cursor bo, skipping update\n");
+ return;
+ }
+
+ /*
+ * The mask must be calculated based on the alpha
+ * channel, one bit per ARGB word, and must be 32-bit
+ * padded.
+ */
+ mask_size = ((width + 7) / 8 * height + 3) & ~3;
+ data_size = width * height * 4 + mask_size;
+
+ copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
+ vbox_bo_kunmap(bo);
+
+ flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+ VBOX_MOUSE_POINTER_ALPHA;
+ hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+ min_t(u32, max(fb->hot_x, 0), width),
+ min_t(u32, max(fb->hot_y, 0), height),
+ width, height, vbox->cursor_data, data_size);
+
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static void vbox_cursor_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vbox_private *vbox =
+ container_of(plane->dev, struct vbox_private, ddev);
+ struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc);
+ bool cursor_enabled = false;
+ struct drm_crtc *crtci;
+
+ mutex_lock(&vbox->hw_mutex);
+
+ vbox_crtc->cursor_enabled = false;
+
+ list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+ if (to_vbox_crtc(crtci)->cursor_enabled)
+ cursor_enabled = true;
+ }
+
+ if (!cursor_enabled)
+ hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+ 0, 0, NULL, 0);
+
+ mutex_unlock(&vbox->hw_mutex);
+}
+
+static int vbox_cursor_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct vbox_bo *bo;
+
+ if (!new_state->fb)
+ return 0;
+
+ bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+ return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM);
+}
+
+static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vbox_bo *bo;
+
+	if (!old_state->fb)
+ return;
+
+	bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
+ vbox_bo_unpin(bo);
+}
+
+static const u32 vbox_cursor_plane_formats[] = {
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
+ .atomic_check = vbox_cursor_atomic_check,
+ .atomic_update = vbox_cursor_atomic_update,
+ .atomic_disable = vbox_cursor_atomic_disable,
+ .prepare_fb = vbox_cursor_prepare_fb,
+ .cleanup_fb = vbox_cursor_cleanup_fb,
+};
+
+static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static const u32 vbox_primary_plane_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+};
+
+static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
+ .atomic_check = vbox_primary_atomic_check,
+ .atomic_update = vbox_primary_atomic_update,
+ .atomic_disable = vbox_primary_atomic_disable,
+ .prepare_fb = vbox_primary_prepare_fb,
+ .cleanup_fb = vbox_primary_cleanup_fb,
+};
+
+static const struct drm_plane_funcs vbox_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+static struct drm_plane *vbox_create_plane(struct vbox_private *vbox,
+ unsigned int possible_crtcs,
+ enum drm_plane_type type)
+{
+ const struct drm_plane_helper_funcs *helper_funcs = NULL;
+ const struct drm_plane_funcs *funcs;
+ struct drm_plane *plane;
+ const u32 *formats;
+ int num_formats;
+ int err;
+
+ if (type == DRM_PLANE_TYPE_PRIMARY) {
+ funcs = &vbox_primary_plane_funcs;
+ formats = vbox_primary_plane_formats;
+ helper_funcs = &vbox_primary_helper_funcs;
+ num_formats = ARRAY_SIZE(vbox_primary_plane_formats);
+ } else if (type == DRM_PLANE_TYPE_CURSOR) {
+ funcs = &vbox_cursor_plane_funcs;
+ formats = vbox_cursor_plane_formats;
+ helper_funcs = &vbox_cursor_helper_funcs;
+ num_formats = ARRAY_SIZE(vbox_cursor_plane_formats);
+ } else {
+ return ERR_PTR(-EINVAL);
+ }
+
+ plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+ if (!plane)
+ return ERR_PTR(-ENOMEM);
+
+ err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs,
+ funcs, formats, num_formats,
+ NULL, type, NULL);
+ if (err)
+ goto free_plane;
+
+ drm_plane_helper_add(plane, helper_funcs);
+
+ return plane;
+
+free_plane:
+ kfree(plane);
+ return ERR_PTR(-EINVAL);
+}
+
+static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+{
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
+ struct drm_plane *cursor = NULL;
+ struct vbox_crtc *vbox_crtc;
+ struct drm_plane *primary;
+ u32 caps = 0;
+ int ret;
+
+ ret = hgsmi_query_conf(vbox->guest_pool,
+ VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+ if (ret)
+ return ERR_PTR(ret);
+
+ vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+ if (!vbox_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY);
+ if (IS_ERR(primary)) {
+ ret = PTR_ERR(primary);
+ goto free_mem;
+ }
+
+ if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+ cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR);
+ if (IS_ERR(cursor)) {
+ ret = PTR_ERR(cursor);
+ goto clean_primary;
+ }
+ } else {
+ DRM_WARN("VirtualBox host is too old, no cursor support\n");
+ }
+
+ vbox_crtc->crtc_id = i;
+
+ ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor,
+ &vbox_crtc_funcs, NULL);
+ if (ret)
+ goto clean_cursor;
+
+ drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+ drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+
+ return vbox_crtc;
+
+clean_cursor:
+ if (cursor) {
+ drm_plane_cleanup(cursor);
+ kfree(cursor);
+ }
+clean_primary:
+ drm_plane_cleanup(primary);
+ kfree(primary);
+free_mem:
+ kfree(vbox_crtc);
+ return ERR_PTR(ret);
+}
+
+static void vbox_encoder_destroy(struct drm_encoder *encoder)
+{
+ drm_encoder_cleanup(encoder);
+ kfree(encoder);
+}
+
+static const struct drm_encoder_funcs vbox_enc_funcs = {
+ .destroy = vbox_encoder_destroy,
+};
+
+static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+ unsigned int i)
+{
+ struct vbox_encoder *vbox_encoder;
+
+ vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+ if (!vbox_encoder)
+ return NULL;
+
+ drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+ DRM_MODE_ENCODER_DAC, NULL);
+
+ vbox_encoder->base.possible_crtcs = 1 << i;
+ return &vbox_encoder->base;
+}
+
+/*
+ * Generate EDID data with a mode-unique serial number for the virtual
+ * monitor, to try to persuade Unity that different modes correspond to
+ * different monitors, so that it does not try to force the same resolution
+ * on all of them.
+ */
+static void vbox_set_edid(struct drm_connector *connector, int width,
+ int height)
+{
+ enum { EDID_SIZE = 128 };
+ unsigned char edid[EDID_SIZE] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
+ 0x58, 0x58, /* manufacturer (VBX) */
+ 0x00, 0x00, /* product code */
+ 0x00, 0x00, 0x00, 0x00, /* serial number goes here */
+ 0x01, /* week of manufacture */
+ 0x00, /* year of manufacture */
+ 0x01, 0x03, /* EDID version */
+ 0x80, /* capabilities - digital */
+ 0x00, /* horiz. res in cm, zero for projectors */
+ 0x00, /* vert. res in cm */
+ 0x78, /* display gamma (120 == 2.2). */
+ 0xEE, /* features (standby, suspend, off, RGB, std */
+ /* colour space, preferred timing mode) */
+ 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+ /* chromaticity for standard colour space. */
+ 0x00, 0x00, 0x00, /* no default timings */
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, /* no standard timings */
+ 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+ 0x02, 0x02,
+ /* descriptor block 1 goes below */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* descriptor block 2, monitor ranges */
+ 0x00, 0x00, 0x00, 0xFD, 0x00,
+ 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+ 0x20, 0x20,
+ /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
+ 0x20,
+ /* descriptor block 3, monitor name */
+ 0x00, 0x00, 0x00, 0xFC, 0x00,
+ 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+ '\n',
+ /* descriptor block 4: dummy data */
+ 0x00, 0x00, 0x00, 0x10, 0x00,
+ 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x20,
+ 0x00, /* number of extensions */
+ 0x00 /* checksum goes here */
+ };
+ int clock = (width + 6) * (height + 6) * 60 / 10000;
+ unsigned int i, sum = 0;
+
+ edid[12] = width & 0xff;
+ edid[13] = width >> 8;
+ edid[14] = height & 0xff;
+ edid[15] = height >> 8;
+ edid[54] = clock & 0xff;
+ edid[55] = clock >> 8;
+ edid[56] = width & 0xff;
+ edid[58] = (width >> 4) & 0xf0;
+ edid[59] = height & 0xff;
+ edid[61] = (height >> 4) & 0xf0;
+ for (i = 0; i < EDID_SIZE - 1; ++i)
+ sum += edid[i];
+ edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+ drm_connector_update_edid_property(connector, (struct edid *)edid);
+}
+
+static int vbox_get_modes(struct drm_connector *connector)
+{
+ struct vbox_connector *vbox_connector = NULL;
+ struct drm_display_mode *mode = NULL;
+ struct vbox_private *vbox = NULL;
+ unsigned int num_modes = 0;
+ int preferred_width, preferred_height;
+
+ vbox_connector = to_vbox_connector(connector);
+ vbox = connector->dev->dev_private;
+
+ hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+ HOST_FLAGS_OFFSET);
+ if (vbox_connector->vbox_crtc->crtc_id == 0)
+ vbox_report_caps(vbox);
+
+ num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+ preferred_width = vbox_connector->mode_hint.width ?
+ vbox_connector->mode_hint.width : 1024;
+ preferred_height = vbox_connector->mode_hint.height ?
+ vbox_connector->mode_hint.height : 768;
+ mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+ 60, false, false, false);
+ if (mode) {
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+ ++num_modes;
+ }
+ vbox_set_edid(connector, preferred_width, preferred_height);
+
+ if (vbox_connector->vbox_crtc->x_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->ddev.mode_config.suggested_x_property,
+ vbox_connector->vbox_crtc->x_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->ddev.mode_config.suggested_x_property, 0);
+
+ if (vbox_connector->vbox_crtc->y_hint != -1)
+ drm_object_property_set_value(&connector->base,
+ vbox->ddev.mode_config.suggested_y_property,
+ vbox_connector->vbox_crtc->y_hint);
+ else
+ drm_object_property_set_value(&connector->base,
+ vbox->ddev.mode_config.suggested_y_property, 0);
+
+ return num_modes;
+}
+
+static void vbox_connector_destroy(struct drm_connector *connector)
+{
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static enum drm_connector_status
+vbox_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct vbox_connector *vbox_connector;
+
+ vbox_connector = to_vbox_connector(connector);
+
+ return vbox_connector->mode_hint.disconnected ?
+ connector_status_disconnected : connector_status_connected;
+}
+
+static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+ u32 max_y)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_device *dev;
+ struct drm_display_mode *mode, *iterator;
+
+ vbox_connector = to_vbox_connector(connector);
+ dev = vbox_connector->base.dev;
+ list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+ list_del(&mode->head);
+ drm_mode_destroy(dev, mode);
+ }
+
+ return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+}
+
+static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+ .get_modes = vbox_get_modes,
+};
+
+static const struct drm_connector_funcs vbox_connector_funcs = {
+ .detect = vbox_connector_detect,
+ .fill_modes = vbox_fill_modes,
+ .destroy = vbox_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int vbox_connector_init(struct drm_device *dev,
+ struct vbox_crtc *vbox_crtc,
+ struct drm_encoder *encoder)
+{
+ struct vbox_connector *vbox_connector;
+ struct drm_connector *connector;
+
+ vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+ if (!vbox_connector)
+ return -ENOMEM;
+
+ connector = &vbox_connector->base;
+ vbox_connector->vbox_crtc = vbox_crtc;
+
+ drm_connector_init(dev, connector, &vbox_connector_funcs,
+ DRM_MODE_CONNECTOR_VGA);
+ drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+
+ connector->interlace_allowed = 0;
+ connector->doublescan_allowed = 0;
+
+ drm_mode_create_suggested_offset_properties(dev);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_x_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.suggested_y_property, 0);
+
+ drm_connector_attach_encoder(connector, encoder);
+
+ return 0;
+}
+
+static struct drm_framebuffer *vbox_user_framebuffer_create(
+ struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct vbox_private *vbox =
+ container_of(dev, struct vbox_private, ddev);
+ struct drm_gem_object *obj;
+ struct vbox_framebuffer *vbox_fb;
+ int ret = -ENOMEM;
+
+ obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+ if (!vbox_fb)
+ goto err_unref_obj;
+
+ ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
+ if (ret)
+ goto err_free_vbox_fb;
+
+ return &vbox_fb->base;
+
+err_free_vbox_fb:
+ kfree(vbox_fb);
+err_unref_obj:
+ drm_gem_object_put_unlocked(obj);
+ return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs vbox_mode_funcs = {
+ .fb_create = vbox_user_framebuffer_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+int vbox_mode_init(struct vbox_private *vbox)
+{
+ struct drm_device *dev = &vbox->ddev;
+ struct drm_encoder *encoder;
+ struct vbox_crtc *vbox_crtc;
+ unsigned int i;
+ int ret;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+ dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+
+ for (i = 0; i < vbox->num_crtcs; ++i) {
+ vbox_crtc = vbox_crtc_init(dev, i);
+ if (IS_ERR(vbox_crtc)) {
+ ret = PTR_ERR(vbox_crtc);
+ goto err_drm_mode_cleanup;
+ }
+ encoder = vbox_encoder_init(dev, i);
+ if (!encoder) {
+ ret = -ENOMEM;
+ goto err_drm_mode_cleanup;
+ }
+ ret = vbox_connector_init(dev, vbox_crtc, encoder);
+ if (ret)
+ goto err_drm_mode_cleanup;
+ }
+
+ drm_mode_config_reset(dev);
+ return 0;
+
+err_drm_mode_cleanup:
+ drm_mode_config_cleanup(dev);
+ return ret;
+}
+
+void vbox_mode_fini(struct vbox_private *vbox)
+{
+ drm_mode_config_cleanup(&vbox->ddev);
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_prime.c b/drivers/gpu/drm/vboxvideo/vbox_prime.c
new file mode 100644
index 000000000000..702b1aa53494
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_prime.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2017 Oracle Corporation
+ * Copyright 2017 Canonical
+ * Authors: Andreas Pokorny
+ */
+
+#include "vbox_drv.h"
+
+/*
+ * Based on qxl_prime.c:
+ * Empty implementations, as there should not be any other driver for a
+ * virtual device that might share buffers with vboxvideo.
+ */
+
+int vbox_gem_prime_pin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENODEV;
+}
+
+void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+struct drm_gem_object *vbox_gem_prime_import_sg_table(
+ struct drm_device *dev, struct dma_buf_attachment *attach,
+ struct sg_table *table)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+{
+ WARN_ONCE(1, "not implemented");
+ return ERR_PTR(-ENODEV);
+}
+
+void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+{
+ WARN_ONCE(1, "not implemented");
+}
+
+int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+{
+ WARN_ONCE(1, "not implemented");
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
new file mode 100644
index 000000000000..9d78438c2877
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_ttm.c
+ * Copyright 2012 Red Hat Inc.
+ * Authors: Dave Airlie <airlied@redhat.com>
+ * Michael Thayer <michael.thayer@oracle.com>
+ */
+#include <linux/pci.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_page_alloc.h>
+#include "vbox_drv.h"
+
+static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd)
+{
+ return container_of(bd, struct vbox_private, ttm.bdev);
+}
+
+static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct vbox_bo *bo;
+
+ bo = container_of(tbo, struct vbox_bo, bo);
+
+ drm_gem_object_release(&bo->gem);
+ kfree(bo);
+}
+
+static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
+{
+ if (bo->destroy == &vbox_bo_ttm_destroy)
+ return true;
+
+ return false;
+}
+
+static int
+vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type,
+ struct ttm_mem_type_manager *man)
+{
+ switch (type) {
+ case TTM_PL_SYSTEM:
+ man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_MASK_CACHING;
+ man->default_caching = TTM_PL_FLAG_CACHED;
+ break;
+ case TTM_PL_VRAM:
+ man->func = &ttm_bo_manager_func;
+ man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
+ man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
+ man->default_caching = TTM_PL_FLAG_WC;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
+{
+ struct vbox_bo *vboxbo = vbox_bo(bo);
+
+ if (!vbox_ttm_bo_is_vbox_bo(bo))
+ return;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
+ *pl = vboxbo->placement;
+}
+
+static int vbox_bo_verify_access(struct ttm_buffer_object *bo,
+ struct file *filp)
+{
+ return 0;
+}
+
+static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+ struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+ struct vbox_private *vbox = vbox_bdev(bdev);
+
+ mem->bus.addr = NULL;
+ mem->bus.offset = 0;
+ mem->bus.size = mem->num_pages << PAGE_SHIFT;
+ mem->bus.base = 0;
+ mem->bus.is_iomem = false;
+ if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
+ return -EINVAL;
+ switch (mem->mem_type) {
+ case TTM_PL_SYSTEM:
+ /* system memory */
+ return 0;
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = pci_resource_start(vbox->ddev.pdev, 0);
+ mem->bus.is_iomem = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem)
+{
+}
+
+static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
+{
+ ttm_tt_fini(tt);
+ kfree(tt);
+}
+
+static struct ttm_backend_func vbox_tt_backend_func = {
+ .destroy = &vbox_ttm_backend_destroy,
+};
+
+static struct ttm_tt *vbox_ttm_tt_create(struct ttm_buffer_object *bo,
+ u32 page_flags)
+{
+ struct ttm_tt *tt;
+
+ tt = kzalloc(sizeof(*tt), GFP_KERNEL);
+ if (!tt)
+ return NULL;
+
+ tt->func = &vbox_tt_backend_func;
+ if (ttm_tt_init(tt, bo, page_flags)) {
+ kfree(tt);
+ return NULL;
+ }
+
+ return tt;
+}
+
+static struct ttm_bo_driver vbox_bo_driver = {
+ .ttm_tt_create = vbox_ttm_tt_create,
+ .init_mem_type = vbox_bo_init_mem_type,
+ .eviction_valuable = ttm_bo_eviction_valuable,
+ .evict_flags = vbox_bo_evict_flags,
+ .verify_access = vbox_bo_verify_access,
+ .io_mem_reserve = &vbox_ttm_io_mem_reserve,
+ .io_mem_free = &vbox_ttm_io_mem_free,
+};
+
+int vbox_mm_init(struct vbox_private *vbox)
+{
+ int ret;
+ struct drm_device *dev = &vbox->ddev;
+ struct ttm_bo_device *bdev = &vbox->ttm.bdev;
+
+ ret = ttm_bo_device_init(&vbox->ttm.bdev,
+ &vbox_bo_driver,
+ dev->anon_inode->i_mapping,
+ true);
+ if (ret) {
+ DRM_ERROR("Error initialising bo driver; %d\n", ret);
+ return ret;
+ }
+
+ ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
+ vbox->available_vram_size >> PAGE_SHIFT);
+ if (ret) {
+ DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
+ goto err_device_release;
+ }
+
+#ifdef DRM_MTRR_WC
+ vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0),
+ DRM_MTRR_WC);
+#else
+ vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
+ pci_resource_len(dev->pdev, 0));
+#endif
+ return 0;
+
+err_device_release:
+ ttm_bo_device_release(&vbox->ttm.bdev);
+ return ret;
+}
+
+void vbox_mm_fini(struct vbox_private *vbox)
+{
+#ifdef DRM_MTRR_WC
+ drm_mtrr_del(vbox->fb_mtrr,
+ pci_resource_start(vbox->ddev.pdev, 0),
+ pci_resource_len(vbox->ddev.pdev, 0), DRM_MTRR_WC);
+#else
+ arch_phys_wc_del(vbox->fb_mtrr);
+#endif
+ ttm_bo_device_release(&vbox->ttm.bdev);
+}
+
+void vbox_ttm_placement(struct vbox_bo *bo, int domain)
+{
+ unsigned int i;
+ u32 c = 0;
+
+ bo->placement.placement = bo->placements;
+ bo->placement.busy_placement = bo->placements;
+
+ if (domain & TTM_PL_FLAG_VRAM)
+ bo->placements[c++].flags =
+ TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
+ if (domain & TTM_PL_FLAG_SYSTEM)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ if (!c)
+ bo->placements[c++].flags =
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+
+ bo->placement.num_placement = c;
+ bo->placement.num_busy_placement = c;
+
+ for (i = 0; i < c; ++i) {
+ bo->placements[i].fpfn = 0;
+ bo->placements[i].lpfn = 0;
+ }
+}
+
+int vbox_bo_create(struct vbox_private *vbox, int size, int align,
+ u32 flags, struct vbox_bo **pvboxbo)
+{
+ struct vbox_bo *vboxbo;
+ size_t acc_size;
+ int ret;
+
+ vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL);
+ if (!vboxbo)
+ return -ENOMEM;
+
+ ret = drm_gem_object_init(&vbox->ddev, &vboxbo->gem, size);
+ if (ret)
+ goto err_free_vboxbo;
+
+ vboxbo->bo.bdev = &vbox->ttm.bdev;
+
+ vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
+
+ acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
+ sizeof(struct vbox_bo));
+
+ ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
+ ttm_bo_type_device, &vboxbo->placement,
+ align >> PAGE_SHIFT, false, acc_size,
+ NULL, NULL, vbox_bo_ttm_destroy);
+ if (ret)
+ goto err_free_vboxbo;
+
+ *pvboxbo = vboxbo;
+
+ return 0;
+
+err_free_vboxbo:
+ kfree(vboxbo);
+ return ret;
+}
+
+int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int i, ret;
+
+ if (bo->pin_count) {
+ bo->pin_count++;
+ return 0;
+ }
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret)
+ return ret;
+
+ vbox_ttm_placement(bo, pl_flag);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+ if (ret == 0)
+ bo->pin_count = 1;
+
+ vbox_bo_unreserve(bo);
+
+ return ret;
+}
+
+int vbox_bo_unpin(struct vbox_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ ret = vbox_bo_reserve(bo, false);
+ if (ret) {
+ DRM_ERROR("Error %d reserving bo, leaving it pinned\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+
+ vbox_bo_unreserve(bo);
+
+ return ret;
+}
+
+/*
+ * Move a vbox-owned buffer object to system memory if no one else has it
+ * pinned. The caller must have pinned it previously, and this call will
+ * release the caller's pin.
+ */
+int vbox_bo_push_sysram(struct vbox_bo *bo)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+ int i, ret;
+
+ if (!bo->pin_count) {
+ DRM_ERROR("unpin bad %p\n", bo);
+ return 0;
+ }
+ bo->pin_count--;
+ if (bo->pin_count)
+ return 0;
+
+ if (bo->kmap.virtual) {
+ ttm_bo_kunmap(&bo->kmap);
+ bo->kmap.virtual = NULL;
+ }
+
+ vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
+
+ for (i = 0; i < bo->placement.num_placement; i++)
+ bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
+
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
+ if (ret) {
+		DRM_ERROR("pushing to system memory failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct vbox_private *vbox = file_priv->minor->dev->dev_private;
+
+ return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
+}
+
+void *vbox_bo_kmap(struct vbox_bo *bo)
+{
+ int ret;
+
+ if (bo->kmap.virtual)
+ return bo->kmap.virtual;
+
+ ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
+ if (ret) {
+ DRM_ERROR("Error kmapping bo: %d\n", ret);
+ return NULL;
+ }
+
+ return bo->kmap.virtual;
+}
+
+void vbox_bo_kunmap(struct vbox_bo *bo)
+{
+ if (bo->kmap.virtual) {
+ ttm_bo_kunmap(&bo->kmap);
+ bo->kmap.virtual = NULL;
+ }
+}
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
new file mode 100644
index 000000000000..0592004f71aa
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -0,0 +1,442 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2016 Oracle Corporation */
+
+#ifndef __VBOXVIDEO_H__
+#define __VBOXVIDEO_H__
+
+#define VBOX_VIDEO_MAX_SCREENS 64
+
+/*
+ * The last 4096 bytes of the guest VRAM contain the generic info for all
+ * DualView chunks: sizes and offsets of chunks. This is filled in by the
+ * miniport.
+ *
+ * The last 4096 bytes of each chunk contain chunk-specific data: framebuffer
+ * info, etc. This is used exclusively by the corresponding instance of a
+ * display driver.
+ *
+ * The VRAM layout:
+ *   Last 4096 bytes - Adapter information area.
+ *   4096-byte-aligned miniport heap (value specified in the config, rounded up).
+ *   Slack - whatever is left over after dividing up the VRAM.
+ *   4096-byte-aligned framebuffers:
+ *     the last 4096 bytes of each framebuffer are the display information area.
+ *
+ * The Virtual Graphics Adapter information in the guest VRAM is stored by the
+ * guest video driver using structures prepended by VBOXVIDEOINFOHDR.
+ *
+ * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * the host starts to process the info. The first element at the start of
+ * the 4096-byte region should normally be a LINK that points to the
+ * actual information chain. That way the guest driver can have some
+ * fixed layout of the information memory block and just rewrite
+ * the link to point to the relevant memory chain.
+ *
+ * The processing stops at the END element.
+ *
+ * The host can access the memory only when the port IO is processed.
+ * All data that will be needed later must be copied from these 4096 bytes.
+ * But other VRAM can be used by the host until the mode is disabled.
+ *
+ * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
+ * to disable the mode.
+ *
+ * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
+ * from the host and issue commands to the host.
+ *
+ * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
+ * following operations with the VBE data register can be performed:
+ *
+ * Operation Result
+ * write 16 bit value NOP
+ * read 16 bit value count of monitors
+ * write 32 bit value set the vbox cmd value and the cmd processed by the host
+ * read 32 bit value result of the last vbox command is returned
+ */
+
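
A minimal guest-side sketch of the port IO protocol described above (illustrative only, not driver code; it assumes the x86 port I/O helpers from <linux/io.h>, the VBE_DISPI_IOPORT_* and VBE_DISPI_INDEX_VBOX_VIDEO constants from vboxvideo_vbe.h, and that the 32-bit command write is done with a single outl() on the data port, per the operation table above):

#include <linux/io.h>

/* Select the VBOX_VIDEO index, then a 16-bit read returns the monitor count. */
static u16 example_read_monitor_count(void)
{
	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
	return inw(VBE_DISPI_IOPORT_DATA);
}

/* A 32-bit write submits a command; 0xffffffff disables the mode. */
static void example_disable_adapter_memory(void)
{
	outw(VBE_DISPI_INDEX_VBOX_VIDEO, VBE_DISPI_IOPORT_INDEX);
	outl(VBOX_VIDEO_DISABLE_ADAPTER_MEMORY, VBE_DISPI_IOPORT_DATA);
}
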
+struct vbva_cmd_hdr {
+ s16 x;
+ s16 y;
+ u16 w;
+ u16 h;
+} __packed;
+
+/*
+ * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of
+ * data. For example, big bitmaps which do not fit in the buffer.
+ *
+ * The guest starts writing to the buffer by initializing a record entry in
+ * the records queue. VBVA_F_RECORD_PARTIAL indicates that the record is
+ * being written. As data is written to the ring buffer, the guest increases
+ * free_offset.
+ *
+ * The host reads the records on flushes and processes all completed records.
+ * When the host encounters a situation where only a partial record is present
+ * and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE -
+ * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates
+ * data_offset. After that, on each flush the host continues fetching the data
+ * until the record is completed.
+ */
+
+#define VBVA_RING_BUFFER_SIZE (4194304 - 1024)
+#define VBVA_RING_BUFFER_THRESHOLD (4096)
+
+#define VBVA_MAX_RECORDS (64)
+
+#define VBVA_F_MODE_ENABLED 0x00000001u
+#define VBVA_F_MODE_VRDP 0x00000002u
+#define VBVA_F_MODE_VRDP_RESET 0x00000004u
+#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u
+
+#define VBVA_F_STATE_PROCESSING 0x00010000u
+
+#define VBVA_F_RECORD_PARTIAL 0x80000000u
+
+struct vbva_record {
+ u32 len_and_flags;
+} __packed;
+
+/*
+ * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of
+ * the runtime heapsimple API. Use a minimum of 2 pages here, because the info
+ * area may also contain other data (for example the hgsmi_host_flags
+ * structure).
+ */
+#define VBVA_ADAPTER_INFORMATION_SIZE 65536
+#define VBVA_MIN_BUFFER_SIZE 65536
+
+/*
+ * The value for port IO to tell the adapter to stop interpreting the adapter
+ * memory.
+ */
+#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
+
+/* The value for port IO to let the adapter interpret the adapter memory. */
+#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
+
+/*
+ * The value for port IO to let the adapter interpret the display memory.
+ * The display number is encoded in the low 16 bits.
+ */
+#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
+
+struct vbva_host_flags {
+ u32 host_events;
+ u32 supported_orders;
+} __packed;
+
+struct vbva_buffer {
+ struct vbva_host_flags host_flags;
+
+ /* The offset where the data start in the buffer. */
+ u32 data_offset;
+ /* The offset where next data must be placed in the buffer. */
+ u32 free_offset;
+
+ /* The queue of record descriptions. */
+ struct vbva_record records[VBVA_MAX_RECORDS];
+ u32 record_first_index;
+ u32 record_free_index;
+
+ /* Space to leave free when large partial records are transferred. */
+ u32 partial_write_tresh;
+
+ u32 data_len;
+ /* variable size for the rest of the vbva_buffer area in VRAM. */
+ u8 data[0];
+} __packed;
+
+#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
+
+/* guest->host commands */
+#define VBVA_QUERY_CONF32 1
+#define VBVA_SET_CONF32 2
+#define VBVA_INFO_VIEW 3
+#define VBVA_INFO_HEAP 4
+#define VBVA_FLUSH 5
+#define VBVA_INFO_SCREEN 6
+#define VBVA_ENABLE 7
+#define VBVA_MOUSE_POINTER_SHAPE 8
+/* informs host about HGSMI caps. see vbva_caps below */
+#define VBVA_INFO_CAPS 12
+/* configures scanline, see VBVASCANLINECFG below */
+#define VBVA_SCANLINE_CFG 13
+/* requests scanline info, see VBVASCANLINEINFO below */
+#define VBVA_SCANLINE_INFO 14
+/* inform host about VBVA Command submission */
+#define VBVA_CMDVBVA_SUBMIT 16
+/* inform host about VBVA Command submission */
+#define VBVA_CMDVBVA_FLUSH 17
+/* G->H DMA command */
+#define VBVA_CMDVBVA_CTL 18
+/* Query most recent mode hints sent */
+#define VBVA_QUERY_MODE_HINTS 19
+/*
+ * Report the guest virtual desktop position and size for mapping host and
+ * guest pointer positions.
+ */
+#define VBVA_REPORT_INPUT_MAPPING 20
+/* Report the guest cursor position and query the host position. */
+#define VBVA_CURSOR_POSITION 21
+
+/* host->guest commands */
+#define VBVAHG_EVENT 1
+#define VBVAHG_DISPLAY_CUSTOM 2
+
+/* vbva_conf32::index */
+#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
+#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
+/*
+ * Returns VINF_SUCCESS if the host can report mode hints via VBVA.
+ * Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
+/*
+ * Returns VINF_SUCCESS if the host can report guest cursor enabled status via
+ * VBVA. Set value to VERR_NOT_SUPPORTED before calling.
+ */
+#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
+/*
+ * Returns the currently available host cursor capabilities. Available if
+ * VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
+ */
+#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
+/* Returns the supported flags in vbva_infoscreen.flags. */
+#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
+/* Returns the max size of VBVA record. */
+#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
+
+struct vbva_conf32 {
+ u32 index;
+ u32 value;
+} __packed;
+
+/* Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0)
+/*
+ * Guest cursor capability: can the host show a hardware cursor at the host
+ * pointer location?
+ */
+#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1)
+/* Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2)
+/* Reserved for historical reasons. Must always be unset. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3)
+/* Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4)
+/* Reserved for historical reasons. */
+#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5)
+
+struct vbva_infoview {
+ /* Index of the screen, assigned by the guest. */
+ u32 view_index;
+
+ /* The screen offset in VRAM, the framebuffer starts here. */
+ u32 view_offset;
+
+ /* The size of the VRAM memory that can be used for the view. */
+ u32 view_size;
+
+ /* The recommended maximum size of the VRAM memory for the screen. */
+ u32 max_screen_size;
+} __packed;
+
+struct vbva_flush {
+ u32 reserved;
+} __packed;
+
+/* vbva_infoscreen.flags */
+#define VBVA_SCREEN_F_NONE 0x0000
+#define VBVA_SCREEN_F_ACTIVE 0x0001
+/*
+ * The virtual monitor has been disabled by the guest and should be removed
+ * by the host and ignored for purposes of pointer position calculation.
+ */
+#define VBVA_SCREEN_F_DISABLED 0x0002
+/*
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using width, height, etc values from the vbva_infoscreen
+ * request.
+ */
+#define VBVA_SCREEN_F_BLANK 0x0004
+/*
+ * The virtual monitor has been blanked by the guest and should be blacked
+ * out by the host using the previous mode values for width, height, etc.
+ */
+#define VBVA_SCREEN_F_BLANK2 0x0008
+
+struct vbva_infoscreen {
+ /* Which view contains the screen. */
+ u32 view_index;
+
+ /* Physical X origin relative to the primary screen. */
+ s32 origin_x;
+
+ /* Physical Y origin relative to the primary screen. */
+ s32 origin_y;
+
+ /* Offset of visible framebuffer relative to the framebuffer start. */
+ u32 start_offset;
+
+ /* The scan line size in bytes. */
+ u32 line_size;
+
+ /* Width of the screen. */
+ u32 width;
+
+ /* Height of the screen. */
+ u32 height;
+
+ /* Color depth. */
+ u16 bits_per_pixel;
+
+ /* VBVA_SCREEN_F_* */
+ u16 flags;
+} __packed;
+
+/* vbva_enable.flags */
+#define VBVA_F_NONE 0x00000000
+#define VBVA_F_ENABLE 0x00000001
+#define VBVA_F_DISABLE 0x00000002
+/* extended VBVA to be used with WDDM */
+#define VBVA_F_EXTENDED 0x00000004
+/* vbva offset is absolute VRAM offset */
+#define VBVA_F_ABSOFFSET 0x00000008
+
+struct vbva_enable {
+ u32 flags;
+ u32 offset;
+ s32 result;
+} __packed;
+
+struct vbva_enable_ex {
+ struct vbva_enable base;
+ u32 screen_id;
+} __packed;
+
+struct vbva_mouse_pointer_shape {
+ /* The host result. */
+ s32 result;
+
+ /* VBOX_MOUSE_POINTER_* bit flags. */
+ u32 flags;
+
+ /* X coordinate of the hot spot. */
+	u32 hot_x;
+
+ /* Y coordinate of the hot spot. */
+ u32 hot_y;
+
+ /* Width of the pointer in pixels. */
+ u32 width;
+
+ /* Height of the pointer in scanlines. */
+ u32 height;
+
+ /* Pointer data.
+ *
+ * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color)
+ * mask.
+ *
+ * For pointers without alpha channel the XOR mask pixels are 32 bit
+ * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask
+ * consists of (lsb)BGRA(msb) 32 bit values.
+ *
+	 * The guest driver must create the AND mask for pointers with an
+	 * alpha channel, so that if the host does not support alpha the
+	 * pointer can be displayed as a normal color pointer. The AND mask
+	 * can be constructed from the alpha values. For example, an alpha
+	 * value >= 0xf0 means bit 0 in the AND mask.
+ *
+ * The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND
+ * mask, therefore, is and_len = (width + 7) / 8 * height. The padding
+ * bits at the end of any scanline are undefined.
+ *
+	 * The XOR mask follows the AND mask at the next 4-byte-aligned offset:
+	 * u8 *xor = and + ((and_len + 3) & ~3)
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and size of XOR mask is:
+ * xor_len = width * 4 * height.
+ *
+ * Preallocate 4 bytes for accessing actual data as p->data.
+ */
+ u8 data[4];
+} __packed;
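
A minimal sketch of the size arithmetic described in the comment above (illustrative only; the helper name is made up and not part of the driver):

static inline u32 example_pointer_shape_data_size(u32 width, u32 height)
{
	u32 and_len = (width + 7) / 8 * height;	/* 1 bpp AND mask, byte-aligned scanlines */
	u32 xor_off = (and_len + 3) & ~3u;	/* XOR mask starts at the next 4-byte-aligned offset */
	u32 xor_len = width * 4 * height;	/* 32 bpp XOR (color) mask */

	return xor_off + xor_len;
}
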
+
+/* pointer is visible */
+#define VBOX_MOUSE_POINTER_VISIBLE 0x0001
+/* pointer has alpha channel */
+#define VBOX_MOUSE_POINTER_ALPHA 0x0002
+/* pointerData contains new pointer shape */
+#define VBOX_MOUSE_POINTER_SHAPE 0x0004
+
+/*
+ * The guest driver can handle asynchronous guest command completion by
+ * reading the command offset from an I/O port.
+ */
+#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
+/* the guest driver can handle video adapter IRQs */
+#define VBVACAPS_IRQ 0x00000002
+/* The guest can read video mode hints sent via VBVA. */
+#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
+/* The guest can switch to a software cursor on demand. */
+#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
+/* The guest does not depend on host handling the VBE registers. */
+#define VBVACAPS_USE_VBVA_ONLY 0x00000010
+
+struct vbva_caps {
+ s32 rc;
+ u32 caps;
+} __packed;
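
As a usage sketch (illustrative only; it assumes the hgsmi_send_caps_info() helper declared in vboxvideo_guest.h and an already-initialized HGSMI guest pool, and the chosen flag combination is just an example):

static int example_report_caps(struct gen_pool *guest_pool)
{
	u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | VBVACAPS_IRQ |
		   VBVACAPS_USE_VBVA_ONLY;

	/* Advertise the guest driver's capabilities to the host via VBVA_INFO_CAPS. */
	return hgsmi_send_caps_info(guest_pool, caps);
}
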
+
+/* Query the most recent mode hints received from the host. */
+struct vbva_query_mode_hints {
+ /* The maximum number of screens to return hints for. */
+ u16 hints_queried_count;
+ /* The size of the mode hint structures directly following this one. */
+ u16 hint_structure_guest_size;
+ /* Return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
+ s32 rc;
+} __packed;
+
+/*
+ * Structure in which a mode hint is returned. The guest allocates an array
+ * of these immediately after the vbva_query_mode_hints structure.
+ * To accommodate future extensions, the vbva_query_mode_hints structure
+ * specifies the size of the vbva_modehint structures allocated by the guest,
+ * and the host only fills out structure elements which fit into that size. The
+ * host should fill any unused members (e.g. dx, dy) or structure space on the
+ * end with ~0. The whole structure can legally be set to ~0 to skip a screen.
+ */
+struct vbva_modehint {
+ u32 magic;
+ u32 cx;
+ u32 cy;
+ u32 bpp; /* Which has never been used... */
+ u32 display;
+ u32 dx; /* X offset into the virtual frame-buffer. */
+ u32 dy; /* Y offset into the virtual frame-buffer. */
+ u32 enabled; /* Not flags. Add new members for new flags. */
+} __packed;
+
+#define VBVAMODEHINT_MAGIC 0x0801add9u
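
A sketch of how a guest might apply the ~0 conventions described above when consuming a returned hint (illustrative only; the exact acceptance policy is an assumption, not the driver's):

static bool example_mode_hint_usable(const struct vbva_modehint *hint)
{
	if (hint->magic != VBVAMODEHINT_MAGIC)
		return false;	/* Host did not fill out this entry. */
	if (hint->enabled == ~0U)
		return false;	/* Entire structure set to ~0: skip this screen. */
	return hint->enabled != 0;
}
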
+
+/*
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed. This information remains valid until the next VBVA resize event
+ * for any screen, at which time it is reset to the bounding rectangle of all
+ * virtual screens and must be re-set.
+ */
+struct vbva_report_input_mapping {
+ s32 x; /* Upper left X co-ordinate relative to the first screen. */
+ s32 y; /* Upper left Y co-ordinate relative to the first screen. */
+ u32 cx; /* Rectangle width. */
+ u32 cy; /* Rectangle height. */
+} __packed;
+
+/*
+ * Report the guest cursor position and query the host one. The host may wish
+ * to use the guest information to re-position its own cursor (though this is
+ * currently unlikely).
+ */
+struct vbva_cursor_position {
+ u32 report_position; /* Are we reporting a position? */
+ u32 x; /* Guest cursor X position */
+ u32 y; /* Guest cursor Y position */
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
new file mode 100644
index 000000000000..55fcee3a6470
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_guest.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2016 Oracle Corporation */
+
+#ifndef __VBOXVIDEO_GUEST_H__
+#define __VBOXVIDEO_GUEST_H__
+
+#include <linux/genalloc.h>
+#include "vboxvideo.h"
+
+/*
+ * Structure grouping the context needed for sending graphics acceleration
+ * information to the host via VBVA. Each screen has its own VBVA buffer.
+ */
+struct vbva_buf_ctx {
+ /* Offset of the buffer in the VRAM section for the screen */
+ u32 buffer_offset;
+ /* Length of the buffer in bytes */
+ u32 buffer_length;
+ /* Set if we wrote to the buffer faster than the host could read it */
+ bool buffer_overflow;
+ /* VBVA record that we are currently preparing for the host, or NULL */
+ struct vbva_record *record;
+ /*
+ * Pointer to the VBVA buffer mapped into the current address space.
+ * Will be NULL if VBVA is not enabled.
+ */
+ struct vbva_buffer *vbva;
+};
+
+int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location);
+int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps);
+int hgsmi_test_query_conf(struct gen_pool *ctx);
+int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret);
+int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ u32 hot_x, u32 hot_y, u32 width, u32 height,
+ u8 *pixels, u32 len);
+int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position,
+ u32 x, u32 y, u32 *x_host, u32 *y_host);
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen);
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen);
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx);
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx);
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len);
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length);
+
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+ s32 origin_x, s32 origin_y, u32 start_offset,
+ u32 pitch, u32 width, u32 height,
+ u16 bpp, u16 flags);
+int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y,
+ u32 width, u32 height);
+int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens,
+ struct vbva_modehint *hints);
+
+#endif
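
The VBVA update helpers declared above are used in a begin/write/end sequence; a minimal sketch follows (illustrative only, assuming a vbva_buf_ctx already set up via vbva_setup_buffer_context() and vbva_enable(), and a guest_pool for HGSMI buffers):

static void example_send_dirty_rect(struct vbva_buf_ctx *vbva_ctx,
				    struct gen_pool *guest_pool,
				    s16 x, s16 y, u16 w, u16 h)
{
	struct vbva_cmd_hdr hdr = { .x = x, .y = y, .w = w, .h = h };

	if (!vbva_buffer_begin_update(vbva_ctx, guest_pool))
		return;

	/* vbva_write() wraps around the ring buffer and flushes as needed. */
	vbva_write(vbva_ctx, guest_pool, &hdr, sizeof(hdr));
	vbva_buffer_end_update(vbva_ctx);
}
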
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
new file mode 100644
index 000000000000..427235869297
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo_vbe.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright (C) 2006-2016 Oracle Corporation */
+
+#ifndef __VBOXVIDEO_VBE_H__
+#define __VBOXVIDEO_VBE_H__
+
+/* GUEST <-> HOST Communication API */
+
+#define VBE_DISPI_BANK_ADDRESS 0xA0000
+#define VBE_DISPI_BANK_SIZE_KB 64
+
+#define VBE_DISPI_MAX_XRES 16384
+#define VBE_DISPI_MAX_YRES 16384
+#define VBE_DISPI_MAX_BPP 32
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
+/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI 0xBE01
+#define VBE_DISPI_ID_ANYX 0xBE02
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+
+#endif
diff --git a/drivers/gpu/drm/vboxvideo/vbva_base.c b/drivers/gpu/drm/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..36bc9824ec3f
--- /dev/null
+++ b/drivers/gpu/drm/vboxvideo/vbva_base.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: MIT
+/* Copyright (C) 2006-2017 Oracle Corporation */
+
+#include <linux/vbox_err.h>
+#include "vbox_drv.h"
+#include "vboxvideo_guest.h"
+#include "hgsmi_channels.h"
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there, serialized by vbva_buffer_begin_update()
+ * and vbva_buffer_end_update().
+ *
+ * free_offset is the write position, data_offset is the read position.
+ * free_offset == data_offset means the buffer is empty.
+ * There must always be a gap between data_offset and free_offset when data
+ * is in the buffer.
+ * The guest only changes free_offset, the host only changes data_offset.
+ */
+
+static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
+{
+ s32 diff = vbva->data_offset - vbva->free_offset;
+
+ return diff > 0 ? diff : vbva->data_len + diff;
+}
+
+static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
+ const void *p, u32 len, u32 offset)
+{
+ struct vbva_buffer *vbva = vbva_ctx->vbva;
+ u32 bytes_till_boundary = vbva->data_len - offset;
+ u8 *dst = &vbva->data[offset];
+ s32 diff = len - bytes_till_boundary;
+
+ if (diff <= 0) {
+ /* Chunk will not cross buffer boundary. */
+ memcpy(dst, p, len);
+ } else {
+ /* Chunk crosses buffer boundary. */
+ memcpy(dst, p, bytes_till_boundary);
+ memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
+ }
+}
+
+static void vbva_buffer_flush(struct gen_pool *ctx)
+{
+ struct vbva_flush *p;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
+ if (!p)
+ return;
+
+ p->reserved = 0;
+
+ hgsmi_buffer_submit(ctx, p);
+ hgsmi_buffer_free(ctx, p);
+}
+
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ const void *p, u32 len)
+{
+ struct vbva_record *record;
+ struct vbva_buffer *vbva;
+ u32 available;
+
+ vbva = vbva_ctx->vbva;
+ record = vbva_ctx->record;
+
+ if (!vbva || vbva_ctx->buffer_overflow ||
+ !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
+ return false;
+
+ available = vbva_buffer_available(vbva);
+
+ while (len > 0) {
+ u32 chunk = len;
+
+ if (chunk >= available) {
+ vbva_buffer_flush(ctx);
+ available = vbva_buffer_available(vbva);
+ }
+
+ if (chunk >= available) {
+ if (WARN_ON(available <= vbva->partial_write_tresh)) {
+ vbva_ctx->buffer_overflow = true;
+ return false;
+ }
+ chunk = available - vbva->partial_write_tresh;
+ }
+
+ vbva_buffer_place_data_at(vbva_ctx, p, chunk,
+ vbva->free_offset);
+
+ vbva->free_offset = (vbva->free_offset + chunk) %
+ vbva->data_len;
+ record->len_and_flags += chunk;
+ available -= chunk;
+ len -= chunk;
+ p += chunk;
+ }
+
+ return true;
+}
+
+static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx, s32 screen, bool enable)
+{
+ struct vbva_enable_ex *p;
+ bool ret;
+
+ p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
+ if (!p)
+ return false;
+
+ p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
+ p->base.offset = vbva_ctx->buffer_offset;
+ p->base.result = VERR_NOT_SUPPORTED;
+ if (screen >= 0) {
+ p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+ p->screen_id = screen;
+ }
+
+ hgsmi_buffer_submit(ctx, p);
+
+ if (enable)
+ ret = p->base.result >= 0;
+ else
+ ret = true;
+
+ hgsmi_buffer_free(ctx, p);
+
+ return ret;
+}
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ struct vbva_buffer *vbva, s32 screen)
+{
+ bool ret = false;
+
+ memset(vbva, 0, sizeof(*vbva));
+ vbva->partial_write_tresh = 256;
+ vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
+ vbva_ctx->vbva = vbva;
+
+ ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
+ if (!ret)
+ vbva_disable(vbva_ctx, ctx, screen);
+
+ return ret;
+}
+
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+ s32 screen)
+{
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+ vbva_ctx->vbva = NULL;
+
+ vbva_inform_host(vbva_ctx, ctx, screen, false);
+}
+
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+ struct gen_pool *ctx)
+{
+ struct vbva_record *record;
+ u32 next;
+
+ if (!vbva_ctx->vbva ||
+ !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
+ return false;
+
+ WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
+
+ next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
+
+ /* Flush if all slots in the records queue are used */
+ if (next == vbva_ctx->vbva->record_first_index)
+ vbva_buffer_flush(ctx);
+
+ /* If even after flush there is no place then fail the request */
+ if (next == vbva_ctx->vbva->record_first_index)
+ return false;
+
+ record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
+ record->len_and_flags = VBVA_F_RECORD_PARTIAL;
+ vbva_ctx->vbva->record_free_index = next;
+ /* Remember which record we are using. */
+ vbva_ctx->record = record;
+
+ return true;
+}
+
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
+{
+ struct vbva_record *record = vbva_ctx->record;
+
+ WARN_ON(!vbva_ctx->vbva || !record ||
+ !(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
+
+ /* Mark the record completed. */
+ record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+
+ vbva_ctx->buffer_overflow = false;
+ vbva_ctx->record = NULL;
+}
+
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+ u32 buffer_offset, u32 buffer_length)
+{
+ vbva_ctx->buffer_offset = buffer_offset;
+ vbva_ctx->buffer_length = buffer_length;
+}
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 8dcce7182bb7..88ebd681d7eb 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -40,7 +40,7 @@ static bool is_user_label(int label)
return label >= VC4_BO_TYPE_COUNT;
}
-static void vc4_bo_stats_dump(struct vc4_dev *vc4)
+static void vc4_bo_stats_print(struct drm_printer *p, struct vc4_dev *vc4)
{
int i;
@@ -48,58 +48,35 @@ static void vc4_bo_stats_dump(struct vc4_dev *vc4)
if (!vc4->bo_labels[i].num_allocated)
continue;
- DRM_INFO("%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
+ drm_printf(p, "%30s: %6dkb BOs (%d)\n",
+ vc4->bo_labels[i].name,
+ vc4->bo_labels[i].size_allocated / 1024,
+ vc4->bo_labels[i].num_allocated);
}
mutex_lock(&vc4->purgeable.lock);
if (vc4->purgeable.num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
+ vc4->purgeable.size / 1024, vc4->purgeable.num);
if (vc4->purgeable.purged_num)
- DRM_INFO("%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
+ drm_printf(p, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
+ vc4->purgeable.purged_size / 1024,
+ vc4->purgeable.purged_num);
mutex_unlock(&vc4->purgeable.lock);
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
+static int vc4_bo_stats_debugfs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- mutex_lock(&vc4->bo_lock);
- for (i = 0; i < vc4->num_labels; i++) {
- if (!vc4->bo_labels[i].num_allocated)
- continue;
-
- seq_printf(m, "%30s: %6dkb BOs (%d)\n",
- vc4->bo_labels[i].name,
- vc4->bo_labels[i].size_allocated / 1024,
- vc4->bo_labels[i].num_allocated);
- }
- mutex_unlock(&vc4->bo_lock);
-
- mutex_lock(&vc4->purgeable.lock);
- if (vc4->purgeable.num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "userspace BO cache",
- vc4->purgeable.size / 1024, vc4->purgeable.num);
+ struct drm_printer p = drm_seq_file_printer(m);
- if (vc4->purgeable.purged_num)
- seq_printf(m, "%30s: %6zdkb BOs (%d)\n", "total purged BO",
- vc4->purgeable.purged_size / 1024,
- vc4->purgeable.purged_num);
- mutex_unlock(&vc4->purgeable.lock);
+ vc4_bo_stats_print(&p, vc4);
return 0;
}
-#endif
/* Takes ownership of *name and returns the appropriate slot for it in
* the bo_labels[] array, extending it as necessary.
@@ -201,8 +178,6 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
bo->validated_shader = NULL;
}
- reservation_object_fini(&bo->_resv);
-
drm_gem_cma_free_object(obj);
}
@@ -427,8 +402,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
vc4->bo_labels[VC4_BO_TYPE_KERNEL].num_allocated++;
vc4->bo_labels[VC4_BO_TYPE_KERNEL].size_allocated += size;
mutex_unlock(&vc4->bo_lock);
- bo->resv = &bo->_resv;
- reservation_object_init(bo->resv);
return &bo->base.base;
}
@@ -479,8 +452,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
}
if (IS_ERR(cma_obj)) {
+ struct drm_printer p = drm_info_printer(vc4->dev->dev);
DRM_ERROR("Failed to allocate from CMA:\n");
- vc4_bo_stats_dump(vc4);
+ vc4_bo_stats_print(&p, vc4);
return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&cma_obj->base);
@@ -684,13 +658,6 @@ static void vc4_bo_cache_time_timer(struct timer_list *t)
schedule_work(&vc4->bo_cache.time_work);
}
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj)
-{
- struct vc4_bo *bo = to_vc4_bo(obj);
-
- return bo->resv;
-}
-
struct dma_buf *
vc4_prime_export(struct drm_device *dev, struct drm_gem_object *obj, int flags)
{
@@ -822,14 +789,12 @@ vc4_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
- struct vc4_bo *bo;
obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return obj;
- bo = to_vc4_bo(obj);
- bo->resv = attach->dmabuf->resv;
+ obj->resv = attach->dmabuf->resv;
return obj;
}
@@ -1038,6 +1003,8 @@ int vc4_bo_cache_init(struct drm_device *dev)
mutex_init(&vc4->bo_lock);
+ vc4_debugfs_add_file(dev, "bo_stats", vc4_bo_stats_debugfs, NULL);
+
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 1baa10e94484..5e09389e1514 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <drm/drm_fb_cma_helper.h>
@@ -67,67 +68,22 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state)
#define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
#define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
-#define CRTC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} crtc_regs[] = {
- CRTC_REG(PV_CONTROL),
- CRTC_REG(PV_V_CONTROL),
- CRTC_REG(PV_VSYNCD_EVEN),
- CRTC_REG(PV_HORZA),
- CRTC_REG(PV_HORZB),
- CRTC_REG(PV_VERTA),
- CRTC_REG(PV_VERTB),
- CRTC_REG(PV_VERTA_EVEN),
- CRTC_REG(PV_VERTB_EVEN),
- CRTC_REG(PV_INTEN),
- CRTC_REG(PV_INTSTAT),
- CRTC_REG(PV_STAT),
- CRTC_REG(PV_HACT_ACT),
+static const struct debugfs_reg32 crtc_regs[] = {
+ VC4_REG32(PV_CONTROL),
+ VC4_REG32(PV_V_CONTROL),
+ VC4_REG32(PV_VSYNCD_EVEN),
+ VC4_REG32(PV_HORZA),
+ VC4_REG32(PV_HORZB),
+ VC4_REG32(PV_VERTA),
+ VC4_REG32(PV_VERTB),
+ VC4_REG32(PV_VERTA_EVEN),
+ VC4_REG32(PV_VERTB_EVEN),
+ VC4_REG32(PV_INTEN),
+ VC4_REG32(PV_INTSTAT),
+ VC4_REG32(PV_STAT),
+ VC4_REG32(PV_HACT_ACT),
};
-static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- crtc_regs[i].reg, crtc_regs[i].name,
- CRTC_READ(crtc_regs[i].reg));
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- int crtc_index = (uintptr_t)node->info_ent->data;
- struct drm_crtc *crtc;
- struct vc4_crtc *vc4_crtc;
- int i;
-
- i = 0;
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (i == crtc_index)
- break;
- i++;
- }
- if (!crtc)
- return 0;
- vc4_crtc = to_vc4_crtc(crtc);
-
- for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- crtc_regs[i].name, crtc_regs[i].reg,
- CRTC_READ(crtc_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -434,8 +390,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
bool debug_dump_regs = false;
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
if (vc4_crtc->channel == 2) {
@@ -476,8 +434,10 @@ static void vc4_crtc_mode_set_nofb(struct drm_crtc *crtc)
vc4_crtc_lut_load(crtc);
if (debug_dump_regs) {
- DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
- vc4_crtc_dump_regs(vc4_crtc);
+ struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+ dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
+ drm_crtc_index(crtc));
+ drm_print_regset32(&p, &vc4_crtc->regset);
}
}
@@ -834,6 +794,14 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc)
drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
vc4_crtc->event = NULL;
drm_crtc_vblank_put(crtc);
+
+ /* Wait for the page flip to unmask the underrun to ensure that
+ * the display list was updated by the hardware. Before that
+ * happens, the HVS will be using the previous display list with
+ * the CRTC and encoder already reconfigured, leading to
+ * underruns. This can be seen when reconfiguring the CRTC.
+ */
+ vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
}
@@ -1075,6 +1043,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
static const struct vc4_crtc_data pv0_data = {
.hvs_channel = 0,
+ .debugfs_name = "crtc0_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
@@ -1083,6 +1052,7 @@ static const struct vc4_crtc_data pv0_data = {
static const struct vc4_crtc_data pv1_data = {
.hvs_channel = 2,
+ .debugfs_name = "crtc1_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
@@ -1091,6 +1061,7 @@ static const struct vc4_crtc_data pv1_data = {
static const struct vc4_crtc_data pv2_data = {
.hvs_channel = 1,
+ .debugfs_name = "crtc2_regs",
.encoder_types = {
[PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
[PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
@@ -1169,11 +1140,16 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
if (!match)
return -ENODEV;
vc4_crtc->data = match->data;
+ vc4_crtc->pdev = pdev;
vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vc4_crtc->regs))
return PTR_ERR(vc4_crtc->regs);
+ vc4_crtc->regset.base = vc4_crtc->regs;
+ vc4_crtc->regset.regs = crtc_regs;
+ vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
+
/* For now, we create just the primary and the legacy cursor
* planes. We should be able to stack more planes on easily,
* but to do that we would need to compute the bandwidth
@@ -1247,6 +1223,9 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
platform_set_drvdata(pdev, vc4_crtc);
+ vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name,
+ &vc4_crtc->regset);
+
return 0;
err_destroy_planes:
diff --git a/drivers/gpu/drm/vc4/vc4_debugfs.c b/drivers/gpu/drm/vc4/vc4_debugfs.c
index 7a0003de71ab..f9dec08267dc 100644
--- a/drivers/gpu/drm/vc4/vc4_debugfs.c
+++ b/drivers/gpu/drm/vc4/vc4_debugfs.c
@@ -15,26 +15,82 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-static const struct drm_info_list vc4_debugfs_list[] = {
- {"bo_stats", vc4_bo_stats_debugfs, 0},
- {"dpi_regs", vc4_dpi_debugfs_regs, 0},
- {"dsi1_regs", vc4_dsi_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"hdmi_regs", vc4_hdmi_debugfs_regs, 0},
- {"vec_regs", vc4_vec_debugfs_regs, 0},
- {"txp_regs", vc4_txp_debugfs_regs, 0},
- {"hvs_regs", vc4_hvs_debugfs_regs, 0},
- {"crtc0_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)0},
- {"crtc1_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)1},
- {"crtc2_regs", vc4_crtc_debugfs_regs, 0, (void *)(uintptr_t)2},
- {"v3d_ident", vc4_v3d_debugfs_ident, 0},
- {"v3d_regs", vc4_v3d_debugfs_regs, 0},
+struct vc4_debugfs_info_entry {
+ struct list_head link;
+ struct drm_info_list info;
};
-#define VC4_DEBUGFS_ENTRIES ARRAY_SIZE(vc4_debugfs_list)
-
+/**
+ * Called at drm_dev_register() time on each of the minors registered
+ * by the DRM device, to attach the debugfs files.
+ */
int
vc4_debugfs_init(struct drm_minor *minor)
{
- return drm_debugfs_create_files(vc4_debugfs_list, VC4_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
+ struct vc4_dev *vc4 = to_vc4_dev(minor->dev);
+ struct vc4_debugfs_info_entry *entry;
+ struct dentry *dentry;
+
+ dentry = debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ &vc4->load_tracker_enabled);
+ if (!dentry)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &vc4->debugfs_list, link) {
+ int ret = drm_debugfs_create_files(&entry->info, 1,
+ minor->debugfs_root, minor);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int vc4_debugfs_regset32(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct debugfs_regset32 *regset = node->info_ent->data;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ drm_print_regset32(&p, regset);
+
+ return 0;
+}
+
+/**
+ * Registers a debugfs file with a callback function for a vc4 component.
+ *
+ * This is like drm_debugfs_create_files(), but that can only be
+ * called on a given DRM minor, while the various VC4 components want to
+ * register their debugfs files during the component bind process. We
+ * track the request and delay it to be called on each minor during
+ * vc4_debugfs_init().
+ */
+void vc4_debugfs_add_file(struct drm_device *dev,
+ const char *name,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ struct vc4_debugfs_info_entry *entry =
+ devm_kzalloc(dev->dev, sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return;
+
+ entry->info.name = name;
+ entry->info.show = show;
+ entry->info.data = data;
+
+ list_add(&entry->link, &vc4->debugfs_list);
+}
+
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *name,
+ struct debugfs_regset32 *regset)
+{
+ vc4_debugfs_add_file(drm, name, vc4_debugfs_regset32, regset);
}
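Editor's note: a minimal, purely illustrative sketch of how a component would use the two new entry points from its bind callback (the names vc4_foo, foo_regs and vc4_foo_debugfs_info are made up here; the real callers appear in the per-component diffs below):

	/* Illustrative only: queue a regset and a custom dump file at bind time;
	 * both become real debugfs entries later, in vc4_debugfs_init(). */
	static int foo_bind(struct device *dev, struct device *master, void *data)
	{
		struct drm_device *drm = dev_get_drvdata(master);
		struct vc4_foo *foo = dev_get_drvdata(dev);

		foo->regset.base = foo->regs;
		foo->regset.regs = foo_regs;
		foo->regset.nregs = ARRAY_SIZE(foo_regs);

		vc4_debugfs_add_regset32(drm, "foo_regs", &foo->regset);
		vc4_debugfs_add_file(drm, "foo_info", vc4_foo_debugfs_info, foo);

		return 0;
	}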
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 169521e547ba..34f90ca8f479 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -101,6 +101,8 @@ struct vc4_dpi {
struct clk *pixel_clock;
struct clk *core_clock;
+
+ struct debugfs_regset32 regset;
};
#define DPI_READ(offset) readl(dpi->regs + (offset))
@@ -118,37 +120,11 @@ to_vc4_dpi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dpi_encoder, base.base);
}
-#define DPI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dpi_regs[] = {
- DPI_REG(DPI_C),
- DPI_REG(DPI_ID),
+static const struct debugfs_reg32 dpi_regs[] = {
+ VC4_REG32(DPI_C),
+ VC4_REG32(DPI_ID),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_dpi *dpi = vc4->dpi;
- int i;
-
- if (!dpi)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(dpi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- dpi_regs[i].name, dpi_regs[i].reg,
- DPI_READ(dpi_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
@@ -314,6 +290,9 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
dpi->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(dpi->regs))
return PTR_ERR(dpi->regs);
+ dpi->regset.base = dpi->regs;
+ dpi->regset.regs = dpi_regs;
+ dpi->regset.nregs = ARRAY_SIZE(dpi_regs);
if (DPI_READ(DPI_ID) != DPI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
@@ -352,6 +331,8 @@ static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
vc4->dpi = dpi;
+ vc4_debugfs_add_regset32(drm, "dpi_regs", &dpi->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5fcd2f0da7f7..6d9be20a32be 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -72,30 +72,30 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (args->pad != 0)
return -EINVAL;
+ if (!vc4->v3d)
+ return -ENODEV;
+
switch (args->param) {
case DRM_VC4_PARAM_V3D_IDENT0:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0)
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ vc4_v3d_pm_put(vc4);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
case DRM_VC4_PARAM_SUPPORTS_ETC1:
@@ -200,7 +200,6 @@ static struct drm_driver vc4_drm_driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import = drm_gem_prime_import,
.gem_prime_export = vc4_prime_export,
- .gem_prime_res_obj = vc4_prime_res_obj,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
.gem_prime_import_sg_table = vc4_prime_import_sg_table,
.gem_prime_vmap = vc4_prime_vmap,
@@ -252,6 +251,7 @@ static int vc4_drm_bind(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm;
struct vc4_dev *vc4;
+ struct device_node *node;
int ret = 0;
dev->coherent_dma_mask = DMA_BIT_MASK(32);
@@ -260,12 +260,19 @@ static int vc4_drm_bind(struct device *dev)
if (!vc4)
return -ENOMEM;
+ /* If VC4 V3D is missing, don't advertise render nodes. */
+ node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL);
+ if (!node || !of_device_is_available(node))
+ vc4_drm_driver.driver_features &= ~DRIVER_RENDER;
+ of_node_put(node);
+
drm = drm_dev_alloc(&vc4_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
platform_set_drvdata(pdev, drm);
vc4->dev = drm;
drm->dev_private = vc4;
+ INIT_LIST_HEAD(&vc4->debugfs_list);
ret = vc4_bo_cache_init(drm);
if (ret)
@@ -281,13 +288,15 @@ static int vc4_drm_bind(struct device *dev)
drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
- ret = drm_dev_register(drm, 0);
+ ret = vc4_kms_load(drm);
if (ret < 0)
goto unbind_all;
- vc4_kms_load(drm);
+ ret = drm_dev_register(drm, 0);
+ if (ret < 0)
+ goto unbind_all;
- drm_fbdev_generic_setup(drm, 32);
+ drm_fbdev_generic_setup(drm, 16);
return 0;
@@ -312,6 +321,7 @@ static void vc4_drm_unbind(struct device *dev)
drm_mode_config_cleanup(drm);
+ drm_atomic_private_obj_fini(&vc4->load_tracker);
drm_atomic_private_obj_fini(&vc4->ctm_manager);
drm_dev_put(drm);
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 2c635f001c71..4f13f6262491 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -7,7 +7,6 @@
*/
#include <linux/mm_types.h>
-#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_util.h>
#include <drm/drm_encoder.h>
@@ -185,10 +184,20 @@ struct vc4_dev {
/* Bitmask of the current bin_alloc used for overflow memory. */
uint32_t bin_alloc_overflow;
+ /* Incremented when an underrun error happened after an atomic commit.
+ * This is particularly useful to detect when a specific modeset is too
+ * demanding in terms of memory or HVS bandwidth, which is hard to guess
+ * at atomic check time.
+ */
+ atomic_t underrun;
+
struct work_struct overflow_mem_work;
int power_refcount;
+ /* Set to true when the load tracker is active. */
+ bool load_tracker_enabled;
+
/* Mutex controlling the power refcount. */
struct mutex power_lock;
@@ -201,6 +210,12 @@ struct vc4_dev {
struct drm_modeset_lock ctm_state_lock;
struct drm_private_obj ctm_manager;
+ struct drm_private_obj load_tracker;
+
+ /* List of vc4_debugfs_info_entry for adding to debugfs once
+ * the minor is available (after drm_dev_register()).
+ */
+ struct list_head debugfs_list;
};
static inline struct vc4_dev *
@@ -240,10 +255,6 @@ struct vc4_bo {
*/
struct vc4_validated_shader_info *validated_shader;
- /* normally (resv == &_resv) except for imported bo's */
- struct reservation_object *resv;
- struct reservation_object _resv;
-
/* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
* for user-allocated labels.
*/
@@ -290,6 +301,7 @@ struct vc4_v3d {
struct platform_device *pdev;
void __iomem *regs;
struct clk *clk;
+ struct debugfs_regset32 regset;
};
struct vc4_hvs {
@@ -306,6 +318,7 @@ struct vc4_hvs {
spinlock_t mm_lock;
struct drm_mm_node mitchell_netravali_filter;
+ struct debugfs_regset32 regset;
};
struct vc4_plane {
@@ -376,6 +389,16 @@ struct vc4_plane_state {
* when async update is not possible.
*/
bool dlist_initialized;
+
+ /* Load of this plane on the HVS block. The load is expressed in HVS
+ * cycles/sec.
+ */
+ u64 hvs_load;
+
+ /* Memory bandwidth needed for this plane. This is expressed in
+ * bytes/sec.
+ */
+ u64 membus_load;
};
static inline struct vc4_plane_state *
@@ -411,10 +434,12 @@ struct vc4_crtc_data {
int hvs_channel;
enum vc4_encoder_type encoder_types[4];
+ const char *debugfs_name;
};
struct vc4_crtc {
struct drm_crtc base;
+ struct platform_device *pdev;
const struct vc4_crtc_data *data;
void __iomem *regs;
@@ -431,6 +456,8 @@ struct vc4_crtc {
u32 cob_size;
struct drm_pending_vblank_event *event;
+
+ struct debugfs_regset32 regset;
};
static inline struct vc4_crtc *
@@ -444,6 +471,8 @@ to_vc4_crtc(struct drm_crtc *crtc)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
+#define VC4_REG32(reg) { .name = #reg, .offset = reg }
+
struct vc4_exec_info {
/* Sequence number for this bin/render job. */
uint64_t seqno;
@@ -685,7 +714,6 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
vm_fault_t vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
-struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
@@ -693,7 +721,6 @@ struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
-int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
@@ -701,7 +728,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
@@ -714,17 +740,37 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data);
+void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset);
+#else
+static inline void vc4_debugfs_add_file(struct drm_device *drm,
+ const char *filename,
+ int (*show)(struct seq_file*, void*),
+ void *data)
+{
+}
+
+static inline void vc4_debugfs_add_regset32(struct drm_device *drm,
+ const char *filename,
+ struct debugfs_regset32 *regset)
+{
+}
+#endif
/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);
/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
-int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;
@@ -752,15 +798,12 @@ int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_txp.c */
extern struct platform_driver vc4_txp_driver;
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused);
/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
@@ -772,7 +815,8 @@ void vc4_irq_reset(struct drm_device *dev);
/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);
@@ -787,9 +831,10 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
+extern const struct of_device_id vc4_v3d_dt_match[];
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);
+int vc4_v3d_pm_get(struct vc4_dev *vc4);
+void vc4_v3d_pm_put(struct vc4_dev *vc4);
/* vc4_validate.c */
int
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index 11702e1d9011..9412709067f5 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -545,6 +545,8 @@ struct vc4_dsi {
struct completion xfer_completion;
int xfer_result;
+
+ struct debugfs_regset32 regset;
};
#define host_to_dsi(host) container_of(host, struct vc4_dsi, dsi_host)
@@ -605,113 +607,56 @@ to_vc4_dsi_encoder(struct drm_encoder *encoder)
return container_of(encoder, struct vc4_dsi_encoder, base.base);
}
-#define DSI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} dsi0_regs[] = {
- DSI_REG(DSI0_CTRL),
- DSI_REG(DSI0_STAT),
- DSI_REG(DSI0_HSTX_TO_CNT),
- DSI_REG(DSI0_LPRX_TO_CNT),
- DSI_REG(DSI0_TA_TO_CNT),
- DSI_REG(DSI0_PR_TO_CNT),
- DSI_REG(DSI0_DISP0_CTRL),
- DSI_REG(DSI0_DISP1_CTRL),
- DSI_REG(DSI0_INT_STAT),
- DSI_REG(DSI0_INT_EN),
- DSI_REG(DSI0_PHYC),
- DSI_REG(DSI0_HS_CLT0),
- DSI_REG(DSI0_HS_CLT1),
- DSI_REG(DSI0_HS_CLT2),
- DSI_REG(DSI0_HS_DLT3),
- DSI_REG(DSI0_HS_DLT4),
- DSI_REG(DSI0_HS_DLT5),
- DSI_REG(DSI0_HS_DLT6),
- DSI_REG(DSI0_HS_DLT7),
- DSI_REG(DSI0_PHY_AFEC0),
- DSI_REG(DSI0_PHY_AFEC1),
- DSI_REG(DSI0_ID),
+static const struct debugfs_reg32 dsi0_regs[] = {
+ VC4_REG32(DSI0_CTRL),
+ VC4_REG32(DSI0_STAT),
+ VC4_REG32(DSI0_HSTX_TO_CNT),
+ VC4_REG32(DSI0_LPRX_TO_CNT),
+ VC4_REG32(DSI0_TA_TO_CNT),
+ VC4_REG32(DSI0_PR_TO_CNT),
+ VC4_REG32(DSI0_DISP0_CTRL),
+ VC4_REG32(DSI0_DISP1_CTRL),
+ VC4_REG32(DSI0_INT_STAT),
+ VC4_REG32(DSI0_INT_EN),
+ VC4_REG32(DSI0_PHYC),
+ VC4_REG32(DSI0_HS_CLT0),
+ VC4_REG32(DSI0_HS_CLT1),
+ VC4_REG32(DSI0_HS_CLT2),
+ VC4_REG32(DSI0_HS_DLT3),
+ VC4_REG32(DSI0_HS_DLT4),
+ VC4_REG32(DSI0_HS_DLT5),
+ VC4_REG32(DSI0_HS_DLT6),
+ VC4_REG32(DSI0_HS_DLT7),
+ VC4_REG32(DSI0_PHY_AFEC0),
+ VC4_REG32(DSI0_PHY_AFEC1),
+ VC4_REG32(DSI0_ID),
};
-static const struct {
- u32 reg;
- const char *name;
-} dsi1_regs[] = {
- DSI_REG(DSI1_CTRL),
- DSI_REG(DSI1_STAT),
- DSI_REG(DSI1_HSTX_TO_CNT),
- DSI_REG(DSI1_LPRX_TO_CNT),
- DSI_REG(DSI1_TA_TO_CNT),
- DSI_REG(DSI1_PR_TO_CNT),
- DSI_REG(DSI1_DISP0_CTRL),
- DSI_REG(DSI1_DISP1_CTRL),
- DSI_REG(DSI1_INT_STAT),
- DSI_REG(DSI1_INT_EN),
- DSI_REG(DSI1_PHYC),
- DSI_REG(DSI1_HS_CLT0),
- DSI_REG(DSI1_HS_CLT1),
- DSI_REG(DSI1_HS_CLT2),
- DSI_REG(DSI1_HS_DLT3),
- DSI_REG(DSI1_HS_DLT4),
- DSI_REG(DSI1_HS_DLT5),
- DSI_REG(DSI1_HS_DLT6),
- DSI_REG(DSI1_HS_DLT7),
- DSI_REG(DSI1_PHY_AFEC0),
- DSI_REG(DSI1_PHY_AFEC1),
- DSI_REG(DSI1_ID),
+static const struct debugfs_reg32 dsi1_regs[] = {
+ VC4_REG32(DSI1_CTRL),
+ VC4_REG32(DSI1_STAT),
+ VC4_REG32(DSI1_HSTX_TO_CNT),
+ VC4_REG32(DSI1_LPRX_TO_CNT),
+ VC4_REG32(DSI1_TA_TO_CNT),
+ VC4_REG32(DSI1_PR_TO_CNT),
+ VC4_REG32(DSI1_DISP0_CTRL),
+ VC4_REG32(DSI1_DISP1_CTRL),
+ VC4_REG32(DSI1_INT_STAT),
+ VC4_REG32(DSI1_INT_EN),
+ VC4_REG32(DSI1_PHYC),
+ VC4_REG32(DSI1_HS_CLT0),
+ VC4_REG32(DSI1_HS_CLT1),
+ VC4_REG32(DSI1_HS_CLT2),
+ VC4_REG32(DSI1_HS_DLT3),
+ VC4_REG32(DSI1_HS_DLT4),
+ VC4_REG32(DSI1_HS_DLT5),
+ VC4_REG32(DSI1_HS_DLT6),
+ VC4_REG32(DSI1_HS_DLT7),
+ VC4_REG32(DSI1_PHY_AFEC0),
+ VC4_REG32(DSI1_PHY_AFEC1),
+ VC4_REG32(DSI1_ID),
};
-static void vc4_dsi_dump_regs(struct vc4_dsi *dsi)
-{
- int i;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *drm = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(drm);
- int dsi_index = (uintptr_t)node->info_ent->data;
- struct vc4_dsi *dsi = (dsi_index == 1 ? vc4->dsi1 : NULL);
- int i;
-
- if (!dsi)
- return 0;
-
- if (dsi->port == 0) {
- for (i = 0; i < ARRAY_SIZE(dsi0_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi0_regs[i].reg, dsi0_regs[i].name,
- DSI_READ(dsi0_regs[i].reg));
- }
- } else {
- for (i = 0; i < ARRAY_SIZE(dsi1_regs); i++) {
- seq_printf(m, "0x%04x (%s): 0x%08x\n",
- dsi1_regs[i].reg, dsi1_regs[i].name,
- DSI_READ(dsi1_regs[i].reg));
- }
- }
-
- return 0;
-}
-#endif
-
static void vc4_dsi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -900,8 +845,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
}
if (debug_dump_regs) {
- DRM_INFO("DSI regs before:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs before:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
/* Round up the clk_set_rate() request slightly, since
@@ -1135,8 +1081,9 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder)
drm_bridge_enable(dsi->bridge);
if (debug_dump_regs) {
- DRM_INFO("DSI regs after:\n");
- vc4_dsi_dump_regs(dsi);
+ struct drm_printer p = drm_info_printer(&dsi->pdev->dev);
+ dev_info(&dsi->pdev->dev, "DSI regs after:\n");
+ drm_print_regset32(&p, &dsi->regset);
}
}
@@ -1527,6 +1474,15 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
+ dsi->regset.base = dsi->regs;
+ if (dsi->port == 0) {
+ dsi->regset.regs = dsi0_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi0_regs);
+ } else {
+ dsi->regset.regs = dsi1_regs;
+ dsi->regset.nregs = ARRAY_SIZE(dsi1_regs);
+ }
+
if (DSI_PORT_READ(ID) != DSI_ID_VALUE) {
dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n",
DSI_PORT_READ(ID), DSI_ID_VALUE);
@@ -1662,6 +1618,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
*/
dsi->encoder->bridge = NULL;
+ if (dsi->port == 0)
+ vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset);
+ else
+ vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset);
+
pm_runtime_enable(dev);
return 0;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index aea2b8dfec17..d9311be32a4f 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -74,6 +74,11 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
u32 i;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
spin_lock_irqsave(&vc4->job_lock, irqflags);
kernel_state = vc4->hang_state;
if (!kernel_state) {
@@ -536,7 +541,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->bo[i]->base);
bo->seqno = seqno;
- reservation_object_add_shared_fence(bo->resv, exec->fence);
+ reservation_object_add_shared_fence(bo->base.base.resv, exec->fence);
}
list_for_each_entry(bo, &exec->unref_list, unref_head) {
@@ -547,7 +552,7 @@ vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
bo = to_vc4_bo(&exec->rcl_write_bo[i]->base);
bo->write_seqno = seqno;
- reservation_object_add_excl_fence(bo->resv, exec->fence);
+ reservation_object_add_excl_fence(bo->base.base.resv, exec->fence);
}
}
@@ -559,7 +564,7 @@ vc4_unlock_bo_reservations(struct drm_device *dev,
int i;
for (i = 0; i < exec->bo_count; i++) {
- struct vc4_bo *bo = to_vc4_bo(&exec->bo[i]->base);
+ struct drm_gem_object *bo = &exec->bo[i]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -581,13 +586,13 @@ vc4_lock_bo_reservations(struct drm_device *dev,
{
int contended_lock = -1;
int i, ret;
- struct vc4_bo *bo;
+ struct drm_gem_object *bo;
ww_acquire_init(acquire_ctx, &reservation_ww_class);
retry:
if (contended_lock != -1) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
acquire_ctx);
if (ret) {
@@ -600,19 +605,19 @@ retry:
if (i == contended_lock)
continue;
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = ww_mutex_lock_interruptible(&bo->resv->lock, acquire_ctx);
if (ret) {
int j;
for (j = 0; j < i; j++) {
- bo = to_vc4_bo(&exec->bo[j]->base);
+ bo = &exec->bo[j]->base;
ww_mutex_unlock(&bo->resv->lock);
}
if (contended_lock != -1 && contended_lock >= i) {
- bo = to_vc4_bo(&exec->bo[contended_lock]->base);
+ bo = &exec->bo[contended_lock]->base;
ww_mutex_unlock(&bo->resv->lock);
}
@@ -633,7 +638,7 @@ retry:
* before we commit the CL to the hardware.
*/
for (i = 0; i < exec->bo_count; i++) {
- bo = to_vc4_bo(&exec->bo[i]->base);
+ bo = &exec->bo[i]->base;
ret = reservation_object_reserve_shared(bo->resv, 1);
if (ret) {
@@ -964,12 +969,7 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
/* Release the reference we had on the perf monitor. */
vc4_perfmon_put(exec->perfmon);
- mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0) {
- pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
- pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
- }
- mutex_unlock(&vc4->power_lock);
+ vc4_v3d_pm_put(vc4);
kfree(exec);
}
@@ -1124,6 +1124,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
struct dma_fence *in_fence;
int ret = 0;
+ if (!vc4->v3d) {
+ DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
if ((args->flags & ~(VC4_SUBMIT_CL_USE_CLEAR_COLOR |
VC4_SUBMIT_CL_FIXED_RCL_ORDER |
VC4_SUBMIT_CL_RCL_ORDER_INCREASING_X |
@@ -1143,17 +1148,11 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
}
- mutex_lock(&vc4->power_lock);
- if (vc4->power_refcount++ == 0) {
- ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
- if (ret < 0) {
- mutex_unlock(&vc4->power_lock);
- vc4->power_refcount--;
- kfree(exec);
- return ret;
- }
+ ret = vc4_v3d_pm_get(vc4);
+ if (ret) {
+ kfree(exec);
+ return ret;
}
- mutex_unlock(&vc4->power_lock);
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);
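Editor's note: the vc4_v3d_pm_get()/vc4_v3d_pm_put() helpers live in vc4_v3d.c and are not part of this hunk; the following is a plausible sketch inferred from the refcounted pm_runtime sequence removed above, offered as an illustration rather than the actual patch content:

	/* Sketch only: wrap the power refcount + runtime-PM sequence that the
	 * ioctl paths used to open-code, so callers just get/put the V3D. */
	int vc4_v3d_pm_get(struct vc4_dev *vc4)
	{
		int ret = 0;

		mutex_lock(&vc4->power_lock);
		if (vc4->power_refcount++ == 0) {
			ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
			if (ret < 0)
				vc4->power_refcount--;
		}
		mutex_unlock(&vc4->power_lock);

		return ret < 0 ? ret : 0;
	}

	void vc4_v3d_pm_put(struct vc4_dev *vc4)
	{
		mutex_lock(&vc4->power_lock);
		if (--vc4->power_refcount == 0) {
			pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
			pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
		}
		mutex_unlock(&vc4->power_lock);
	}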
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 88fd5df7e7dc..99fc8569e0f5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -97,6 +97,9 @@ struct vc4_hdmi {
struct clk *pixel_clock;
struct clk *hsm_clock;
+
+ struct debugfs_regset32 hdmi_regset;
+ struct debugfs_regset32 hd_regset;
};
#define HDMI_READ(offset) readl(vc4->hdmi->hdmicore_regs + offset)
@@ -134,103 +137,69 @@ to_vc4_hdmi_connector(struct drm_connector *connector)
return container_of(connector, struct vc4_hdmi_connector, base);
}
-#define HDMI_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hdmi_regs[] = {
- HDMI_REG(VC4_HDMI_CORE_REV),
- HDMI_REG(VC4_HDMI_SW_RESET_CONTROL),
- HDMI_REG(VC4_HDMI_HOTPLUG_INT),
- HDMI_REG(VC4_HDMI_HOTPLUG),
- HDMI_REG(VC4_HDMI_MAI_CHANNEL_MAP),
- HDMI_REG(VC4_HDMI_MAI_CONFIG),
- HDMI_REG(VC4_HDMI_MAI_FORMAT),
- HDMI_REG(VC4_HDMI_AUDIO_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_RAM_PACKET_CONFIG),
- HDMI_REG(VC4_HDMI_HORZA),
- HDMI_REG(VC4_HDMI_HORZB),
- HDMI_REG(VC4_HDMI_FIFO_CTL),
- HDMI_REG(VC4_HDMI_SCHEDULER_CONTROL),
- HDMI_REG(VC4_HDMI_VERTA0),
- HDMI_REG(VC4_HDMI_VERTA1),
- HDMI_REG(VC4_HDMI_VERTB0),
- HDMI_REG(VC4_HDMI_VERTB1),
- HDMI_REG(VC4_HDMI_TX_PHY_RESET_CTL),
- HDMI_REG(VC4_HDMI_TX_PHY_CTL0),
-
- HDMI_REG(VC4_HDMI_CEC_CNTRL_1),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_2),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_3),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_4),
- HDMI_REG(VC4_HDMI_CEC_CNTRL_5),
- HDMI_REG(VC4_HDMI_CPU_STATUS),
- HDMI_REG(VC4_HDMI_CPU_MASK_STATUS),
-
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_RX_DATA_4),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_1),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_2),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_3),
- HDMI_REG(VC4_HDMI_CEC_TX_DATA_4),
+static const struct debugfs_reg32 hdmi_regs[] = {
+ VC4_REG32(VC4_HDMI_CORE_REV),
+ VC4_REG32(VC4_HDMI_SW_RESET_CONTROL),
+ VC4_REG32(VC4_HDMI_HOTPLUG_INT),
+ VC4_REG32(VC4_HDMI_HOTPLUG),
+ VC4_REG32(VC4_HDMI_MAI_CHANNEL_MAP),
+ VC4_REG32(VC4_HDMI_MAI_CONFIG),
+ VC4_REG32(VC4_HDMI_MAI_FORMAT),
+ VC4_REG32(VC4_HDMI_AUDIO_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_RAM_PACKET_CONFIG),
+ VC4_REG32(VC4_HDMI_HORZA),
+ VC4_REG32(VC4_HDMI_HORZB),
+ VC4_REG32(VC4_HDMI_FIFO_CTL),
+ VC4_REG32(VC4_HDMI_SCHEDULER_CONTROL),
+ VC4_REG32(VC4_HDMI_VERTA0),
+ VC4_REG32(VC4_HDMI_VERTA1),
+ VC4_REG32(VC4_HDMI_VERTB0),
+ VC4_REG32(VC4_HDMI_VERTB1),
+ VC4_REG32(VC4_HDMI_TX_PHY_RESET_CTL),
+ VC4_REG32(VC4_HDMI_TX_PHY_CTL0),
+
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_1),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_2),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_3),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_4),
+ VC4_REG32(VC4_HDMI_CEC_CNTRL_5),
+ VC4_REG32(VC4_HDMI_CPU_STATUS),
+ VC4_REG32(VC4_HDMI_CPU_MASK_STATUS),
+
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_RX_DATA_4),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_1),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_2),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_3),
+ VC4_REG32(VC4_HDMI_CEC_TX_DATA_4),
};
-static const struct {
- u32 reg;
- const char *name;
-} hd_regs[] = {
- HDMI_REG(VC4_HD_M_CTL),
- HDMI_REG(VC4_HD_MAI_CTL),
- HDMI_REG(VC4_HD_MAI_THR),
- HDMI_REG(VC4_HD_MAI_FMT),
- HDMI_REG(VC4_HD_MAI_SMP),
- HDMI_REG(VC4_HD_VID_CTL),
- HDMI_REG(VC4_HD_CSC_CTL),
- HDMI_REG(VC4_HD_FRAME_COUNT),
+static const struct debugfs_reg32 hd_regs[] = {
+ VC4_REG32(VC4_HD_M_CTL),
+ VC4_REG32(VC4_HD_MAI_CTL),
+ VC4_REG32(VC4_HD_MAI_THR),
+ VC4_REG32(VC4_HD_MAI_FMT),
+ VC4_REG32(VC4_HD_MAI_SMP),
+ VC4_REG32(VC4_HD_VID_CTL),
+ VC4_REG32(VC4_HD_CSC_CTL),
+ VC4_REG32(VC4_HD_FRAME_COUNT),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hdmi_regs[i].name, hdmi_regs[i].reg,
- HDMI_READ(hdmi_regs[i].reg));
- }
+ struct vc4_hdmi *hdmi = vc4->hdmi;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hd_regs[i].name, hd_regs[i].reg,
- HD_READ(hd_regs[i].reg));
- }
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
-
-static void vc4_hdmi_dump_regs(struct drm_device *dev)
-{
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(hdmi_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hdmi_regs[i].reg, hdmi_regs[i].name,
- HDMI_READ(hdmi_regs[i].reg));
- }
- for (i = 0; i < ARRAY_SIZE(hd_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hd_regs[i].reg, hd_regs[i].name,
- HD_READ(hd_regs[i].reg));
- }
-}
static enum drm_connector_status
vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
@@ -561,8 +530,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_TX_PHY_RESET_CTL, 0);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs before:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs before:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL, 0);
@@ -637,8 +609,11 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder)
HDMI_WRITE(VC4_HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N);
if (debug_dump_regs) {
- DRM_INFO("HDMI regs after:\n");
- vc4_hdmi_dump_regs(dev);
+ struct drm_printer p = drm_info_printer(&hdmi->pdev->dev);
+
+ dev_info(&hdmi->pdev->dev, "HDMI regs after:\n");
+ drm_print_regset32(&p, &hdmi->hdmi_regset);
+ drm_print_regset32(&p, &hdmi->hd_regset);
}
HD_WRITE(VC4_HD_VID_CTL,
@@ -1333,6 +1308,13 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hdmi->hd_regs))
return PTR_ERR(hdmi->hd_regs);
+ hdmi->hdmi_regset.base = hdmi->hdmicore_regs;
+ hdmi->hdmi_regset.regs = hdmi_regs;
+ hdmi->hdmi_regset.nregs = ARRAY_SIZE(hdmi_regs);
+ hdmi->hd_regset.base = hdmi->hd_regs;
+ hdmi->hd_regset.regs = hd_regs;
+ hdmi->hd_regset.nregs = ARRAY_SIZE(hd_regs);
+
hdmi->pixel_clock = devm_clk_get(dev, "pixel");
if (IS_ERR(hdmi->pixel_clock)) {
DRM_ERROR("Failed to get pixel clock\n");
@@ -1448,6 +1430,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_destroy_encoder;
+ vc4_debugfs_add_file(drm, "hdmi_regs", vc4_hdmi_debugfs_regs, hdmi);
+
return 0;
#ifdef CONFIG_DRM_VC4_HDMI_CEC
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index 5d8c749c9749..f746e9a7a88c 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -22,58 +22,52 @@
* each CRTC.
*/
+#include <drm/drm_atomic_helper.h>
#include <linux/component.h>
#include "vc4_drv.h"
#include "vc4_regs.h"
-#define HVS_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} hvs_regs[] = {
- HVS_REG(SCALER_DISPCTRL),
- HVS_REG(SCALER_DISPSTAT),
- HVS_REG(SCALER_DISPID),
- HVS_REG(SCALER_DISPECTRL),
- HVS_REG(SCALER_DISPPROF),
- HVS_REG(SCALER_DISPDITHER),
- HVS_REG(SCALER_DISPEOLN),
- HVS_REG(SCALER_DISPLIST0),
- HVS_REG(SCALER_DISPLIST1),
- HVS_REG(SCALER_DISPLIST2),
- HVS_REG(SCALER_DISPLSTAT),
- HVS_REG(SCALER_DISPLACT0),
- HVS_REG(SCALER_DISPLACT1),
- HVS_REG(SCALER_DISPLACT2),
- HVS_REG(SCALER_DISPCTRL0),
- HVS_REG(SCALER_DISPBKGND0),
- HVS_REG(SCALER_DISPSTAT0),
- HVS_REG(SCALER_DISPBASE0),
- HVS_REG(SCALER_DISPCTRL1),
- HVS_REG(SCALER_DISPBKGND1),
- HVS_REG(SCALER_DISPSTAT1),
- HVS_REG(SCALER_DISPBASE1),
- HVS_REG(SCALER_DISPCTRL2),
- HVS_REG(SCALER_DISPBKGND2),
- HVS_REG(SCALER_DISPSTAT2),
- HVS_REG(SCALER_DISPBASE2),
- HVS_REG(SCALER_DISPALPHA2),
- HVS_REG(SCALER_OLEDOFFS),
- HVS_REG(SCALER_OLEDCOEF0),
- HVS_REG(SCALER_OLEDCOEF1),
- HVS_REG(SCALER_OLEDCOEF2),
+static const struct debugfs_reg32 hvs_regs[] = {
+ VC4_REG32(SCALER_DISPCTRL),
+ VC4_REG32(SCALER_DISPSTAT),
+ VC4_REG32(SCALER_DISPID),
+ VC4_REG32(SCALER_DISPECTRL),
+ VC4_REG32(SCALER_DISPPROF),
+ VC4_REG32(SCALER_DISPDITHER),
+ VC4_REG32(SCALER_DISPEOLN),
+ VC4_REG32(SCALER_DISPLIST0),
+ VC4_REG32(SCALER_DISPLIST1),
+ VC4_REG32(SCALER_DISPLIST2),
+ VC4_REG32(SCALER_DISPLSTAT),
+ VC4_REG32(SCALER_DISPLACT0),
+ VC4_REG32(SCALER_DISPLACT1),
+ VC4_REG32(SCALER_DISPLACT2),
+ VC4_REG32(SCALER_DISPCTRL0),
+ VC4_REG32(SCALER_DISPBKGND0),
+ VC4_REG32(SCALER_DISPSTAT0),
+ VC4_REG32(SCALER_DISPBASE0),
+ VC4_REG32(SCALER_DISPCTRL1),
+ VC4_REG32(SCALER_DISPBKGND1),
+ VC4_REG32(SCALER_DISPSTAT1),
+ VC4_REG32(SCALER_DISPBASE1),
+ VC4_REG32(SCALER_DISPCTRL2),
+ VC4_REG32(SCALER_DISPBKGND2),
+ VC4_REG32(SCALER_DISPSTAT2),
+ VC4_REG32(SCALER_DISPBASE2),
+ VC4_REG32(SCALER_DISPALPHA2),
+ VC4_REG32(SCALER_OLEDOFFS),
+ VC4_REG32(SCALER_OLEDCOEF0),
+ VC4_REG32(SCALER_OLEDCOEF1),
+ VC4_REG32(SCALER_OLEDCOEF2),
};
void vc4_hvs_dump_state(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct drm_printer p = drm_info_printer(&vc4->hvs->pdev->dev);
int i;
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- DRM_INFO("0x%04x (%s): 0x%08x\n",
- hvs_regs[i].reg, hvs_regs[i].name,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_print_regset32(&p, &vc4->hvs->regset);
DRM_INFO("HVS ctx:\n");
for (i = 0; i < 64; i += 4) {
@@ -86,23 +80,17 @@ void vc4_hvs_dump_state(struct drm_device *dev)
}
}
-#ifdef CONFIG_DEBUG_FS
-int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_hvs_debugfs_underrun(struct seq_file *m, void *data)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
+ struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
+ struct drm_printer p = drm_seq_file_printer(m);
- for (i = 0; i < ARRAY_SIZE(hvs_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- hvs_regs[i].name, hvs_regs[i].reg,
- HVS_READ(hvs_regs[i].reg));
- }
+ drm_printf(&p, "%d\n", atomic_read(&vc4->underrun));
return 0;
}
-#endif
/* The filter kernel is composed of dwords each containing 3 9-bit
* signed integers packed next to each other.
@@ -166,6 +154,67 @@ static int vc4_hvs_upload_linear_kernel(struct vc4_hvs *hvs,
return 0;
}
+void vc4_hvs_mask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl &= ~SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
+
+void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ u32 dispctrl = HVS_READ(SCALER_DISPCTRL);
+
+ dispctrl |= SCALER_DISPCTRL_DSPEISLUR(channel);
+
+ HVS_WRITE(SCALER_DISPSTAT,
+ SCALER_DISPSTAT_EUFLOW(channel));
+ HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+}
+
+static void vc4_hvs_report_underrun(struct drm_device *dev)
+{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+
+ atomic_inc(&vc4->underrun);
+ DRM_DEV_ERROR(dev->dev, "HVS underrun\n");
+}
+
+static irqreturn_t vc4_hvs_irq_handler(int irq, void *data)
+{
+ struct drm_device *dev = data;
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
+ irqreturn_t irqret = IRQ_NONE;
+ int channel;
+ u32 control;
+ u32 status;
+
+ status = HVS_READ(SCALER_DISPSTAT);
+ control = HVS_READ(SCALER_DISPCTRL);
+
+ for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
+ /* Interrupt masking is not always honored, so check it here. */
+ if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
+ control & SCALER_DISPCTRL_DSPEISLUR(channel)) {
+ vc4_hvs_mask_underrun(dev, channel);
+ vc4_hvs_report_underrun(dev);
+
+ irqret = IRQ_HANDLED;
+ }
+ }
+
+ /* Clear every per-channel interrupt flag. */
+ HVS_WRITE(SCALER_DISPSTAT, SCALER_DISPSTAT_IRQMASK(0) |
+ SCALER_DISPSTAT_IRQMASK(1) |
+ SCALER_DISPSTAT_IRQMASK(2));
+
+ return irqret;
+}
+
static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -185,6 +234,10 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
if (IS_ERR(hvs->regs))
return PTR_ERR(hvs->regs);
+ hvs->regset.base = hvs->regs;
+ hvs->regset.regs = hvs_regs;
+ hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
+
hvs->dlist = hvs->regs + SCALER_DLIST_START;
spin_lock_init(&hvs->mm_lock);
@@ -219,15 +272,40 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data)
dispctrl = HVS_READ(SCALER_DISPCTRL);
dispctrl |= SCALER_DISPCTRL_ENABLE;
+ dispctrl |= SCALER_DISPCTRL_DISPEIRQ(0) |
+ SCALER_DISPCTRL_DISPEIRQ(1) |
+ SCALER_DISPCTRL_DISPEIRQ(2);
/* Set DSP3 (PV1) to use HVS channel 2, which would otherwise
* be unused.
*/
dispctrl &= ~SCALER_DISPCTRL_DSP3_MUX_MASK;
+ dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
+ SCALER_DISPCTRL_SLVWREIRQ |
+ SCALER_DISPCTRL_SLVRDEIRQ |
+ SCALER_DISPCTRL_DSPEIEOF(0) |
+ SCALER_DISPCTRL_DSPEIEOF(1) |
+ SCALER_DISPCTRL_DSPEIEOF(2) |
+ SCALER_DISPCTRL_DSPEIEOLN(0) |
+ SCALER_DISPCTRL_DSPEIEOLN(1) |
+ SCALER_DISPCTRL_DSPEIEOLN(2) |
+ SCALER_DISPCTRL_DSPEISLUR(0) |
+ SCALER_DISPCTRL_DSPEISLUR(1) |
+ SCALER_DISPCTRL_DSPEISLUR(2) |
+ SCALER_DISPCTRL_SCLEIRQ);
dispctrl |= VC4_SET_FIELD(2, SCALER_DISPCTRL_DSP3_MUX);
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
+ ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
+ vc4_hvs_irq_handler, 0, "vc4 hvs", drm);
+ if (ret)
+ return ret;
+
+ vc4_debugfs_add_regset32(drm, "hvs_regs", &hvs->regset);
+ vc4_debugfs_add_file(drm, "hvs_underrun", vc4_hvs_debugfs_underrun,
+ NULL);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index 4cd2ccfe15f4..ffd0a4388752 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -229,6 +229,9 @@ vc4_irq_preinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
init_waitqueue_head(&vc4->job_wait_queue);
INIT_WORK(&vc4->overflow_mem_work, vc4_overflow_mem_work);
@@ -243,6 +246,9 @@ vc4_irq_postinstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return 0;
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
@@ -254,6 +260,9 @@ vc4_irq_uninstall(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ if (!vc4->v3d)
+ return;
+
/* Disable sending interrupts for our driver's IRQs. */
V3D_WRITE(V3D_INTDIS, V3D_DRIVER_IRQS);
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 91b8c72ff361..295dacc8bcb9 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -34,6 +34,18 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv)
return container_of(priv, struct vc4_ctm_state, base);
}
+struct vc4_load_tracker_state {
+ struct drm_private_state base;
+ u64 hvs_load;
+ u64 membus_load;
+};
+
+static struct vc4_load_tracker_state *
+to_vc4_load_tracker_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct vc4_load_tracker_state, base);
+}
+
static struct vc4_ctm_state *vc4_get_ctm_state(struct drm_atomic_state *state,
struct drm_private_obj *manager)
{
@@ -138,6 +150,16 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_crtc *vc4_crtc;
+ int i;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if (!state->crtcs[i].ptr || !state->crtcs[i].commit)
+ continue;
+
+ vc4_crtc = to_vc4_crtc(state->crtcs[i].ptr);
+ vc4_hvs_mask_underrun(dev, vc4_crtc->channel);
+ }
drm_atomic_helper_wait_for_fences(dev, state, false);
@@ -385,6 +407,85 @@ vc4_ctm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
return 0;
}
+static int vc4_load_tracker_atomic_check(struct drm_atomic_state *state)
+{
+ struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct vc4_dev *vc4 = to_vc4_dev(state->dev);
+ struct vc4_load_tracker_state *load_state;
+ struct drm_private_state *priv_state;
+ struct drm_plane *plane;
+ int i;
+
+ priv_state = drm_atomic_get_private_obj_state(state,
+ &vc4->load_tracker);
+ if (IS_ERR(priv_state))
+ return PTR_ERR(priv_state);
+
+ load_state = to_vc4_load_tracker_state(priv_state);
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state,
+ new_plane_state, i) {
+ struct vc4_plane_state *vc4_plane_state;
+
+ if (old_plane_state->fb && old_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(old_plane_state);
+ load_state->membus_load -= vc4_plane_state->membus_load;
+ load_state->hvs_load -= vc4_plane_state->hvs_load;
+ }
+
+ if (new_plane_state->fb && new_plane_state->crtc) {
+ vc4_plane_state = to_vc4_plane_state(new_plane_state);
+ load_state->membus_load += vc4_plane_state->membus_load;
+ load_state->hvs_load += vc4_plane_state->hvs_load;
+ }
+ }
+
+ /* Don't check the load when the tracker is disabled. */
+ if (!vc4->load_tracker_enabled)
+ return 0;
+
+ /* The absolute limit is 2Gbyte/sec, but let's take a margin to let
+ * the system work when other blocks are accessing the memory.
+ */
+ if (load_state->membus_load > SZ_1G + SZ_512M)
+ return -ENOSPC;
+
+ /* HVS clock is supposed to run at 250 MHz; let's take a margin and
+ * consider the maximum number of cycles to be 240M.
+ */
+ if (load_state->hvs_load > 240000000ULL)
+ return -ENOSPC;
+
+ return 0;
+}
+
+static struct drm_private_state *
+vc4_load_tracker_duplicate_state(struct drm_private_obj *obj)
+{
+ struct vc4_load_tracker_state *state;
+
+ state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+
+ return &state->base;
+}
+
+static void vc4_load_tracker_destroy_state(struct drm_private_obj *obj,
+ struct drm_private_state *state)
+{
+ struct vc4_load_tracker_state *load_state;
+
+ load_state = to_vc4_load_tracker_state(state);
+ kfree(load_state);
+}
+
+static const struct drm_private_state_funcs vc4_load_tracker_state_funcs = {
+ .atomic_duplicate_state = vc4_load_tracker_duplicate_state,
+ .atomic_destroy_state = vc4_load_tracker_destroy_state,
+};
+
static int
vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
{
@@ -394,7 +495,11 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state)
if (ret < 0)
return ret;
- return drm_atomic_helper_check(dev, state);
+ ret = drm_atomic_helper_check(dev, state);
+ if (ret)
+ return ret;
+
+ return vc4_load_tracker_atomic_check(state);
}
static const struct drm_mode_config_funcs vc4_mode_funcs = {
@@ -407,13 +512,20 @@ int vc4_kms_load(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_ctm_state *ctm_state;
+ struct vc4_load_tracker_state *load_state;
int ret;
+ /* Start with the load tracker enabled. Can be disabled through the
+ * debugfs hvs_load_tracker file.
+ */
+ vc4->load_tracker_enabled = true;
+
sema_init(&vc4->async_modeset, 1);
/* Set support for vblank irq fast disable, before drm_vblank_init() */
dev->vblank_disable_immediate = true;
+ dev->irq_enabled = true;
ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
if (ret < 0) {
dev_err(dev->dev, "failed to initialize vblank\n");
@@ -436,6 +548,15 @@ int vc4_kms_load(struct drm_device *dev)
drm_atomic_private_obj_init(dev, &vc4->ctm_manager, &ctm_state->base,
&vc4_ctm_state_funcs);
+ load_state = kzalloc(sizeof(*load_state), GFP_KERNEL);
+ if (!load_state) {
+ drm_atomic_private_obj_fini(&vc4->ctm_manager);
+ return -ENOMEM;
+ }
+
+ drm_atomic_private_obj_init(dev, &vc4->load_tracker, &load_state->base,
+ &vc4_load_tracker_state_funcs);
+
drm_mode_config_reset(dev);
drm_kms_helper_poll_init(dev);
diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index 495150415020..f4aa75efd16b 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -100,12 +100,18 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_create *req = data;
struct vc4_perfmon *perfmon;
unsigned int i;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Creating perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
/* Number of monitored counters cannot exceed HW limits. */
if (req->ncounters > DRM_VC4_MAX_PERF_COUNTERS ||
!req->ncounters)
@@ -146,10 +152,16 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_destroy *req = data;
struct vc4_perfmon *perfmon;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_remove(&vc4file->perfmon.idr, req->id);
mutex_unlock(&vc4file->perfmon.lock);
@@ -164,11 +176,17 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_file *vc4file = file_priv->driver_priv;
struct drm_vc4_perfmon_get_values *req = data;
struct vc4_perfmon *perfmon;
int ret;
+ if (!vc4->v3d) {
+ DRM_DEBUG("Getting perfmon no VC4 V3D probed\n");
+ return -ENODEV;
+ }
+
mutex_lock(&vc4file->perfmon.lock);
perfmon = idr_find(&vc4file->perfmon.idr, req->id);
vc4_perfmon_get(perfmon);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index d098337c10e9..4d918d3e4858 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -488,6 +488,61 @@ static void vc4_write_scaling_parameters(struct drm_plane_state *state,
}
}
+static void vc4_plane_calc_load(struct drm_plane_state *state)
+{
+ unsigned int hvs_load_shift, vrefresh, i;
+ struct drm_framebuffer *fb = state->fb;
+ struct vc4_plane_state *vc4_state;
+ struct drm_crtc_state *crtc_state;
+ unsigned int vscale_factor;
+
+ vc4_state = to_vc4_plane_state(state);
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state,
+ state->crtc);
+ vrefresh = drm_mode_vrefresh(&crtc_state->adjusted_mode);
+
+ /* The HVS is able to process 2 pixels/cycle when scaling the source,
+ * 4 pixels/cycle otherwise.
+ * Alpha blending step seems to be pipelined and it's always operating
+ * at 4 pixels/cycle, so the limiting aspect here seems to be the
+ * scaler block.
+ * HVS load is expressed in clk-cycles/sec (AKA Hz).
+ */
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE)
+ hvs_load_shift = 1;
+ else
+ hvs_load_shift = 2;
+
+ vc4_state->membus_load = 0;
+ vc4_state->hvs_load = 0;
+ for (i = 0; i < fb->format->num_planes; i++) {
+ /* Even if the bandwidth/plane required for a single frame is
+ *
+ * vc4_state->src_w[i] * vc4_state->src_h[i] * cpp * vrefresh
+ *
+ * when downscaling, we have to read more pixels per line in
+ * the time frame reserved for a single line, so the bandwidth
+ * demand can be momentarily higher. To account for that, we
+ * calculate the down-scaling factor and multiply the plane
+ * load by this number. We're likely over-estimating the read
+ * demand, but that's better than under-estimating it.
+ */
+ vscale_factor = DIV_ROUND_UP(vc4_state->src_h[i],
+ vc4_state->crtc_h);
+ vc4_state->membus_load += vc4_state->src_w[i] *
+ vc4_state->src_h[i] * vscale_factor *
+ fb->format->cpp[i];
+ vc4_state->hvs_load += vc4_state->crtc_h * vc4_state->crtc_w;
+ }
+
+ vc4_state->hvs_load *= vrefresh;
+ vc4_state->hvs_load >>= hvs_load_shift;
+ vc4_state->membus_load *= vrefresh;
+}
+
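Editor's note, as a back-of-the-envelope check of the formula above (not part of the patch): a full-screen 1920x1080 XRGB8888 plane (cpp = 4) refreshed at 60 Hz with no scaling (vscale_factor = 1, hvs_load_shift = 2) works out to

	membus_load = 1920 * 1080 * 1 * 4 * 60        ~= 498 MB/s
	hvs_load    = (1920 * 1080 * 60) >> 2         ~= 31M cycles/s

both comfortably below the 1.5 GB/s and 240M cycles/s thresholds applied in vc4_load_tracker_atomic_check() earlier in this series.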
static int vc4_plane_allocate_lbm(struct drm_plane_state *state)
{
struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev);
@@ -875,6 +930,8 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
*/
vc4_state->dlist_initialized = 1;
+ vc4_plane_calc_load(state);
+
return 0;
}
@@ -1082,7 +1139,7 @@ static int vc4_prepare_fb(struct drm_plane *plane,
bo = to_vc4_bo(&drm_fb_cma_get_gem_obj(state->fb, 0)->base);
- fence = reservation_object_get_excl_rcu(bo->resv);
+ fence = reservation_object_get_excl_rcu(bo->base.base.resv);
drm_atomic_set_fence_for_plane(state, fence);
if (plane->state->fb == state->fb)
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index 931088014272..c0c5fadaf7e3 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -212,11 +212,11 @@
#define PV_HACT_ACT 0x30
+#define SCALER_CHANNELS_COUNT 3
+
#define SCALER_DISPCTRL 0x00000000
/* Global register for clock gating the HVS */
# define SCALER_DISPCTRL_ENABLE BIT(31)
-# define SCALER_DISPCTRL_DSP2EISLUR BIT(15)
-# define SCALER_DISPCTRL_DSP1EISLUR BIT(14)
# define SCALER_DISPCTRL_DSP3_MUX_MASK VC4_MASK(19, 18)
# define SCALER_DISPCTRL_DSP3_MUX_SHIFT 18
@@ -224,45 +224,25 @@
* SCALER_DISPSTAT_IRQDISP0. Note that short frame contributions are
* always enabled.
*/
-# define SCALER_DISPCTRL_DSP0EISLUR BIT(13)
-# define SCALER_DISPCTRL_DSP2EIEOLN BIT(12)
-# define SCALER_DISPCTRL_DSP2EIEOF BIT(11)
-# define SCALER_DISPCTRL_DSP1EIEOLN BIT(10)
-# define SCALER_DISPCTRL_DSP1EIEOF BIT(9)
+# define SCALER_DISPCTRL_DSPEISLUR(x) BIT(13 + (x))
/* Enables Display 0 end-of-line-N contribution to
* SCALER_DISPSTAT_IRQDISP0
*/
-# define SCALER_DISPCTRL_DSP0EIEOLN BIT(8)
+# define SCALER_DISPCTRL_DSPEIEOLN(x) BIT(8 + ((x) * 2))
/* Enables Display 0 EOF contribution to SCALER_DISPSTAT_IRQDISP0 */
-# define SCALER_DISPCTRL_DSP0EIEOF BIT(7)
+# define SCALER_DISPCTRL_DSPEIEOF(x) BIT(7 + ((x) * 2))
# define SCALER_DISPCTRL_SLVRDEIRQ BIT(6)
# define SCALER_DISPCTRL_SLVWREIRQ BIT(5)
# define SCALER_DISPCTRL_DMAEIRQ BIT(4)
-# define SCALER_DISPCTRL_DISP2EIRQ BIT(3)
-# define SCALER_DISPCTRL_DISP1EIRQ BIT(2)
/* Enables interrupt generation on the enabled EOF/EOLN/EISLUR
* bits and short frames.
*/
-# define SCALER_DISPCTRL_DISP0EIRQ BIT(1)
+# define SCALER_DISPCTRL_DISPEIRQ(x) BIT(1 + (x))
/* Enables interrupt generation on scaler profiler interrupt. */
# define SCALER_DISPCTRL_SCLEIRQ BIT(0)
#define SCALER_DISPSTAT 0x00000004
-# define SCALER_DISPSTAT_COBLOW2 BIT(29)
-# define SCALER_DISPSTAT_EOLN2 BIT(28)
-# define SCALER_DISPSTAT_ESFRAME2 BIT(27)
-# define SCALER_DISPSTAT_ESLINE2 BIT(26)
-# define SCALER_DISPSTAT_EUFLOW2 BIT(25)
-# define SCALER_DISPSTAT_EOF2 BIT(24)
-
-# define SCALER_DISPSTAT_COBLOW1 BIT(21)
-# define SCALER_DISPSTAT_EOLN1 BIT(20)
-# define SCALER_DISPSTAT_ESFRAME1 BIT(19)
-# define SCALER_DISPSTAT_ESLINE1 BIT(18)
-# define SCALER_DISPSTAT_EUFLOW1 BIT(17)
-# define SCALER_DISPSTAT_EOF1 BIT(16)
-
# define SCALER_DISPSTAT_RESP_MASK VC4_MASK(15, 14)
# define SCALER_DISPSTAT_RESP_SHIFT 14
# define SCALER_DISPSTAT_RESP_OKAY 0
@@ -270,23 +250,26 @@
# define SCALER_DISPSTAT_RESP_SLVERR 2
# define SCALER_DISPSTAT_RESP_DECERR 3
-# define SCALER_DISPSTAT_COBLOW0 BIT(13)
+# define SCALER_DISPSTAT_COBLOW(x) BIT(13 + ((x) * 8))
/* Set when the DISPEOLN line is done compositing. */
-# define SCALER_DISPSTAT_EOLN0 BIT(12)
+# define SCALER_DISPSTAT_EOLN(x) BIT(12 + ((x) * 8))
/* Set when VSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESFRAME0 BIT(11)
+# define SCALER_DISPSTAT_ESFRAME(x) BIT(11 + ((x) * 8))
/* Set when HSTART is seen but there are still pixels in the current
* output line.
*/
-# define SCALER_DISPSTAT_ESLINE0 BIT(10)
+# define SCALER_DISPSTAT_ESLINE(x) BIT(10 + ((x) * 8))
/* Set when the downstream tries to read from the display FIFO
* while it's empty.
*/
-# define SCALER_DISPSTAT_EUFLOW0 BIT(9)
+# define SCALER_DISPSTAT_EUFLOW(x) BIT(9 + ((x) * 8))
/* Set when the display mode changes from RUN to EOF */
-# define SCALER_DISPSTAT_EOF0 BIT(8)
+# define SCALER_DISPSTAT_EOF(x) BIT(8 + ((x) * 8))
+
+# define SCALER_DISPSTAT_IRQMASK(x) VC4_MASK(13 + ((x) * 8), \
+ 8 + ((x) * 8))
/* Set on AXI invalid DMA ID error. */
# define SCALER_DISPSTAT_DMA_ERROR BIT(7)
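For reference, the parameterized DISPCTRL/DISPSTAT macros above expand to the same bit positions as the per-display constants they replace, with the channel index x running from 0 to SCALER_CHANNELS_COUNT - 1. A quick expansion check, not part of the patch:

/*
 * SCALER_DISPCTRL_DSPEISLUR(2) == BIT(15)  -> old SCALER_DISPCTRL_DSP2EISLUR
 * SCALER_DISPCTRL_DSPEIEOLN(1) == BIT(10)  -> old SCALER_DISPCTRL_DSP1EIEOLN
 * SCALER_DISPSTAT_EUFLOW(1)    == BIT(17)  -> old SCALER_DISPSTAT_EUFLOW1
 * SCALER_DISPSTAT_EOF(2)       == BIT(24)  -> old SCALER_DISPSTAT_EOF2
 * SCALER_DISPSTAT_IRQMASK(1) covers bits 21..16, i.e. COBLOW1 down to EOF1
 */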
@@ -298,12 +281,10 @@
* SCALER_DISPSTAT_RESP_ERROR is not SCALER_DISPSTAT_RESP_OKAY.
*/
# define SCALER_DISPSTAT_IRQDMA BIT(4)
-# define SCALER_DISPSTAT_IRQDISP2 BIT(3)
-# define SCALER_DISPSTAT_IRQDISP1 BIT(2)
/* Set when any of the EOF/EOLN/ESFRAME/ESLINE bits are set and their
* corresponding interrupt bit is enabled in DISPCTRL.
*/
-# define SCALER_DISPSTAT_IRQDISP0 BIT(1)
+# define SCALER_DISPSTAT_IRQDISP(x) BIT(1 + (x))
/* On read, the profiler interrupt. On write, clear *all* interrupt bits. */
# define SCALER_DISPSTAT_IRQSCL BIT(0)
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 273984f71ae2..3c918eeaf56e 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -148,6 +148,12 @@ static void emit_tile(struct vc4_exec_info *exec,
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ /* Exec previous load. */
+ vc4_tile_coordinates(setup, x, y);
+ vc4_store_before_load(setup);
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
rcl_u8(setup, VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER);
@@ -156,12 +162,6 @@ static void emit_tile(struct vc4_exec_info *exec,
&args->zs_read, x, y) |
VC4_LOADSTORE_FULL_RES_DISABLE_COLOR);
} else {
- if (setup->color_read) {
- /* Exec previous load. */
- vc4_tile_coordinates(setup, x, y);
- vc4_store_before_load(setup);
- }
-
rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
rcl_u16(setup, args->zs_read.bits);
rcl_u32(setup, setup->zs_read->paddr +
@@ -291,16 +291,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
}
}
if (setup->zs_read) {
+ if (setup->color_read) {
+ loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
+ loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
+ }
+
if (args->zs_read.flags &
VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES) {
loop_body_size += VC4_PACKET_LOAD_FULL_RES_TILE_BUFFER_SIZE;
} else {
- if (setup->color_read &&
- !(args->color_read.flags &
- VC4_SUBMIT_RCL_SURFACE_READ_IS_FULL_RES)) {
- loop_body_size += VC4_PACKET_TILE_COORDINATES_SIZE;
- loop_body_size += VC4_PACKET_STORE_TILE_BUFFER_GENERAL_SIZE;
- }
loop_body_size += VC4_PACKET_LOAD_TILE_BUFFER_GENERAL_SIZE;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index aa279b5b0de7..c8b89a78f9f4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -148,6 +148,7 @@ struct vc4_txp {
struct drm_writeback_connector connector;
void __iomem *regs;
+ struct debugfs_regset32 regset;
};
static inline struct vc4_txp *encoder_to_vc4_txp(struct drm_encoder *encoder)
@@ -160,40 +161,14 @@ static inline struct vc4_txp *connector_to_vc4_txp(struct drm_connector *conn)
return container_of(conn, struct vc4_txp, connector.base);
}
-#define TXP_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} txp_regs[] = {
- TXP_REG(TXP_DST_PTR),
- TXP_REG(TXP_DST_PITCH),
- TXP_REG(TXP_DIM),
- TXP_REG(TXP_DST_CTRL),
- TXP_REG(TXP_PROGRESS),
+static const struct debugfs_reg32 txp_regs[] = {
+ VC4_REG32(TXP_DST_PTR),
+ VC4_REG32(TXP_DST_PITCH),
+ VC4_REG32(TXP_DIM),
+ VC4_REG32(TXP_DST_CTRL),
+ VC4_REG32(TXP_PROGRESS),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_txp_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_txp *txp = vc4->txp;
- int i;
-
- if (!txp)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(txp_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- txp_regs[i].name, txp_regs[i].reg,
- TXP_READ(txp_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static int vc4_txp_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
@@ -249,7 +224,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
struct drm_connector_state *conn_state)
{
struct drm_crtc_state *crtc_state;
- struct drm_gem_cma_object *gem;
struct drm_framebuffer *fb;
int i;
@@ -275,8 +249,6 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
if (i == ARRAY_SIZE(drm_fmts))
return -EINVAL;
- gem = drm_fb_cma_get_gem_obj(fb, 0);
-
/* Pitch must be aligned on 16 bytes. */
if (fb->pitches[0] & GENMASK(3, 0))
return -EINVAL;
@@ -327,7 +299,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
TXP_WRITE(TXP_DST_CTRL, ctrl);
- drm_writeback_queue_job(&txp->connector, conn_state->writeback_job);
+ drm_writeback_queue_job(&txp->connector, conn_state);
}
static const struct drm_connector_helper_funcs vc4_txp_connector_helper_funcs = {
@@ -413,6 +385,9 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
txp->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(txp->regs))
return PTR_ERR(txp->regs);
+ txp->regset.base = txp->regs;
+ txp->regset.regs = txp_regs;
+ txp->regset.nregs = ARRAY_SIZE(txp_regs);
drm_connector_helper_add(&txp->connector.base,
&vc4_txp_connector_helper_funcs);
@@ -431,6 +406,8 @@ static int vc4_txp_bind(struct device *dev, struct device *master, void *data)
dev_set_drvdata(dev, txp);
vc4->txp = txp;
+ vc4_debugfs_add_regset32(drm, "txp_regs", &txp->regset);
+
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e47e29426078..a4b6859e3af6 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -22,129 +22,145 @@
#include "vc4_drv.h"
#include "vc4_regs.h"
-#ifdef CONFIG_DEBUG_FS
-#define REGDEF(reg) { reg, #reg }
-static const struct {
- uint32_t reg;
- const char *name;
-} vc4_reg_defs[] = {
- REGDEF(V3D_IDENT0),
- REGDEF(V3D_IDENT1),
- REGDEF(V3D_IDENT2),
- REGDEF(V3D_SCRATCH),
- REGDEF(V3D_L2CACTL),
- REGDEF(V3D_SLCACTL),
- REGDEF(V3D_INTCTL),
- REGDEF(V3D_INTENA),
- REGDEF(V3D_INTDIS),
- REGDEF(V3D_CT0CS),
- REGDEF(V3D_CT1CS),
- REGDEF(V3D_CT0EA),
- REGDEF(V3D_CT1EA),
- REGDEF(V3D_CT0CA),
- REGDEF(V3D_CT1CA),
- REGDEF(V3D_CT00RA0),
- REGDEF(V3D_CT01RA0),
- REGDEF(V3D_CT0LC),
- REGDEF(V3D_CT1LC),
- REGDEF(V3D_CT0PC),
- REGDEF(V3D_CT1PC),
- REGDEF(V3D_PCS),
- REGDEF(V3D_BFC),
- REGDEF(V3D_RFC),
- REGDEF(V3D_BPCA),
- REGDEF(V3D_BPCS),
- REGDEF(V3D_BPOA),
- REGDEF(V3D_BPOS),
- REGDEF(V3D_BXCF),
- REGDEF(V3D_SQRSV0),
- REGDEF(V3D_SQRSV1),
- REGDEF(V3D_SQCNTL),
- REGDEF(V3D_SRQPC),
- REGDEF(V3D_SRQUA),
- REGDEF(V3D_SRQUL),
- REGDEF(V3D_SRQCS),
- REGDEF(V3D_VPACNTL),
- REGDEF(V3D_VPMBASE),
- REGDEF(V3D_PCTRC),
- REGDEF(V3D_PCTRE),
- REGDEF(V3D_PCTR(0)),
- REGDEF(V3D_PCTRS(0)),
- REGDEF(V3D_PCTR(1)),
- REGDEF(V3D_PCTRS(1)),
- REGDEF(V3D_PCTR(2)),
- REGDEF(V3D_PCTRS(2)),
- REGDEF(V3D_PCTR(3)),
- REGDEF(V3D_PCTRS(3)),
- REGDEF(V3D_PCTR(4)),
- REGDEF(V3D_PCTRS(4)),
- REGDEF(V3D_PCTR(5)),
- REGDEF(V3D_PCTRS(5)),
- REGDEF(V3D_PCTR(6)),
- REGDEF(V3D_PCTRS(6)),
- REGDEF(V3D_PCTR(7)),
- REGDEF(V3D_PCTRS(7)),
- REGDEF(V3D_PCTR(8)),
- REGDEF(V3D_PCTRS(8)),
- REGDEF(V3D_PCTR(9)),
- REGDEF(V3D_PCTRS(9)),
- REGDEF(V3D_PCTR(10)),
- REGDEF(V3D_PCTRS(10)),
- REGDEF(V3D_PCTR(11)),
- REGDEF(V3D_PCTRS(11)),
- REGDEF(V3D_PCTR(12)),
- REGDEF(V3D_PCTRS(12)),
- REGDEF(V3D_PCTR(13)),
- REGDEF(V3D_PCTRS(13)),
- REGDEF(V3D_PCTR(14)),
- REGDEF(V3D_PCTRS(14)),
- REGDEF(V3D_PCTR(15)),
- REGDEF(V3D_PCTRS(15)),
- REGDEF(V3D_DBGE),
- REGDEF(V3D_FDBGO),
- REGDEF(V3D_FDBGB),
- REGDEF(V3D_FDBGR),
- REGDEF(V3D_FDBGS),
- REGDEF(V3D_ERRSTAT),
+static const struct debugfs_reg32 v3d_regs[] = {
+ VC4_REG32(V3D_IDENT0),
+ VC4_REG32(V3D_IDENT1),
+ VC4_REG32(V3D_IDENT2),
+ VC4_REG32(V3D_SCRATCH),
+ VC4_REG32(V3D_L2CACTL),
+ VC4_REG32(V3D_SLCACTL),
+ VC4_REG32(V3D_INTCTL),
+ VC4_REG32(V3D_INTENA),
+ VC4_REG32(V3D_INTDIS),
+ VC4_REG32(V3D_CT0CS),
+ VC4_REG32(V3D_CT1CS),
+ VC4_REG32(V3D_CT0EA),
+ VC4_REG32(V3D_CT1EA),
+ VC4_REG32(V3D_CT0CA),
+ VC4_REG32(V3D_CT1CA),
+ VC4_REG32(V3D_CT00RA0),
+ VC4_REG32(V3D_CT01RA0),
+ VC4_REG32(V3D_CT0LC),
+ VC4_REG32(V3D_CT1LC),
+ VC4_REG32(V3D_CT0PC),
+ VC4_REG32(V3D_CT1PC),
+ VC4_REG32(V3D_PCS),
+ VC4_REG32(V3D_BFC),
+ VC4_REG32(V3D_RFC),
+ VC4_REG32(V3D_BPCA),
+ VC4_REG32(V3D_BPCS),
+ VC4_REG32(V3D_BPOA),
+ VC4_REG32(V3D_BPOS),
+ VC4_REG32(V3D_BXCF),
+ VC4_REG32(V3D_SQRSV0),
+ VC4_REG32(V3D_SQRSV1),
+ VC4_REG32(V3D_SQCNTL),
+ VC4_REG32(V3D_SRQPC),
+ VC4_REG32(V3D_SRQUA),
+ VC4_REG32(V3D_SRQUL),
+ VC4_REG32(V3D_SRQCS),
+ VC4_REG32(V3D_VPACNTL),
+ VC4_REG32(V3D_VPMBASE),
+ VC4_REG32(V3D_PCTRC),
+ VC4_REG32(V3D_PCTRE),
+ VC4_REG32(V3D_PCTR(0)),
+ VC4_REG32(V3D_PCTRS(0)),
+ VC4_REG32(V3D_PCTR(1)),
+ VC4_REG32(V3D_PCTRS(1)),
+ VC4_REG32(V3D_PCTR(2)),
+ VC4_REG32(V3D_PCTRS(2)),
+ VC4_REG32(V3D_PCTR(3)),
+ VC4_REG32(V3D_PCTRS(3)),
+ VC4_REG32(V3D_PCTR(4)),
+ VC4_REG32(V3D_PCTRS(4)),
+ VC4_REG32(V3D_PCTR(5)),
+ VC4_REG32(V3D_PCTRS(5)),
+ VC4_REG32(V3D_PCTR(6)),
+ VC4_REG32(V3D_PCTRS(6)),
+ VC4_REG32(V3D_PCTR(7)),
+ VC4_REG32(V3D_PCTRS(7)),
+ VC4_REG32(V3D_PCTR(8)),
+ VC4_REG32(V3D_PCTRS(8)),
+ VC4_REG32(V3D_PCTR(9)),
+ VC4_REG32(V3D_PCTRS(9)),
+ VC4_REG32(V3D_PCTR(10)),
+ VC4_REG32(V3D_PCTRS(10)),
+ VC4_REG32(V3D_PCTR(11)),
+ VC4_REG32(V3D_PCTRS(11)),
+ VC4_REG32(V3D_PCTR(12)),
+ VC4_REG32(V3D_PCTRS(12)),
+ VC4_REG32(V3D_PCTR(13)),
+ VC4_REG32(V3D_PCTRS(13)),
+ VC4_REG32(V3D_PCTR(14)),
+ VC4_REG32(V3D_PCTRS(14)),
+ VC4_REG32(V3D_PCTR(15)),
+ VC4_REG32(V3D_PCTRS(15)),
+ VC4_REG32(V3D_DBGE),
+ VC4_REG32(V3D_FDBGO),
+ VC4_REG32(V3D_FDBGB),
+ VC4_REG32(V3D_FDBGR),
+ VC4_REG32(V3D_FDBGS),
+ VC4_REG32(V3D_ERRSTAT),
};
-int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
+static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
- V3D_READ(vc4_reg_defs[i].reg));
+ int ret = vc4_v3d_pm_get(vc4);
+
+ if (ret == 0) {
+ uint32_t ident1 = V3D_READ(V3D_IDENT1);
+ uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
+ uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
+ uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
+
+ seq_printf(m, "Revision: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
+ seq_printf(m, "Slices: %d\n", nslc);
+ seq_printf(m, "TMUs: %d\n", nslc * tups);
+ seq_printf(m, "QPUs: %d\n", nslc * qups);
+ seq_printf(m, "Semaphores: %d\n",
+ VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ vc4_v3d_pm_put(vc4);
}
return 0;
}
-int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
+/**
+ * Wraps pm_runtime_get_sync() in a refcount, so that we can reliably
+ * get the pm_runtime refcount to 0 in vc4_reset().
+ */
+int
+vc4_v3d_pm_get(struct vc4_dev *vc4)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- uint32_t ident1 = V3D_READ(V3D_IDENT1);
- uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
- uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
- uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);
-
- seq_printf(m, "Revision: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
- seq_printf(m, "Slices: %d\n", nslc);
- seq_printf(m, "TMUs: %d\n", nslc * tups);
- seq_printf(m, "QPUs: %d\n", nslc * qups);
- seq_printf(m, "Semaphores: %d\n",
- VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));
+ mutex_lock(&vc4->power_lock);
+ if (vc4->power_refcount++ == 0) {
+ int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+
+ if (ret < 0) {
+ vc4->power_refcount--;
+ mutex_unlock(&vc4->power_lock);
+ return ret;
+ }
+ }
+ mutex_unlock(&vc4->power_lock);
return 0;
}
-#endif /* CONFIG_DEBUG_FS */
+
+void
+vc4_v3d_pm_put(struct vc4_dev *vc4)
+{
+ mutex_lock(&vc4->power_lock);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
+ mutex_unlock(&vc4->power_lock);
+}
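A minimal sketch of how a caller is expected to pair these helpers (the function name is hypothetical; the pattern mirrors vc4_v3d_debugfs_ident() above):

/* Hypothetical caller, for illustration only. */
static int example_read_v3d_ident0(struct vc4_dev *vc4, u32 *ident)
{
	int ret = vc4_v3d_pm_get(vc4);

	if (ret)
		return ret;

	*ident = V3D_READ(V3D_IDENT0);	/* V3D is guaranteed powered here */
	vc4_v3d_pm_put(vc4);
	return 0;
}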
static void vc4_v3d_init_hw(struct drm_device *dev)
{
@@ -354,6 +370,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
v3d->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(v3d->regs))
return PTR_ERR(v3d->regs);
+ v3d->regset.base = v3d->regs;
+ v3d->regset.regs = v3d_regs;
+ v3d->regset.nregs = ARRAY_SIZE(v3d_regs);
vc4->v3d = v3d;
v3d->vc4 = vc4;
@@ -409,6 +428,9 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);
+ vc4_debugfs_add_file(drm, "v3d_ident", vc4_v3d_debugfs_ident, NULL);
+ vc4_debugfs_add_regset32(drm, "v3d_regs", &v3d->regset);
+
return 0;
}
@@ -452,7 +474,7 @@ static int vc4_v3d_dev_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id vc4_v3d_dt_match[] = {
+const struct of_device_id vc4_v3d_dt_match[] = {
{ .compatible = "brcm,bcm2835-v3d" },
{ .compatible = "brcm,cygnus-v3d" },
{ .compatible = "brcm,vc4-v3d" },
diff --git a/drivers/gpu/drm/vc4/vc4_vec.c b/drivers/gpu/drm/vc4/vc4_vec.c
index 858c3a483229..0a27e48fab31 100644
--- a/drivers/gpu/drm/vc4/vc4_vec.c
+++ b/drivers/gpu/drm/vc4/vc4_vec.c
@@ -176,6 +176,8 @@ struct vc4_vec {
struct clk *clock;
const struct vc4_vec_tv_mode *tv_mode;
+
+ struct debugfs_regset32 regset;
};
#define VEC_READ(offset) readl(vec->regs + (offset))
@@ -223,59 +225,33 @@ struct vc4_vec_tv_mode {
void (*mode_set)(struct vc4_vec *vec);
};
-#define VEC_REG(reg) { reg, #reg }
-static const struct {
- u32 reg;
- const char *name;
-} vec_regs[] = {
- VEC_REG(VEC_WSE_CONTROL),
- VEC_REG(VEC_WSE_WSS_DATA),
- VEC_REG(VEC_WSE_VPS_DATA1),
- VEC_REG(VEC_WSE_VPS_CONTROL),
- VEC_REG(VEC_REVID),
- VEC_REG(VEC_CONFIG0),
- VEC_REG(VEC_SCHPH),
- VEC_REG(VEC_CLMP0_START),
- VEC_REG(VEC_CLMP0_END),
- VEC_REG(VEC_FREQ3_2),
- VEC_REG(VEC_FREQ1_0),
- VEC_REG(VEC_CONFIG1),
- VEC_REG(VEC_CONFIG2),
- VEC_REG(VEC_INTERRUPT_CONTROL),
- VEC_REG(VEC_INTERRUPT_STATUS),
- VEC_REG(VEC_FCW_SECAM_B),
- VEC_REG(VEC_SECAM_GAIN_VAL),
- VEC_REG(VEC_CONFIG3),
- VEC_REG(VEC_STATUS0),
- VEC_REG(VEC_MASK0),
- VEC_REG(VEC_CFG),
- VEC_REG(VEC_DAC_TEST),
- VEC_REG(VEC_DAC_CONFIG),
- VEC_REG(VEC_DAC_MISC),
+static const struct debugfs_reg32 vec_regs[] = {
+ VC4_REG32(VEC_WSE_CONTROL),
+ VC4_REG32(VEC_WSE_WSS_DATA),
+ VC4_REG32(VEC_WSE_VPS_DATA1),
+ VC4_REG32(VEC_WSE_VPS_CONTROL),
+ VC4_REG32(VEC_REVID),
+ VC4_REG32(VEC_CONFIG0),
+ VC4_REG32(VEC_SCHPH),
+ VC4_REG32(VEC_CLMP0_START),
+ VC4_REG32(VEC_CLMP0_END),
+ VC4_REG32(VEC_FREQ3_2),
+ VC4_REG32(VEC_FREQ1_0),
+ VC4_REG32(VEC_CONFIG1),
+ VC4_REG32(VEC_CONFIG2),
+ VC4_REG32(VEC_INTERRUPT_CONTROL),
+ VC4_REG32(VEC_INTERRUPT_STATUS),
+ VC4_REG32(VEC_FCW_SECAM_B),
+ VC4_REG32(VEC_SECAM_GAIN_VAL),
+ VC4_REG32(VEC_CONFIG3),
+ VC4_REG32(VEC_STATUS0),
+ VC4_REG32(VEC_MASK0),
+ VC4_REG32(VEC_CFG),
+ VC4_REG32(VEC_DAC_TEST),
+ VC4_REG32(VEC_DAC_CONFIG),
+ VC4_REG32(VEC_DAC_MISC),
};
-#ifdef CONFIG_DEBUG_FS
-int vc4_vec_debugfs_regs(struct seq_file *m, void *unused)
-{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct vc4_vec *vec = vc4->vec;
- int i;
-
- if (!vec)
- return 0;
-
- for (i = 0; i < ARRAY_SIZE(vec_regs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- vec_regs[i].name, vec_regs[i].reg,
- VEC_READ(vec_regs[i].reg));
- }
-
- return 0;
-}
-#endif
-
static void vc4_vec_ntsc_mode_set(struct vc4_vec *vec)
{
VEC_WRITE(VEC_CONFIG0, VEC_CONFIG0_NTSC_STD | VEC_CONFIG0_PDEN);
@@ -587,6 +563,9 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vec->regs = vc4_ioremap_regs(pdev, 0);
if (IS_ERR(vec->regs))
return PTR_ERR(vec->regs);
+ vec->regset.base = vec->regs;
+ vec->regset.regs = vec_regs;
+ vec->regset.nregs = ARRAY_SIZE(vec_regs);
vec->clock = devm_clk_get(dev, NULL);
if (IS_ERR(vec->clock)) {
@@ -612,6 +591,8 @@ static int vc4_vec_bind(struct device *dev, struct device *master, void *data)
vc4->vec = vec;
+ vc4_debugfs_add_regset32(drm, "vec_regs", &vec->regset);
+
return 0;
err_destroy_encoder:
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 73dc99046c43..ed0fcda713c3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -28,6 +28,30 @@
#include "virtgpu_drv.h"
+static void virtio_add_bool(struct seq_file *m, const char *name,
+ bool value)
+{
+ seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no");
+}
+
+static void virtio_add_int(struct seq_file *m, const char *name,
+ int value)
+{
+ seq_printf(m, "%-16s : %d\n", name, value);
+}
+
+static int virtio_gpu_features(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
+
+ virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
+ virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_int(m, "cap sets", vgdev->num_capsets);
+ virtio_add_int(m, "scanouts", vgdev->num_scanouts);
+ return 0;
+}
+
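With the %-16s format used above, the new virtio-gpu-features debugfs node would print something along these lines (illustrative values only; they depend on the host device):

virgl            : yes
edid             : yes
cap sets         : 2
scanouts         : 1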
static int
virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
{
@@ -41,7 +65,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
}
static struct drm_info_list virtio_gpu_debugfs_list[] = {
- { "irq_fence", virtio_gpu_debugfs_irq_info, 0, NULL },
+ { "virtio-gpu-features", virtio_gpu_features },
+ { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL },
};
#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list)
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 653ec7d0bf4d..86843a4d6102 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -385,5 +385,6 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
+ drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index af92964b6889..c50868753132 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -209,8 +209,6 @@ static struct drm_driver driver = {
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
- .gem_prime_pin = virtgpu_gem_prime_pin,
- .gem_prime_unpin = virtgpu_gem_prime_unpin,
.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
.gem_prime_vmap = virtgpu_gem_prime_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d577cb76f5ad..b69ae10ca238 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -50,6 +50,23 @@
#define DRIVER_MINOR 1
#define DRIVER_PATCHLEVEL 0
+struct virtio_gpu_object_params {
+ uint32_t format;
+ uint32_t width;
+ uint32_t height;
+ unsigned long size;
+ bool dumb;
+ /* 3d */
+ bool virgl;
+ uint32_t target;
+ uint32_t bind;
+ uint32_t depth;
+ uint32_t array_size;
+ uint32_t last_level;
+ uint32_t nr_samples;
+ uint32_t flags;
+};
+
struct virtio_gpu_object {
struct drm_gem_object gem_base;
uint32_t hw_res_handle;
@@ -204,6 +221,9 @@ struct virtio_gpu_fpriv {
/* virtio_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 10
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head);
+void virtio_gpu_unref_list(struct list_head *head);
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
@@ -217,16 +237,17 @@ int virtio_gpu_gem_init(struct virtio_gpu_device *vgdev);
void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p);
int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
struct drm_file *file);
void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file);
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned);
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -243,9 +264,8 @@ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
uint32_t resource_id);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@ -304,7 +324,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_ctrl_ack(struct virtqueue *vq);
void virtio_gpu_cursor_ack(struct virtqueue *vq);
void virtio_gpu_fence_ack(struct virtqueue *vq);
@@ -332,6 +353,7 @@ void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev);
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
/* virtio_gpu_fence.c */
+bool virtio_fence_signaled(struct dma_fence *f);
struct virtio_gpu_fence *virtio_gpu_fence_alloc(
struct virtio_gpu_device *vgdev);
int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@ -342,8 +364,9 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
/* virtio_gpu_object */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr);
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence);
void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
@@ -352,8 +375,6 @@ void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo);
int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
/* virtgpu_prime.c */
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 21bd4c4a32d1..87d1966192f4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -36,7 +36,7 @@ static const char *virtio_get_timeline_name(struct dma_fence *f)
return "controlq";
}
-static bool virtio_signaled(struct dma_fence *f)
+bool virtio_fence_signaled(struct dma_fence *f)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
@@ -62,7 +62,7 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
static const struct dma_fence_ops virtio_fence_ops = {
.get_driver_name = virtio_get_driver_name,
.get_timeline_name = virtio_get_timeline_name,
- .signaled = virtio_signaled,
+ .signaled = virtio_fence_signaled,
.fence_value_str = virtio_fence_value_str,
.timeline_value_str = virtio_timeline_value_str,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index f06586393974..1e49e08dd545 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -34,15 +34,16 @@ void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj)
virtio_gpu_object_unref(&obj);
}
-struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
- size_t size, bool kernel,
- bool pinned)
+struct virtio_gpu_object*
+virtio_gpu_alloc_object(struct drm_device *dev,
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_object *obj;
int ret;
- ret = virtio_gpu_object_create(vgdev, size, kernel, pinned, &obj);
+ ret = virtio_gpu_object_create(vgdev, params, &obj, fence);
if (ret)
return ERR_PTR(ret);
@@ -51,7 +52,7 @@ struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
int virtio_gpu_gem_create(struct drm_file *file,
struct drm_device *dev,
- uint64_t size,
+ struct virtio_gpu_object_params *params,
struct drm_gem_object **obj_p,
uint32_t *handle_p)
{
@@ -59,7 +60,7 @@ int virtio_gpu_gem_create(struct drm_file *file,
int ret;
u32 handle;
- obj = virtio_gpu_alloc_object(dev, size, false, false);
+ obj = virtio_gpu_alloc_object(dev, params, NULL);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -82,12 +83,10 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
- struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_gem_object *gobj;
- struct virtio_gpu_object *obj;
+ struct virtio_gpu_object_params params = { 0 };
int ret;
uint32_t pitch;
- uint32_t format;
if (args->bpp != 32)
return -EINVAL;
@@ -96,22 +95,16 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
- ret = virtio_gpu_gem_create(file_priv, dev, args->size, &gobj,
+ params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
+ params.width = args->width;
+ params.height = args->height;
+ params.size = args->size;
+ params.dumb = true;
+ ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
&args->handle);
if (ret)
goto fail;
- format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
- obj = gem_to_virtio_gpu_obj(gobj);
- virtio_gpu_cmd_create_resource(vgdev, obj, format,
- args->width, args->height);
-
- /* attach the object to the resource */
- ret = virtio_gpu_object_attach(vgdev, obj, NULL);
- if (ret)
- goto fail;
-
- obj->dumb = true;
args->pitch = pitch;
return ret;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 14ce8188c052..949a264985fc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -54,8 +54,8 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
&virtio_gpu_map->offset);
}
-static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
- struct list_head *head)
+int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+ struct list_head *head)
{
struct ttm_operation_ctx ctx = { false, false };
struct ttm_validate_buffer *buf;
@@ -79,7 +79,7 @@ static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
return 0;
}
-static void virtio_gpu_unref_list(struct list_head *head)
+void virtio_gpu_unref_list(struct list_head *head)
{
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
@@ -275,16 +275,12 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
+ struct virtio_gpu_fence *fence;
int ret;
struct virtio_gpu_object *qobj;
struct drm_gem_object *obj;
uint32_t handle = 0;
- uint32_t size;
- struct list_head validate_list;
- struct ttm_validate_buffer mainbuf;
- struct virtio_gpu_fence *fence = NULL;
- struct ww_acquire_ctx ticket;
- struct virtio_gpu_resource_create_3d rc_3d;
+ struct virtio_gpu_object_params params = { 0 };
if (vgdev->has_virgl_3d == false) {
if (rc->depth > 1)
@@ -299,94 +295,43 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- INIT_LIST_HEAD(&validate_list);
- memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
-
- size = rc->size;
-
+ params.format = rc->format;
+ params.width = rc->width;
+ params.height = rc->height;
+ params.size = rc->size;
+ if (vgdev->has_virgl_3d) {
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ }
/* allocate a single page size object */
- if (size == 0)
- size = PAGE_SIZE;
+ if (params.size == 0)
+ params.size = PAGE_SIZE;
- qobj = virtio_gpu_alloc_object(dev, size, false, false);
+ fence = virtio_gpu_fence_alloc(vgdev);
+ if (!fence)
+ return -ENOMEM;
+ qobj = virtio_gpu_alloc_object(dev, &params, fence);
+ dma_fence_put(&fence->f);
if (IS_ERR(qobj))
return PTR_ERR(qobj);
obj = &qobj->gem_base;
- if (!vgdev->has_virgl_3d) {
- virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
- rc->width, rc->height);
-
- ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
- } else {
- /* use a gem reference since unref list undoes them */
- drm_gem_object_get(&qobj->gem_base);
- mainbuf.bo = &qobj->tbo;
- list_add(&mainbuf.head, &validate_list);
-
- ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
- if (ret) {
- DRM_DEBUG("failed to validate\n");
- goto fail_unref;
- }
-
- rc_3d.resource_id = cpu_to_le32(qobj->hw_res_handle);
- rc_3d.target = cpu_to_le32(rc->target);
- rc_3d.format = cpu_to_le32(rc->format);
- rc_3d.bind = cpu_to_le32(rc->bind);
- rc_3d.width = cpu_to_le32(rc->width);
- rc_3d.height = cpu_to_le32(rc->height);
- rc_3d.depth = cpu_to_le32(rc->depth);
- rc_3d.array_size = cpu_to_le32(rc->array_size);
- rc_3d.last_level = cpu_to_le32(rc->last_level);
- rc_3d.nr_samples = cpu_to_le32(rc->nr_samples);
- rc_3d.flags = cpu_to_le32(rc->flags);
-
- fence = virtio_gpu_fence_alloc(vgdev);
- if (!fence) {
- ret = -ENOMEM;
- goto fail_backoff;
- }
-
- virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d);
- ret = virtio_gpu_object_attach(vgdev, qobj, fence);
- if (ret) {
- dma_fence_put(&fence->f);
- goto fail_backoff;
- }
- ttm_eu_fence_buffer_objects(&ticket, &validate_list, &fence->f);
- }
-
ret = drm_gem_handle_create(file_priv, obj, &handle);
if (ret) {
-
drm_gem_object_release(obj);
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return ret;
}
drm_gem_object_put_unlocked(obj);
rc->res_handle = qobj->hw_res_handle; /* similar to a VM address */
rc->bo_handle = handle;
-
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
return 0;
-fail_backoff:
- ttm_eu_backoff_reservation(&ticket, &validate_list);
-fail_unref:
- if (vgdev->has_virgl_3d) {
- virtio_gpu_unref_list(&validate_list);
- dma_fence_put(&fence->f);
- }
-//fail_obj:
-// drm_gem_object_handle_unreference_unlocked(obj);
- return ret;
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index e7e946035027..b2da31310d24 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,8 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <drm/ttm/ttm_execbuf_util.h>
+
#include "virtgpu_drv.h"
static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
@@ -74,39 +76,34 @@ static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
kfree(bo);
}
-static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo,
- bool pinned)
+static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo)
{
u32 c = 1;
- u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
vgbo->placement.placement = &vgbo->placement_code;
vgbo->placement.busy_placement = &vgbo->placement_code;
vgbo->placement_code.fpfn = 0;
vgbo->placement_code.lpfn = 0;
vgbo->placement_code.flags =
- TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | pflag;
+ TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT |
+ TTM_PL_FLAG_NO_EVICT;
vgbo->placement.num_placement = c;
vgbo->placement.num_busy_placement = c;
}
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
- unsigned long size, bool kernel, bool pinned,
- struct virtio_gpu_object **bo_ptr)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_object **bo_ptr,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_object *bo;
- enum ttm_bo_type type;
size_t acc_size;
int ret;
- if (kernel)
- type = ttm_bo_type_kernel;
- else
- type = ttm_bo_type_device;
*bo_ptr = NULL;
- acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
+ acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size,
sizeof(struct virtio_gpu_object));
bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL);
@@ -117,23 +114,62 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
kfree(bo);
return ret;
}
- size = roundup(size, PAGE_SIZE);
- ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
+ params->size = roundup(params->size, PAGE_SIZE);
+ ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size);
if (ret != 0) {
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
kfree(bo);
return ret;
}
- bo->dumb = false;
- virtio_gpu_init_ttm_placement(bo, pinned);
+ bo->dumb = params->dumb;
+
+ if (params->virgl) {
+ virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence);
+ } else {
+ virtio_gpu_cmd_create_resource(vgdev, bo, params, fence);
+ }
- ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, 0, !kernel, acc_size,
- NULL, NULL, &virtio_gpu_ttm_bo_destroy);
+ virtio_gpu_init_ttm_placement(bo);
+ ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size,
+ ttm_bo_type_device, &bo->placement, 0,
+ true, acc_size, NULL, NULL,
+ &virtio_gpu_ttm_bo_destroy);
/* ttm_bo_init failure will call the destroy */
if (ret != 0)
return ret;
+ if (fence) {
+ struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
+ struct list_head validate_list;
+ struct ttm_validate_buffer mainbuf;
+ struct ww_acquire_ctx ticket;
+ unsigned long irq_flags;
+ bool signaled;
+
+ INIT_LIST_HEAD(&validate_list);
+ memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer));
+
+ /* use a gem reference since unref list undoes them */
+ drm_gem_object_get(&bo->gem_base);
+ mainbuf.bo = &bo->tbo;
+ list_add(&mainbuf.head, &validate_list);
+
+ ret = virtio_gpu_object_list_validate(&ticket, &validate_list);
+ if (ret == 0) {
+ spin_lock_irqsave(&drv->lock, irq_flags);
+ signaled = virtio_fence_signaled(&fence->f);
+ if (!signaled)
+ /* virtio create command still in flight */
+ ttm_eu_fence_buffer_objects(&ticket, &validate_list,
+ &fence->f);
+ spin_unlock_irqrestore(&drv->lock, irq_flags);
+ if (signaled)
+ /* virtio create command finished */
+ ttm_eu_backoff_reservation(&ticket, &validate_list);
+ }
+ virtio_gpu_unref_list(&validate_list);
+ }
+
*bo_ptr = bo;
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index eb51a78e1199..8fbf71bd0c5e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -28,20 +28,16 @@
* device that might share buffers with virtgpu
*/
-int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
+struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
- WARN_ONCE(1, "not implemented");
- return -ENODEV;
-}
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
-void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
-{
- WARN_ONCE(1, "not implemented");
-}
+ if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
+ /* should not happen */
+ return ERR_PTR(-EINVAL);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- return ERR_PTR(-ENODEV);
+ return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+ bo->tbo.ttm->num_pages);
}
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
@@ -68,7 +64,10 @@ void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
}
int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *area)
+ struct vm_area_struct *vma)
{
- return -ENODEV;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
+ return drm_gem_prime_mmap(obj, vma);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 4bfbf25fabff..300ef3a83538 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -37,8 +37,6 @@
#include <linux/delay.h>
-#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
-
static struct
virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev)
{
@@ -116,10 +114,6 @@ static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = {
static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct virtio_gpu_device *vgdev;
-
- vgdev = virtio_gpu_get_vgdev(bdev);
-
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
@@ -194,42 +188,45 @@ static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev,
*/
struct virtio_gpu_ttm_tt {
struct ttm_dma_tt ttm;
- struct virtio_gpu_device *vgdev;
- u64 offset;
+ struct virtio_gpu_object *obj;
};
-static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm,
- struct ttm_mem_reg *bo_mem)
+static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
- gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
- if (!ttm->num_pages)
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- ttm->num_pages, bo_mem, ttm);
-
- /* Not implemented */
+ virtio_gpu_object_attach(vgdev, gtt->obj, NULL);
return 0;
}
-static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm)
{
- /* Not implemented */
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
+ struct virtio_gpu_device *vgdev =
+ virtio_gpu_get_vgdev(gtt->obj->tbo.bdev);
+
+ virtio_gpu_object_detach(vgdev, gtt->obj);
return 0;
}
-static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm)
+static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct virtio_gpu_ttm_tt *gtt = (void *)ttm;
+ struct virtio_gpu_ttm_tt *gtt =
+ container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm);
ttm_dma_tt_fini(&gtt->ttm);
kfree(gtt);
}
-static struct ttm_backend_func virtio_gpu_backend_func = {
- .bind = &virtio_gpu_ttm_backend_bind,
- .unbind = &virtio_gpu_ttm_backend_unbind,
- .destroy = &virtio_gpu_ttm_backend_destroy,
+static struct ttm_backend_func virtio_gpu_tt_func = {
+ .bind = &virtio_gpu_ttm_tt_bind,
+ .unbind = &virtio_gpu_ttm_tt_unbind,
+ .destroy = &virtio_gpu_ttm_tt_destroy,
};
static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -242,8 +239,8 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL);
if (gtt == NULL)
return NULL;
- gtt->ttm.ttm.func = &virtio_gpu_backend_func;
- gtt->vgdev = vgdev;
+ gtt->ttm.ttm.func = &virtio_gpu_tt_func;
+ gtt->obj = container_of(bo, struct virtio_gpu_object, tbo);
if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) {
kfree(gtt);
return NULL;
@@ -251,58 +248,11 @@ static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo,
return &gtt->ttm.ttm;
}
-static void virtio_gpu_move_null(struct ttm_buffer_object *bo,
- struct ttm_mem_reg *new_mem)
-{
- struct ttm_mem_reg *old_mem = &bo->mem;
-
- BUG_ON(old_mem->mm_node != NULL);
- *old_mem = *new_mem;
- new_mem->mm_node = NULL;
-}
-
-static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_mem_reg *new_mem)
-{
- int ret;
-
- ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
- if (ret)
- return ret;
-
- virtio_gpu_move_null(bo, new_mem);
- return 0;
-}
-
-static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
- bool evict,
- struct ttm_mem_reg *new_mem)
-{
- struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
-
- bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
-
- if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
- if (bo->hw_res_handle)
- virtio_gpu_object_detach(vgdev, bo);
-
- } else if (new_mem->placement & TTM_PL_FLAG_TT) {
- if (bo->hw_res_handle) {
- virtio_gpu_object_attach(vgdev, bo, NULL);
- }
- }
-}
-
static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo)
{
struct virtio_gpu_object *bo;
- struct virtio_gpu_device *vgdev;
bo = container_of(tbo, struct virtio_gpu_object, tbo);
- vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private;
if (bo->pages)
virtio_gpu_object_free_sg_table(bo);
@@ -314,11 +264,9 @@ static struct ttm_bo_driver virtio_gpu_bo_driver = {
.init_mem_type = &virtio_gpu_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &virtio_gpu_evict_flags,
- .move = &virtio_gpu_bo_move,
.verify_access = &virtio_gpu_verify_access,
.io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve,
.io_mem_free = &virtio_gpu_ttm_io_mem_free,
- .move_notify = &virtio_gpu_bo_move_notify,
.swap_notify = &virtio_gpu_bo_swap_notify,
};
@@ -330,7 +278,7 @@ int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev)
r = ttm_bo_device_init(&vgdev->mman.bdev,
&virtio_gpu_bo_driver,
vgdev->ddev->anon_inode->i_mapping,
- DRM_FILE_PAGE_OFFSET, 0);
+ false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
goto err_dev_init;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 6bc2008b0d0d..e62fe24b1a2e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -376,9 +376,8 @@ retry:
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- uint32_t format,
- uint32_t width,
- uint32_t height)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -388,11 +387,11 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->format = cpu_to_le32(format);
- cmd_p->width = cpu_to_le32(width);
- cmd_p->height = cpu_to_le32(height);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -828,7 +827,8 @@ void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *bo,
- struct virtio_gpu_resource_create_3d *rc_3d)
+ struct virtio_gpu_object_params *params,
+ struct virtio_gpu_fence *fence)
{
struct virtio_gpu_resource_create_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -836,11 +836,21 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
memset(cmd_p, 0, sizeof(*cmd_p));
- *cmd_p = *rc_3d;
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
- cmd_p->hdr.flags = 0;
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+ cmd_p->format = cpu_to_le32(params->format);
+ cmd_p->width = cpu_to_le32(params->width);
+ cmd_p->height = cpu_to_le32(params->height);
+
+ cmd_p->target = cpu_to_le32(params->target);
+ cmd_p->bind = cpu_to_le32(params->bind);
+ cmd_p->depth = cpu_to_le32(params->depth);
+ cmd_p->array_size = cpu_to_le32(params->array_size);
+ cmd_p->last_level = cpu_to_le32(params->last_level);
+ cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
+ cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
bo->created = true;
}
@@ -924,8 +934,8 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct scatterlist *sg;
int si, nents;
- if (!obj->created)
- return 0;
+ if (WARN_ON_ONCE(!obj->created))
+ return -EINVAL;
if (!obj->pages) {
int ret;
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 8a9aeb0a9ea8..bb66dbcd5e3f 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -219,6 +219,8 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
spin_lock_init(&vkms_out->state_lock);
vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+ if (!vkms_out->crc_workq)
+ return -ENOMEM;
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 0b9ee7fb45d6..66e14e38d5e8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -499,12 +499,9 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -534,12 +531,9 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi,
SVGA3dCmdSetRenderTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for render target "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -576,12 +570,9 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi,
} body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for texture "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
cmd->header.size = sizeof(cmd->body);
@@ -610,12 +601,10 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetShader body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER;
cmd->header.size = sizeof(cmd->body);
cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN;
@@ -641,12 +630,9 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetSingleConstantBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER;
cmd->header.size = sizeof(cmd->body);
@@ -768,12 +754,9 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX shader"
- " resource binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -807,12 +790,9 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs)
vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS);
view_id_size = cbs->bind_cmd_count*sizeof(uint32);
cmd_size = sizeof(*cmd) + view_id_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX render-target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS;
cmd->header.size = sizeof(cmd->body) + view_id_size;
@@ -894,12 +874,9 @@ static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs)
so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget);
cmd_size = sizeof(*cmd) + so_target_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX SO target"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS;
cmd->header.size = sizeof(cmd->body) + so_target_size;
@@ -1011,12 +988,9 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs)
set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer);
cmd_size = sizeof(*cmd) + set_vb_size;
- cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX vertex buffer"
- " binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS;
cmd->header.size = sizeof(cmd->body) + set_vb_size;
@@ -1167,12 +1141,10 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind)
SVGA3dCmdDXSetIndexBuffer body;
} *cmd;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for DX index buffer "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER;
cmd->header.size = sizeof(cmd->body);
if (rebind) {
@@ -1269,6 +1241,32 @@ void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs)
vmw_binding_drop(entry);
}
+/**
+ * vmw_binding_dirtying - Return whether a binding type is dirtying its resource
+ * @binding_type: The binding type
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * context binding referencing it, we need to determine whether that resource
+ * will be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently rendertarget-, depth-stencil-, and
+ * stream-output-target bindings are capable of dirtying their resources.
+ *
+ * Return: Whether the binding type dirties the resource its binding points to.
+ */
+u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type)
+{
+ static u32 is_binding_dirtying[vmw_ctx_binding_max] = {
+ [vmw_ctx_binding_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_dx_rt] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_ds] = VMW_RES_DIRTY_SET,
+ [vmw_ctx_binding_so] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Review this function as new bindings are added. */
+ BUILD_BUG_ON(vmw_ctx_binding_max != 11);
+ return is_binding_dirtying[binding_type];
+}
+
/*
* This function is unused at run-time, and only used to hold various build
* asserts important for code optimization assumptions.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
index 6a2a9d69043b..f6ab79d23923 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
@@ -205,5 +205,7 @@ extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs);
extern struct list_head *
vmw_binding_state_list(struct vmw_ctx_binding_state *cbs);
extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs);
+extern u32 vmw_binding_dirtying(enum vmw_ctx_binding_type binding_type);
+
#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 70dab55e7888..56979e412ca8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -393,6 +393,7 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
__vmw_cmdbuf_header_free(entry);
break;
case SVGA_CB_STATUS_COMMAND_ERROR:
+ WARN_ONCE(true, "Command buffer error.\n");
entry->cb_header->status = SVGA_CB_STATUS_NONE;
list_add_tail(&entry->list, &man->error);
schedule_work(&man->work);
@@ -511,17 +512,14 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
container_of(work, struct vmw_cmdbuf_man, work);
struct vmw_cmdbuf_header *entry, *next;
uint32_t dummy;
- bool restart[SVGA_CB_CONTEXT_MAX];
bool send_fence = false;
struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
int i;
struct vmw_cmdbuf_context *ctx;
bool global_block = false;
- for_each_cmdbuf_ctx(man, i, ctx) {
+ for_each_cmdbuf_ctx(man, i, ctx)
INIT_LIST_HEAD(&restart_head[i]);
- restart[i] = false;
- }
mutex_lock(&man->error_mutex);
spin_lock(&man->lock);
@@ -533,23 +531,23 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
const char *cmd_name;
list_del_init(&entry->list);
- restart[entry->cb_context] = true;
global_block = true;
if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
- DRM_ERROR("Unknown command causing device error.\n");
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Unknown command causing device error.\n");
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
__vmw_cmdbuf_header_free(entry);
send_fence = true;
continue;
}
- DRM_ERROR("Command \"%s\" causing device error.\n", cmd_name);
- DRM_ERROR("Command buffer offset is %lu\n",
- (unsigned long) cb_hdr->errorOffset);
- DRM_ERROR("Command size is %lu\n",
- (unsigned long) error_cmd_size);
+ VMW_DEBUG_USER("Command \"%s\" causing device error.\n",
+ cmd_name);
+ VMW_DEBUG_USER("Command buffer offset is %lu\n",
+ (unsigned long) cb_hdr->errorOffset);
+ VMW_DEBUG_USER("Command size is %lu\n",
+ (unsigned long) error_cmd_size);
new_start_offset = cb_hdr->errorOffset + error_cmd_size;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 14bd760a62fd..63f111068a44 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -156,12 +156,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
}
vmw_execbuf_release_pinned_bo(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
cmd->header.size = sizeof(cmd->body);
@@ -210,7 +207,7 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) {
uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
&uctx->res, i);
- if (unlikely(IS_ERR(uctx->cotables[i]))) {
+ if (IS_ERR(uctx->cotables[i])) {
ret = PTR_ERR(uctx->cotables[i]);
goto out_cotables;
}
@@ -259,9 +256,8 @@ static int vmw_context_init(struct vmw_private *dev_priv,
return -ENOMEM;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
vmw_resource_unreference(&res);
return -ENOMEM;
}
@@ -311,10 +307,8 @@ static int vmw_gb_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -345,12 +339,10 @@ static int vmw_gb_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = res->id;
@@ -391,10 +383,8 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -441,12 +431,9 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -487,10 +474,8 @@ static int vmw_dx_context_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -521,12 +506,9 @@ static int vmw_dx_context_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -615,10 +597,8 @@ static int vmw_dx_context_unbind(struct vmw_resource *res,
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "unbinding.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -665,12 +645,9 @@ static int vmw_dx_context_destroy(struct vmw_resource *res)
if (likely(res->id == -1))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for context "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
cmd->header.size = sizeof(cmd->body);
@@ -751,7 +728,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
int ret;
if (!dev_priv->has_dx && dx) {
- DRM_ERROR("DX contexts not supported by device.\n");
+ VMW_DEBUG_USER("DX contexts not supported by device.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 44f3f6f107d3..b4f6e1217c9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -171,12 +171,9 @@ static int vmw_cotable_unscrub(struct vmw_resource *res)
WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB);
lockdep_assert_held(&bo->resv->lock.base);
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID);
WARN_ON(bo->mem.mem_type != VMW_PL_MOB);
@@ -262,12 +259,9 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback)
if (readback)
submit_size += sizeof(*cmd0);
- cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID);
- if (!cmd1) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "unbinding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (!cmd1)
return -ENOMEM;
- }
vcotbl->size_read_back = 0;
if (readback) {
@@ -351,13 +345,10 @@ static int vmw_cotable_readback(struct vmw_resource *res)
struct vmw_fence_obj *fence;
if (!vcotbl->scrubbed) {
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- SVGA3D_INVALID_ID);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for cotable "
- "readback.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
+
cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE;
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = vcotbl->ctx->id;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 1bfa353d995c..bf6c3500d363 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -828,7 +828,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = ttm_bo_device_init(&dev_priv->bdev,
&vmw_bo_driver,
dev->anon_inode->i_mapping,
- VMWGFX_FILE_PAGE_OFFSET,
false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed initializing TTM buffer object driver.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index accb2fafe2f1..96983c47fb40 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -48,7 +48,6 @@
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 15
#define VMWGFX_DRIVER_PATCHLEVEL 0
-#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_VALIDATIONS 2048
@@ -700,6 +699,8 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
uint32_t *inout_id,
struct vmw_resource **out);
extern void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset);
@@ -812,7 +813,6 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
-extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void *
vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
@@ -828,6 +828,18 @@ extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
extern int vmw_fifo_flush(struct vmw_private *dev_priv,
bool interruptible);
+#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
+({ \
+ vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \
+ DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \
+ __func__, (unsigned int) __bytes); \
+ NULL; \
+ }); \
+})
+
+#define VMW_FIFO_RESERVE(__priv, __bytes) \
+ VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)
+
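A minimal usage sketch, modelled on the context-destroy path converted elsewhere in this patch: the macro logs a uniform DRM_ERROR naming the calling function, so callers only keep the NULL check.

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;		/* failure already logged by the macro */

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));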
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
@@ -1312,6 +1324,20 @@ int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
int vmw_host_log(const char *log);
+/* VMW logging */
+
+/**
+ * VMW_DEBUG_USER - Debug output for user-space debugging.
+ *
+ * @fmt: printf() like format string.
+ *
+ * This macro is for logging user-space error and debugging messages, e.g.
+ * command buffer execution errors due to malformed commands, an invalid
+ * context, etc.
+ */
+#define VMW_DEBUG_USER(fmt, ...) \
+ DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
+
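An example of the intended call sites, taken from the render-target check converted later in this patch: errors caused by malformed user-space submissions go to the driver debug log instead of polluting dmesg via DRM_ERROR.

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}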
/**
* Inline helper functions
*/
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 88b8178d4687..2ff7ba04d8c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -36,6 +36,25 @@
#define VMW_RES_HT_ORDER 12
/*
+ * Helper macro to get dx_ctx_node if available; otherwise print an error
+ * message. This is for use in command verifier functions where, if dx_ctx_node
+ * is not set, the command is invalid.
+ */
+#define VMW_GET_CTX_NODE(__sw_context) \
+({ \
+ __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
+ VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
+ __sw_context->dx_ctx_node; \
+ }); \
+})
+
+#define VMW_DECLARE_CMD_VAR(__var, __type) \
+ struct { \
+ SVGA3dCmdHeader header; \
+ __type body; \
+ } __var
+
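A minimal sketch of a DX command verifier using both helpers; the verifier name is hypothetical, the command type and the converter/dirty arguments follow the patterns used elsewhere in this patch.

	static int vmw_cmd_example_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
	{
		VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer) =
			container_of(header, typeof(*cmd), header);
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)	/* the macro already printed the debug message */
			return -EINVAL;

		return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					 VMW_RES_DIRTY_NONE,
					 user_surface_converter,
					 &cmd->body.sid, NULL);
	}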
+/**
* struct vmw_relocation - Buffer object relocation
*
* @head: List head for the command submission context's relocation list
@@ -59,9 +78,8 @@ struct vmw_relocation {
* command stream is replaced with the actual id after validation.
* @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
* with a NOP.
- * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
- * after validation is -1, the command is replaced with a NOP. Otherwise no
- * action.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
+ * validation is -1, the command is replaced with a NOP. Otherwise no action.
*/
enum vmw_resource_relocation_type {
vmw_res_rel_normal,
@@ -75,8 +93,8 @@ enum vmw_resource_relocation_type {
*
* @head: List head for the software context's relocation list.
* @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of single byte entries into the command buffer where the
- * id that needs fixup is located.
+ * @offset: Offset of single byte entries into the command buffer where the id
+ * that needs fixup is located.
* @rel_type: Type of relocation.
*/
struct vmw_resource_relocation {
@@ -86,8 +104,9 @@ struct vmw_resource_relocation {
enum vmw_resource_relocation_type rel_type:3;
};
-/*
+/**
* struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ *
* @head: List head of context list
* @ctx: The context resource
* @cur: The context's persistent binding state
@@ -142,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b)
/**
* vmw_execbuf_bindings_commit - Commit modified binding state
+ *
* @sw_context: The command submission context
- * @backoff: Whether this is part of the error path and binding state
- * changes should be ignored
+ * @backoff: Whether this is part of the error path and binding state changes
+ * should be ignored
*/
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
bool backoff)
@@ -154,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
list_for_each_entry(entry, &sw_context->ctx_list, head) {
if (!backoff)
vmw_binding_state_commit(entry->cur, entry->staged);
+
if (entry->staged != sw_context->staged_bindings)
vmw_binding_state_free(entry->staged);
else
@@ -166,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
/**
* vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ *
* @sw_context: The command submission context
*/
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
@@ -176,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
- * added to the validate list.
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
+ * the validate list.
*
* @dev_priv: Pointer to the device private:
* @sw_context: The command submission context
@@ -195,11 +217,8 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
goto out_err;
if (!sw_context->staged_bindings) {
- sw_context->staged_bindings =
- vmw_binding_state_alloc(dev_priv);
+ sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(sw_context->staged_bindings)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(sw_context->staged_bindings);
sw_context->staged_bindings = NULL;
goto out_err;
@@ -209,8 +228,6 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
if (sw_context->staged_bindings_inuse) {
node->staged = vmw_binding_state_alloc(dev_priv);
if (IS_ERR(node->staged)) {
- DRM_ERROR("Failed to allocate context binding "
- "information.\n");
ret = PTR_ERR(node->staged);
node->staged = NULL;
goto out_err;
@@ -225,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
list_add_tail(&node->head, &sw_context->ctx_list);
return 0;
+
out_err:
return ret;
}
/**
- * vmw_execbuf_res_size - calculate extra size fore the resource validation
- * node
+ * vmw_execbuf_res_size - calculate extra size for the resource validation node
+ *
* @dev_priv: Pointer to the device private struct.
* @res_type: The resource type.
*
- * Guest-backed contexts and DX contexts require extra size to store
- * execbuf private information in the validation node. Typically the
- * binding manager associated data structures.
+ * Guest-backed contexts and DX contexts require extra size to store execbuf
+ * private information in the validation node. Typically the binding manager
+ * associated data structures.
*
* Returns: The extra size requirement based on resource type.
*/
@@ -254,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
*
* @rcache: Pointer to the entry to update.
* @res: Pointer to the resource.
- * @private: Pointer to the execbuf-private space in the resource
- * validation node.
+ * @private: Pointer to the execbuf-private space in the resource validation
+ * node.
*/
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
struct vmw_resource *res,
@@ -268,17 +286,19 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
}
/**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an
- * unreferenced rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+ * rcu-protected pointer to the validation list.
+ *
* @sw_context: Pointer to the software context.
* @res: Unreferenced rcu-protected pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
- * Returns: 0 on success. Negative error code on failure. Typical error
- * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
- * doomed.
+ * Returns: 0 on success. Negative error code on failure. Typical error codes
+ * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
*/
static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_private *dev_priv = res->dev_priv;
int ret;
@@ -290,13 +310,17 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
rcache = &sw_context->res_cache[res_type];
if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
vmw_user_resource_noref_release();
return 0;
}
priv_size = vmw_execbuf_res_size(dev_priv, res_type);
ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
- (void **)&ctx_info, &first_usage);
+ dirty, (void **)&ctx_info,
+ &first_usage);
vmw_user_resource_noref_release();
if (ret)
return ret;
@@ -304,8 +328,10 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
if (priv_size && first_usage) {
ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
ctx_info);
- if (ret)
+ if (ret) {
+ VMW_DEBUG_USER("Failed first usage context setup.\n");
return ret;
+ }
}
vmw_execbuf_rcache_update(rcache, res, ctx_info);
@@ -315,13 +341,16 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
/**
* vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
* validation list if it's not already on it
+ *
* @sw_context: Pointer to the software context.
* @res: Pointer to the resource.
+ * @dirty: Whether to change dirty status.
*
* Returns: Zero on success. Negative error code on failure.
*/
static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
- struct vmw_resource *res)
+ struct vmw_resource *res,
+ u32 dirty)
{
struct vmw_res_cache_entry *rcache;
enum vmw_res_type res_type = vmw_res_type(res);
@@ -329,10 +358,15 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
int ret;
rcache = &sw_context->res_cache[res_type];
- if (likely(rcache->valid && rcache->res == res))
+ if (likely(rcache->valid && rcache->res == res)) {
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
return 0;
+ }
- ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+ ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+ &ptr, NULL);
if (ret)
return ret;
@@ -342,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
}
/**
- * vmw_view_res_val_add - Add a view and the surface it's pointing to
- * to the validation list
+ * vmw_view_res_val_add - Add a view, and the surface it's pointing to, to the
+ * validation list
*
* @sw_context: The software context holding the validation list.
* @view: Pointer to the view resource.
@@ -356,27 +390,29 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
int ret;
/*
- * First add the resource the view is pointing to, otherwise
- * it may be swapped out when the view is validated.
+ * First add the resource the view is pointing to, otherwise it may be
+ * swapped out when the view is validated.
*/
- ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+ vmw_view_dirtying(view));
if (ret)
return ret;
- return vmw_execbuf_res_noctx_val_add(sw_context, view);
+ return vmw_execbuf_res_noctx_val_add(sw_context, view,
+ VMW_RES_DIRTY_NONE);
}
/**
- * vmw_view_id_val_add - Look up a view and add it and the surface it's
- * pointing to to the validation list.
+ * vmw_view_id_val_add - Look up a view and add it, and the surface it's
+ * pointing to, to the validation list.
*
* @sw_context: The software context holding the validation list.
* @view_type: The view type to look up.
* @id: view id of the view.
*
- * The view is represented by a view id and the DX context it's created on,
- * or scheduled for creation on. If there is no DX context set, the function
- * will return an -EINVAL error pointer.
+ * The view is represented by a view id and the DX context it's created on, or
+ * scheduled for creation on. If there is no DX context set, the function will
+ * return an -EINVAL error pointer.
*
* Returns: Unreferenced pointer to the resource on success, negative error
* pointer on failure.
@@ -389,10 +425,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return ERR_PTR(-EINVAL);
- }
view = vmw_view_lookup(sw_context->man, view_type, id);
if (IS_ERR(view))
@@ -413,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
- * This function puts all resources that were previously bound to @ctx on
- * the resource validation list. This is part of the context state reemission
+ * This function puts all resources that were previously bound to @ctx on the
+ * resource validation list. This is part of the context state reemission.
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -433,13 +467,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (IS_ERR(res))
continue;
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
}
}
-
/* Add all resources bound to the context to the validation list */
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
@@ -448,8 +482,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
if (vmw_res_type(entry->res) == vmw_res_view)
ret = vmw_view_res_val_add(sw_context, entry->res);
else
- ret = vmw_execbuf_res_noctx_val_add(sw_context,
- entry->res);
+ ret = vmw_execbuf_res_noctx_val_add
+ (sw_context, entry->res,
+ vmw_binding_dirtying(entry->bt));
if (unlikely(ret != 0))
break;
}
@@ -472,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
*
* @list: Pointer to head of relocation list.
* @res: The resource.
- * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is one byte.
+ * @offset: Offset into the command buffer currently being parsed where the id
+ * that needs fixup is located. Granularity is one byte.
* @rel_type: Relocation type.
*/
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
@@ -486,7 +521,7 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
if (unlikely(!rel)) {
- DRM_ERROR("Failed to allocate a resource relocation.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
return -ENOMEM;
}
@@ -506,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
static void vmw_resource_relocations_free(struct list_head *list)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(list);
}
/**
* vmw_resource_relocations_apply - Apply all relocations on a list
*
- * @cb: Pointer to the start of the command buffer bein patch. This need
- * not be the same buffer as the one being parsed when the relocation
- * list was built, but the contents must be the same modulo the
- * resource ids.
+ * @cb: Pointer to the start of the command buffer being patched. This need
+ * not be the same buffer as the one being parsed when the relocation list was
+ * built, but the contents must be the same modulo the resource ids.
* @list: Pointer to the head of the relocation list.
*/
static void vmw_resource_relocations_apply(uint32_t *cb,
@@ -560,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
}
/**
- * vmw_resources_reserve - Reserve all resources on the sw_context's
- * resource list.
+ * vmw_resources_reserve - Reserve all resources on the sw_context's resource
+ * list.
*
* @sw_context: Pointer to the software context.
*
- * Note that since vmware's command submission currently is protected by
- * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
- * since only a single thread at once will attempt this.
+ * Note that since vmware's command submission currently is protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
+ * only a single thread at once will attempt this.
*/
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
@@ -592,22 +625,24 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
+ * resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
+ * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
+ * @id_loc: Pointer to the location in the command buffer currently being parsed
+ * from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated on
+ * exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
+ u32 dirty,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource **p_res)
@@ -621,7 +656,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (*id_loc == SVGA3D_INVALID_ID) {
if (res_type == vmw_res_context) {
- DRM_ERROR("Illegal context invalid id.\n");
+ VMW_DEBUG_USER("Illegal context invalid id.\n");
return -EINVAL;
}
return 0;
@@ -629,6 +664,9 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
res = rcache->res;
+ if (dirty)
+ vmw_validation_res_set_dirty(sw_context->ctx,
+ rcache->private, dirty);
} else {
unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
@@ -638,13 +676,13 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, *id_loc, converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned int) *id_loc);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+ (unsigned int) *id_loc);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
if (unlikely(ret != 0))
return ret;
@@ -675,23 +713,16 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
struct vmw_private *dev_priv = ctx_res->dev_priv;
struct vmw_buffer_object *dx_query_mob;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindAllQuery body;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
if (!dx_query_mob || dx_query_mob->dx_query_ctx)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
-
- if (cmd == NULL) {
- DRM_ERROR("Failed to rebind queries.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+ if (cmd == NULL)
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -705,8 +736,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
}
/**
- * vmw_rebind_contexts - Rebind all resources previously bound to
- * referenced contexts.
+ * vmw_rebind_contexts - Rebind all resources previously bound to referenced
+ * contexts.
*
* @sw_context: Pointer to the software context.
*
@@ -721,21 +752,23 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
ret = vmw_binding_rebind_all(val->cur);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Failed to rebind context.\n");
+ VMW_DEBUG_USER("Failed to rebind context.\n");
return ret;
}
ret = vmw_rebind_all_dx_query(val->ctx);
- if (ret != 0)
+ if (ret != 0) {
+ VMW_DEBUG_USER("Failed to rebind queries.\n");
return ret;
+ }
}
return 0;
}
/**
- * vmw_view_bindings_add - Add an array of view bindings to a context
- * binding state tracker.
+ * vmw_view_bindings_add - Add an array of view bindings to a context binding
+ * state tracker.
*
* @sw_context: The execbuf state used for this command.
* @view_type: View type for the bindings.
@@ -752,13 +785,11 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
uint32 view_ids[], u32 num_views,
u32 first_slot)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
u32 i;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
for (i = 0; i < num_views; ++i) {
struct vmw_ctx_bindinfo_view binding;
@@ -768,7 +799,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
view = vmw_view_id_val_add(sw_context, view_type,
view_ids[i]);
if (IS_ERR(view)) {
- DRM_ERROR("View not found.\n");
+ VMW_DEBUG_USER("View not found.\n");
return PTR_ERR(view);
}
}
@@ -798,19 +829,18 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_cid_cmd {
- SVGA3dCmdHeader header;
- uint32_t cid;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
+ container_of(header, typeof(*cmd), header);
- cmd = container_of(header, struct vmw_cid_cmd, header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->cid, NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body, NULL);
}
/**
* vmw_execbuf_info_from_res - Get the private validation metadata for a
* recently validated resource
+ *
* @sw_context: Pointer to the command submission context
* @res: The resource
*
@@ -818,8 +848,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
* context's resource cache and hence the last resource of that type to be
* processed by the validation code.
*
- * Return: a pointer to the private metadata of the resource, or NULL
- * if it wasn't found
+ * Return: a pointer to the private metadata of the resource, or NULL if it
+ * wasn't found
*/
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
@@ -835,36 +865,32 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
return NULL;
}
-
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetRenderTarget body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_RT_MAX) {
- DRM_ERROR("Illegal render target type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal render target type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.target.sid,
- &res);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.target.sid, &res);
if (unlikely(ret))
return ret;
@@ -890,44 +916,38 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.src.sid, NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.src.sid, NULL);
if (ret)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- SVGA3dCmdHeader *header)
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBufferCopy body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest, NULL);
}
@@ -935,21 +955,18 @@ static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXPredCopyRegion body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
int ret;
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dstSid, NULL);
}
@@ -957,20 +974,18 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceStretchBlt body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
int ret;
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.src.sid, NULL);
if (unlikely(ret != 0))
return ret;
+
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.dest.sid, NULL);
}
@@ -978,15 +993,11 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBlitSurfaceToScreen body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcImage.sid, NULL);
}
@@ -994,17 +1005,12 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_sid_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdPresent body;
- } *cmd;
-
-
- cmd = container_of(header, struct vmw_sid_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->body.sid,
- NULL);
+ VMW_RES_DIRTY_NONE, user_surface_converter,
+ &cmd->body.sid, NULL);
}
/**
@@ -1014,11 +1020,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
* @new_query_bo: The new buffer holding query results.
* @sw_context: The software context used for this command submission.
*
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch.
+ * This function checks whether @new_query_bo is suitable for holding query
+ * results, and if another buffer currently is pinned for query results. If so,
+ * the function prepares the state of @sw_context for switching pinned buffers
+ * after successful submission of the current command batch.
*/
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
struct vmw_buffer_object *new_query_bo,
@@ -1034,7 +1039,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
if (unlikely(new_query_bo->base.num_pages > 4)) {
- DRM_ERROR("Query buffer too large.\n");
+ VMW_DEBUG_USER("Query buffer too large.\n");
return -EINVAL;
}
@@ -1053,13 +1058,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
dev_priv->has_mob, false);
if (unlikely(ret != 0))
return ret;
-
}
return 0;
}
-
/**
* vmw_query_bo_switch_commit - Finalize switching pinned query buffer
*
@@ -1068,11 +1071,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
*
* This function will check if we're switching query buffers, and will then,
* issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronus unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
+ * object following that query wait has signaled, we are sure that all preceding
+ * queries have finished, and the old query buffer can be unpinned. However,
+ * since both the new query buffer and the old one are fenced with that fence,
+ * we can do an asynchronous unpin now, and be sure that the old query buffer
+ * won't be moved until the fence has signaled.
*
* As mentioned above, both the new - and old query buffers need to be fenced
* using a sequence emitted *after* calling this function.
@@ -1084,7 +1087,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
* The validate list should still hold references to all
* contexts here.
*/
-
if (sw_context->needs_post_query_barrier) {
struct vmw_res_cache_entry *ctx_entry =
&sw_context->res_cache[vmw_res_context];
@@ -1097,7 +1099,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
if (unlikely(ret != 0))
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
}
if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
@@ -1111,10 +1113,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
/*
* We pin also the dummy_query_bo buffer so that we
- * don't need to validate it when emitting
- * dummy queries in context destroy paths.
+ * don't need to validate it when emitting dummy queries
+ * in context destroy paths.
*/
-
if (!dev_priv->dummy_query_bo_pinned) {
vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
true);
@@ -1131,22 +1132,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
- * handle to a MOB id.
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @id: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the buffer object identified by the
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the buffer object identified by the
* user-space handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a MOB id. The translation does not take place immediately, but
- * during a call to vmw_apply_relocations(). This function builds a relocation
- * list and a list of buffers to validate. The former needs to be freed using
- * either vmw_apply_relocations() or vmw_free_relocations(). The latter
- * needs to be freed using vmw_clear_validations.
+ * during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate. The
+ * former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
*/
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
@@ -1161,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use MOB buffer.\n");
+ VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
return PTR_ERR(vmw_bo);
}
@@ -1184,19 +1187,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
- * handle to a valid SVGAGuestPtr
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: The software context used for this command batch validation.
* @ptr: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the DMA buffer identified by the user-space
+ * handle in @id.
*
* This function saves information needed to translate a user-space buffer
* handle to a valid SVGAGuestPtr. The translation does not take place
* immediately, but during a call to vmw_apply_relocations().
+ *
* This function builds a relocation list and a list of buffers to validate.
* The former needs to be freed using either vmw_apply_relocations() or
* vmw_free_relocations(). The latter needs to be freed using
@@ -1215,7 +1219,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
vmw_validation_preload_bo(sw_context->ctx);
vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
if (IS_ERR(vmw_bo)) {
- DRM_ERROR("Could not find or use GMR region.\n");
+ VMW_DEBUG_USER("Could not find or use GMR region.\n");
return PTR_ERR(vmw_bo);
}
@@ -1236,10 +1240,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
return 0;
}
-
-
/**
- * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1251,67 +1253,52 @@ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_define_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineQuery q;
- } *cmd;
-
- int ret;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *cotable_res;
+ int ret;
-
- if (ctx_node == NULL) {
- DRM_ERROR("DX Context not set for query.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- cmd = container_of(header, struct vmw_dx_define_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
- if (cmd->q.type < SVGA3D_QUERYTYPE_MIN ||
- cmd->q.type >= SVGA3D_QUERYTYPE_MAX)
+ if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
+ cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
return -EINVAL;
cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
- ret = vmw_cotable_notify(cotable_res, cmd->q.queryId);
+ ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);
return ret;
}
-
-
/**
- * vmw_cmd_dx_bind_query - validate a SVGA_3D_CMD_DX_BIND_QUERY command.
+ * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
* @header: Pointer to the command header in the command stream.
*
- * The query bind operation will eventually associate the query ID
- * with its backing MOB. In this function, we take the user mode
- * MOB ID and use vmw_translate_mob_ptr() to translate it to its
- * kernel mode equivalent.
+ * The query bind operation will eventually associate the query ID with its
+ * backing MOB. In this function, we take the user mode MOB ID and use
+ * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
*/
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_dx_bind_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindQuery q;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
struct vmw_buffer_object *vmw_bo;
- int ret;
-
+ int ret;
- cmd = container_of(header, struct vmw_dx_bind_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
/*
* Look up the buffer pointed to by q.mobid, put it on the relocation
* list so its kernel mode MOB ID can be filled in later
*/
- ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (ret != 0)
@@ -1322,10 +1309,8 @@ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
return 0;
}
-
-
/**
- * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
+ * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1335,21 +1320,16 @@ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_gb_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_gb_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
+ * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1359,38 +1339,30 @@ static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_begin_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginQuery q;
- } *cmd;
-
- cmd = container_of(header, struct vmw_begin_query_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
+ container_of(header, typeof(*cmd), header);
if (unlikely(dev_priv->has_mob)) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdBeginGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
}
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->q.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
}
/**
- * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
+ * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1401,19 +1373,15 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1424,7 +1392,7 @@ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
+ * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1435,27 +1403,21 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdEndGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
@@ -1466,8 +1428,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1477,7 +1438,7 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
+ * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1488,19 +1449,15 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
- ret = vmw_translate_mob_ptr(dev_priv, sw_context,
- &cmd->q.mobid,
+ ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
&vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1509,7 +1466,7 @@ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
+ * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context used for this command submission.
@@ -1520,27 +1477,21 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
SVGA3dCmdHeader *header)
{
struct vmw_buffer_object *vmw_bo;
- struct vmw_query_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForQuery q;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
int ret;
- cmd = container_of(header, struct vmw_query_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
if (dev_priv->has_mob) {
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdWaitForGBQuery q;
- } gb_cmd;
+ VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);
BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));
gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
gb_cmd.header.size = cmd->header.size;
- gb_cmd.q.cid = cmd->q.cid;
- gb_cmd.q.type = cmd->q.type;
- gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
- gb_cmd.q.offset = cmd->q.guestResult.offset;
+ gb_cmd.body.cid = cmd->body.cid;
+ gb_cmd.body.type = cmd->body.type;
+ gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
+ gb_cmd.body.offset = cmd->body.guestResult.offset;
memcpy(cmd, &gb_cmd, sizeof(*cmd));
return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
@@ -1551,8 +1502,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
return ret;
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->q.guestResult,
- &vmw_bo);
+ &cmd->body.guestResult, &vmw_bo);
if (unlikely(ret != 0))
return ret;
@@ -1565,54 +1515,52 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
{
struct vmw_buffer_object *vmw_bo = NULL;
struct vmw_surface *srf = NULL;
- struct vmw_dma_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSurfaceDMA dma;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
int ret;
SVGA3dCmdSurfaceDMASuffix *suffix;
uint32_t bo_size;
+ bool dirty;
- cmd = container_of(header, struct vmw_dma_cmd, header);
- suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+ cmd = container_of(header, typeof(*cmd), header);
+ suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
header->size - sizeof(*suffix));
/* Make sure device and verifier stay in sync. */
if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
- DRM_ERROR("Invalid DMA suffix size.\n");
+ VMW_DEBUG_USER("Invalid DMA suffix size.\n");
return -EINVAL;
}
ret = vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->dma.guest.ptr,
- &vmw_bo);
+ &cmd->body.guest.ptr, &vmw_bo);
if (unlikely(ret != 0))
return ret;
/* Make sure DMA doesn't cross BO boundaries. */
bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
- if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
- DRM_ERROR("Invalid DMA offset.\n");
+ if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
+ VMW_DEBUG_USER("Invalid DMA offset.\n");
return -EINVAL;
}
- bo_size -= cmd->dma.guest.ptr.offset;
+ bo_size -= cmd->body.guest.ptr.offset;
if (unlikely(suffix->maximumOffset > bo_size))
suffix->maximumOffset = bo_size;
+ dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
+ VMW_RES_DIRTY_SET : 0;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter, &cmd->dma.host.sid,
- NULL);
+ dirty, user_surface_converter,
+ &cmd->body.host.sid, NULL);
if (unlikely(ret != 0)) {
if (unlikely(ret != -ERESTARTSYS))
- DRM_ERROR("could not find surface for DMA.\n");
+ VMW_DEBUG_USER("could not find surface for DMA.\n");
return ret;
}
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
- header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);
return 0;
}
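Two further patterns recur from this hunk onward: messages for user-triggerable failures are demoted from DRM_ERROR to VMW_DEBUG_USER, and resource lookups gain an explicit dirty hint (VMW_RES_DIRTY_NONE/SET/CLEAR) so that only resources a command actually writes get marked dirty, as with the conditional dirty value computed for SVGA3D_WRITE_HOST_VRAM transfers above. Neither helper is defined in this file; a plausible sketch of what the call sites rely on:

	/* Sketch of the presumed definitions from vmwgfx_drv.h. */
	#define VMW_DEBUG_USER(fmt, ...) \
		DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)

	#define VMW_RES_DIRTY_NONE	0	/* command only reads the resource */
	#define VMW_RES_DIRTY_SET	BIT(0)	/* command writes the resource */
	#define VMW_RES_DIRTY_CLEAR	BIT(1)	/* command fully overwrites it */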
@@ -1621,10 +1569,7 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_draw_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDrawPrimitives body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
@@ -1636,16 +1581,17 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- cmd = container_of(header, struct vmw_draw_cmd, header);
+ cmd = container_of(header, typeof(*cmd), header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
if (unlikely(cmd->body.numVertexDecls > maxnum)) {
- DRM_ERROR("Illegal number of vertex declarations.\n");
+ VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
return -EINVAL;
}
for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&decl->array.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1655,13 +1601,14 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
- DRM_ERROR("Illegal number of index ranges.\n");
+ VMW_DEBUG_USER("Illegal number of index ranges.\n");
return -EINVAL;
}
range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&range->indexArray.surfaceId, NULL);
if (unlikely(ret != 0))
@@ -1670,30 +1617,24 @@ static int vmw_cmd_draw(struct vmw_private *dev_priv,
return 0;
}
-
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_tex_state_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetTextureState state;
- } *cmd;
-
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
- ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+ ((unsigned long) header + sizeof(*cmd));
struct vmw_resource *ctx;
struct vmw_resource *res;
int ret;
- cmd = container_of(header, struct vmw_tex_state_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->state.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -1702,12 +1643,13 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
continue;
if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
- DRM_ERROR("Illegal texture/sampler unit %u.\n",
- (unsigned) cur_state->stage);
+ VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
+ (unsigned int) cur_state->stage);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cur_state->value, &res);
if (unlikely(ret != 0))
@@ -1744,12 +1686,10 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
SVGAFifoCmdDefineGMRFB body;
} *cmd = buf;
- return vmw_translate_guest_ptr(dev_priv, sw_context,
- &cmd->body.ptr,
+ return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
&vmw_bo);
}
-
/**
* vmw_cmd_res_switch_backup - Utility function to handle backup buffer
* switching
@@ -1761,14 +1701,13 @@ static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It is called by
+ * vmw_cmd_switch_backup, which adds a resource-handle lookup on top of it.
*/
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
- struct vmw_resource *res,
- uint32_t *buf_id,
+ struct vmw_resource *res, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_buffer_object *vbo;
@@ -1788,7 +1727,6 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
return 0;
}
-
/**
* vmw_cmd_switch_backup - Utility function to handle backup buffer switching
*
@@ -1801,34 +1739,31 @@ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
* stream.
* @backup_offset: Offset of backup into MOB.
*
- * This function prepares for registering a switch of backup buffers
- * in the resource metadata just prior to unreserving. It's basically a wrapper
- * around vmw_cmd_res_switch_backup with a different interface.
+ * This function prepares for registering a switch of backup buffers in the
+ * resource metadata just prior to unreserving. It's basically a wrapper around
+ * vmw_cmd_res_switch_backup with a different interface.
*/
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv
- *converter,
- uint32_t *res_id,
- uint32_t *buf_id,
+ *converter, uint32_t *res_id, uint32_t *buf_id,
unsigned long backup_offset)
{
struct vmw_resource *res;
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
- converter, res_id, &res);
+ VMW_RES_DIRTY_NONE, converter, res_id, &res);
if (ret)
return ret;
- return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
- buf_id, backup_offset);
+ return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
+ backup_offset);
}
/**
- * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
- * command
+ * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1838,22 +1773,16 @@ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.sid, &cmd->body.mobid,
- 0);
+ user_surface_converter, &cmd->body.sid,
+ &cmd->body.mobid, 0);
}
/**
- * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
- * command
+ * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1863,21 +1792,16 @@ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
- * command
+ * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1887,21 +1811,16 @@ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdUpdateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
- * command
+ * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1911,20 +1830,16 @@ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
+ * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1935,20 +1850,16 @@ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdReadbackGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
+ * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -1959,21 +1870,17 @@ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBImage body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.image.sid, NULL);
}
/**
- * vmw_cmd_invalidate_gb_surface - Validate an
- * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
+ * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -1983,22 +1890,16 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_gb_surface_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdInvalidateGBSurface body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_gb_surface_cmd, header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_CLEAR, user_surface_converter,
&cmd->body.sid, NULL);
}
-
/**
- * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
- * command
+ * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2008,20 +1909,16 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_define_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
int ret;
size_t size;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_define_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2029,24 +1926,20 @@ static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
return 0;
size = cmd->header.size - sizeof(cmd->body);
- ret = vmw_compat_shader_add(dev_priv,
- vmw_context_res_man(ctx),
- cmd->body.shid, cmd + 1,
- cmd->body.type, size,
- &sw_context->staged_cmd_res);
+ ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
+ cmd->body.shid, cmd + 1, cmd->body.type,
+ size, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
- * command
+ * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2056,42 +1949,34 @@ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_shader_destroy_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
int ret;
struct vmw_resource *ctx;
- cmd = container_of(header, struct vmw_shader_destroy_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
- ret = vmw_shader_remove(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type,
- &sw_context->staged_cmd_res);
+ ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
+ cmd->body.type, &sw_context->staged_cmd_res);
if (unlikely(ret != 0))
return ret;
- return vmw_resource_relocation_add(sw_context,
- NULL,
+ return vmw_resource_relocation_add(sw_context, NULL,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_nop);
}
/**
- * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
- * command
+ * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2101,27 +1986,23 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
struct vmw_ctx_bindinfo_shader binding;
struct vmw_resource *ctx, *res = NULL;
struct vmw_ctx_validation_info *ctx_info;
int ret;
- cmd = container_of(header, struct vmw_set_shader_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- &ctx);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, &ctx);
if (unlikely(ret != 0))
return ret;
@@ -2130,21 +2011,20 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (cmd->body.shid != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid,
- cmd->body.type);
-
+ cmd->body.shid, cmd->body.type);
if (!IS_ERR(res)) {
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (unlikely(ret != 0))
return ret;
}
}
if (IS_ERR_OR_NULL(res)) {
- ret = vmw_cmd_res_check(dev_priv, sw_context,
- vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res);
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
+ VMW_RES_DIRTY_NONE,
+ user_shader_converter, &cmd->body.shid,
+ &res);
if (unlikely(ret != 0))
return ret;
}
@@ -2157,14 +2037,13 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
binding.bi.res = res;
binding.bi.bt = vmw_ctx_binding_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_info->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
+
return 0;
}
/**
- * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
- * command
+ * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2174,18 +2053,14 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_set_shader_const_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdSetShaderConst body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
int ret;
- cmd = container_of(header, struct vmw_set_shader_const_cmd,
- header);
+ cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter, &cmd->body.cid,
- NULL);
+ VMW_RES_DIRTY_SET, user_context_converter,
+ &cmd->body.cid, NULL);
if (unlikely(ret != 0))
return ret;
@@ -2196,8 +2071,7 @@ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
- * command
+ * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2207,22 +2081,16 @@ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_bind_gb_shader_cmd {
- SVGA3dCmdHeader header;
- SVGA3dCmdBindGBShader body;
- } *cmd;
-
- cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
- header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
+ container_of(header, typeof(*cmd), header);
return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &cmd->body.mobid,
- cmd->body.offsetInBytes);
+ user_shader_converter, &cmd->body.shid,
+ &cmd->body.mobid, cmd->body.offsetInBytes);
}
/**
- * vmw_cmd_dx_set_single_constant_buffer - Validate an
+ * vmw_cmd_dx_set_single_constant_buffer - Validate
* SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2234,23 +2102,18 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetSingleConstantBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_cb binding;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
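The open-coded "DX Context not set" checks are folded into VMW_GET_CTX_NODE(), which returns sw_context->dx_ctx_node and logs when it is NULL, so the call sites keep only a bare NULL test. The macro is not part of this diff; presumably something along these lines:

	/* Sketch of the presumed helper (GNU statement expression). */
	#define VMW_GET_CTX_NODE(__sw_context)					\
	({									\
		__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
			VMW_DEBUG_USER("SM context is not set at %s\n",		\
				       __func__);				\
			__sw_context->dx_ctx_node;				\
		});								\
	})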
@@ -2265,21 +2128,21 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 ||
binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
- DRM_ERROR("Illegal const buffer shader %u slot %u.\n",
- (unsigned) cmd->body.type,
- (unsigned) binding.slot);
+ VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+ (unsigned int) cmd->body.type,
+ (unsigned int) binding.slot);
return -EINVAL;
}
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
+ binding.slot);
return 0;
}
/**
- * vmw_cmd_dx_set_shader_res - Validate an
- * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command
+ * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2289,17 +2152,15 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShaderResources body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
+ container_of(header, typeof(*cmd), header);
u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dShaderResourceViewId);
if ((u64) cmd->body.startView + (u64) num_sr_view >
(u64) SVGA3D_DX_MAX_SRVIEWS ||
cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Invalid shader binding.\n");
+ VMW_DEBUG_USER("Invalid shader binding.\n");
return -EINVAL;
}
@@ -2311,8 +2172,7 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER
- * command
+ * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2322,36 +2182,32 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetShader body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
struct vmw_resource *res = NULL;
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_shader binding;
int ret = 0;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) {
- DRM_ERROR("Illegal shader type %u.\n",
- (unsigned) cmd->body.type);
+ VMW_DEBUG_USER("Illegal shader type %u.\n",
+ (unsigned int) cmd->body.type);
return -EINVAL;
}
if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader for binding.\n");
+ VMW_DEBUG_USER("Could not find shader for binding.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret)
return ret;
}
@@ -2361,15 +2217,14 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
binding.bi.bt = vmw_ctx_binding_dx_shader;
binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- binding.shader_slot, 0);
+ vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
return 0;
}
/**
- * vmw_cmd_dx_set_vertex_buffers - Validates an
- * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command
+ * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2379,7 +2234,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_vb binding;
struct vmw_resource *res;
struct {
@@ -2389,22 +2244,21 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
num = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dVertexBuffer);
if ((u64)num + (u64)cmd->body.startBuffer >
(u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
- DRM_ERROR("Invalid number of vertex buffers.\n");
+ VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_NONE,
user_surface_converter,
&cmd->buf[i].sid, &res);
if (unlikely(ret != 0))
@@ -2417,15 +2271,14 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
binding.stride = cmd->buf[i].stride;
binding.slot = i + cmd->body.startBuffer;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
}
/**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate an
+ * vmw_cmd_dx_set_index_buffer - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2436,23 +2289,18 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_ib binding;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetIndexBuffer body;
- } *cmd;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.sid, &res);
if (unlikely(ret != 0))
return ret;
@@ -2469,8 +2317,8 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_rendertarget - Validate an
- * SVGA_3D_CMD_DX_SET_RENDERTARGETS command
+ * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2480,32 +2328,29 @@ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXSetRenderTargets body;
- } *cmd = container_of(header, typeof(*cmd), header);
- int ret;
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
+ container_of(header, typeof(*cmd), header);
u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
sizeof(SVGA3dRenderTargetViewId);
+ int ret;
if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) {
- DRM_ERROR("Invalid DX Rendertarget binding.\n");
+ VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
return -EINVAL;
}
- ret = vmw_view_bindings_add(sw_context, vmw_view_ds,
- vmw_ctx_binding_ds, 0,
- &cmd->body.depthStencilViewId, 1, 0);
+ ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
+ 0, &cmd->body.depthStencilViewId, 1, 0);
if (ret)
return ret;
return vmw_view_bindings_add(sw_context, vmw_view_rt,
- vmw_ctx_binding_dx_rt, 0,
- (void *)&cmd[1], num_rt_view, 0);
+ vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
+ num_rt_view, 0);
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_rendertarget_view - Validate
* SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2516,17 +2361,15 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearRenderTargetView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_rt,
cmd->body.renderTargetViewId));
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate an
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2537,10 +2380,8 @@ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXClearDepthStencilView body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_ds,
cmd->body.depthStencilViewId));
@@ -2550,14 +2391,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *srf;
struct vmw_resource *res;
enum vmw_view_type view_type;
int ret;
/*
- * This is based on the fact that all affected define commands have
- * the same initial command body layout.
+ * This is based on the fact that all affected define commands have the
+ * same initial command body layout.
*/
struct {
SVGA3dCmdHeader header;
@@ -2565,17 +2406,16 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
uint32 sid;
} *cmd;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
view_type = vmw_view_cmd_to_type(header->id);
if (view_type == vmw_view_max)
return -EINVAL;
+
cmd = container_of(header, typeof(*cmd), header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, &srf);
if (unlikely(ret != 0))
return ret;
@@ -2585,19 +2425,14 @@ static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
return ret;
- return vmw_view_add(sw_context->man,
- ctx_node->ctx,
- srf,
- view_type,
- cmd->defined_id,
- header,
+ return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
+ cmd->defined_id, header,
header->size + sizeof(*header),
&sw_context->staged_cmd_res);
}
/**
- * vmw_cmd_dx_set_so_targets - Validate an
- * SVGA_3D_CMD_DX_SET_SOTARGETS command.
+ * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2607,7 +2442,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_ctx_bindinfo_so binding;
struct vmw_resource *res;
struct {
@@ -2617,22 +2452,20 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
} *cmd;
int i, ret, num;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
cmd = container_of(header, typeof(*cmd), header);
- num = (cmd->header.size - sizeof(cmd->body)) /
- sizeof(SVGA3dSoTarget);
+ num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
if (num > SVGA3D_DX_MAX_SOTARGETS) {
- DRM_ERROR("Invalid DX SO binding.\n");
+ VMW_DEBUG_USER("Invalid DX SO binding.\n");
return -EINVAL;
}
for (i = 0; i < num; i++) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
+ VMW_RES_DIRTY_SET,
user_surface_converter,
&cmd->targets[i].sid, &res);
if (unlikely(ret != 0))
@@ -2645,8 +2478,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
- vmw_binding_add(ctx_node->staged, &binding.bi,
- 0, binding.slot);
+ vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
}
return 0;
@@ -2656,7 +2488,7 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
/*
* This is based on the fact that all affected define commands have
@@ -2669,10 +2501,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
enum vmw_so_type so_type;
int ret;
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
so_type = vmw_so_cmd_to_type(header->id);
res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
@@ -2683,8 +2513,8 @@ static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_check_subresource - Validate an
- * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command
+ * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
+ * command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2714,7 +2544,7 @@ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
cmd = container_of(header, typeof(*cmd), header);
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->sid, NULL);
}
@@ -2722,32 +2552,30 @@ static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
- if (unlikely(ctx_node == NULL)) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
return 0;
}
/**
- * vmw_cmd_dx_view_remove - validate a view remove command and
- * schedule the view resource for removal.
+ * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
+ * resource for removal.
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*
- * Check that the view exists, and if it was not created using this
- * command batch, conditionally make this command a NOP.
+ * Check that the view exists, and if it was not created using this command
+ * batch, conditionally make this command a NOP.
*/
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct {
SVGA3dCmdHeader header;
union vmw_view_destroy body;
@@ -2756,15 +2584,11 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
struct vmw_resource *view;
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
- ret = vmw_view_remove(sw_context->man,
- cmd->body.view_id, view_type,
- &sw_context->staged_cmd_res,
- &view);
+ ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
+ &sw_context->staged_cmd_res, &view);
if (ret || !view)
return ret;
@@ -2774,16 +2598,14 @@ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
* relocation to conditionally make this command a NOP to avoid
* device errors.
*/
- return vmw_resource_relocation_add(sw_context,
- view,
+ return vmw_resource_relocation_add(sw_context, view,
vmw_ptr_diff(sw_context->buf_start,
&cmd->header.id),
vmw_res_rel_cond_nop);
}
/**
- * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER
- * command
+ * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2793,18 +2615,14 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDefineShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
ret = vmw_cotable_notify(res, cmd->body.shaderId);
@@ -2817,8 +2635,7 @@ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER
- * command
+ * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2828,29 +2645,22 @@ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXDestroyShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
- if (!ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ if (!ctx_node)
return -EINVAL;
- }
ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
&sw_context->staged_cmd_res);
- if (ret)
- DRM_ERROR("Could not find shader to remove.\n");
return ret;
}
/**
- * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER
- * command
+ * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2862,36 +2672,37 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
{
struct vmw_resource *ctx;
struct vmw_resource *res;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXBindShader body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
+ container_of(header, typeof(*cmd), header);
int ret;
if (cmd->body.cid != SVGA3D_INVALID_ID) {
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
- user_context_converter,
- &cmd->body.cid, &ctx);
+ VMW_RES_DIRTY_SET,
+ user_context_converter, &cmd->body.cid,
+ &ctx);
if (ret)
return ret;
} else {
- if (!sw_context->dx_ctx_node) {
- DRM_ERROR("DX Context not set.\n");
+ struct vmw_ctx_validation_info *ctx_node =
+ VMW_GET_CTX_NODE(sw_context);
+
+ if (!ctx_node)
return -EINVAL;
- }
- ctx = sw_context->dx_ctx_node->ctx;
+
+ ctx = ctx_node->ctx;
}
- res = vmw_shader_lookup(vmw_context_res_man(ctx),
- cmd->body.shid, 0);
+ res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
if (IS_ERR(res)) {
- DRM_ERROR("Could not find shader to bind.\n");
+ VMW_DEBUG_USER("Could not find shader to bind.\n");
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+ VMW_RES_DIRTY_NONE);
if (ret) {
- DRM_ERROR("Error creating resource validation node.\n");
+ VMW_DEBUG_USER("Error creating resource validation node.\n");
return ret;
}
@@ -2901,7 +2712,7 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_genmips - Validate an SVGA_3D_CMD_DX_GENMIPS command
+ * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2911,18 +2722,16 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXGenMips body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
+ container_of(header, typeof(*cmd), header);
return PTR_RET(vmw_view_id_val_add(sw_context, vmw_view_sr,
cmd->body.shaderResourceViewId));
}
/**
- * vmw_cmd_dx_transfer_from_buffer -
- * Validate an SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
+ * vmw_cmd_dx_transfer_from_buffer - Validate
+ * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2932,26 +2741,23 @@ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXTransferFromBuffer body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
+ container_of(header, typeof(*cmd), header);
int ret;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_NONE, user_surface_converter,
&cmd->body.srcSid, NULL);
if (ret != 0)
return ret;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
+ VMW_RES_DIRTY_SET, user_surface_converter,
&cmd->body.destSid, NULL);
}
/**
- * vmw_cmd_intra_surface_copy -
- * Validate an SVGA_3D_CMD_INTRA_SURFACE_COPY command
+ * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
@@ -2961,20 +2767,17 @@ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdIntraSurfaceCopy body;
- } *cmd = container_of(header, typeof(*cmd), header);
+ VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
+ container_of(header, typeof(*cmd), header);
if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
return -EINVAL;
return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
- user_surface_converter,
- &cmd->body.surface.sid, NULL);
+ VMW_RES_DIRTY_SET, user_surface_converter,
+ &cmd->body.surface.sid, NULL);
}
-
static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
@@ -2997,18 +2800,18 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
break;
default:
- DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
return -EINVAL;
}
if (*size > size_remaining) {
- DRM_ERROR("Invalid SVGA command (size mismatch):"
- " %u.\n", cmd_id);
+ VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
+ cmd_id);
return -EINVAL;
}
if (unlikely(!sw_context->kernel)) {
- DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+ VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
return -EPERM;
}
@@ -3196,9 +2999,7 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
false, false, true),
- /*
- * DX commands
- */
+ /* SM commands */
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
false, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
@@ -3380,8 +3181,8 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
}
static int vmw_cmd_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf, uint32_t *size)
+ struct vmw_sw_context *sw_context, void *buf,
+ uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
@@ -3420,31 +3221,33 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_new;
ret = entry->func(dev_priv, sw_context, header);
- if (unlikely(ret != 0))
- goto out_invalid;
+ if (unlikely(ret != 0)) {
+ VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
+ cmd_id + SVGA_3D_CMD_BASE, ret);
+ return ret;
+ }
return 0;
out_invalid:
- DRM_ERROR("Invalid SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_privileged:
- DRM_ERROR("Privileged SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EPERM;
out_old:
- DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
out_new:
- DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
- cmd_id + SVGA_3D_CMD_BASE);
+ VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
+ cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- void *buf,
+ struct vmw_sw_context *sw_context, void *buf,
uint32_t size)
{
int32_t cur_size = size;
@@ -3462,7 +3265,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
}
if (unlikely(cur_size != 0)) {
- DRM_ERROR("Command verifier out of sync.\n");
+ VMW_DEBUG_USER("Command verifier out of sync.\n");
return -EINVAL;
}
@@ -3472,7 +3275,6 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
/* Memory is validation context memory, so no need to free it */
-
INIT_LIST_HEAD(&sw_context->bo_relocations);
}
@@ -3520,7 +3322,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
if (sw_context->cmd_bounce == NULL) {
- DRM_ERROR("Failed to allocate command bounce buffer.\n");
+ VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
sw_context->cmd_bounce_size = 0;
return -ENOMEM;
}
@@ -3535,8 +3337,8 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
* If this fails for some reason, we sync the fifo and return NULL.
* It is then safe to fence buffers with a NULL pointer.
*
- * If @p_handle is not NULL @file_priv must also not be NULL. Creates
- * a userspace handle if @p_handle is not NULL, otherwise not.
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
+ * userspace handle if @p_handle is not NULL, otherwise not.
*/
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
@@ -3553,7 +3355,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fifo_send_fence(dev_priv, &sequence);
if (unlikely(ret != 0)) {
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
synced = true;
}
@@ -3564,9 +3366,8 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);
if (unlikely(ret != 0 && !synced)) {
- (void) vmw_fallback_wait(dev_priv, false, false,
- sequence, false,
- VMW_FENCE_WAIT_TIMEOUT);
+ (void) vmw_fallback_wait(dev_priv, false, false, sequence,
+ false, VMW_FENCE_WAIT_TIMEOUT);
*p_fence = NULL;
}
@@ -3574,36 +3375,32 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
}
/**
- * vmw_execbuf_copy_fence_user - copy fence object information to
- * user-space.
+ * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
*
* @dev_priv: Pointer to a vmw_private struct.
* @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
* @ret: Return value from fence object creation.
- * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
- * which the information should be copied.
+ * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
+ * the information should be copied.
* @fence: Pointer to the fence object.
* @fence_handle: User-space fence handle.
* @out_fence_fd: exported file descriptor for the fence. -1 if not used
* @sync_file: Only used to clean up in case of an error in this function.
*
- * This function copies fence information to user-space. If copying fails,
- * The user-space struct drm_vmw_fence_rep::error member is hopefully
- * left untouched, and if it's preloaded with an -EFAULT by user-space,
- * the error will hopefully be detected.
- * Also if copying fails, user-space will be unable to signal the fence
- * object so we wait for it immediately, and then unreference the
- * user-space reference.
+ * This function copies fence information to user-space. If copying fails, the
+ * user-space struct drm_vmw_fence_rep::error member is hopefully left
+ * untouched, and if it's preloaded with an -EFAULT by user-space, the error
+ * will hopefully be detected.
+ *
+ * Also if copying fails, user-space will be unable to signal the fence object
+ * so we wait for it immediately, and then unreference the user-space reference.
*/
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
- struct vmw_fpriv *vmw_fp,
- int ret,
+ struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj *fence,
- uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file)
+ struct vmw_fence_obj *fence, uint32_t fence_handle,
+ int32_t out_fence_fd, struct sync_file *sync_file)
{
struct drm_vmw_fence_rep fence_rep;
@@ -3624,16 +3421,16 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
}
/*
- * copy_to_user errors will be detected by user space not
- * seeing fence_rep::error filled in. Typically
- * user-space would have pre-set that member to -EFAULT.
+ * copy_to_user errors will be detected by user space not seeing
+ * fence_rep::error filled in. Typically user-space would have pre-set
+ * that member to -EFAULT.
*/
ret = copy_to_user(user_fence_rep, &fence_rep,
sizeof(fence_rep));
/*
- * User-space lost the fence object. We need to sync
- * and unreference the handle.
+ * User-space lost the fence object. We need to sync and unreference the
+ * handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
if (sync_file)
@@ -3644,42 +3441,39 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.fd = -1;
}
- ttm_ref_object_base_unref(vmw_fp->tfile,
- fence_handle, TTM_REF_USAGE);
- DRM_ERROR("Fence copy error. Syncing.\n");
+ ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle,
+ TTM_REF_USAGE);
+ VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
}
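The -EFAULT convention described in the comment above is driven from the submitting side. A hypothetical user-space sketch (UAPI field and ioctl names from vmwgfx_drm.h; cmd_buf/cmd_size are caller-provided placeholders) that pre-loads fence_rep.error so a lost copy_to_user() is still detected:

	#include <errno.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/vmwgfx_drm.h>

	static int submit_and_check_fence(int fd, void *cmd_buf, uint32_t cmd_size)
	{
		/* Pre-set error so a failed copy back from the kernel is visible. */
		struct drm_vmw_fence_rep fence_rep = { .error = -EFAULT };
		struct drm_vmw_execbuf_arg arg = {
			.commands     = (uintptr_t)cmd_buf,
			.command_size = cmd_size,
			.version      = DRM_VMW_EXECBUF_VERSION,
			.fence_rep    = (uintptr_t)&fence_rep,
		};

		if (drmIoctl(fd, DRM_IOCTL_VMW_EXECBUF, &arg))
			return -errno;

		/* 0 means the kernel overwrote fence_rep and handle is usable. */
		return fence_rep.error;
	}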
/**
- * vmw_execbuf_submit_fifo - Patch a command batch and submit it using
- * the fifo.
+ * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
*
* @dev_priv: Pointer to a device private structure.
* @kernel_commands: Pointer to the unpatched command batch.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command batch
- * pointed to by @kernel_commands will have been modified.
+ * Side effects: If this function returns 0, then the command batch pointed to
+ * by @kernel_commands will have been modified.
*/
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_sw_context *sw_context)
{
void *cmd;
if (sw_context->dx_ctx_node)
- cmd = vmw_fifo_reserve_dx(dev_priv, command_size,
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size,
sw_context->dx_ctx_node->ctx->id);
else
- cmd = vmw_fifo_reserve(dev_priv, command_size);
- if (!cmd) {
- DRM_ERROR("Failed reserving fifo space for commands.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, command_size);
+
+ if (!cmd)
return -ENOMEM;
- }
vmw_apply_relocations(sw_context);
memcpy(cmd, kernel_commands, command_size);
@@ -3691,16 +3485,16 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
}
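The removed "Failed reserving fifo space" message is not lost: the VMW_FIFO_RESERVE()/VMW_FIFO_RESERVE_DX() wrappers used above presumably log the failure themselves before returning NULL. They are defined outside this file; a sketch consistent with the call sites:

	/* Sketch of the presumed wrappers around vmw_fifo_reserve_dx(). */
	#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id)			\
	({									\
		void *__ptr = vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id);	\
		if (!__ptr)							\
			DRM_ERROR("FIFO reserve failed at %s for %u bytes\n",	\
				  __func__, (unsigned int)(__bytes));		\
		__ptr;								\
	})

	#define VMW_FIFO_RESERVE(__priv, __bytes)				\
		VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID)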
/**
- * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using
- * the command buffer manager.
+ * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
+ * command buffer manager.
*
* @dev_priv: Pointer to a device private structure.
* @header: Opaque handle to the command buffer allocation.
* @command_size: Size of the unpatched command batch.
* @sw_context: Structure holding the relocation lists.
*
- * Side effects: If this function returns 0, then the command buffer
- * represented by @header will have been modified.
+ * Side effects: If this function returns 0, then the command buffer represented
+ * by @header will have been modified.
*/
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
struct vmw_cmdbuf_header *header,
@@ -3709,8 +3503,8 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
{
u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
SVGA3D_INVALID_ID);
- void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size,
- id, false, header);
+ void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
+ header);
vmw_apply_relocations(sw_context);
vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
@@ -3730,22 +3524,23 @@ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
* @header: Out parameter returning the opaque pointer to the command buffer.
*
* This function checks whether we can use the command buffer manager for
- * submission and if so, creates a command buffer of suitable size and
- * copies the user data into that buffer.
+ * submission and if so, creates a command buffer of suitable size and copies
+ * the user data into that buffer.
*
* On successful return, the function returns a pointer to the data in the
* command buffer and *@header is set to non-NULL.
- * If command buffers could not be used, the function will return the value
- * of @kernel_commands on function call. That value may be NULL. In that case,
- * the value of *@header will be set to NULL.
+ *
+ * If command buffers could not be used, the function returns the value of
+ * @kernel_commands as it was passed in. That value may be NULL. In that case,
+ * the value of *@header will be set to NULL.
+ *
* If an error is encountered, the function will return a pointer error value.
* If the function is interrupted by a signal while sleeping, it will return
* -ERESTARTSYS casted to a pointer error value.
*/
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
void __user *user_commands,
- void *kernel_commands,
- u32 command_size,
+ void *kernel_commands, u32 command_size,
struct vmw_cmdbuf_header **header)
{
size_t cmdbuf_size;
@@ -3753,7 +3548,7 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
*header = NULL;
if (command_size > SVGA_CB_MAX_SIZE) {
- DRM_ERROR("Command buffer is too large.\n");
+ VMW_DEBUG_USER("Command buffer is too large.\n");
return ERR_PTR(-EINVAL);
}
@@ -3763,15 +3558,14 @@ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
/* If possible, add a little space for fencing. */
cmdbuf_size = command_size + 512;
cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
- kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size,
- true, header);
+ kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
+ header);
if (IS_ERR(kernel_commands))
return kernel_commands;
- ret = copy_from_user(kernel_commands, user_commands,
- command_size);
+ ret = copy_from_user(kernel_commands, user_commands, command_size);
if (ret) {
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
vmw_cmdbuf_header_free(*header);
*header = NULL;
return ERR_PTR(-EFAULT);
@@ -3799,13 +3593,13 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
res = vmw_user_resource_noref_lookup_handle
(dev_priv, sw_context->fp->tfile, handle,
user_context_converter);
- if (unlikely(IS_ERR(res))) {
- DRM_ERROR("Could not find or user DX context 0x%08x.\n",
- (unsigned) handle);
+ if (IS_ERR(res)) {
+ VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
+ (unsigned int) handle);
return PTR_ERR(res);
}
- ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+ ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
if (unlikely(ret != 0))
return ret;
@@ -3817,19 +3611,16 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
int vmw_execbuf_process(struct drm_file *file_priv,
struct vmw_private *dev_priv,
- void __user *user_commands,
- void *kernel_commands,
- uint32_t command_size,
- uint64_t throttle_us,
+ void __user *user_commands, void *kernel_commands,
+ uint32_t command_size, uint64_t throttle_us,
uint32_t dx_context_handle,
struct drm_vmw_fence_rep __user *user_fence_rep,
- struct vmw_fence_obj **out_fence,
- uint32_t flags)
+ struct vmw_fence_obj **out_fence, uint32_t flags)
{
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_fence_obj *fence = NULL;
struct vmw_cmdbuf_header *header;
- uint32_t handle;
+ uint32_t handle = 0;
int ret;
int32_t out_fence_fd = -1;
struct sync_file *sync_file = NULL;
@@ -3840,7 +3631,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
if (out_fence_fd < 0) {
- DRM_ERROR("Failed to get a fence file descriptor.\n");
+ VMW_DEBUG_USER("Failed to get a fence fd.\n");
return out_fence_fd;
}
}
@@ -3873,18 +3664,18 @@ int vmw_execbuf_process(struct drm_file *file_priv,
if (unlikely(ret != 0))
goto out_unlock;
-
- ret = copy_from_user(sw_context->cmd_bounce,
- user_commands, command_size);
-
+ ret = copy_from_user(sw_context->cmd_bounce, user_commands,
+ command_size);
if (unlikely(ret != 0)) {
ret = -EFAULT;
- DRM_ERROR("Failed copying commands.\n");
+ VMW_DEBUG_USER("Failed copying commands.\n");
goto out_unlock;
}
+
kernel_commands = sw_context->cmd_bounce;
- } else if (!header)
+ } else if (!header) {
sw_context->kernel = true;
+ }
sw_context->fp = vmw_fpriv(file_priv);
INIT_LIST_HEAD(&sw_context->ctx_list);
@@ -3897,6 +3688,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
INIT_LIST_HEAD(&sw_context->res_relocations);
INIT_LIST_HEAD(&sw_context->bo_relocations);
+
if (sw_context->staged_bindings)
vmw_binding_state_reset(sw_context->staged_bindings);
@@ -3904,8 +3696,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
if (unlikely(ret != 0))
goto out_unlock;
+
sw_context->res_ht_initialized = true;
}
+
INIT_LIST_HEAD(&sw_context->staged_cmd_res);
sw_context->ctx = &val_ctx;
ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
@@ -3932,6 +3726,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
ret = vmw_validation_res_validate(&val_ctx, true);
if (unlikely(ret != 0))
goto out_err;
+
vmw_validation_drop_ht(&val_ctx);
ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
@@ -3959,17 +3754,15 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
vmw_query_bo_switch_commit(dev_priv, sw_context);
- ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
- &fence,
+ ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
(user_fence_rep) ? &handle : NULL);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync. The error will be propagated to
* user-space in @fence_rep
*/
-
if (ret != 0)
- DRM_ERROR("Fence submission error. Syncing.\n");
+ VMW_DEBUG_USER("Fence submission error. Syncing.\n");
vmw_execbuf_bindings_commit(sw_context, false);
vmw_bind_dx_query_mob(sw_context);
@@ -3977,21 +3770,19 @@ int vmw_execbuf_process(struct drm_file *file_priv,
vmw_validation_bo_fence(sw_context->ctx, fence);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, fence);
/*
- * If anything fails here, give up trying to export the fence
- * and do a sync since the user mode will not be able to sync
- * the fence itself. This ensures we are still functionally
- * correct.
+ * If anything fails here, give up trying to export the fence and do a
+ * sync since the user mode will not be able to sync the fence itself.
+ * This ensures we are still functionally correct.
*/
if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
sync_file = sync_file_create(&fence->base);
if (!sync_file) {
- DRM_ERROR("Unable to create sync file for fence\n");
+ VMW_DEBUG_USER("Sync file create failed for fence\n");
put_unused_fd(out_fence_fd);
out_fence_fd = -1;
@@ -4004,8 +3795,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle,
- out_fence_fd, sync_file);
+ user_fence_rep, fence, handle, out_fence_fd,
+ sync_file);
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
@@ -4019,8 +3810,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
@@ -4035,8 +3826,7 @@ out_err_nores:
vmw_validation_res_unreserve(&val_ctx, true);
vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context);
- if (unlikely(dev_priv->pinned_bo != NULL &&
- !dev_priv->query_cid_valid))
+ if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
@@ -4045,8 +3835,8 @@ out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
- * Unreference resources outside of the cmdbuf_mutex to
- * avoid deadlocks in resource destruction paths.
+ * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
+ * in resource destruction paths.
*/
vmw_validation_unref_lists(&val_ctx);
out_free_header:
@@ -4064,13 +3854,13 @@ out_free_fence_fd:
*
* @dev_priv: The device private structure.
*
- * This function is called to idle the fifo and unpin the query buffer
- * if the normal way to do this hits an error, which should typically be
- * extremely rare.
+ * This function is called to idle the fifo and unpin the query buffer if the
+ * normal way to do this hits an error, which should typically be extremely
+ * rare.
*/
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
- DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+ VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");
(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
@@ -4082,28 +3872,27 @@ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
/**
- * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
+ * bo.
*
* @dev_priv: The device private structure.
- * @fence: If non-NULL should point to a struct vmw_fence_obj issued
- * _after_ a query barrier that flushes all queries touching the current
- * buffer pointed to by @dev_priv->pinned_bo
+ * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a
+ * query barrier that flushes all queries touching the current buffer pointed to
+ * by @dev_priv->pinned_bo.
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do
+ * safe unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*
- * the @dev_priv->cmdbuf_mutex needs to be held by the current thread
- * before calling this function.
+ * the @dev_priv->cmdbuf_mutex needs to be held by the current thread before
+ * calling this function.
*/
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
struct vmw_fence_obj *fence)
@@ -4153,35 +3942,32 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
vmw_validation_unref_lists(&val_ctx);
vmw_bo_unreference(&dev_priv->pinned_bo);
+
out_unlock:
return;
-
out_no_emit:
vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
vmw_validation_unref_lists(&val_ctx);
vmw_execbuf_unpin_panic(dev_priv);
vmw_bo_unreference(&dev_priv->pinned_bo);
-
}
/**
- * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
- * query bo.
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
*
* @dev_priv: The device private structure.
*
- * This function should be used to unpin the pinned query bo, or
- * as a query barrier when we need to make sure that all queries have
- * finished before the next fifo command. (For example on hardware
- * context destructions where the hardware may otherwise leak unfinished
- * queries).
+ * This function should be used to unpin the pinned query bo, or as a query
+ * barrier when we need to make sure that all queries have finished before the
+ * next fifo command. (For example on hardware context destructions where the
+ * hardware may otherwise leak unfinished queries).
*
- * This function does not return any failure codes, but make attempts
- * to do safe unpinning in case of errors.
+ * This function does not return any failure codes, but makes attempts to do
+ * safe unpinning in case of errors.
*
- * The function will synchronize on the previous query barrier, and will
- * thus not finish until that barrier has executed.
+ * The function will synchronize on the previous query barrier, and will thus
+ * not finish until that barrier has executed.
*/
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
@@ -4203,8 +3989,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
struct dma_fence *in_fence = NULL;
if (unlikely(size < copy_offset[0])) {
- DRM_ERROR("Invalid command size, ioctl %d\n",
- DRM_VMW_EXECBUF);
+ VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
+ DRM_VMW_EXECBUF);
return -EINVAL;
}
@@ -4212,23 +3998,19 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
return -EFAULT;
/*
- * Extend the ioctl argument while
- * maintaining backwards compatibility:
- * We take different code paths depending on the value of
- * arg.version.
+ * Extend the ioctl argument while maintaining backwards compatibility:
+ * We take different code paths depending on the value of arg.version.
*/
-
if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
arg.version == 0)) {
- DRM_ERROR("Incorrect execbuf version.\n");
+ VMW_DEBUG_USER("Incorrect execbuf version.\n");
return -EINVAL;
}
if (arg.version > 1 &&
copy_from_user(&arg.context_handle,
(void __user *) (data + copy_offset[0]),
- copy_offset[arg.version - 1] -
- copy_offset[0]) != 0)
+ copy_offset[arg.version - 1] - copy_offset[0]) != 0)
return -EFAULT;
switch (arg.version) {
@@ -4240,13 +4022,12 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
break;
}
-
/* If imported a fence FD from elsewhere, then wait on it */
if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
in_fence = sync_file_get_fence(arg.imported_fence_fd);
if (!in_fence) {
- DRM_ERROR("Cannot get imported fence\n");
+ VMW_DEBUG_USER("Cannot get imported fence\n");
return -EINVAL;
}
@@ -4264,8 +4045,8 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
NULL, arg.command_size, arg.throttle_us,
arg.context_handle,
(void __user *)(unsigned long)arg.fence_rep,
- NULL,
- arg.flags);
+ NULL, arg.flags);
+
ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out;
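The kernel-doc for vmw_execbuf_cmdbuf() above relies on the usual ERR_PTR convention: the single pointer return carries either the (possibly NULL) @kernel_commands value or an errno encoded as a pointer, for instance -ERESTARTSYS. The stand-alone sketch below mocks ERR_PTR()/IS_ERR()/PTR_ERR() in user space to show how a caller separates the three cases; it illustrates the convention only and is not the driver's actual call site.

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERESTARTSYS	512

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Mock allocation: *header == NULL means "fall back to the fifo path". */
static void *mock_execbuf_cmdbuf(void *kernel_commands, int interrupted,
				 void **header)
{
	*header = NULL;
	if (interrupted)
		return ERR_PTR(-ERESTARTSYS);	/* signal while sleeping */
	return kernel_commands;			/* cmdbuf manager unavailable */
}

int main(void)
{
	void *header;
	char buf[16];
	void *cmd = mock_execbuf_cmdbuf(buf, 0, &header);

	if (IS_ERR(cmd)) {
		printf("hard error %ld\n", PTR_ERR(cmd));
		return 1;
	}
	if (!header)
		printf("using the fifo path, cmd=%p\n", cmd);
	return 0;
}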
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 2a9112515f46..972e8fda6d35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -642,12 +642,11 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
struct vmw_fb_par *par;
struct fb_info *info;
unsigned fb_width, fb_height;
- unsigned fb_bpp, fb_depth, fb_offset, fb_pitch, fb_size;
+ unsigned int fb_bpp, fb_pitch, fb_size;
struct drm_display_mode *init_mode;
int ret;
fb_bpp = 32;
- fb_depth = 24;
/* XXX As shouldn't these be as well. */
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
@@ -655,7 +654,6 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
fb_pitch = fb_width * fb_bpp / 8;
fb_size = fb_pitch * fb_height;
- fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index d0fd147ef75f..ff3586cb6851 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -395,12 +395,8 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
WARN(1, "Command buffer has not been allocated.\n");
ret = NULL;
}
- if (IS_ERR_OR_NULL(ret)) {
- DRM_ERROR("Fifo reserve failure of %u bytes.\n",
- (unsigned) bytes);
- dump_stack();
+ if (IS_ERR_OR_NULL(ret))
return NULL;
- }
return ret;
}
@@ -544,7 +540,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
int ret = 0;
uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);
- fm = vmw_fifo_reserve(dev_priv, bytes);
+ fm = VMW_FIFO_RESERVE(dev_priv, bytes);
if (unlikely(fm == NULL)) {
*seqno = atomic_read(&dev_priv->marker_seq);
ret = -ENOMEM;
@@ -603,12 +599,9 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -652,12 +645,9 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
SVGA3dCmdWaitForGBQuery body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of fifo space for dummy query.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -699,8 +689,3 @@ int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}
-
-void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
-{
- return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID);
-}
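The hunks above delete the vmw_fifo_reserve() wrapper and drop the per-call-site DRM_ERROR()s; call sites now go through VMW_FIFO_RESERVE()/VMW_FIFO_RESERVE_DX(), whose definition is not part of this diff. A plausible shape for such a wrapper, centralizing the single error message so every caller can shrink to "if (!cmd) return -ENOMEM;", is sketched below with hypothetical DEMO_* names; it is an assumption about the macro, not its actual definition.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_private { int unused; };

/* Stand-in for the low-level primitive; oversized requests "fail". */
static void *demo_fifo_reserve_dx(struct demo_private *priv, uint32_t bytes,
				  uint32_t ctx_id)
{
	(void)priv; (void)ctx_id;
	return bytes > 4096 ? NULL : malloc(bytes);
}

/* Hypothetical wrapper (GNU statement expression): log once here. */
#define DEMO_FIFO_RESERVE_DX(priv, bytes, ctx) ({			\
	void *__p = demo_fifo_reserve_dx(priv, bytes, ctx);		\
	if (!__p)							\
		fprintf(stderr, "Fifo reserve failure of %u bytes.\n",	\
			(unsigned int)(bytes));				\
	__p;								\
})

#define DEMO_FIFO_RESERVE(priv, bytes) \
	DEMO_FIFO_RESERVE_DX(priv, bytes, 0xffffffffu)

int main(void)
{
	struct demo_private priv;
	void *cmd = DEMO_FIFO_RESERVE(&priv, 8192);

	if (!cmd)		/* call sites keep only the terse check */
		return 1;
	free(cmd);
	return 0;
}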
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 007a0cc7f232..ae7acc6f3dda 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
+ cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
@@ -110,11 +110,10 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
uint32_t define_size = sizeof(define_cmd) + 4;
uint32_t *cmd;
- cmd = vmw_fifo_reserve(dev_priv, define_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("GMR2 unbind failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, define_size);
+ if (unlikely(cmd == NULL))
return;
- }
+
define_cmd.gmrId = gmr_id;
define_cmd.numPages = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 172a6ba6539c..a15375eb476e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -188,7 +188,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
- DRM_ERROR("Illegal GET_3D_CAP argument.\n");
+ VMW_DEBUG_USER("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
@@ -268,7 +268,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Variable clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Variable clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -291,7 +291,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
@@ -351,7 +351,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
return 0;
if (clips_ptr == NULL) {
- DRM_ERROR("Argument clips_ptr must be specified.\n");
+ VMW_DEBUG_USER("Argument clips_ptr must be specified.\n");
ret = -EINVAL;
goto out_clips;
}
@@ -374,14 +374,14 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
fb = drm_framebuffer_lookup(dev, file_priv, arg->fb_id);
if (!fb) {
- DRM_ERROR("Invalid framebuffer id.\n");
+ VMW_DEBUG_USER("Invalid framebuffer id.\n");
ret = -ENOENT;
goto out_no_fb;
}
vfb = vmw_framebuffer_to_vfb(fb);
if (!vfb->bo) {
- DRM_ERROR("Framebuffer not buffer backed.\n");
+ VMW_DEBUG_USER("Framebuffer not buffer backed.\n");
ret = -EINVAL;
goto out_no_ttm_lock;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ed2f67822f45..b97bc8e5944b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -64,11 +64,9 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv,
if (!image)
return -EINVAL;
- cmd = vmw_fifo_reserve(dev_priv, cmd_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, sizeof(*cmd));
@@ -1202,7 +1200,7 @@ static int vmw_create_bo_proxy(struct drm_device *dev,
vmw_bo_unreference(&res->backup);
res->backup = vmw_bo_reference(bo_mob);
res->backup_offset = 0;
- vmw_resource_unreserve(res, false, NULL, 0);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
return 0;
@@ -2468,13 +2466,11 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
dirty->unit = unit;
if (dirty->fifo_reserve_size > 0) {
- dirty->cmd = vmw_fifo_reserve(dev_priv,
+ dirty->cmd = VMW_FIFO_RESERVE(dev_priv,
dirty->fifo_reserve_size);
- if (!dirty->cmd) {
- DRM_ERROR("Couldn't reserve fifo space "
- "for dirty blits.\n");
+ if (!dirty->cmd)
return -ENOMEM;
- }
+
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
dirty->num_hits = 0;
@@ -2604,12 +2600,9 @@ int vmw_kms_update_proxy(struct vmw_resource *res,
if (!clips)
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
- if (!cmd) {
- DRM_ERROR("Couldn't reserve fifo space for proxy surface "
- "update.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips);
+ if (!cmd)
return -ENOMEM;
- }
for (i = 0; i < num_clips; ++i, clips += increment, ++cmd) {
box = &cmd->body.box;
@@ -2827,7 +2820,8 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
container_of(update->vfb, typeof(*vfbs), base);
ret = vmw_validation_add_resource(&val_ctx, &vfbs->surface->res,
- 0, NULL, NULL);
+ 0, VMW_RES_DIRTY_NONE, NULL,
+ NULL);
}
if (ret)
@@ -2838,7 +2832,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update)
goto out_unref;
reserved_size = update->calc_fifo_size(update, num_hits);
- cmd_start = vmw_fifo_reserve(update->dev_priv, reserved_size);
+ cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size);
if (!cmd_start) {
ret = -ENOMEM;
goto out_revert;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 16be515c4c0f..25e6343bcf21 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -554,11 +554,9 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv,
} *cmd;
fifo_size = sizeof(*cmd) * num_clips;
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
for (i = 0; i < num_clips; i++, clips += increment) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index d83cc66e1210..406edc8cef35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -146,9 +146,8 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -202,12 +201,9 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
return;
bo = otable->page_table->pt_bo;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for OTable "
- "takedown.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return;
- }
memset(cmd, 0, sizeof(*cmd));
cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
@@ -614,16 +610,14 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
BUG_ON(ret != 0);
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object unbinding.\n");
- } else {
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (cmd) {
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
cmd->header.size = sizeof(cmd->body);
cmd->body.mobid = mob->id;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
+
if (bo) {
vmw_bo_fence_single(bo, NULL);
ttm_bo_unreserve(bo);
@@ -683,12 +677,9 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
vmw_fifo_resource_inc(dev_priv);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for Memory "
- "Object binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
goto out_no_cmd_space;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_MOB64;
cmd->header.size = sizeof(cmd->body);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 9f1b9d289bec..d5ef8cf802de 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -124,7 +124,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
- cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+ cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size);
/* hardware has hung, can't do anything here */
if (!cmds)
return -ENOMEM;
@@ -194,7 +194,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
int ret;
for (;;) {
- cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
+ cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds));
if (cmds)
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a7c30e567f09..711f8fd0dd45 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -365,14 +365,6 @@ static int vmw_resource_do_validate(struct vmw_resource *res,
list_add_tail(&res->mob_head, &res->backup->res_list);
}
- /*
- * Only do this on write operations, and move to
- * vmw_resource_unreserve if it can be called after
- * backup buffers have been unreserved. Otherwise
- * sort out locking.
- */
- res->res_dirty = true;
-
return 0;
out_bind_failed:
@@ -386,6 +378,8 @@ out_bind_failed:
* command submission.
*
* @res: Pointer to the struct vmw_resource to unreserve.
+ * @dirty_set: Change dirty status of the resource.
+ * @dirty: When changing dirty status, indicates the new status.
* @switch_backup: Backup buffer has been switched.
* @new_backup: Pointer to new backup buffer if command submission
* switched. May be NULL.
@@ -395,6 +389,8 @@ out_bind_failed:
* resource lru list, so that it can be evicted if necessary.
*/
void vmw_resource_unreserve(struct vmw_resource *res,
+ bool dirty_set,
+ bool dirty,
bool switch_backup,
struct vmw_buffer_object *new_backup,
unsigned long new_backup_offset)
@@ -422,6 +418,9 @@ void vmw_resource_unreserve(struct vmw_resource *res,
if (switch_backup)
res->backup_offset = new_backup_offset;
+ if (dirty_set)
+ res->res_dirty = dirty;
+
if (!res->func->may_evict || res->id == -1 || res->pin_count)
return;
@@ -696,7 +695,7 @@ void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
if (!res->func->unbind)
continue;
- (void) res->func->unbind(res, true, &val_buf);
+ (void) res->func->unbind(res, res->res_dirty, &val_buf);
res->backup_dirty = true;
res->res_dirty = false;
list_del_init(&res->mob_head);
@@ -731,12 +730,9 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
dx_query_ctx = dx_query_mob->dx_query_ctx;
dev_priv = dx_query_ctx->dev_priv;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), dx_query_ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for "
- "query MOB read back.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
cmd->header.size = sizeof(cmd->body);
@@ -932,7 +928,7 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
res->pin_count++;
out_no_validate:
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_write_unlock(&dev_priv->reservation_sem);
@@ -968,7 +964,7 @@ void vmw_resource_unpin(struct vmw_resource *res)
ttm_bo_unreserve(&vbo->base);
}
- vmw_resource_unreserve(res, false, NULL, 0UL);
+ vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
ttm_read_unlock(&dev_priv->reservation_sem);
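In the vmwgfx_resource.c changes above, vmw_resource_unreserve() gains two leading parameters, @dirty_set and @dirty: @dirty is only applied when @dirty_set is true, which is why the pin/unpin paths now pass "false, false, false" to leave the dirty state untouched. A minimal user-space mock of that semantics (not the driver function itself):

#include <stdbool.h>
#include <stdio.h>

struct mock_resource { bool res_dirty; };

/* Mirrors the new unreserve rule: @dirty is consulted only when
 * @dirty_set is true, so "false, false" means "leave dirty state alone". */
static void mock_unreserve(struct mock_resource *res, bool dirty_set, bool dirty)
{
	if (dirty_set)
		res->res_dirty = dirty;
}

int main(void)
{
	struct mock_resource res = { .res_dirty = true };

	mock_unreserve(&res, false, false);	/* pin/unpin path: no change */
	printf("after pin path:        res_dirty=%d\n", res.res_dirty);

	mock_unreserve(&res, true, false);	/* validation says it is clean */
	printf("after validation path: res_dirty=%d\n", res.res_dirty);
	return 0;
}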
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index cd586c52af7e..9a2a3836d89a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -130,12 +130,9 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
BUG_ON(!sou->buffer);
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* The hardware has hung, nothing we can do about it here. */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
@@ -182,12 +179,9 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
return 0;
fifo_size = sizeof(*cmd);
- cmd = vmw_fifo_reserve(dev_priv, fifo_size);
- /* the hardware has hung, nothing we can do about it here */
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
memset(cmd, 0, fifo_size);
cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
@@ -998,11 +992,9 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv,
if (depth == 32)
depth = 24;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (!cmd) {
- DRM_ERROR("Out of fifo space for dirty framebuffer command.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
return -ENOMEM;
- }
cmd->header = SVGA_CMD_DEFINE_GMRFB;
cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8;
@@ -1148,7 +1140,8 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bf32fe446219..d310d21f0d54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -218,10 +218,8 @@ static int vmw_gb_shader_create(struct vmw_resource *res)
goto out_no_fifo;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -254,12 +252,9 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "binding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -285,12 +280,9 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res,
BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -328,10 +320,8 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
mutex_lock(&dev_priv->binding_mutex);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -400,13 +390,9 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res)
if (!list_empty(&shader->cotable_head) || !shader->committed)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd),
- shader->ctx->id);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id);
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -491,12 +477,9 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res)
return 0;
WARN_ON_ONCE(!shader->committed);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for shader "
- "scrubbing.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER;
cmd->header.size = sizeof(cmd->body);
@@ -865,14 +848,13 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
ret = vmw_user_bo_lookup(tfile, buffer_handle,
&buffer, NULL);
if (unlikely(ret != 0)) {
- DRM_ERROR("Could not find buffer for shader "
- "creation.\n");
+ VMW_DEBUG_USER("Couldn't find buffer for shader creation.\n");
return ret;
}
if ((u64)buffer->base.num_pages * PAGE_SIZE <
(u64)size + (u64)offset) {
- DRM_ERROR("Illegal buffer- or shader size.\n");
+ VMW_DEBUG_USER("Illegal buffer- or shader size.\n");
ret = -EINVAL;
goto out_bad_arg;
}
@@ -886,7 +868,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
shader_type = SVGA3D_SHADERTYPE_PS;
break;
default:
- DRM_ERROR("Illegal shader type.\n");
+ VMW_DEBUG_USER("Illegal shader type.\n");
ret = -EINVAL;
goto out_bad_arg;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 6a6865384e91..73e9a487e659 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -239,17 +239,17 @@ vmw_simple_resource_lookup(struct ttm_object_file *tfile,
base = ttm_base_object_lookup(tfile, handle);
if (!base) {
- DRM_ERROR("Invalid %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-ESRCH);
}
if (ttm_base_object_type(base) != func->ttm_res_type) {
ttm_base_object_unref(&base);
- DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
- func->res_func.type_name,
- (unsigned long) handle);
+ VMW_DEBUG_USER("Invalid type of %s handle 0x%08lx.\n",
+ func->res_func.type_name,
+ (unsigned long) handle);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index bc8bb690f1ea..63807361e16f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -170,13 +170,12 @@ static int vmw_view_create(struct vmw_resource *res)
return 0;
}
- cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size,
- view->ctx->id);
+ cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id);
if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view creation.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
+
memcpy(cmd, &view->cmd, view->cmd_size);
WARN_ON(cmd->body.view_id != view->view_id);
/* Sid may have changed due to surface eviction. */
@@ -214,12 +213,9 @@ static int vmw_view_destroy(struct vmw_resource *res)
if (!view->committed || res->id == -1)
return 0;
- cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id);
- if (!cmd) {
- DRM_ERROR("Failed reserving FIFO space for view "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id);
+ if (!cmd)
return -ENOMEM;
- }
cmd->header.id = vmw_view_destroy_cmds[view->view_type];
cmd->header.size = sizeof(cmd->body);
@@ -338,12 +334,12 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
if (cmd_size != vmw_view_define_sizes[view_type] +
sizeof(SVGA3dCmdHeader)) {
- DRM_ERROR("Illegal view create command size.\n");
+ VMW_DEBUG_USER("Illegal view create command size.\n");
return -EINVAL;
}
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view add view id.\n");
+ VMW_DEBUG_USER("Illegal view add view id.\n");
return -EINVAL;
}
@@ -352,8 +348,7 @@ int vmw_view_add(struct vmw_cmdbuf_res_manager *man,
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, &ttm_opt_ctx);
if (ret) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for view"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for view creation\n");
return ret;
}
@@ -413,7 +408,7 @@ int vmw_view_remove(struct vmw_cmdbuf_res_manager *man,
struct vmw_resource **res_p)
{
if (!vmw_view_id_ok(user_key, view_type)) {
- DRM_ERROR("Illegal view remove view id.\n");
+ VMW_DEBUG_USER("Illegal view remove view id.\n");
return -EINVAL;
}
@@ -497,6 +492,30 @@ struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
vmw_view_key(user_key, view_type));
}
+/**
+ * vmw_view_dirtying - Return whether a view type is dirtying its resource
+ * @res: Pointer to the view
+ *
+ * Each time a resource is put on the validation list as the result of a
+ * view pointing to it, we need to determine whether that resource will
+ * be dirtied (written to by the GPU) as a result of the corresponding
+ * GPU operation. Currently only rendertarget- and depth-stencil views are
+ * capable of dirtying their resource.
+ *
+ * Return: Whether the view type of @res dirties the resource it points to.
+ */
+u32 vmw_view_dirtying(struct vmw_resource *res)
+{
+ static u32 view_is_dirtying[vmw_view_max] = {
+ [vmw_view_rt] = VMW_RES_DIRTY_SET,
+ [vmw_view_ds] = VMW_RES_DIRTY_SET,
+ };
+
+ /* Update this function as we add more view types */
+ BUILD_BUG_ON(vmw_view_max != 3);
+ return view_is_dirtying[vmw_view(res)->view_type];
+}
+
const u32 vmw_view_destroy_cmds[] = {
[vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
[vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
index b80c7252f2fd..12565047bc55 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.h
@@ -157,4 +157,5 @@ extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res);
extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man,
enum vmw_view_type view_type,
u32 user_key);
+extern u32 vmw_view_dirtying(struct vmw_resource *res);
#endif
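vmw_view_dirtying(), added in vmwgfx_so.c above, is a designated-initializer lookup table keyed by view type: unnamed entries default to 0 (VMW_RES_DIRTY_NONE) and a BUILD_BUG_ON() ties the table size to the enum so new view types cannot be added silently. Below is a compilable user-space sketch of the same pattern, with _Static_assert standing in for BUILD_BUG_ON and mock enum/flag names.

#include <stdio.h>

enum mock_view_type { mock_view_sr, mock_view_rt, mock_view_ds, mock_view_max };

#define MOCK_RES_DIRTY_NONE	0u
#define MOCK_RES_DIRTY_SET	(1u << 0)

/* Same shape as vmw_view_dirtying(): only rendertarget and depth-stencil
 * views report that they dirty the surface they point to. */
static unsigned int mock_view_dirtying(enum mock_view_type type)
{
	static const unsigned int view_is_dirtying[mock_view_max] = {
		[mock_view_rt] = MOCK_RES_DIRTY_SET,
		[mock_view_ds] = MOCK_RES_DIRTY_SET,
	};

	/* Update the table when adding view types. */
	_Static_assert(mock_view_max == 3, "table out of sync with enum");
	return view_is_dirtying[type];
}

int main(void)
{
	printf("sr=%u rt=%u ds=%u\n",
	       mock_view_dirtying(mock_view_sr),
	       mock_view_dirtying(mock_view_rt),
	       mock_view_dirtying(mock_view_ds));
	return 0;
}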
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 096c2941a8e4..f803bb5e782b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -111,7 +111,7 @@ struct vmw_stdu_update_gb_image {
*/
struct vmw_screen_target_display_unit {
struct vmw_display_unit base;
- const struct vmw_surface *display_srf;
+ struct vmw_surface *display_srf;
enum stdu_content_type content_fb_type;
s32 display_width, display_height;
@@ -167,12 +167,9 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv,
SVGA3dCmdDefineGBScreenTarget body;
} *cmd;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space defining Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -229,12 +226,9 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv,
memset(&image, 0, sizeof(image));
image.sid = res ? res->id : SVGA3D_INVALID_ID;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space binding a screen target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_BIND_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -296,12 +290,9 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv,
return -EINVAL;
}
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space updating a Screen Target\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
0, stdu->display_width,
@@ -335,12 +326,9 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv,
if (unlikely(!stdu->defined))
return 0;
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Out of FIFO space, screen target not destroyed\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL))
return -ENOMEM;
- }
cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SCREENTARGET;
cmd->header.size = sizeof(cmd->body);
@@ -533,6 +521,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty)
vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size);
+ stdu->display_srf->res.res_dirty = true;
ddirty->left = ddirty->top = S32_MAX;
ddirty->right = ddirty->bottom = S32_MIN;
}
@@ -629,20 +618,16 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
region.x2 = diff.rect.x2;
region.y1 = diff.rect.y1;
region.y2 = diff.rect.y2;
- ret = vmw_kms_update_proxy(
- (struct vmw_resource *) &stdu->display_srf->res,
- (const struct drm_clip_rect *) &region, 1, 1);
+ ret = vmw_kms_update_proxy(&stdu->display_srf->res, &region,
+ 1, 1);
if (ret)
goto out_cleanup;
dev_priv = vmw_priv(stdu->base.crtc.dev);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (!cmd) {
- DRM_ERROR("Cannot reserve FIFO space to update STDU");
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
+ if (!cmd)
goto out_cleanup;
- }
vmw_stdu_populate_update(cmd, stdu->base.unit,
region.x1, region.x2,
@@ -820,6 +805,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty)
cmd->body.dest.sid = stdu->display_srf->res.id;
update = (struct vmw_stdu_update *) &blit[dirty->num_hits];
commit_size = sizeof(*cmd) + blit_size + sizeof(*update);
+ stdu->display_srf->res.res_dirty = true;
} else {
update = dirty->cmd;
commit_size = sizeof(*update);
@@ -876,7 +862,8 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
if (!srf)
srf = &vfbs->surface->res;
- ret = vmw_validation_add_resource(&val_ctx, srf, 0, NULL, NULL);
+ ret = vmw_validation_add_resource(&val_ctx, srf, 0, VMW_RES_DIRTY_NONE,
+ NULL, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index ef09f7edf931..219471903bc1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -342,12 +342,9 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
if (res->id != -1) {
- cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size());
+ if (unlikely(!cmd))
return;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
@@ -414,10 +411,8 @@ static int vmw_legacy_srf_create(struct vmw_resource *res)
*/
submit_size = vmw_surface_define_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -468,12 +463,10 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res,
BUG_ON(!val_buf->bo);
submit_size = vmw_surface_dma_size(srf);
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "DMA.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
+
vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
vmw_surface_dma_encode(srf, cmd, &ptr, bind);
@@ -556,12 +549,9 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res)
*/
submit_size = vmw_surface_destroy_size();
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "eviction.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
vmw_surface_destroy_encode(res->id, cmd);
vmw_fifo_commit(dev_priv, submit_size);
@@ -748,11 +738,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
-
desc = svga3dsurface_get_desc(req->format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format for surface creation.\n");
- DRM_ERROR("Format requested is: %d\n", req->format);
+ VMW_DEBUG_USER("Invalid format %d for surface creation.\n",
+ req->format);
return -EINVAL;
}
@@ -764,8 +753,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
size, &ctx);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for surface"
- " creation.\n");
+ DRM_ERROR("Out of graphics memory for surface.\n");
goto out_unlock;
}
@@ -939,12 +927,12 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle);
if (unlikely(!base)) {
- DRM_ERROR("Could not find surface to reference.\n");
+ VMW_DEBUG_USER("Could not find surface to reference.\n");
goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
- DRM_ERROR("Referenced object is not a surface.\n");
+ VMW_DEBUG_USER("Referenced object is not a surface.\n");
goto out_bad_resource;
}
@@ -1022,8 +1010,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(user_sizes, &srf->base_size,
sizeof(srf->base_size));
if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
+ VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
+ srf->num_sizes);
ttm_ref_object_base_unref(tfile, base->handle, TTM_REF_USAGE);
ret = -EFAULT;
}
@@ -1088,12 +1076,10 @@ static int vmw_gb_surface_create(struct vmw_resource *res)
submit_len = sizeof(*cmd);
}
- cmd = vmw_fifo_reserve(dev_priv, submit_len);
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_len);
cmd2 = (typeof(cmd2))cmd;
cmd3 = (typeof(cmd3))cmd;
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "creation.\n");
ret = -ENOMEM;
goto out_no_fifo;
}
@@ -1171,12 +1157,9 @@ static int vmw_gb_surface_bind(struct vmw_resource *res,
submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0);
- cmd1 = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd1)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "binding.\n");
+ cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd1))
return -ENOMEM;
- }
cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE;
cmd1->header.size = sizeof(cmd1->body);
@@ -1221,12 +1204,9 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2));
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "unbinding.\n");
+ cmd = VMW_FIFO_RESERVE(dev_priv, submit_size);
+ if (unlikely(!cmd))
return -ENOMEM;
- }
if (readback) {
cmd1 = (void *) cmd;
@@ -1280,10 +1260,8 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
vmw_view_surface_list_destroy(dev_priv, &srf->view_list);
vmw_binding_res_list_scrub(&res->binding_head);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+ cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
if (unlikely(!cmd)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
mutex_unlock(&dev_priv->binding_mutex);
return -ENOMEM;
}
@@ -1405,16 +1383,16 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
if (for_scanout) {
if (!svga3dsurface_is_screen_target_format(format)) {
- DRM_ERROR("Invalid Screen Target surface format.");
+ VMW_DEBUG_USER("Invalid Screen Target surface format.");
return -EINVAL;
}
if (size.width > dev_priv->texture_max_width ||
size.height > dev_priv->texture_max_height) {
- DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
- size.width, size.height,
- dev_priv->texture_max_width,
- dev_priv->texture_max_height);
+ VMW_DEBUG_USER("%ux%u\n, exceeds max surface size %ux%u",
+ size.width, size.height,
+ dev_priv->texture_max_width,
+ dev_priv->texture_max_height);
return -EINVAL;
}
} else {
@@ -1422,14 +1400,14 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
desc = svga3dsurface_get_desc(format);
if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
- DRM_ERROR("Invalid surface format.\n");
+ VMW_DEBUG_USER("Invalid surface format.\n");
return -EINVAL;
}
}
/* array_size must be null for non-GL3 host. */
if (array_size > 0 && !dev_priv->has_dx) {
- DRM_ERROR("Tried to create DX surface on non-DX host.\n");
+ VMW_DEBUG_USER("Tried to create DX surface on non-DX host.\n");
return -EINVAL;
}
@@ -1651,7 +1629,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (ret == 0) {
if (res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
- DRM_ERROR("Surface backup buffer too small.\n");
+ VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_bo_unreference(&res->backup);
ret = -EINVAL;
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e6d75e377dd8..8bafa6eac5a8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -30,16 +30,9 @@
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
- struct drm_file *file_priv;
- struct vmw_private *dev_priv;
+ struct drm_file *file_priv = filp->private_data;
+ struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);
- if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
- DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
- return -EINVAL;
- }
-
- file_priv = filp->private_data;
- dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e9944ac2e057..f611b2290a1b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -76,6 +76,8 @@ struct vmw_validation_res_node {
u32 switching_backup : 1;
u32 first_usage : 1;
u32 reserved : 1;
+ u32 dirty : 1;
+ u32 dirty_set : 1;
unsigned long private[0];
};
@@ -299,6 +301,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
* @ctx: The validation context.
* @res: The resource.
* @priv_size: Size of private, additional metadata.
+ * @dirty: Whether to change dirty status.
* @p_node: Output pointer of additional metadata address.
* @first_usage: Whether this was the first time this resource was seen.
*
@@ -307,6 +310,7 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx,
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage)
{
@@ -321,8 +325,7 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
node = vmw_validation_mem_alloc(ctx, sizeof(*node) + priv_size);
if (!node) {
- DRM_ERROR("Failed to allocate a resource validation "
- "entry.\n");
+ VMW_DEBUG_USER("Failed to allocate a resource validation entry.\n");
return -ENOMEM;
}
@@ -358,6 +361,11 @@ int vmw_validation_add_resource(struct vmw_validation_context *ctx,
}
out_fill:
+ if (dirty) {
+ node->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ node->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+ }
if (first_usage)
*first_usage = node->first_usage;
if (p_node)
@@ -367,6 +375,29 @@ out_fill:
}
/**
+ * vmw_validation_res_set_dirty - Register a resource dirty set or clear during
+ * validation.
+ * @ctx: The validation context.
+ * @val_private: The additional meta-data pointer returned when the
+ * resource was registered with the validation context. Used to identify
+ * the resource.
+ * @dirty: Dirty information VMW_RES_DIRTY_XX
+ */
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty)
+{
+ struct vmw_validation_res_node *val;
+
+ if (!dirty)
+ return;
+
+ val = container_of(val_private, typeof(*val), private);
+ val->dirty_set = 1;
+ /* Overwriting previous information here is intentional! */
+ val->dirty = (dirty & VMW_RES_DIRTY_SET) ? 1 : 0;
+}
+
+/**
* vmw_validation_res_switch_backup - Register a backup MOB switch during
* validation.
* @ctx: The validation context.
@@ -450,15 +481,23 @@ void vmw_validation_res_unreserve(struct vmw_validation_context *ctx,
struct vmw_validation_res_node *val;
list_splice_init(&ctx->resource_ctx_list, &ctx->resource_list);
-
- list_for_each_entry(val, &ctx->resource_list, head) {
- if (val->reserved)
- vmw_resource_unreserve(val->res,
- !backoff &&
- val->switching_backup,
- val->new_backup,
- val->new_backup_offset);
- }
+ if (backoff)
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ false, false, false,
+ NULL, 0);
+ }
+ else
+ list_for_each_entry(val, &ctx->resource_list, head) {
+ if (val->reserved)
+ vmw_resource_unreserve(val->res,
+ val->dirty_set,
+ val->dirty,
+ val->switching_backup,
+ val->new_backup,
+ val->new_backup_offset);
+ }
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
index 3b396fea40d7..523f6ac5c335 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
@@ -33,6 +33,10 @@
#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>
+#define VMW_RES_DIRTY_NONE 0
+#define VMW_RES_DIRTY_SET BIT(0)
+#define VMW_RES_DIRTY_CLEAR BIT(1)
+
/**
* struct vmw_validation_mem - Custom interface to provide memory reservations
* for the validation code.
@@ -237,6 +241,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx);
int vmw_validation_add_resource(struct vmw_validation_context *ctx,
struct vmw_resource *res,
size_t priv_size,
+ u32 dirty,
void **p_node,
bool *first_usage);
void vmw_validation_drop_ht(struct vmw_validation_context *ctx);
@@ -261,4 +266,6 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx,
int vmw_validation_preload_bo(struct vmw_validation_context *ctx);
int vmw_validation_preload_res(struct vmw_validation_context *ctx,
unsigned int size);
+void vmw_validation_res_set_dirty(struct vmw_validation_context *ctx,
+ void *val_private, u32 dirty);
#endif
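The VMW_RES_DIRTY_NONE/SET/CLEAR flags added to vmwgfx_validation.h feed the two new bitfields on the validation node: dirty_set records that some caller expressed an opinion, and dirty holds the value to apply at unreserve time, with later callers deliberately overwriting earlier ones ("Overwriting previous information here is intentional!"). A small self-contained mock of that update logic, using mock names rather than the driver structures:

#include <stdio.h>

#define MOCK_RES_DIRTY_NONE	0u
#define MOCK_RES_DIRTY_SET	(1u << 0)
#define MOCK_RES_DIRTY_CLEAR	(1u << 1)

struct mock_res_node {
	unsigned int dirty : 1;		/* value applied at unreserve time */
	unsigned int dirty_set : 1;	/* whether any caller expressed an opinion */
};

/* Mirrors vmw_validation_add_resource()/vmw_validation_res_set_dirty():
 * NONE leaves the node alone, SET/CLEAR overwrite what was recorded
 * before, so the last caller wins. */
static void mock_node_set_dirty(struct mock_res_node *node, unsigned int dirty)
{
	if (!dirty)
		return;
	node->dirty_set = 1;
	node->dirty = (dirty & MOCK_RES_DIRTY_SET) ? 1 : 0;
}

int main(void)
{
	struct mock_res_node node = {0};

	mock_node_set_dirty(&node, MOCK_RES_DIRTY_NONE);  /* no opinion */
	mock_node_set_dirty(&node, MOCK_RES_DIRTY_SET);   /* marked dirty */
	mock_node_set_dirty(&node, MOCK_RES_DIRTY_CLEAR); /* later caller clears */
	printf("dirty_set=%u dirty=%u\n", node.dirty_set, node.dirty);
	return 0;
}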
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 3e78a832d7f9..84aa4d61dc42 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -582,6 +582,7 @@ static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
drm_kms_helper_poll_fini(dev);
drm_dev_unplug(dev);
+ drm_dev_put(dev);
front_info->drm_info = NULL;