author     Daniel Vetter <daniel.vetter@ffwll.ch>  2017-03-23 08:15:55 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>  2017-03-23 08:15:55 +0100
commit     628d4c46eef4fabe3ddbe07698577162c1cd5d41 (patch)
tree       5e89f3deafd44c8b81543540c573af0c16dbb427 /drivers
parent     1e797f556c616a42f1e039b1ff1d3c58f61b6104 (diff)
parent     65d1086c44791112188f6aebbdc3a27cab3736d3 (diff)
download   linux-628d4c46eef4fabe3ddbe07698577162c1cd5d41.tar.bz2
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux into drm-misc-next
Resync with drm-next: I have a patch that currently can't be applied because drm-misc-next lacks the latest drm/i915 code.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_processor.c57
-rw-r--r--drivers/acpi/bus.c1
-rw-r--r--drivers/acpi/internal.h2
-rw-r--r--drivers/acpi/ioapic.c22
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/processor_core.c133
-rw-r--r--drivers/ata/ahci_qoriq.c6
-rw-r--r--drivers/ata/libata-sff.c1
-rw-r--r--drivers/ata/libata-transport.c9
-rw-r--r--drivers/base/core.c5
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c2
-rw-r--r--drivers/block/paride/pf.c2
-rw-r--r--drivers/block/paride/pg.c2
-rw-r--r--drivers/block/paride/pt.c2
-rw-r--r--drivers/block/rbd.c16
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/char/agp/intel-gtt.c16
-rw-r--r--drivers/char/hw_random/omap-rng.c16
-rw-r--r--drivers/char/nwbutton.c2
-rw-r--r--drivers/char/random.c129
-rw-r--r--drivers/clocksource/tcb_clksrc.c16
-rw-r--r--drivers/cpufreq/cpufreq.c9
-rw-r--r--drivers/cpufreq/intel_pstate.c131
-rw-r--r--drivers/crypto/s5p-sss.c132
-rw-r--r--drivers/crypto/ux500/cryp/cryp.c2
-rw-r--r--drivers/dax/dax.c33
-rw-r--r--drivers/firmware/efi/arm-runtime.c1
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c4
-rw-r--r--drivers/gpu/drm/amd/acp/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/si_dpm.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c2
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c18
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h1
-rw-r--r--drivers/gpu/drm/drm_edid.c3
-rw-r--r--drivers/gpu/drm/drm_fourcc.c8
-rw-r--r--drivers/gpu/drm/i915/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug29
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/gvt/cfg_space.c57
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c46
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c139
-rw-r--r--drivers/gpu/drm/i915/gvt/display.h20
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c40
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h14
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c439
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c12
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c66
-rw-r--r--drivers/gpu/drm/i915/gvt/opregion.c5
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c99
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c72
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c25
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c642
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c151
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h308
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c607
-rw-r--r--drivers/gpu/drm/i915/i915_gem.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.c189
-rw-r--r--drivers/gpu/drm/i915/i915_gem_clflush.h37
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c166
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c24
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c159
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c2086
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h124
-rw-r--r--drivers/gpu/drm/i915/i915_gem_internal.c7
-rw-r--r--drivers/gpu/drm/i915/i915_gem_object.h44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c499
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.h96
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c42
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c25
-rw-r--r--drivers/gpu/drm/i915/i915_gem_timeline.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c75
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c321
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c290
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c380
-rw-r--r--drivers/gpu/drm/i915/i915_params.c16
-rw-r--r--drivers/gpu/drm/i915/i915_params.h83
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c32
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h107
-rw-r--r--drivers/gpu/drm/i915/i915_selftest.h106
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c63
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h472
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h13
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c17
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c91
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h4
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c15
-rw-r--r--drivers/gpu/drm/i915/intel_atomic_plane.c17
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c4
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c46
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c589
-rw-r--r--drivers/gpu/drm/i915/intel_cdclk.c1891
-rw-r--r--drivers/gpu/drm/i915/intel_color.c104
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c513
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c17
-rw-r--r--drivers/gpu/drm/i915/intel_display.c3308
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c307
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c37
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c54
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.h16
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h225
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c614
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h13
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c135
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_vbt.c (renamed from drivers/gpu/drm/i915/intel_dsi_panel_vbt.c)133
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_engine_cs.c703
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c13
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c78
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c25
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/intel_frontbuffer.h8
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c462
-rw-r--r--drivers/gpu/drm/i915/intel_hangcheck.c4
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c41
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c59
-rw-r--r--drivers/gpu/drm/i915/intel_huc.c120
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c4
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c916
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h4
-rw-r--r--drivers/gpu/drm/i915/intel_lspcon.c17
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c55
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c26
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c87
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pipe_crc.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1052
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1163
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h185
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c625
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c1
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c34
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c293
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uc.c287
-rw-r--r--drivers/gpu/drm/i915/intel_uc.h25
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c282
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.c135
-rw-r--r--drivers/gpu/drm/i915/selftests/huge_gem_object.h45
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_coherency.c385
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c463
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c303
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c350
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c1562
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_object.c600
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_request.c882
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h19
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h20
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.c63
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_random.h50
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_selftest.c250
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c746
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c481
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_hangcheck.c543
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c182
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_context.h34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_dmabuf.c176
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_dmabuf.h41
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_drm.h31
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.c207
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_engine.h54
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c226
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.h9
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_object.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.c138
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gtt.h35
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.c63
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_request.h46
-rw-r--r--drivers/gpu/drm/i915/selftests/scatterlist.c355
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c30
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h3
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c24
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c344
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h6
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_crtc.c49
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_out.c4
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_regs.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h47
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h13
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h7
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h8
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c21
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c98
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c29
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c108
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c30
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/base.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c552
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h207
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c323
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c263
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/falcon/v1.c124
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c42
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h33
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c148
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c62
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c70
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c68
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c15
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c25
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c589
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h143
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c117
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c388
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h35
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c165
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c23
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c252
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c97
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c149
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c8
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c37
-rw-r--r--drivers/gpu/ipu-v3/Makefile2
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c43
-rw-r--r--drivers/gpu/ipu-v3/ipu-cpmem.c78
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c61
-rw-r--r--drivers/gpu/ipu-v3/ipu-dp.c15
-rw-r--r--drivers/gpu/ipu-v3/ipu-image-convert.c7
-rw-r--r--drivers/gpu/ipu-v3/ipu-pre.c289
-rw-r--r--drivers/gpu/ipu-v3/ipu-prg.c424
-rw-r--r--drivers/gpu/ipu-v3/ipu-prv.h27
-rw-r--r--drivers/hv/channel.c2
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c27
-rw-r--r--drivers/i2c/busses/i2c-designware-baytrail.c83
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h14
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c26
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c36
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c3
-rw-r--r--drivers/i2c/busses/i2c-meson.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c9
-rw-r--r--drivers/i2c/busses/i2c-riic.c6
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/irqchip/irq-crossbar.c9
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c16
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c3
-rw-r--r--drivers/isdn/hisax/st5481_b.c2
-rw-r--r--drivers/macintosh/macio_asic.c1
-rw-r--r--drivers/md/bcache/util.h1
-rw-r--r--drivers/md/dm.c29
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md.c27
-rw-r--r--drivers/md/md.h6
-rw-r--r--drivers/md/raid1.c29
-rw-r--r--drivers/md/raid10.c44
-rw-r--r--drivers/md/raid5.c5
-rw-r--r--drivers/media/dvb-frontends/drx39xyj/drx_driver.h8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c33
-rw-r--r--drivers/media/rc/lirc_dev.c4
-rw-r--r--drivers/media/rc/nuvoton-cir.c5
-rw-r--r--drivers/media/rc/rc-main.c26
-rw-r--r--drivers/media/rc/serial_ir.c123
-rw-r--r--drivers/media/usb/dvb-usb/dw2102.c244
-rw-r--r--drivers/misc/sgi-gru/grufault.c9
-rw-r--r--drivers/mtd/spi-nor/spi-nor.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c40
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c206
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h16
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c110
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c104
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_config.h6
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c17
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.h4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_main.h42
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_network.h43
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h1
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c184
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h4
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c64
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c43
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c33
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c3
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c5
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c31
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c47
-rw-r--r--drivers/net/hyperv/hyperv_net.h3
-rw-r--r--drivers/net/hyperv/netvsc.c8
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/phy/spi_ks8995.c3
-rw-r--r--drivers/net/team/team.c1
-rw-r--r--drivers/net/tun.c19
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c73
-rw-r--r--drivers/net/wan/fsl_ucc_hdlc.c4
-rw-r--r--drivers/net/wimax/i2400m/usb.c3
-rw-r--r--drivers/net/xen-netback/interface.c26
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netback/xenbus.c20
-rw-r--r--drivers/pci/dwc/pci-exynos.c8
-rw-r--r--drivers/pci/pcie/aspm.c5
-rw-r--r--drivers/pci/quirks.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c15
-rw-r--r--drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c12
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c49
-rw-r--r--drivers/platform/x86/asus-wmi.c22
-rw-r--r--drivers/platform/x86/asus-wmi.h1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c451
-rw-r--r--drivers/scsi/Kconfig19
-rw-r--r--drivers/scsi/aacraid/src.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_core.c2
-rw-r--r--drivers/scsi/libiscsi.c26
-rw-r--r--drivers/scsi/lpfc/lpfc.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c9
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c22
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c24
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h4
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c128
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c107
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.h1
-rw-r--r--drivers/scsi/lpfc/lpfc_nvmet.c43
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c68
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h3
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c19
-rw-r--r--drivers/scsi/qedf/qedf_dbg.h13
-rw-r--r--drivers/scsi/qedf/qedf_fip.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c4
-rw-r--r--drivers/scsi/qedf/qedf_main.c4
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c16
-rw-r--r--drivers/scsi/qedi/qedi_fw.c4
-rw-r--r--drivers/scsi/qedi/qedi_gbl.h8
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.c8
-rw-r--r--drivers/scsi/qedi/qedi_main.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig1
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h56
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c107
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h18
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c85
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c41
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c304
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c748
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h39
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h6
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c49
-rw-r--r--drivers/scsi/scsi_lib.c14
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/sd.c58
-rw-r--r--drivers/scsi/storvsc_drv.c27
-rw-r--r--drivers/scsi/ufs/ufs.h22
-rw-r--r--drivers/scsi/ufs/ufshcd.c231
-rw-r--r--drivers/scsi/ufs/ufshcd.h15
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-socket.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c1
-rw-r--r--drivers/staging/vc04_services/Kconfig1
-rw-r--r--drivers/target/target_core_alua.c82
-rw-r--r--drivers/target/target_core_configfs.c4
-rw-r--r--drivers/target/target_core_pscsi.c50
-rw-r--r--drivers/target/target_core_sbc.c10
-rw-r--r--drivers/target/target_core_tpg.c3
-rw-r--r--drivers/target/target_core_transport.c3
-rw-r--r--drivers/target/target_core_user.c152
-rw-r--r--drivers/tty/n_hdlc.c132
-rw-r--r--drivers/tty/serial/samsung.c6
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c3
-rw-r--r--drivers/usb/dwc3/gadget.c76
-rw-r--r--drivers/usb/dwc3/gadget.h14
-rw-r--r--drivers/usb/gadget/configfs.c1
-rw-r--r--drivers/usb/gadget/function/f_fs.c17
-rw-r--r--drivers/usb/gadget/function/f_uvc.c7
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c4
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.c25
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c5
-rw-r--r--drivers/usb/host/ohci-at91.c4
-rw-r--r--drivers/usb/host/xhci-dbg.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c7
-rw-r--r--drivers/usb/host/xhci-plat.c2
-rw-r--r--drivers/usb/host/xhci-tegra.c1
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/misc/iowarrior.c21
-rw-r--r--drivers/usb/misc/usb251xb.c59
-rw-r--r--drivers/usb/phy/phy-isp1301.c7
-rw-r--r--drivers/usb/serial/digi_acceleport.c2
-rw-r--r--drivers/usb/serial/io_ti.c8
-rw-r--r--drivers/usb/serial/omninet.c13
-rw-r--r--drivers/usb/serial/safe_serial.c5
-rw-r--r--drivers/usb/storage/unusual_devs.h14
-rw-r--r--drivers/xen/gntdev.c11
-rw-r--r--drivers/xen/swiotlb-xen.c47
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c1
489 files changed, 31753 insertions, 13441 deletions
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 4467a8089ab8..0143135b3abe 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu)
void __weak arch_unregister_cpu(int cpu) {}
-int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
-{
- return -ENODEV;
-}
-
static int acpi_processor_hotadd_init(struct acpi_processor *pr)
{
unsigned long long sta;
@@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
+ if (acpi_duplicate_processor_id(pr->acpi_id)) {
+ dev_err(&device->dev,
+ "Failed to get unique processor _UID (0x%x)\n",
+ pr->acpi_id);
+ return -ENODEV;
+ }
+
pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration,
pr->acpi_id);
if (invalid_phys_cpuid(pr->phys_id))
@@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = {
static int nr_unique_ids __initdata;
/* The number of the duplicate processor IDs */
-static int nr_duplicate_ids __initdata;
+static int nr_duplicate_ids;
/* Used to store the unique processor IDs */
static int unique_processor_ids[] __initdata = {
@@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = {
};
/* Used to store the duplicate processor IDs */
-static int duplicate_processor_ids[] __initdata = {
+static int duplicate_processor_ids[] = {
[0 ... NR_CPUS - 1] = -1,
};
@@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle,
void **rv)
{
acpi_status status;
+ acpi_object_type acpi_type;
+ unsigned long long uid;
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ status = acpi_get_type(handle, &acpi_type);
if (ACPI_FAILURE(status))
- acpi_handle_info(handle, "Not get the processor object\n");
- else
- processor_validated_ids_update(object.processor.proc_id);
+ return false;
+
+ switch (acpi_type) {
+ case ACPI_TYPE_PROCESSOR:
+ status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
+ if (ACPI_FAILURE(status))
+ goto err;
+ uid = object.processor.proc_id;
+ break;
+
+ case ACPI_TYPE_DEVICE:
+ status = acpi_evaluate_integer(handle, "_UID", NULL, &uid);
+ if (ACPI_FAILURE(status))
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ processor_validated_ids_update(uid);
+ return true;
+
+err:
+ acpi_handle_info(handle, "Invalid processor object\n");
+ return false;
- return AE_OK;
}
-static void __init acpi_processor_check_duplicates(void)
+void __init acpi_processor_check_duplicates(void)
{
- /* Search all processor nodes in ACPI namespace */
+ /* check the correctness for all processors in ACPI namespace */
acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
ACPI_UINT32_MAX,
acpi_processor_ids_walk,
NULL, NULL, NULL);
+ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk,
+ NULL, NULL);
}
-bool __init acpi_processor_validate_proc_id(int proc_id)
+bool acpi_duplicate_processor_id(int proc_id)
{
int i;
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 80cb5eb75b63..34fbe027e73a 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1249,7 +1249,6 @@ static int __init acpi_init(void)
acpi_wakeup_device_init();
acpi_debugger_init();
acpi_setup_sb_notify_handler();
- acpi_set_processor_mapping();
return 0;
}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 219b90bc0922..f15900132912 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -41,8 +41,10 @@ void acpi_gpe_apply_masked_gpes(void);
void acpi_container_init(void);
void acpi_memory_hotplug_init(void);
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+void pci_ioapic_remove(struct acpi_pci_root *root);
int acpi_ioapic_remove(struct acpi_pci_root *root);
#else
+static inline void pci_ioapic_remove(struct acpi_pci_root *root) { return; }
static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
#endif
#ifdef CONFIG_ACPI_DOCK
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 6d7ce6e12aaa..1120dfd625b8 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -206,24 +206,34 @@ int acpi_ioapic_add(acpi_handle root_handle)
return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
}
-int acpi_ioapic_remove(struct acpi_pci_root *root)
+void pci_ioapic_remove(struct acpi_pci_root *root)
{
- int retval = 0;
struct acpi_pci_ioapic *ioapic, *tmp;
mutex_lock(&ioapic_list_lock);
list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
if (root->device->handle != ioapic->root_handle)
continue;
-
- if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
- retval = -EBUSY;
-
if (ioapic->pdev) {
pci_release_region(ioapic->pdev, 0);
pci_disable_device(ioapic->pdev);
pci_dev_put(ioapic->pdev);
}
+ }
+ mutex_unlock(&ioapic_list_lock);
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+ int retval = 0;
+ struct acpi_pci_ioapic *ioapic, *tmp;
+
+ mutex_lock(&ioapic_list_lock);
+ list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+ if (root->device->handle != ioapic->root_handle)
+ continue;
+ if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+ retval = -EBUSY;
if (ioapic->res.flags && ioapic->res.parent)
release_resource(&ioapic->res);
list_del(&ioapic->list);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index bf601d4df8cf..919be0aa2578 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -648,12 +648,12 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_stop_root_bus(root->bus);
- WARN_ON(acpi_ioapic_remove(root));
-
+ pci_ioapic_remove(root);
device_set_run_wake(root->bus->bridge, false);
pci_acpi_remove_bus_pm_notifier(device);
pci_remove_root_bus(root->bus);
+ WARN_ON(acpi_ioapic_remove(root));
dmar_device_remove(device->handle);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 611a5585a902..b933061b6b60 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void)
}
static int map_lapic_id(struct acpi_subtable_header *entry,
- u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled)
+ u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_apic *lapic =
container_of(entry, struct acpi_madt_local_apic, header);
- if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (lapic->processor_id != acpi_id)
@@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry,
}
static int map_x2apic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_x2apic *apic =
container_of(entry, struct acpi_madt_local_x2apic, header);
- if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration && (apic->uid == acpi_id)) {
@@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry,
}
static int map_lsapic_id(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id)
{
struct acpi_madt_local_sapic *lsapic =
container_of(entry, struct acpi_madt_local_sapic, header);
- if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED))
+ if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
return -ENODEV;
if (device_declaration) {
@@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
* Retrieve the ARM CPU physical identifier (MPIDR)
*/
static int map_gicc_mpidr(struct acpi_subtable_header *entry,
- int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr,
- bool ignore_disabled)
+ int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr)
{
struct acpi_madt_generic_interrupt *gicc =
container_of(entry, struct acpi_madt_generic_interrupt, header);
- if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED))
+ if (!(gicc->flags & ACPI_MADT_ENABLED))
return -ENODEV;
/* device_declaration means Device object in DSDT, in the
@@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry,
}
static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
- int type, u32 acpi_id, bool ignore_disabled)
+ int type, u32 acpi_id)
{
unsigned long madt_end, entry;
phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */
@@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt,
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) {
- if (!map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled))
+ if (!map_gicc_mpidr(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
@@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id)
if (!madt)
return PHYS_CPUID_INVALID;
- rv = map_madt_entry(madt, 1, acpi_id, true);
+ rv = map_madt_entry(madt, 1, acpi_id);
acpi_put_table((struct acpi_table_header *)madt);
return rv;
}
-static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
- bool ignore_disabled)
+static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
@@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id,
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &phys_id, ignore_disabled);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT)
- map_gicc_mpidr(header, type, acpi_id, &phys_id,
- ignore_disabled);
+ map_gicc_mpidr(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
return phys_id;
}
-static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type,
- u32 acpi_id, bool ignore_disabled)
+phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
phys_cpuid_t phys_id;
- phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled);
+ phys_id = map_mat_entry(handle, type, acpi_id);
if (invalid_phys_cpuid(phys_id))
- phys_id = map_madt_entry(get_madt_table(), type, acpi_id,
- ignore_disabled);
+ phys_id = map_madt_entry(get_madt_table(), type, acpi_id);
return phys_id;
}
-phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
-{
- return __acpi_get_phys_id(handle, type, acpi_id, true);
-}
-
int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
@@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
-static bool __init
-map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid)
-{
- int type, id;
- u32 acpi_id;
- acpi_status status;
- acpi_object_type acpi_type;
- unsigned long long tmp;
- union acpi_object object = { 0 };
- struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
-
- status = acpi_get_type(handle, &acpi_type);
- if (ACPI_FAILURE(status))
- return false;
-
- switch (acpi_type) {
- case ACPI_TYPE_PROCESSOR:
- status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = object.processor.proc_id;
-
- /* validate the acpi_id */
- if(acpi_processor_validate_proc_id(acpi_id))
- return false;
- break;
- case ACPI_TYPE_DEVICE:
- status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
- if (ACPI_FAILURE(status))
- return false;
- acpi_id = tmp;
- break;
- default:
- return false;
- }
-
- type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
-
- *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false);
- id = acpi_map_cpuid(*phys_id, acpi_id);
-
- if (id < 0)
- return false;
- *cpuid = id;
- return true;
-}
-
-static acpi_status __init
-set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context,
- void **rv)
-{
- phys_cpuid_t phys_id;
- int cpu_id;
-
- if (!map_processor(handle, &phys_id, &cpu_id))
- return AE_ERROR;
-
- acpi_map_cpu2node(handle, cpu_id, phys_id);
- return AE_OK;
-}
-
-void __init acpi_set_processor_mapping(void)
-{
- /* Set persistent cpu <-> node mapping for all processors. */
- acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX, set_processor_node_mapping,
- NULL, NULL, NULL);
-}
-#else
-void __init acpi_set_processor_mapping(void) {}
-#endif /* CONFIG_ACPI_HOTPLUG_CPU */
-
#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
u64 *phys_addr, int *ioapic_id)
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 85d833289f28..4c96f3ac4976 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1043A:
if (!qpriv->ecc_addr)
return -EINVAL;
- writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
case AHCI_LS1046A:
if (!qpriv->ecc_addr)
return -EINVAL;
- writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+ writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+ qpriv->ecc_addr);
writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
if (qpriv->is_dmacoherent)
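
The two ahci_qoriq hunks above replace a blind register write with a read-modify-write, so that setting ECC_DIS_ARMV8_CH2 no longer clobbers whatever else the ECC register holds. A minimal standalone sketch of that pattern (plain C, assuming a memory-mapped 32-bit register; not code from this patch):

#include <stdint.h>

/* Sketch only: set bits in a memory-mapped register without disturbing
 * the others, as the hunks above now do with
 * writel(readl(ecc_addr) | ECC_DIS_ARMV8_CH2, ecc_addr). */
static inline void reg_set_bits(volatile uint32_t *reg, uint32_t bits)
{
	*reg = *reg | bits;  /* read current value, OR in new bits, write back */
}
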
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 2bd92dca3e62..274d6d7193d7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
break;
default:
- WARN_ON_ONCE(1);
return AC_ERR_SYSTEM;
}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 46698232e6bf..19e6e539a061 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,
static void ata_tport_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
device_initialize(dev);
dev->type = &ata_port_type;
- dev->parent = get_device(parent);
+ dev->parent = parent;
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,
static void ata_tlink_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
int error;
device_initialize(dev);
- dev->parent = get_device(&ap->tdev);
+ dev->parent = &ap->tdev;
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,
static void ata_tdev_release(struct device *dev)
{
- put_device(dev->parent);
}
/**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
int error;
device_initialize(dev);
- dev->parent = get_device(&link->tdev);
+ dev->parent = &link->tdev;
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 684bda4d14a1..6bb60fb6a30b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void)
return restart_syscall();
}
-void assert_held_device_hotplug(void)
-{
- lockdep_assert_held(&device_hotplug_lock);
-}
-
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 10aed84244f5..939641d6e262 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -50,7 +50,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (46) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 644ba0888bd4..9cfd2e06a649 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -61,7 +61,7 @@
first drive found.
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (45) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index ed93e8badf56..14c5d32f5d8b 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -59,7 +59,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (47) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 5db955fe3a94..3b5882bfb736 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -84,7 +84,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (97) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 61fc6824299a..e815312a00ad 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -61,7 +61,7 @@
the slower the port i/o. In some cases, setting
this to zero will speed up the device. (default -1)
- major You may use this parameter to overide the
+ major You may use this parameter to override the
default major number (96) that this driver
will use. Be sure to change the device
name as well.
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4d6807723798..517838b65964 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -120,10 +120,11 @@ static int atomic_dec_return_safe(atomic_t *v)
/* Feature bits */
-#define RBD_FEATURE_LAYERING (1<<0)
-#define RBD_FEATURE_STRIPINGV2 (1<<1)
-#define RBD_FEATURE_EXCLUSIVE_LOCK (1<<2)
-#define RBD_FEATURE_DATA_POOL (1<<7)
+#define RBD_FEATURE_LAYERING (1ULL<<0)
+#define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
+#define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
+#define RBD_FEATURE_DATA_POOL (1ULL<<7)
+
#define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
RBD_FEATURE_STRIPINGV2 | \
RBD_FEATURE_EXCLUSIVE_LOCK | \
@@ -499,16 +500,23 @@ static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
return is_lock_owner;
}
+static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
+{
+ return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
+}
+
static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
+static BUS_ATTR(supported_features, S_IRUGO, rbd_supported_features_show, NULL);
static struct attribute *rbd_bus_attrs[] = {
&bus_attr_add.attr,
&bus_attr_remove.attr,
&bus_attr_add_single_major.attr,
&bus_attr_remove_single_major.attr,
+ &bus_attr_supported_features.attr,
NULL,
};
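
The rbd hunks above widen the feature-bit macros from 1<<n to 1ULL<<n: the feature mask is a 64-bit quantity (note bit 7 for RBD_FEATURE_DATA_POOL and the 0x%llx format in the new supported_features attribute), and ULL-suffixed constants keep the whole mask in 64-bit arithmetic even at bit positions a plain int shift could not safely reach. A standalone illustration (not code from the patch):

#include <stdio.h>
#include <stdint.h>

#define FEATURE(n) (1ULL << (n))  /* 64-bit safe for n up to 63 */

int main(void)
{
	/* Bits 0, 1, 2 and 7, mirroring the RBD_FEATURES_ALL layout. */
	uint64_t mask = FEATURE(0) | FEATURE(1) | FEATURE(2) | FEATURE(7);

	/* Same format as the new supported_features sysfs attribute. */
	printf("0x%llx\n", (unsigned long long)mask);  /* prints 0x87 */
	return 0;
}
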
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e27d89a36c34..dceb5edd1e54 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1189,6 +1189,8 @@ static int zram_add(void)
blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+ zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
+ zram->disk->queue->limits.chunk_sectors = 0;
blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
/*
* zram_bio_discard() will clear all logical blocks if logical block
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9702c78f458d..7fcc2a9d1d5a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -332,14 +332,6 @@ static void i810_write_entry(dma_addr_t addr, unsigned int entry,
writel_relaxed(addr | pte_flags, intel_private.gtt + entry);
}
-static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
- {32, 8192, 3},
- {64, 16384, 4},
- {128, 32768, 5},
- {256, 65536, 6},
- {512, 131072, 7},
-};
-
static unsigned int intel_gtt_stolen_size(void)
{
u16 gmch_ctrl;
@@ -670,6 +662,14 @@ static int intel_gtt_init(void)
}
#if IS_ENABLED(CONFIG_AGP_INTEL)
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
+ {32, 8192, 3},
+ {64, 16384, 4},
+ {128, 32768, 5},
+ {256, 65536, 6},
+ {512, 131072, 7},
+};
+
static int intel_fake_agp_fetch_size(void)
{
int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 3ad86fdf954e..b1ad12552b56 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
irq, err);
return err;
}
- omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);
- priv->clk = of_clk_get(pdev->dev.of_node, 0);
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
return -EPROBE_DEFER;
if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
dev_err(&pdev->dev, "unable to enable the clk, "
"err = %d\n", err);
}
+
+ /*
+ * On OMAP4, enabling the shutdown_oflo interrupt is
+ * done in the interrupt mask register. There is no
+ * such register on EIP76, and it's enabled by the
+ * same bit in the control register
+ */
+ if (priv->pdata->regs[RNG_INTMASK_REG])
+ omap_rng_write(priv, RNG_INTMASK_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
+ else
+ omap_rng_write(priv, RNG_CONTROL_REG,
+ RNG_SHUTDOWN_OFLO_MASK);
}
return 0;
}
diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c
index a5b1eb276c0b..e6d0d271c58c 100644
--- a/drivers/char/nwbutton.c
+++ b/drivers/char/nwbutton.c
@@ -6,7 +6,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/timer.h>
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 1ef26403bcc8..0ab024918907 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -313,13 +313,6 @@ static int random_read_wakeup_bits = 64;
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
/*
- * The minimum number of seconds between urandom pool reseeding. We
- * do this to limit the amount of entropy that can be drained from the
- * input pool even if there are heavy demands on /dev/urandom.
- */
-static int random_min_urandom_seed = 60;
-
-/*
* Originally, we used a primitive polynomial of degree .poolwords
* over GF(2). The taps for various sizes are defined below. They
* were chosen to be evenly spaced except for the last tap, which is 1
@@ -409,7 +402,6 @@ static struct poolinfo {
*/
static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
-static DECLARE_WAIT_QUEUE_HEAD(urandom_init_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -467,7 +459,6 @@ struct entropy_store {
int entropy_count;
int entropy_total;
unsigned int initialized:1;
- unsigned int limit:1;
unsigned int last_data_init:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -485,7 +476,6 @@ static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;
static struct entropy_store input_pool = {
.poolinfo = &poolinfo_table[0],
.name = "input",
- .limit = 1,
.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
.pool = input_pool_data
};
@@ -493,7 +483,6 @@ static struct entropy_store input_pool = {
static struct entropy_store blocking_pool = {
.poolinfo = &poolinfo_table[1],
.name = "blocking",
- .limit = 1,
.pull = &input_pool,
.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
.pool = blocking_pool_data,
@@ -855,13 +844,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
spin_unlock_irqrestore(&primary_crng.lock, flags);
}
-static inline void maybe_reseed_primary_crng(void)
-{
- if (crng_init > 2 &&
- time_after(jiffies, primary_crng.init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(&primary_crng, &input_pool);
-}
-
static inline void crng_wait_ready(void)
{
wait_event_interruptible(crng_init_wait, crng_ready());
@@ -1220,15 +1202,6 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
r->entropy_count > r->poolinfo->poolfracbits)
return;
- if (r->limit == 0 && random_min_urandom_seed) {
- unsigned long now = jiffies;
-
- if (time_before(now,
- r->last_pulled + random_min_urandom_seed * HZ))
- return;
- r->last_pulled = now;
- }
-
_xfer_secondary_pool(r, nbytes);
}
@@ -1236,8 +1209,6 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
__u32 tmp[OUTPUT_POOL_WORDS];
- /* For /dev/random's pool, always leave two wakeups' worth */
- int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4;
int bytes = nbytes;
/* pull at least as much as a wakeup */
@@ -1248,7 +1219,7 @@ static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
bytes = extract_entropy(r->pull, tmp, bytes,
- random_read_wakeup_bits / 8, rsvd_bytes);
+ random_read_wakeup_bits / 8, 0);
mix_pool_bytes(r, tmp, bytes);
credit_entropy_bits(r, bytes*8);
}
@@ -1276,7 +1247,7 @@ static void push_to_pool(struct work_struct *work)
static size_t account(struct entropy_store *r, size_t nbytes, int min,
int reserved)
{
- int entropy_count, orig;
+ int entropy_count, orig, have_bytes;
size_t ibytes, nfrac;
BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
@@ -1285,14 +1256,12 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
retry:
entropy_count = orig = ACCESS_ONCE(r->entropy_count);
ibytes = nbytes;
- /* If limited, never pull more than available */
- if (r->limit) {
- int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+ /* never pull more than available */
+ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- }
+ if ((have_bytes -= reserved) < 0)
+ have_bytes = 0;
+ ibytes = min_t(size_t, ibytes, have_bytes);
if (ibytes < min)
ibytes = 0;
@@ -1912,6 +1881,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
/*
@@ -2042,63 +2012,64 @@ struct ctl_table random_table[] = {
};
#endif /* CONFIG_SYSCTL */
-static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
-
-int random_int_secret_init(void)
-{
- get_random_bytes(random_int_secret, sizeof(random_int_secret));
- return 0;
-}
-
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
- __aligned(sizeof(unsigned long));
+struct batched_entropy {
+ union {
+ u64 entropy_u64[CHACHA20_BLOCK_SIZE / sizeof(u64)];
+ u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
+ };
+ unsigned int position;
+};
/*
- * Get a random word for internal kernel use only. Similar to urandom but
- * with the goal of minimal entropy pool depletion. As a result, the random
- * value is not cryptographically secure but for several uses the cost of
- * depleting entropy is too high
+ * Get a random word for internal kernel use only. The quality of the random
+ * number is either as good as RDRAND or as good as /dev/urandom, with the
+ * goal of being quite fast and not depleting entropy.
*/
-unsigned int get_random_int(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
+u64 get_random_u64(void)
{
- __u32 *hash;
- unsigned int ret;
+ u64 ret;
+ struct batched_entropy *batch;
- if (arch_get_random_int(&ret))
+#if BITS_PER_LONG == 64
+ if (arch_get_random_long((unsigned long *)&ret))
return ret;
+#else
+ if (arch_get_random_long((unsigned long *)&ret) &&
+ arch_get_random_long((unsigned long *)&ret + 1))
+ return ret;
+#endif
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = hash[0];
- put_cpu_var(get_random_int_hash);
-
+ batch = &get_cpu_var(batched_entropy_u64);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+ put_cpu_var(batched_entropy_u64);
return ret;
}
-EXPORT_SYMBOL(get_random_int);
+EXPORT_SYMBOL(get_random_u64);
-/*
- * Same as get_random_int(), but returns unsigned long.
- */
-unsigned long get_random_long(void)
+static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
+u32 get_random_u32(void)
{
- __u32 *hash;
- unsigned long ret;
+ u32 ret;
+ struct batched_entropy *batch;
- if (arch_get_random_long(&ret))
+ if (arch_get_random_int(&ret))
return ret;
- hash = get_cpu_var(get_random_int_hash);
-
- hash[0] += current->pid + jiffies + random_get_entropy();
- md5_transform(hash, random_int_secret);
- ret = *(unsigned long *)hash;
- put_cpu_var(get_random_int_hash);
-
+ batch = &get_cpu_var(batched_entropy_u32);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+ put_cpu_var(batched_entropy_u32);
return ret;
}
-EXPORT_SYMBOL(get_random_long);
+EXPORT_SYMBOL(get_random_u32);
/**
* randomize_page - Generate a random, page aligned address
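
The random.c hunks above replace the per-CPU MD5 construction behind get_random_int()/get_random_long() with get_random_u32()/get_random_u64(), which refill a small per-CPU buffer from the ChaCha20 CRNG one block at a time and hand out a single word per call, so most calls never touch the generator. A self-contained sketch of that batching pattern (userspace C with a toy generator standing in for extract_crng(); not kernel code):

#include <stdint.h>

#define BLOCK_SIZE 64  /* stand-in for CHACHA20_BLOCK_SIZE */

struct batched_entropy {
	uint64_t entropy_u64[BLOCK_SIZE / sizeof(uint64_t)];
	unsigned int position;
};

/* Toy xorshift generator standing in for extract_crng(). */
static uint64_t toy_state = 0x9e3779b97f4a7c15ULL;

static void toy_fill_block(uint64_t *buf, unsigned int words)
{
	for (unsigned int i = 0; i < words; i++) {
		toy_state ^= toy_state << 13;
		toy_state ^= toy_state >> 7;
		toy_state ^= toy_state << 17;
		buf[i] = toy_state;
	}
}

static struct batched_entropy batch;  /* per-CPU in the real code */

uint64_t sketch_get_random_u64(void)
{
	/* Refill the buffer only when a whole block has been consumed. */
	if (batch.position % (BLOCK_SIZE / sizeof(uint64_t)) == 0) {
		toy_fill_block(batch.entropy_u64,
			       BLOCK_SIZE / sizeof(uint64_t));
		batch.position = 0;
	}
	return batch.entropy_u64[batch.position++];
}
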
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 745844ee973e..d4ca9962a759 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -10,7 +10,6 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/atmel_tc.h>
-#include <linux/sched_clock.h>
/*
@@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs)
return (upper << 16) | lower;
}
-static u32 tc_get_cv32(void)
-{
- return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
-}
-
static u64 tc_get_cycles32(struct clocksource *cs)
{
- return tc_get_cv32();
+ return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV));
}
static struct clocksource clksrc = {
@@ -75,11 +69,6 @@ static struct clocksource clksrc = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace tc_read_sched_clock(void)
-{
- return tc_get_cv32();
-}
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS
struct tc_clkevt_device {
@@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void)
clksrc.read = tc_get_cycles32;
	/* setup only channel 0 */
tcb_setup_single_chan(tc, best_divisor_idx);
-
- /* register sched_clock on chips with single 32 bit counter */
- sched_clock_register(tc_read_sched_clock, 32, divided_rate);
} else {
/* tclib will give us three clocks no matter what the
* underlying platform supports.
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a47543281864..b8ff617d449d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
/**
@@ -2532,4 +2534,5 @@ static int __init cpufreq_core_init(void)
return 0;
}
+module_param(off, int, 0444);
core_initcall(cpufreq_core_init);
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b1fbaa30ae04..08e134ffba68 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
+static inline int32_t percent_ext_fp(int percent)
+{
+ return div_ext_fp(percent, 100);
+}
+
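For reference, a worked example of the conversion helper above. The numbers assume EXT_FRAC_BITS == 16 purely for round figures; the driver's actual constant may differ:

/* div_ext_fp(x, y) == (x << EXT_FRAC_BITS) / y, so with EXT_FRAC_BITS == 16:
 *   percent_ext_fp(100) == (100 << 16) / 100 == 1 << 16 == int_ext_tofp(1)
 *   percent_ext_fp(25)  == (25 << 16) / 100  == 0x4000, i.e. 0.25
 * and scaling a pstate by it recovers the expected integer:
 *   fp_ext_toint(32 * percent_ext_fp(25)) == 8
 */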
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -377,6 +382,7 @@ static void intel_pstate_set_performance_limits(struct perf_limits *limits)
intel_pstate_init_limits(limits);
limits->min_perf_pct = 100;
limits->min_perf = int_ext_tofp(1);
+ limits->min_sysfs_pct = 100;
}
static DEFINE_MUTEX(intel_pstate_driver_lock);
@@ -844,12 +850,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = {
static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
{
- int min, hw_min, max, hw_max, cpu, range, adj_range;
+ int min, hw_min, max, hw_max, cpu;
struct perf_limits *perf_limits = limits;
u64 value, cap;
for_each_cpu(cpu, policy->cpus) {
- int max_perf_pct, min_perf_pct;
struct cpudata *cpu_data = all_cpu_data[cpu];
s16 epp;
@@ -862,20 +867,15 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
- range = hw_max - hw_min;
- max_perf_pct = perf_limits->max_perf_pct;
- min_perf_pct = perf_limits->min_perf_pct;
+ min = fp_ext_toint(hw_max * perf_limits->min_perf);
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
- adj_range = min_perf_pct * range / 100;
- min = hw_min + adj_range;
+
value &= ~HWP_MIN_PERF(~0L);
value |= HWP_MIN_PERF(min);
- adj_range = max_perf_pct * range / 100;
- max = hw_min + adj_range;
-
+ max = fp_ext_toint(hw_max * perf_limits->max_perf);
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
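The rework changes how the sysfs limits map onto hardware: previously a percentage of the hw_min..hw_max range, now a fraction of hw_max alone. A worked comparison with illustrative numbers (not from any real CPU):

/* Assume hw_min == 8, hw_max == 32 and a 50% minimum limit.
 *
 * Old mapping (percent of the range):
 *   min = hw_min + 50 * (hw_max - hw_min) / 100 == 8 + 12 == 20
 *
 * New mapping (fraction of hw_max, min_perf == percent_ext_fp(50)):
 *   min = fp_ext_toint(hw_max * min_perf) == 32 / 2 == 16
 */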
@@ -968,17 +968,27 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
}
static void intel_pstate_update_policies(void)
+ __releases(&intel_pstate_limits_lock)
+ __acquires(&intel_pstate_limits_lock)
{
+ struct perf_limits *saved_limits = limits;
int cpu;
+ mutex_unlock(&intel_pstate_limits_lock);
+
for_each_possible_cpu(cpu)
cpufreq_update_policy(cpu);
+
+ mutex_lock(&intel_pstate_limits_lock);
+
+ limits = saved_limits;
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
*(u32 *)data = val;
+ pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC;
intel_pstate_reset_all_pid();
return 0;
}
@@ -1180,10 +1190,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
limits->no_turbo = clamp_t(int, input, 0, 1);
- mutex_unlock(&intel_pstate_limits_lock);
-
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1215,12 +1225,12 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
limits->max_perf_pct);
limits->max_perf_pct = max(limits->min_perf_pct,
limits->max_perf_pct);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ limits->max_perf = percent_ext_fp(limits->max_perf_pct);
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1252,12 +1262,12 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
limits->min_perf_pct);
limits->min_perf_pct = min(limits->max_perf_pct,
limits->min_perf_pct);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
-
- mutex_unlock(&intel_pstate_limits_lock);
+ limits->min_perf = percent_ext_fp(limits->min_perf_pct);
intel_pstate_update_policies();
+ mutex_unlock(&intel_pstate_limits_lock);
+
mutex_unlock(&intel_pstate_driver_lock);
return count;
@@ -1874,13 +1884,11 @@ static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
pstate = clamp_t(int, pstate, min_perf, max_perf);
- trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
return pstate;
}
static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
{
- pstate = intel_pstate_prepare_request(cpu, pstate);
if (pstate == cpu->pstate.current_pstate)
return;
@@ -1900,6 +1908,8 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
update_turbo_state();
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
+ trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
intel_pstate_update_pstate(cpu, target_pstate);
sample = &cpu->sample;
@@ -2070,36 +2080,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu)
static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy,
struct perf_limits *limits)
{
+ int32_t max_policy_perf, min_policy_perf;
- limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100,
- policy->cpuinfo.max_freq);
- limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100);
+ max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq);
+ max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1));
if (policy->max == policy->min) {
- limits->min_policy_pct = limits->max_policy_pct;
+ min_policy_perf = max_policy_perf;
} else {
- limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100,
- policy->cpuinfo.max_freq);
- limits->min_policy_pct = clamp_t(int, limits->min_policy_pct,
- 0, 100);
+ min_policy_perf = div_ext_fp(policy->min,
+ policy->cpuinfo.max_freq);
+ min_policy_perf = clamp_t(int32_t, min_policy_perf,
+ 0, max_policy_perf);
}
- /* Normalize user input to [min_policy_pct, max_policy_pct] */
- limits->min_perf_pct = max(limits->min_policy_pct,
- limits->min_sysfs_pct);
- limits->min_perf_pct = min(limits->max_policy_pct,
- limits->min_perf_pct);
- limits->max_perf_pct = min(limits->max_policy_pct,
- limits->max_sysfs_pct);
- limits->max_perf_pct = max(limits->min_policy_pct,
- limits->max_perf_pct);
+ /* Normalize user input to [min_perf, max_perf] */
+ limits->min_perf = max(min_policy_perf,
+ percent_ext_fp(limits->min_sysfs_pct));
+ limits->min_perf = min(limits->min_perf, max_policy_perf);
+ limits->max_perf = min(max_policy_perf,
+ percent_ext_fp(limits->max_sysfs_pct));
+ limits->max_perf = max(min_policy_perf, limits->max_perf);
- /* Make sure min_perf_pct <= max_perf_pct */
- limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
+ /* Make sure min_perf <= max_perf */
+ limits->min_perf = min(limits->min_perf, limits->max_perf);
- limits->min_perf = div_ext_fp(limits->min_perf_pct, 100);
- limits->max_perf = div_ext_fp(limits->max_perf_pct, 100);
limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS);
limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS);
+ limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100);
+ limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100);
pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu,
limits->max_perf_pct, limits->min_perf_pct);
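The normalization above boils down to two clamps into the policy range plus a final ordering fix; a compact restatement of the semantics (a sketch, not the driver's helpers):

static int32_t clamp32_sketch(int32_t v, int32_t lo, int32_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* min_perf = clamp(user_min, min_policy_perf, max_policy_perf)
 * max_perf = clamp(user_max, min_policy_perf, max_policy_perf)
 * min_perf = min(min_perf, max_perf)	-- keep min <= max on exit
 */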
@@ -2132,16 +2140,11 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
mutex_lock(&intel_pstate_limits_lock);
if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ pr_debug("set performance\n");
if (!perf_limits) {
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq &&
- !limits->no_turbo) {
- pr_debug("set performance\n");
- intel_pstate_set_performance_limits(perf_limits);
- goto out;
- }
} else {
pr_debug("set powersave\n");
if (!perf_limits) {
@@ -2152,7 +2155,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
}
intel_pstate_update_perf_limits(policy, perf_limits);
- out:
+
if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
/*
* NOHZ_FULL CPUs need this as the governor callback may not
@@ -2198,9 +2201,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
unsigned int max_freq, min_freq;
max_freq = policy->cpuinfo.max_freq *
- limits->max_sysfs_pct / 100;
+ perf_limits->max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
- limits->min_sysfs_pct / 100;
+ perf_limits->min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}
@@ -2243,13 +2246,8 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
cpu = all_cpu_data[policy->cpu];
- /*
- * We need sane value in the cpu->perf_limits, so inherit from global
- * perf_limits limits, which are seeded with values based on the
- * CONFIG_CPU_FREQ_DEFAULT_GOV_*, during boot up.
- */
if (per_cpu_limits)
- memcpy(cpu->perf_limits, limits, sizeof(struct perf_limits));
+ intel_pstate_init_limits(cpu->perf_limits);
policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
@@ -2301,7 +2299,6 @@ static struct cpufreq_driver intel_pstate = {
static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
- struct perf_limits *perf_limits = limits;
update_turbo_state();
policy->cpuinfo.max_freq = limits->turbo_disabled ?
@@ -2309,15 +2306,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy)
cpufreq_verify_within_cpu_limits(policy);
- if (per_cpu_limits)
- perf_limits = cpu->perf_limits;
-
- mutex_lock(&intel_pstate_limits_lock);
-
- intel_pstate_update_perf_limits(policy, perf_limits);
-
- mutex_unlock(&intel_pstate_limits_lock);
-
return 0;
}
@@ -2370,6 +2358,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
wrmsrl_on_cpu(policy->cpu, MSR_IA32_PERF_CTL,
pstate_funcs.get_val(cpu, target_pstate));
}
+ freqs.new = target_pstate * cpu->pstate.scaling;
cpufreq_freq_transition_end(policy, &freqs, false);
return 0;
@@ -2383,8 +2372,9 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq);
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+ target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
intel_pstate_update_pstate(cpu, target_pstate);
- return target_freq;
+ return target_pstate * cpu->pstate.scaling;
}
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -2437,8 +2427,11 @@ static int intel_pstate_register_driver(void)
intel_pstate_init_limits(&powersave_limits);
intel_pstate_set_performance_limits(&performance_limits);
- limits = IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) ?
- &performance_limits : &powersave_limits;
+ if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) &&
+ intel_pstate_driver == &intel_pstate)
+ limits = &performance_limits;
+ else
+ limits = &powersave_limits;
ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index dce1af0ce85c..1b9da3dc799b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
scatterwalk_done(&walk, out, 0);
}
-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
{
if (dev->sg_dst_cpy) {
dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
}
s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}
- /* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock held. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
dev->req->base.complete(&dev->req->base, err);
dev->busy = false;
}
@@ -368,51 +371,44 @@ exit:
}
/*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ * - 0 if there is no more data,
+ * - 1 if new transmitting (output) data is ready and its address+length
+ * have to be written to device (by calling s5p_set_dma_outdata()).
*/
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
- int err = 0;
- bool ret = false;
+ int ret = 0;
s5p_unset_outdata(dev);
if (!sg_is_last(dev->sg_dst)) {
- err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
- if (err)
- s5p_aes_complete(dev, err);
- else
- ret = true;
- } else {
- s5p_aes_complete(dev, err);
-
- dev->busy = true;
- tasklet_schedule(&dev->tasklet);
+ ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+ if (!ret)
+ ret = 1;
}
return ret;
}
/*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ * - 0 if there is no more data,
+ * - 1 if new receiving (input) data is ready and its address+length
+ * have to be written to device (by calling s5p_set_dma_indata()).
*/
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev)
{
- int err;
- bool ret = false;
+ int ret = 0;
s5p_unset_indata(dev);
if (!sg_is_last(dev->sg_src)) {
- err = s5p_set_indata(dev, sg_next(dev->sg_src));
- if (err)
- s5p_aes_complete(dev, err);
- else
- ret = true;
+ ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+ if (!ret)
+ ret = 1;
}
return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
- bool set_dma_tx = false;
- bool set_dma_rx = false;
+ int err_dma_tx = 0;
+ int err_dma_rx = 0;
+ bool tx_end = false;
unsigned long flags;
uint32_t status;
+ int err;
spin_lock_irqsave(&dev->lock, flags);
+	/*
+	 * Handle an rx or tx interrupt. If the scatterlist has not reached
+	 * its end, map the next scatterlist entry; if that mapping fails,
+	 * s5p_aes_complete() must be called with the error.
+	 *
+	 * If there is no more data in the tx scatterlist, call
+	 * s5p_aes_complete() and schedule a new tasklet.
+	 */
status = SSS_READ(dev, FCINTSTAT);
if (status & SSS_FCINTSTAT_BRDMAINT)
- set_dma_rx = s5p_aes_rx(dev);
- if (status & SSS_FCINTSTAT_BTDMAINT)
- set_dma_tx = s5p_aes_tx(dev);
+ err_dma_rx = s5p_aes_rx(dev);
+
+ if (status & SSS_FCINTSTAT_BTDMAINT) {
+ if (sg_is_last(dev->sg_dst))
+ tx_end = true;
+ err_dma_tx = s5p_aes_tx(dev);
+ }
SSS_WRITE(dev, FCINTPEND, status);
- /*
- * Writing length of DMA block (either receiving or transmitting)
- * will start the operation immediately, so this should be done
- * at the end (even after clearing pending interrupts to not miss the
- * interrupt).
- */
- if (set_dma_tx)
- s5p_set_dma_outdata(dev, dev->sg_dst);
- if (set_dma_rx)
- s5p_set_dma_indata(dev, dev->sg_src);
+ if (err_dma_rx < 0) {
+ err = err_dma_rx;
+ goto error;
+ }
+ if (err_dma_tx < 0) {
+ err = err_dma_tx;
+ goto error;
+ }
+
+ if (tx_end) {
+ s5p_sg_done(dev);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ s5p_aes_complete(dev, 0);
+ dev->busy = true;
+ tasklet_schedule(&dev->tasklet);
+ } else {
+ /*
+ * Writing length of DMA block (either receiving or
+ * transmitting) will start the operation immediately, so this
+ * should be done at the end (even after clearing pending
+ * interrupts to not miss the interrupt).
+ */
+ if (err_dma_tx == 1)
+ s5p_set_dma_outdata(dev, dev->sg_dst);
+ if (err_dma_rx == 1)
+ s5p_set_dma_indata(dev, dev->sg_src);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ }
+
+ return IRQ_HANDLED;
+
+error:
+ s5p_sg_done(dev);
spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
return IRQ_HANDLED;
}
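The reworked handler distinguishes three outcomes per DMA direction: a negative errno (mapping the next entry failed), 0 (scatterlist exhausted) and 1 (next entry mapped, its length still to be programmed). A condensed sketch of the dispatch, with locking and buffer release elided (the helper name is hypothetical):

#include <stdbool.h>

static void s5p_irq_dispatch_sketch(int rx_status, int tx_status, bool tx_end)
{
	if (rx_status < 0 || tx_status < 0) {
		/* release the sg copies, then complete with the error */
		return;
	}

	if (tx_end) {
		/* request finished: release sg copies, complete with 0,
		 * then schedule the tasklet for the next queued request */
		return;
	}

	/* Writing a DMA block length starts the engine immediately, so
	 * it must be the last step for each direction that returned 1. */
	if (tx_status == 1)
		; /* s5p_set_dma_outdata(dev, dev->sg_dst) */
	if (rx_status == 1)
		; /* s5p_set_dma_indata(dev, dev->sg_src) */
}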
@@ -597,8 +633,9 @@ outdata_error:
s5p_unset_indata(dev);
indata_error:
- s5p_aes_complete(dev, err);
+ s5p_sg_done(dev);
spin_unlock_irqrestore(&dev->lock, flags);
+ s5p_aes_complete(dev, err);
}
static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
dev_warn(dev, "feed control interrupt is not available.\n");
goto err_irq;
}
- err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
- IRQF_SHARED, pdev->name, pdev);
+ err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+ s5p_aes_interrupt, IRQF_ONESHOT,
+ pdev->name, pdev);
if (err < 0) {
dev_warn(dev, "feed control interrupt is not available.\n");
goto err_irq;
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 43a0c8a26ab0..00a16ab601cb 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -82,7 +82,7 @@ void cryp_activity(struct cryp_device_data *device_data,
void cryp_flush_inoutfifo(struct cryp_device_data *device_data)
{
/*
- * We always need to disble the hardware before trying to flush the
+ * We always need to disable the hardware before trying to flush the
* FIFO. This is something that isn't written in the design
* specification, but we have been informed by the hardware designers
* that this must be done.
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c
index 8d9829ff2a78..80c6db279ae1 100644
--- a/drivers/dax/dax.c
+++ b/drivers/dax/dax.c
@@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
int rc = VM_FAULT_SIGBUS;
phys_addr_t phys;
pfn_t pfn;
+ unsigned int fault_size = PAGE_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size != dax_region->align)
+ return VM_FAULT_SIGBUS;
+
phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
vmf->pgoff);
return VM_FAULT_SIGBUS;
}
@@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PMD_SIZE;
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pmd_addr < vmf->vma->vm_start ||
+ (pmd_addr + PMD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pmd_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
@@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
phys_addr_t phys;
pgoff_t pgoff;
pfn_t pfn;
+ unsigned int fault_size = PUD_SIZE;
+
if (check_vma(dax_dev, vmf->vma, __func__))
return VM_FAULT_SIGBUS;
@@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
+ if (fault_size < dax_region->align)
+ return VM_FAULT_SIGBUS;
+ else if (fault_size > dax_region->align)
+ return VM_FAULT_FALLBACK;
+
+ /* if we are outside of the VMA */
+ if (pud_addr < vmf->vma->vm_start ||
+ (pud_addr + PUD_SIZE) > vmf->vma->vm_end)
+ return VM_FAULT_SIGBUS;
+
pgoff = linear_page_index(vmf->vma, pud_addr);
phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE);
if (phys == -1) {
- dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__,
+ dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__,
pgoff);
return VM_FAULT_SIGBUS;
}
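All three handlers now enforce the same contract against the region alignment: a fault granularity smaller than the alignment can never be satisfied, while a larger one must fall back so the core retries with smaller pages. A sketch of the shared decision, using the kernel's VM_FAULT_* codes (the helper name is hypothetical):

/* fault_size is PAGE_SIZE, PMD_SIZE or PUD_SIZE depending on the caller;
 * align is dax_region->align.
 */
static int dax_check_fault_size_sketch(unsigned long fault_size,
				       unsigned long align)
{
	if (fault_size < align)
		return VM_FAULT_SIGBUS;		/* can never map this region */
	if (fault_size > align)
		return VM_FAULT_FALLBACK;	/* retry with smaller pages */
	return 0;				/* exact match: proceed */
}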
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c
index 349dc3e1e52e..974c5a31a005 100644
--- a/drivers/firmware/efi/arm-runtime.c
+++ b/drivers/firmware/efi/arm-runtime.c
@@ -65,6 +65,7 @@ static bool __init efi_virtmap_init(void)
bool systab_found;
efi_mm.pgd = pgd_alloc(&efi_mm);
+ mm_init_cpumask(&efi_mm);
init_new_context(NULL, &efi_mm);
systab_found = false;
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 6def402bf569..5da36e56b36a 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -45,6 +45,8 @@ enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table_arg)
size = sizeof(secboot);
status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
NULL, &size, &secboot);
+ if (status == EFI_NOT_FOUND)
+ return efi_secureboot_mode_disabled;
if (status != EFI_SUCCESS)
goto out_efi_err;
@@ -78,7 +80,5 @@ secure_boot_enabled:
out_efi_err:
pr_efi_err(sys_table_arg, "Could not determine UEFI Secure Boot status.\n");
- if (status == EFI_NOT_FOUND)
- return efi_secureboot_mode_disabled;
return efi_secureboot_mode_unknown;
}
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile
index 8363cb57915b..8a08e81ee90d 100644
--- a/drivers/gpu/drm/amd/acp/Makefile
+++ b/drivers/gpu/drm/amd/acp/Makefile
@@ -3,6 +3,4 @@
# of AMDSOC/AMDGPU drm driver.
# It provides the HW control for ACP related functionalities.
-subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include
-
AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index d2d0f60ff36d..99424cb8020b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -240,6 +240,8 @@ free_partial_kdata:
for (; i >= 0; i--)
drm_free_large(p->chunks[i].kdata);
kfree(p->chunks);
+ p->chunks = NULL;
+ p->nchunks = 0;
put_ctx:
amdgpu_ctx_put(p->ctx);
free_chunk:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6abb238b25c9..a3a105ec99e2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2094,8 +2094,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
r = amdgpu_late_init(adev);
- if (r)
+ if (r) {
+ if (fbcon)
+ console_unlock();
return r;
+ }
/* pin cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -2587,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
@@ -2663,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
use_bank = 0;
}
- *pos &= 0x3FFFF;
+ *pos &= (1UL << 22) - 1;
if (use_bank) {
if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 75fc376ba735..f7adbace428a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -59,9 +59,10 @@
* - 3.7.0 - Add support for VCE clock list packet
* - 3.8.0 - Add support raster config init in the kernel
* - 3.9.0 - Add support for memory query info about VRAM and GTT.
+ * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 9
+#define KMS_DRIVER_MINOR 10
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 51d759463384..106cf83c2e6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -202,6 +202,27 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
bool kernel = false;
int r;
+ /* reject invalid gem flags */
+ if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+ AMDGPU_GEM_CREATE_VRAM_CLEARED|
+ AMDGPU_GEM_CREATE_SHADOW |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
+ r = -EINVAL;
+ goto error_unlock;
+ }
+ /* reject invalid gem domains */
+ if (args->in.domains & ~(AMDGPU_GEM_DOMAIN_CPU |
+ AMDGPU_GEM_DOMAIN_GTT |
+ AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GDS |
+ AMDGPU_GEM_DOMAIN_GWS |
+ AMDGPU_GEM_DOMAIN_OA)) {
+ r = -EINVAL;
+ goto error_unlock;
+ }
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
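Both checks are instances of the usual ioctl-hardening idiom: collect every understood bit into a mask and reject any request carrying bits outside it, so unknown future flags fail with -EINVAL instead of being silently ignored. In general form (a sketch, not a driver helper):

#include <linux/errno.h>
#include <linux/types.h>

static int check_mask_sketch(u64 requested, u64 supported)
{
	return (requested & ~supported) ? -EINVAL : 0;
}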
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 31375bdde6f1..011800f621c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -788,7 +788,7 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
}
}
- /* disble sdma engine before programing it */
+	/* disable sdma engine before programming it */
sdma_v3_0_ctx_switch_enable(adev, false);
sdma_v3_0_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index f55e45b52fbc..33b504bafb88 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3464,6 +3464,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->device == 0x6604) &&
+ (adev->pdev->subsystem_vendor == 0x1028) &&
+ (adev->pdev->subsystem_device == 0x066F)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 50bdb24ef8d6..4a785d6acfb9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle)
/* rev0 hardware requires workarounds to support PG */
adev->pg_flags = 0;
if (adev->rev_id != 0x00) {
- adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
+ adev->pg_flags |=
AMD_PG_SUPPORT_GFX_SMG |
AMD_PG_SUPPORT_GFX_PIPELINE |
AMD_PG_SUPPORT_CP |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
index 8cf71f3c6d0e..261b828ad590 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c
@@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
if (bgate) {
cgs_set_powergating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_UNGATE);
+ AMD_PG_STATE_GATE);
cgs_set_clockgating_state(hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_GATE);
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
index bad4d80cb711..f9d665550d3e 100644
--- a/drivers/gpu/drm/arm/malidp_crtc.c
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc)
clk_prepare_enable(hwdev->pxlclk);
- /* mclk needs to be set to the same or higher rate than pxlclk */
- clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
+ /* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
hwdev->modeset(hwdev, &vm);
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index 488aedf5b58d..9f5513006eee 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = {
{ DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
{ DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE },
{ DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 },
- { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 },
+ { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE },
};
#define MALIDP_DE_DEFAULT_PREFETCH_START 5
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 414aada10fe5..d5aec082294c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -37,6 +37,8 @@
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
#define MALIDP_LAYER_COMP_SIZE 0x010
#define MALIDP_LAYER_OFFSET 0x014
+#define MALIDP550_LS_ENABLE 0x01c
+#define MALIDP550_LS_R1_IN_SIZE 0x020
/*
* This 4-entry look-up-table is used to determine the full 8-bit alpha value
@@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane,
LAYER_V_VAL(plane->state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
+ if (mp->layer->id == DE_SMART)
+ malidp_hw_write(mp->hwdev,
+ LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
+ mp->layer->base + MALIDP550_LS_R1_IN_SIZE);
+
/* first clear the rotation bits */
val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL);
val &= ~LAYER_ROT_MASK;
@@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm)
plane->hwdev = malidp->dev;
plane->layer = &map->layers[i];
- /* Skip the features which the SMART layer doesn't have */
- if (id == DE_SMART)
+ if (id == DE_SMART) {
+			/*
+			 * Enable the first rectangle in the SMART layer
+			 * so that it can be used as a drm plane.
+			 */
+ malidp_hw_write(malidp->dev, 1,
+ plane->layer->base + MALIDP550_LS_ENABLE);
+ /* Skip the features which the SMART layer doesn't have. */
continue;
+ }
drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags);
malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT,
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index aff6d4a84e99..b816067a65c5 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -84,6 +84,7 @@
/* Stride register offsets relative to Lx_BASE */
#define MALIDP_DE_LG_STRIDE 0x18
#define MALIDP_DE_LV_STRIDE0 0x18
+#define MALIDP550_DE_LS_R1_STRIDE 0x28
/* macros to set values into registers */
#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 99144f879a4f..fad3d44e4642 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -149,6 +149,9 @@ static const struct edid_quirk {
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
{ "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+
+	/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1 */
+ { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
};
/*
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index f9b6445e846a..9c0152df45ad 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -132,6 +132,8 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR565_A8, .depth = 24, .num_planes = 2, .cpp = { 2, 1, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
@@ -144,6 +146,12 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGR888_A8, .depth = 32, .num_planes = 2, .cpp = { 3, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XRGB8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_XBGR8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGBX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_BGRX8888_A8, .depth = 32, .num_planes = 2, .cpp = { 4, 1, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
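The new two-plane entries pair a color plane with a separate 8-bit alpha plane; generic code sizes each plane from .cpp, .hsub and .vsub. A sketch of that computation (the helper is hypothetical, the fields mirror drm_format_info):

#include <stddef.h>

/* Bytes needed for plane 'plane' of a width x height buffer. */
static size_t plane_size_sketch(unsigned int width, unsigned int height,
				unsigned int cpp, unsigned int hsub,
				unsigned int vsub, unsigned int plane)
{
	unsigned int w = plane ? width / hsub : width;
	unsigned int h = plane ? height / vsub : height;

	return (size_t)w * h * cpp;
}

For DRM_FORMAT_RGB565_A8 at 64x32 this yields 64 * 32 * 2 == 4096 bytes for the RGB plane and 64 * 32 * 1 == 2048 bytes for the alpha plane.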
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 183f5dc1c3f2..a5cd5dacf055 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -19,6 +19,8 @@ config DRM_I915
select INPUT if ACPI
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
+ select SYNC_FILE
+ select IOSF_MBI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 597648c7a645..e091809a9a9e 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
select DRM_DEBUG_MM if DRM=y
+ select DRM_DEBUG_MM_SELFTEST
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
+ select DRM_I915_SELFTEST
default n
help
Choose this option to turn on extra driver debugging that may affect
@@ -58,3 +60,30 @@ config DRM_I915_SW_FENCE_DEBUG_OBJECTS
Recommended for driver developers only.
If in doubt, say "N".
+
+config DRM_I915_SELFTEST
+ bool "Enable selftests upon driver load"
+ depends on DRM_I915
+ default n
+ select FAULT_INJECTION
+ select PRIME_NUMBERS
+ help
+ Choose this option to allow the driver to perform selftests upon
+ loading; also requires the i915.selftest=1 module parameter. To
+ exit the module after running the selftests (i.e. to prevent normal
+ module initialisation afterwards) use i915.selftest=-1.
+
+ Recommended for driver developers only.
+
+ If in doubt, say "N".
+
+config DRM_I915_LOW_LEVEL_TRACEPOINTS
+ bool "Enable low level request tracing events"
+ depends on DRM_I915
+ default n
+ help
+ Choose this option to turn on low level request tracing events.
+ This provides the ability to precisely monitor engine utilisation
+ and also analyze the request dependency resolving timeline.
+
+ If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c62ab45683c0..2cf04504e494 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -29,6 +29,7 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o intel_pipe_crc.o
# GEM code
i915-y += i915_cmd_parser.o \
i915_gem_batch_pool.o \
+ i915_gem_clflush.o \
i915_gem_context.o \
i915_gem_dmabuf.o \
i915_gem_evict.o \
@@ -72,6 +73,7 @@ i915-y += intel_audio.o \
intel_atomic.o \
intel_atomic_plane.o \
intel_bios.o \
+ intel_cdclk.o \
intel_color.o \
intel_display.o \
intel_dpio_phy.o \
@@ -103,8 +105,8 @@ i915-y += dvo_ch7017.o \
intel_dp.o \
intel_dsi.o \
intel_dsi_dcs_backlight.o \
- intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
+ intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
@@ -116,6 +118,9 @@ i915-y += dvo_ch7017.o \
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+i915-$(CONFIG_DRM_I915_SELFTEST) += \
+ selftests/i915_random.o \
+ selftests/i915_selftest.o
# virtual gpu code
i915-y += i915_vgpu.o
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 4a6a2ed65732..b7d7721e72fa 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -41,6 +41,54 @@ enum {
INTEL_GVT_PCI_BAR_MAX,
};
+/* Per-byte bitmap of the writable bits (RW or RW1C; the two kinds cannot
+ * co-exist within one byte) in the standard PCI configuration space, not
+ * the full 256 bytes.
+ */
+static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = {
+ [PCI_COMMAND] = 0xff, 0x07,
+	[PCI_STATUS]	= 0x00, 0xf9, /* the only RW1C byte */
+ [PCI_CACHE_LINE_SIZE] = 0xff,
+ [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff,
+ [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff,
+ [PCI_INTERRUPT_LINE] = 0xff,
+};
+
+/**
+ * vgpu_pci_cfg_mem_write - write virtual cfg space memory
+ *
+ * Use this function to write virtual cfg space memory.
+ * For standard cfg space, only RW bits can be changed,
+ * and we emulate the RW1C behavior of the PCI_STATUS register.
+ */
+static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
+ u8 *src, unsigned int bytes)
+{
+ u8 *cfg_base = vgpu_cfg_space(vgpu);
+ u8 mask, new, old;
+ int i = 0;
+
+ for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) {
+ mask = pci_cfg_space_rw_bmp[off + i];
+ old = cfg_base[off + i];
+ new = src[i] & mask;
+
+		/*
+		 * The PCI_STATUS high byte has RW1C bits; emulate
+		 * their clear-on-write-1 behavior here.
+		 * Writing 0 to an RW1C bit has no effect.
+		 */
+ if (off + i == PCI_STATUS + 1)
+ new = (~new & old) & mask;
+
+ cfg_base[off + i] = (old & ~mask) | new;
+ }
+
+ /* For other configuration space directly copy as it is. */
+ if (i < bytes)
+ memcpy(cfg_base + off + i, src + i, bytes - i);
+}
+
/**
* intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read
*
@@ -123,7 +171,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
u8 changed = old ^ new;
int ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
if (!(changed & PCI_COMMAND_MEMORY))
return 0;
@@ -237,6 +285,9 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
{
int ret;
+ if (vgpu->failsafe)
+ return 0;
+
if (WARN_ON(bytes > 4))
return -EINVAL;
@@ -274,10 +325,10 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
if (ret)
return ret;
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
default:
- memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
+ vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes);
break;
}
return 0;
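A worked example of the RW1C emulation above, using the 0xf9 mask declared for the PCI_STATUS high byte:

/* old == 0xf9 (all emulated status bits set), guest writes src == 0x08:
 *   new = src & mask		== 0x08
 *   new = (~new & old) & mask	== 0xf1	(bit 3 cleared, the rest kept)
 *   cfg = (old & ~mask) | new	== 0xf1
 * Writing 0x00 computes new == old & mask, leaving every bit untouched,
 * exactly as RW1C requires.
 */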
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b9c8e2407682..da6bbca90d97 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -668,7 +668,7 @@ static inline void print_opcode(u32 cmd, int ring_id)
if (d_info == NULL)
return;
- gvt_err("opcode=0x%x %s sub_ops:",
+ gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
cmd >> (32 - d_info->op_len), d_info->name);
for (i = 0; i < d_info->nr_sub_op; i++)
@@ -693,23 +693,23 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
+ gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
" ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
s->ring_id, s->ring_start, s->ring_start + s->ring_size,
s->ring_head, s->ring_tail);
- gvt_err(" %s %s ip_gma(%08lx) ",
+ gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
"RING_BUFFER" : "BATCH_BUFFER",
s->buf_addr_type == GTT_BUFFER ?
"GTT" : "PPGTT", s->ip_gma);
if (s->ip_va == NULL) {
- gvt_err(" ip_va(NULL)");
+ gvt_dbg_cmd(" ip_va(NULL)");
return;
}
- gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
+ gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n",
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
@@ -1530,7 +1530,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
len += copy_len;
gma += copy_len;
}
- return 0;
+ return len;
}
@@ -1644,7 +1644,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
gma, gma + bb_size,
dst);
- if (ret) {
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
goto unmap_src;
}
@@ -2608,11 +2608,8 @@ out:
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
- struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
- struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
- unsigned int copy_len = 0;
+ u32 *cs;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2626,36 +2623,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_top = workload->rb_start + guest_rb_size;
/* allocate shadow ring buffer */
- ret = intel_ring_begin(workload->req, workload->rb_len / 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* get shadow ring buffer va */
- workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+ workload->shadow_ring_buffer_va = cs;
/* head > tail --> copy head <-> top */
if (gma_head > gma_tail) {
ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_top,
- workload->shadow_ring_buffer_va);
- if (ret) {
+ gma_head, gma_top, cs);
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
- copy_len = gma_top - gma_head;
+ cs += ret / sizeof(u32);
gma_head = workload->rb_start;
}
/* copy head or start <-> tail */
- ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
- gma_head, gma_tail,
- workload->shadow_ring_buffer_va + copy_len);
- if (ret) {
+ ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+ if (ret < 0) {
gvt_err("fail to copy guest ring buffer\n");
return ret;
}
- ring->tail += workload->rb_len;
- intel_ring_advance(ring);
+ cs += ret / sizeof(u32);
+ intel_ring_advance(workload->req, cs);
return 0;
}
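shadow_workload_ring_buffer() handles the classic wrap case: when head > tail the live contents span head..top followed by start..tail, so two copies are needed and the destination pointer advances by however much the first copy moved. A generic sketch of the pattern, with memcpy standing in for copy_gma_to_hva():

#include <string.h>

static size_t copy_ring_sketch(char *dst, const char *ring, size_t size,
			       size_t head, size_t tail)
{
	size_t copied = 0;

	if (head > tail) {	/* wrapped: copy head..end first */
		memcpy(dst, ring + head, size - head);
		copied = size - head;
		head = 0;
	}
	memcpy(dst + copied, ring + head, tail - head);
	return copied + tail - head;
}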
@@ -2709,7 +2703,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
wa_ctx->workload->vgpu->gtt.ggtt_mm,
guest_gma, guest_gma + ctx_size,
map);
- if (ret) {
+ if (ret < 0) {
gvt_err("fail to copy guest indirect ctx\n");
goto unmap_src;
}
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8fde880c39..5419ae6ec633 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -83,44 +83,80 @@ static int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
return 0;
}
+static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = {
+ {
+/* EDID with 1024x768 as its resolution */
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /* Standard Timings. All invalid */
+ 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00,
+ 0x00, 0x40, 0x00, 0x00, 0x00, 0x01,
+ /* 18 Byte Data Blocks 1: invalid */
+ 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0xef,
+ },
+ {
/* EDID with 1920x1200 as its resolution */
-static unsigned char virtual_dp_monitor_edid[] = {
- /*Header*/
- 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
- /* Vendor & Product Identification */
- 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
- /* Version & Revision */
- 0x01, 0x04,
- /* Basic Display Parameters & Features */
- 0xa5, 0x34, 0x20, 0x78, 0x23,
- /* Color Characteristics */
- 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
- /* Established Timings: maximum resolution is 1024x768 */
- 0x21, 0x08, 0x00,
- /*
- * Standard Timings.
- * below new resolutions can be supported:
- * 1920x1080, 1280x720, 1280x960, 1280x1024,
- * 1440x900, 1600x1200, 1680x1050
- */
- 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
- 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
- /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
- 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
- 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
- /* 18 Byte Data Blocks 2: invalid */
- 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
- 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
- /* 18 Byte Data Blocks 3: invalid */
- 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
- 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
- /* 18 Byte Data Blocks 4: invalid */
- 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
- 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
- /* Extension Block Count */
- 0x00,
- /* Checksum */
- 0x45,
+ /*Header*/
+ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
+ /* Vendor & Product Identification */
+ 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17,
+ /* Version & Revision */
+ 0x01, 0x04,
+ /* Basic Display Parameters & Features */
+ 0xa5, 0x34, 0x20, 0x78, 0x23,
+ /* Color Characteristics */
+ 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54,
+ /* Established Timings: maximum resolution is 1024x768 */
+ 0x21, 0x08, 0x00,
+ /*
+ * Standard Timings.
+ * below new resolutions can be supported:
+ * 1920x1080, 1280x720, 1280x960, 1280x1024,
+ * 1440x900, 1600x1200, 1680x1050
+ */
+ 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00,
+ 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01,
+ /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */
+ 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0,
+ 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a,
+ /* 18 Byte Data Blocks 2: invalid */
+ 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a,
+ 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ /* 18 Byte Data Blocks 3: invalid */
+ 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48,
+ 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20,
+ /* 18 Byte Data Blocks 4: invalid */
+ 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30,
+ 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20,
+ /* Extension Block Count */
+ 0x00,
+ /* Checksum */
+ 0x45,
+ },
};
#define DPCD_HEADER_SIZE 0xb
@@ -140,14 +176,20 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
SDE_PORTE_HOTPLUG_SPT);
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+ }
- if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D))
+ if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+ vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+ }
if (IS_SKYLAKE(dev_priv) &&
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
@@ -160,6 +202,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
GEN8_PORT_DP_A_HOTPLUG;
else
vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+
+ vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
}
}
@@ -175,10 +219,13 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
}
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
- int type)
+ int type, unsigned int resolution)
{
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
+ if (WARN_ON(resolution >= GVT_EDID_NUM))
+ return -EINVAL;
+
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
if (!port->edid)
return -ENOMEM;
@@ -189,7 +236,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
return -ENOMEM;
}
- memcpy(port->edid->edid_block, virtual_dp_monitor_edid,
+ memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution],
EDID_SIZE);
port->edid->data_valid = true;
@@ -322,16 +369,18 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
* Zero on success, negative error code if failed.
*
*/
-int intel_vgpu_init_display(struct intel_vgpu *vgpu)
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
intel_vgpu_init_i2c_edid(vgpu);
if (IS_SKYLAKE(dev_priv))
- return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D);
+ return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
+ resolution);
else
- return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B);
+ return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B,
+ resolution);
}
/**
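Both EDID blocks end in a checksum byte chosen so that all 128 bytes sum to zero modulo 256, which is why the 1024x768 block ends in 0xef while the 1920x1200 block keeps 0x45. A sketch of how such a byte is derived:

static unsigned char edid_checksum_sketch(const unsigned char *edid)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < 127; i++)	/* all bytes except the checksum */
		sum += edid[i];
	return (unsigned char)(0x100 - (sum & 0xff));
}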
diff --git a/drivers/gpu/drm/i915/gvt/display.h b/drivers/gpu/drm/i915/gvt/display.h
index 8b234ea961f6..d73de22102e2 100644
--- a/drivers/gpu/drm/i915/gvt/display.h
+++ b/drivers/gpu/drm/i915/gvt/display.h
@@ -154,10 +154,28 @@ struct intel_vgpu_port {
int type;
};
+enum intel_vgpu_edid {
+ GVT_EDID_1024_768,
+ GVT_EDID_1920_1200,
+ GVT_EDID_NUM,
+};
+
+static inline char *vgpu_edid_str(enum intel_vgpu_edid id)
+{
+ switch (id) {
+ case GVT_EDID_1024_768:
+ return "1024x768";
+ case GVT_EDID_1920_1200:
+ return "1920x1200";
+ default:
+ return "";
+ }
+}
+
void intel_gvt_emulate_vblank(struct intel_gvt *gvt);
void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt);
-int intel_vgpu_init_display(struct intel_vgpu *vgpu);
+int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution);
void intel_vgpu_reset_display(struct intel_vgpu *vgpu);
void intel_vgpu_clean_display(struct intel_vgpu *vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 1cb29b2d7dc6..933a7c211a1c 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -80,7 +80,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
int ret;
size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1;
- firmware = vmalloc(size);
+ firmware = vzalloc(size);
if (!firmware)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 28c92346db0e..6a5ff23ded90 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1825,11 +1825,8 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
gma = g_gtt_index << GTT_PAGE_SHIFT;
/* the VM may configure the whole GM space when ballooning is used */
- if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
- "vgpu%d: found oob ggtt write, offset %x\n",
- vgpu->id, off)) {
+ if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- }
ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
@@ -2015,6 +2012,22 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
return create_scratch_page_tree(vgpu);
}
+static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type)
+{
+ struct list_head *pos, *n;
+ struct intel_vgpu_mm *mm;
+
+ list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
+ mm = container_of(pos, struct intel_vgpu_mm, list);
+ if (mm->type == type) {
+ vgpu->gvt->gtt.mm_free_page_table(mm);
+ list_del(&mm->list);
+ list_del(&mm->lru_list);
+ kfree(mm);
+ }
+ }
+}
+
/**
* intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
* @vgpu: a vGPU
@@ -2027,19 +2040,11 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
*/
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
- struct list_head *pos, *n;
- struct intel_vgpu_mm *mm;
-
ppgtt_free_all_shadow_page(vgpu);
release_scratch_page_tree(vgpu);
- list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
- mm = container_of(pos, struct intel_vgpu_mm, list);
- vgpu->gvt->gtt.mm_free_page_table(mm);
- list_del(&mm->list);
- list_del(&mm->lru_list);
- kfree(mm);
- }
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT);
}
static void clean_spt_oos(struct intel_gvt *gvt)
@@ -2322,6 +2327,13 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
int i;
ppgtt_free_all_shadow_page(vgpu);
+
+ /* Shadow pages are only created when there is no page
+ * table tracking data, so remove page tracking data after
+ * removing the shadow pages.
+ */
+ intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT);
+
if (!dmlr)
return;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index e227caf5859e..6dfc48b63b71 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -143,6 +143,8 @@ struct intel_vgpu {
int id;
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
bool active;
+ bool pv_notified;
+ bool failsafe;
bool resetting;
void *sched_data;
@@ -160,7 +162,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
- struct notifier_block shadow_ctx_notifier_block;
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -203,18 +204,18 @@ struct intel_gvt_firmware {
};
struct intel_gvt_opregion {
- void __iomem *opregion_va;
+ void *opregion_va;
u32 opregion_pa;
};
#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
- unsigned int max_instance;
unsigned int avail_instance;
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
+ enum intel_vgpu_edid resolution;
};
struct intel_gvt {
@@ -231,6 +232,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
+ struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
@@ -317,6 +319,7 @@ struct intel_vgpu_creation_params {
__u64 low_gm_sz; /* in MB */
__u64 high_gm_sz; /* in MB */
__u64 fence_sz;
+ __u64 resolution;
__s32 primary;
__u64 vgpu_id;
};
@@ -449,6 +452,11 @@ struct intel_gvt_ops {
};
+enum {
+ GVT_FAILSAFE_UNSUPPORTED_GUEST,
+ GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
+};
+
#include "mpt.h"
#endif
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 1d450627ff65..8e43395c748a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -121,6 +121,7 @@ static int new_mmio_info(struct intel_gvt *gvt,
info->size = size;
info->length = (i + 4) < end ? 4 : (end - i);
info->addr_mask = addr_mask;
+ info->ro_mask = ro_mask;
info->device = device;
info->read = read ? read : intel_vgpu_default_mmio_read;
info->write = write ? write : intel_vgpu_default_mmio_write;
@@ -150,15 +151,44 @@ static int render_mmio_to_ring_id(struct intel_gvt *gvt, unsigned int reg)
#define fence_num_to_offset(num) \
(num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0)))
+
+static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
+{
+ switch (reason) {
+ case GVT_FAILSAFE_UNSUPPORTED_GUEST:
+ pr_err("Detected your guest driver doesn't support GVT-g.\n");
+ break;
+ case GVT_FAILSAFE_INSUFFICIENT_RESOURCE:
+ pr_err("Graphics resources are not enough for the guest\n");
+ break;
+ default:
+ break;
+ }
+ pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id);
+ vgpu->failsafe = true;
+}
+
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
unsigned int fence_num, void *p_data, unsigned int bytes)
{
if (fence_num >= vgpu_fence_sz(vgpu)) {
- gvt_err("vgpu%d: found oob fence register access\n",
- vgpu->id);
- gvt_err("vgpu%d: total fence num %d access fence num %d\n",
- vgpu->id, vgpu_fence_sz(vgpu), fence_num);
+
+ /* When the guest accesses OOB fence regs without reading
+ * pv_info first, we treat it as a guest that doesn't support
+ * GVT and let the vGPU enter failsafe mode.
+ */
+ if (!vgpu->pv_notified)
+ enter_failsafe_mode(vgpu,
+ GVT_FAILSAFE_UNSUPPORTED_GUEST);
+
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: found oob fence register access\n",
+ vgpu->id);
+ gvt_err("vgpu%d: total fence %d, access fence %d\n",
+ vgpu->id, vgpu_fence_sz(vgpu),
+ fence_num);
+ }
memset(p_data, 0, bytes);
+ return -EINVAL;
}
return 0;
}
@@ -369,6 +399,74 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
+/* sorted in ascending order; the bsearch below depends on this */
+static i915_reg_t force_nonpriv_white_list[] = {
+ GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
+ GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
+ GEN8_CS_CHICKEN1,//_MMIO(0x2580)
+ _MMIO(0x2690),
+ _MMIO(0x2694),
+ _MMIO(0x2698),
+ _MMIO(0x4de0),
+ _MMIO(0x4de4),
+ _MMIO(0x4dfc),
+ GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010)
+ _MMIO(0x7014),
+ HDC_CHICKEN0,//_MMIO(0x7300)
+ GEN8_HDC_CHICKEN1,//_MMIO(0x7304)
+ _MMIO(0x7700),
+ _MMIO(0x7704),
+ _MMIO(0x7708),
+ _MMIO(0x770c),
+ _MMIO(0xb110),
+ GEN8_L3SQCREG4,//_MMIO(0xb118)
+ _MMIO(0xe100),
+ _MMIO(0xe18c),
+ _MMIO(0xe48c),
+ _MMIO(0xe5f4),
+};
+
+/* a simple bsearch */
+static inline bool in_whitelist(unsigned int reg)
+{
+ int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
+ i915_reg_t *array = force_nonpriv_white_list;
+
+ while (left < right) {
+ int mid = (left + right)/2;
+
+ if (reg > array[mid].reg)
+ left = mid + 1;
+ else if (reg < array[mid].reg)
+ right = mid;
+ else
+ return true;
+ }
+ return false;
+}
+
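The lookup above is a textbook half-open binary search, so force_nonpriv_white_list must stay sorted in ascending offset order (hence the comment on the table). A minimal user-space sketch of the same loop, using a hypothetical plain-u32 subset of the table so it compiles and runs standalone:

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* illustrative subset of the whitelist, kept in ascending order */
static const uint32_t whitelist[] = {
	0x20ec, 0x2248, 0x2580, 0x2690, 0x7010, 0xb118, 0xe5f4,
};

static bool in_whitelist_sketch(uint32_t reg)
{
	size_t left = 0, right = sizeof(whitelist) / sizeof(whitelist[0]);

	while (left < right) {
		size_t mid = left + (right - left) / 2;

		if (reg > whitelist[mid])
			left = mid + 1;	/* discard lower half */
		else if (reg < whitelist[mid])
			right = mid;	/* discard upper half */
		else
			return true;
	}
	return false;
}

int main(void)
{
	assert(in_whitelist_sketch(0x2580));	/* GEN8_CS_CHICKEN1 */
	assert(!in_whitelist_sketch(0x2584));	/* not whitelisted */
	return 0;
}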
+static int force_nonpriv_write(struct intel_vgpu *vgpu,
+ unsigned int offset, void *p_data, unsigned int bytes)
+{
+ u32 reg_nonpriv = *(u32 *)p_data;
+ int ret = -EINVAL;
+
+ if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return ret;
+ }
+
+ if (in_whitelist(reg_nonpriv)) {
+ ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n",
+ vgpu->id, reg_nonpriv);
+ }
+ return ret;
+}
+
static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
@@ -1001,6 +1099,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
if (invalid_read)
gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
offset, bytes, *(u32 *)p_data);
+ vgpu->pv_notified = true;
return 0;
}
@@ -1039,7 +1138,7 @@ static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
char vmid_str[20];
char display_ready_str[20];
- snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready);
+ snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready);
env[0] = display_ready_str;
snprintf(vmid_str, 20, "VMID=%d", vgpu->id);
@@ -1078,6 +1177,9 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
case _vgtif_reg(execlist_context_descriptor_lo):
case _vgtif_reg(execlist_context_descriptor_hi):
break;
+ case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]):
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
+ break;
default:
gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
offset, bytes, data);
@@ -1203,26 +1305,37 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA);
switch (cmd) {
- case 0x6:
- /**
- * "Read memory latency" command on gen9.
- * Below memory latency values are read
- * from skylake platform.
- */
- if (!*data0)
- *data0 = 0x1e1a1100;
- else
- *data0 = 0x61514b3d;
+ case GEN9_PCODE_READ_MEM_LATENCY:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv)) {
+ /*
+ * "Read memory latency" command on gen9.
+ * The memory latency values below were read
+ * from a Skylake platform.
+ */
+ if (!*data0)
+ *data0 = 0x1e1a1100;
+ else
+ *data0 = 0x61514b3d;
+ }
+ break;
+ case SKL_PCODE_CDCLK_CONTROL:
+ if (IS_SKYLAKE(vgpu->gvt->dev_priv))
+ *data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
- case 0x5:
+ case GEN6_PCODE_READ_RC6VIDS:
*data0 |= 0x1;
break;
}
gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n",
vgpu->id, value, *data0);
-
- value &= ~(1 << 31);
+ /*
+ * A cleared PCODE_READY bit means pcode is ready for read/write,
+ * and a cleared PCODE_ERROR_MASK means no error happened. In GVT-g
+ * we always emulate pcode read/write as successful and immediately
+ * ready, since we don't touch real physical registers here.
+ */
+ value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK);
return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes);
}
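For context, what this emulation satisfies is the guest's pcode mailbox handshake: write GEN6_PCODE_DATA, kick GEN6_PCODE_MAILBOX with the READY bit set, then poll until pcode clears READY. A toy user-space model of that round trip, assuming the usual i915 offsets 0x138124/0x138128 and READY in bit 31 (a sketch, not the driver's code):

#include <stdint.h>
#include <stdio.h>

#define GEN6_PCODE_MAILBOX 0x138124	/* assumed i915 offsets */
#define GEN6_PCODE_DATA    0x138128
#define GEN6_PCODE_READY   (1u << 31)

/* Toy register file; the "device" side mirrors the vGPU handler
 * above by clearing READY the moment the mailbox is written,
 * emulating instant pcode completion. */
static uint32_t mailbox, mbox_data;

static void mmio_write32(uint32_t reg, uint32_t v)
{
	if (reg == GEN6_PCODE_DATA)
		mbox_data = v;
	else if (reg == GEN6_PCODE_MAILBOX)
		mailbox = v & ~GEN6_PCODE_READY;
}

static uint32_t mmio_read32(uint32_t reg)
{
	return reg == GEN6_PCODE_DATA ? mbox_data : mailbox;
}

int main(void)
{
	/* guest-side handshake */
	mmio_write32(GEN6_PCODE_DATA, 0);
	mmio_write32(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | 0x6);

	while (mmio_read32(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		;	/* poll; terminates on the first read here */

	printf("mailbox %#x data %#x\n", mailbox, mbox_data);
	return 0;
}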
@@ -1318,6 +1431,17 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
write_vreg(vgpu, offset, p_data, bytes);
+
+ /* When PPGTT mode is enabled, check whether the guest has read
+ * pvinfo; if not, treat it as a non-GVT-g-aware guest and stop
+ * emulating its cfg space, MMIO, GTT, etc.
+ */
+ if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
+ (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
+ && !vgpu->pv_notified) {
+ enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
+ return 0;
+ }
if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
@@ -1400,6 +1524,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_GM(reg, d, r, w) \
MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w)
+#define MMIO_GM_RDR(reg, d, r, w) \
+ MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w)
+
#define MMIO_RO(reg, d, f, rm, r, w) \
MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w)
@@ -1419,6 +1546,9 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
#define MMIO_RING_GM(prefix, d, r, w) \
MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w)
+#define MMIO_RING_GM_RDR(prefix, d, r, w) \
+ MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w)
+
#define MMIO_RING_RO(prefix, d, f, rm, r, w) \
MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w)
@@ -1427,73 +1557,81 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
+ MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
+ intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler);
MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler);
MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler);
MMIO_D(SDEISR, D_ALL);
- MMIO_RING_D(RING_HWSTAM, D_ALL);
+ MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
- MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL);
#define RING_REG(base) (base + 0x28)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x134)
- MMIO_RING_D(RING_REG, D_ALL);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL);
#undef RING_REG
- MMIO_GM(0x2148, D_ALL, NULL, NULL);
- MMIO_GM(CCID, D_ALL, NULL, NULL);
- MMIO_GM(0x12198, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(CCID, D_ALL, NULL, NULL);
+ MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL);
MMIO_D(GEN7_CXT_SIZE, D_ALL);
- MMIO_RING_D(RING_TAIL, D_ALL);
- MMIO_RING_D(RING_HEAD, D_ALL);
- MMIO_RING_D(RING_CTL, D_ALL);
- MMIO_RING_D(RING_ACTHD, D_ALL);
- MMIO_RING_GM(RING_START, D_ALL, NULL, NULL);
+ MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL);
/* RING MODE */
#define RING_REG(base) (base + 0x29c)
- MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write);
+ MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
#undef RING_REG
- MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
-
- MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL);
- MMIO_D(GAM_ECOCHK, D_ALL);
- MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0x9030, D_ALL);
- MMIO_D(0x20a0, D_ALL);
- MMIO_D(0x2420, D_ALL);
- MMIO_D(0x2430, D_ALL);
- MMIO_D(0x2434, D_ALL);
- MMIO_D(0x2438, D_ALL);
- MMIO_D(0x243c, D_ALL);
- MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* display */
MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL);
@@ -2022,8 +2160,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(FORCEWAKE_ACK, D_ALL);
MMIO_D(GEN6_GT_CORE_STATUS, D_ALL);
MMIO_D(GEN6_GT_THREAD_STATUS_REG, D_ALL);
- MMIO_D(GTFIFODBG, D_ALL);
- MMIO_D(GTFIFOCTL, D_ALL);
+ MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write);
MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL);
MMIO_D(ECOBUS, D_ALL);
@@ -2080,7 +2218,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL);
+ MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW);
MMIO_D(GEN6_PCODE_DATA, D_ALL);
MMIO_D(0x13812c, D_ALL);
MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL);
@@ -2159,36 +2297,35 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x1a054, D_ALL);
MMIO_D(0x44070, D_ALL);
-
- MMIO_D(0x215c, D_HSW_PLUS);
+ MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL);
- MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL);
- MMIO_D(GEN7_OACONTROL, D_HSW);
+ MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL);
+ MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x2b00, D_BDW_PLUS);
MMIO_D(0x2360, D_BDW_PLUS);
- MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(BCS_SWCTRL, D_ALL);
-
- MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
- MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL);
+ MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
+ MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL);
MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
@@ -2196,6 +2333,17 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler);
MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL);
+ MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
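Most of the churn in this hunk adds F_CMD_ACCESS to tracked registers: the flag marks a register as legal for command-stream access, so GVT's command parser accepts batch buffers that load it (e.g. via LRI) instead of failing the scan. A toy sketch of that gate, with a hypothetical flag bit and illustrative register offsets:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define F_CMD_ACCESS (1u << 3)	/* illustrative bit position */

struct mmio_info {
	uint32_t offset;
	uint32_t flags;
};

/* Command-parser-style gate: an MI_LOAD_REGISTER_IMM touching a
 * tracked register is only allowed when the entry carries
 * F_CMD_ACCESS. */
static bool cmd_reg_allowed(const struct mmio_info *info)
{
	return info && (info->flags & F_CMD_ACCESS);
}

int main(void)
{
	struct mmio_info mi_mode = { 0x209c, F_CMD_ACCESS };
	struct mmio_info sdeisr  = { 0xc4000, 0 };

	assert(cmd_reg_allowed(&mi_mode));
	assert(!cmd_reg_allowed(&sdeisr));
	return 0;
}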
@@ -2204,7 +2352,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
struct drm_i915_private *dev_priv = gvt->dev_priv;
int ret;
- MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL,
+ MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL,
intel_vgpu_reg_imr_handler);
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2269,24 +2417,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL,
intel_vgpu_reg_master_irq_handler);
- MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(0x1c134, D_BDW_PLUS);
-
- MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
- MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
- MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write);
- MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
- MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK,
- NULL, NULL);
+ MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
+ MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
+ ring_mode_mmio_write);
+ MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS,
+ F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
ring_timestamp_mmio_read, NULL);
- MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS);
+ MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
#define RING_REG(base) (base + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
@@ -2303,13 +2458,16 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
#undef RING_REG
#define RING_REG(base) (base + 0x234)
- MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL);
- MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS,
+ NULL, NULL);
+ MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0,
+ ~0LL, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x244)
- MMIO_RING_D(RING_REG, D_BDW_PLUS);
- MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS);
+ MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS,
+ NULL, NULL);
#undef RING_REG
#define RING_REG(base) (base + 0x370)
@@ -2331,6 +2489,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS);
MMIO_D(0x1c054, D_BDW_PLUS);
+ MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write);
+
MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS);
MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS);
@@ -2341,14 +2501,14 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL);
#undef RING_REG
- MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
- MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL);
+ MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL);
+ MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL);
MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW);
- MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS);
+ MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS);
MMIO_D(WM_MISC, D_BDW);
MMIO_D(BDW_EDP_PSR_BASE, D_BDW);
@@ -2362,27 +2522,31 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS);
MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS);
- MMIO_D(0xfdc, D_BDW);
- MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS);
- MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS);
+ MMIO_D(0xfdc, D_BDW_PLUS);
+ MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
+ MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb1f0, D_BDW);
- MMIO_D(0xb1c0, D_BDW);
+ MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_D(0xb100, D_BDW);
- MMIO_D(0xb10c, D_BDW);
+ MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0xb110, D_BDW);
- MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS,
+ NULL, force_nonpriv_write);
+
+ MMIO_D(0x22040, D_BDW_PLUS);
+ MMIO_D(0x44484, D_BDW_PLUS);
+ MMIO_D(0x4448c, D_BDW_PLUS);
- MMIO_D(0x83a4, D_BDW);
+ MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS);
- MMIO_D(0x8430, D_BDW);
+ MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x110000, D_BDW_PLUS);
@@ -2394,10 +2558,19 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
- MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL);
-
- MMIO_D(0x2248, D_BDW);
-
+ MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
return 0;
}
@@ -2420,7 +2593,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_BIOS, D_SKL);
MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write);
- MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write);
MMIO_D(0xa210, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -2578,16 +2750,16 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL);
MMIO_D(0xd08, D_SKL);
- MMIO_D(0x20e0, D_SKL);
- MMIO_D(0x20ec, D_SKL);
+ MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL);
+ MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
/* TRTT */
- MMIO_D(0x4de0, D_SKL);
- MMIO_D(0x4de4, D_SKL);
- MMIO_D(0x4de8, D_SKL);
- MMIO_D(0x4dec, D_SKL);
- MMIO_D(0x4df0, D_SKL);
- MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write);
+ MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write);
MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write);
MMIO_D(0x45008, D_SKL);
@@ -2611,7 +2783,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(0x65f08, D_SKL);
MMIO_D(0x320f0, D_SKL);
- MMIO_D(_REG_VCS2_EXCC, D_SKL);
+ MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x70034, D_SKL);
MMIO_D(0x71034, D_SKL);
MMIO_D(0x72034, D_SKL);
@@ -2624,6 +2796,9 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL);
MMIO_D(0x44500, D_SKL);
+ MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
+ MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 0f7f5d97f582..84d801638ede 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -96,10 +96,10 @@ static int gvt_dma_map_iova(struct intel_vgpu *vgpu, kvm_pfn_t pfn,
struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr;
- page = pfn_to_page(pfn);
- if (is_error_page(page))
+ if (unlikely(!pfn_valid(pfn)))
return -EFAULT;
+ page = pfn_to_page(pfn);
daddr = dma_map_page(dev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr))
@@ -295,10 +295,10 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
return 0;
return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n"
- "fence: %d\n",
- BYTES_TO_MB(type->low_gm_size),
- BYTES_TO_MB(type->high_gm_size),
- type->fence);
+ "fence: %d\nresolution: %s\n",
+ BYTES_TO_MB(type->low_gm_size),
+ BYTES_TO_MB(type->high_gm_size),
+ type->fence, vgpu_edid_str(type->resolution));
}
static MDEV_TYPE_ATTR_RO(available_instances);
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4df078bc5d04..60b698cb8365 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -57,6 +57,58 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
(reg >= gvt->device_info.gtt_start_offset \
&& reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+ void *p_data, unsigned int bytes, bool read)
+{
+ struct intel_gvt *gvt = NULL;
+ void *pt = NULL;
+ unsigned int offset = 0;
+
+ if (!vgpu || !p_data)
+ return;
+
+ gvt = vgpu->gvt;
+ mutex_lock(&gvt->lock);
+ offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
+ if (reg_is_mmio(gvt, offset)) {
+ if (read)
+ intel_vgpu_default_mmio_read(vgpu, offset, p_data,
+ bytes);
+ else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data,
+ bytes);
+ } else if (reg_is_gtt(gvt, offset) &&
+ vgpu->gtt.ggtt_mm->virtual_page_table) {
+ offset -= gvt->device_info.gtt_start_offset;
+ pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+ if (read)
+ memcpy(p_data, pt, bytes);
+ else
+ memcpy(pt, p_data, bytes);
+
+ } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
+ struct intel_vgpu_guest_page *gp;
+
+ /* Since we enter failsafe mode early during guest boot, the
+ * guest may not have had a chance to set up its ppgtt table, so
+ * there should not be any write-protected pages for it. Keep the
+ * wp-related code here in case we need to handle it in future.
+ */
+ gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
+ if (gp) {
+ /* remove write protection to prevent future traps */
+ intel_vgpu_clean_guest_page(vgpu, gp);
+ if (read)
+ intel_gvt_hypervisor_read_gpa(vgpu, pa,
+ p_data, bytes);
+ else
+ intel_gvt_hypervisor_write_gpa(vgpu, pa,
+ p_data, bytes);
+ }
+ }
+ mutex_unlock(&gvt->lock);
+}
+
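In failsafe mode the emulation above degrades to three passthrough paths, picked purely by address range: default vreg access for the MMIO range, a direct memcpy into the shadowed GGTT page table for the GTT range, and guest-physical access for pages that had been write-protected. A toy router with illustrative boundaries (not the device's real layout, which the driver derives from device_info at runtime):

#include <assert.h>
#include <stdint.h>

#define MMIO_SIZE  0x100000	/* illustrative */
#define GTT_START  0x800000
#define GTT_SIZE   0x100000

enum target { TGT_MMIO, TGT_GTT, TGT_GPA };

/* Failsafe routing: vreg read/write for the MMIO range, direct
 * page-table memcpy for the GTT range, guest-physical passthrough
 * for formerly write-protected pages. */
static enum target route(uint32_t offset)
{
	if (offset < MMIO_SIZE)
		return TGT_MMIO;
	if (offset >= GTT_START && offset < GTT_START + GTT_SIZE)
		return TGT_GTT;
	return TGT_GPA;
}

int main(void)
{
	assert(route(0x2580) == TGT_MMIO);
	assert(route(GTT_START + 0x40) == TGT_GTT);
	assert(route(0x200000) == TGT_GPA);
	return 0;
}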
/**
* intel_vgpu_emulate_mmio_read - emulate MMIO read
* @vgpu: a vGPU
@@ -75,6 +127,11 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
unsigned int offset = 0;
int ret = -EINVAL;
+
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
+ return 0;
+ }
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -188,6 +245,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
u32 old_vreg = 0, old_sreg = 0;
int ret = -EINVAL;
+ if (vgpu->failsafe) {
+ failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false);
+ return 0;
+ }
+
mutex_lock(&gvt->lock);
if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
@@ -236,7 +298,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack)
- gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n",
+ gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
@@ -322,6 +384,8 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+
+ vgpu->mmio.disable_warn_untrack = false;
}
/**
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index d9fb41ab7119..5d1caf9daba9 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -27,7 +27,6 @@
static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
{
- void __iomem *host_va = vgpu->gvt->opregion.opregion_va;
u8 *buf;
int i;
@@ -43,8 +42,8 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
- memcpy_fromio(vgpu_opregion(vgpu)->va, host_va,
- INTEL_GVT_OPREGION_SIZE);
+ memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
+ INTEL_GVT_OPREGION_SIZE);
for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++)
vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 2b3a642284b6..73f052a4f424 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -53,6 +53,14 @@ static struct render_mmio gen8_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
@@ -76,6 +84,14 @@ static struct render_mmio gen9_render_mmio_list[] = {
{RCS, _MMIO(0x24d4), 0, false},
{RCS, _MMIO(0x24d8), 0, false},
{RCS, _MMIO(0x24dc), 0, false},
+ {RCS, _MMIO(0x24e0), 0, false},
+ {RCS, _MMIO(0x24e4), 0, false},
+ {RCS, _MMIO(0x24e8), 0, false},
+ {RCS, _MMIO(0x24ec), 0, false},
+ {RCS, _MMIO(0x24f0), 0, false},
+ {RCS, _MMIO(0x24f4), 0, false},
+ {RCS, _MMIO(0x24f8), 0, false},
+ {RCS, _MMIO(0x24fc), 0, false},
{RCS, _MMIO(0x7004), 0xffff, true},
{RCS, _MMIO(0x7008), 0xffff, true},
{RCS, _MMIO(0x7000), 0xffff, true},
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index d6b6d0efdd1a..811a84bdbafb 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,15 +130,16 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu, shadow_ctx_notifier_block);
- struct drm_i915_gem_request *req =
- (struct drm_i915_gem_request *)data;
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
+ struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+ struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
+ shadow_ctx_notifier_block[req->engine->id]);
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];
+ if (unlikely(!workload))
+ return NOTIFY_OK;
+
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
intel_gvt_load_render_mmio(workload->vgpu,
@@ -148,6 +149,15 @@ static int shadow_context_status_change(struct notifier_block *nb,
case INTEL_CONTEXT_SCHEDULE_OUT:
intel_gvt_restore_render_mmio(workload->vgpu,
workload->ring_id);
+ /* A status of -EINPROGRESS means this workload hit no
+ * issue during dispatch, so on SCHEDULE_OUT the status
+ * can be set to zero for good. Any other status means
+ * something went wrong during dispatch, and the status
+ * must not be overwritten with zero.
+ */
+ if (workload->status == -EINPROGRESS)
+ workload->status = 0;
atomic_set(&workload->shadow_ctx_active, 0);
break;
default:
@@ -214,7 +224,7 @@ out:
workload->status = ret;
if (!IS_ERR_OR_NULL(rq))
- i915_add_request_no_flush(rq);
+ i915_add_request(rq);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -359,15 +369,23 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
workload = scheduler->current_workload[ring_id];
vgpu = workload->vgpu;
- if (!workload->status && !vgpu->resetting) {
+ /* A workload with a request needs to wait for the context
+ * switch to make sure the request has completed.
+ * A workload without a request is completed directly.
+ */
+ if (workload->req) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
- update_guest_context(workload);
+ i915_gem_request_put(fetch_and_zero(&workload->req));
- for_each_set_bit(event, workload->pending_events,
- INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(vgpu, event);
+ if (!workload->status && !vgpu->resetting) {
+ update_guest_context(workload);
+
+ for_each_set_bit(event, workload->pending_events,
+ INTEL_GVT_EVENT_MAX)
+ intel_vgpu_trigger_virtual_event(vgpu, event);
+ }
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -397,7 +415,6 @@ static int workload_thread(void *priv)
int ring_id = p->ring_id;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
- long lret;
int ret;
bool need_force_wake = IS_SKYLAKE(gvt->dev_priv);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
@@ -446,23 +463,24 @@ static int workload_thread(void *priv)
gvt_dbg_sched("ring id %d wait workload %p\n",
workload->ring_id, workload);
-
- lret = i915_wait_request(workload->req,
+retry:
+ i915_wait_request(workload->req,
0, MAX_SCHEDULE_TIMEOUT);
- if (lret < 0) {
- workload->status = lret;
- gvt_err("fail to wait workload, skip\n");
- } else {
- workload->status = 0;
+ /* i915 has a replay mechanism and a request will be replayed
+ * after an i915 reset, so the seqno will be updated eventually.
+ * If the seqno has not been updated yet after waiting, the
+ * replay may still be in progress, so wait again.
+ */
+ if (!i915_gem_request_completed(workload->req)) {
+ gvt_dbg_sched("workload %p not completed, wait again\n",
+ workload);
+ goto retry;
}
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
workload, workload->status);
- if (workload->req)
- i915_gem_request_put(fetch_and_zero(&workload->req));
-
complete_current_workload(gvt, ring_id);
if (need_force_wake)
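The retry loop above encodes the rule that a wakeup from i915_wait_request() is not proof of completion once reset replay is possible; only an advanced seqno is. A small standalone model of that check-after-wait pattern (the names and the fake three-wakeup completion are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool completed;
static int wakeups;

/* Stand-in for i915_wait_request(): it may return before the
 * request has really completed, e.g. while the request is being
 * replayed after a GPU reset -- here it "completes" on the third
 * wakeup to make the loop observable. */
static void wait_request(void)
{
	if (++wakeups == 3)
		atomic_store(&completed, true);
}

int main(void)
{
retry:
	wait_request();
	if (!atomic_load(&completed)) {
		printf("not completed after wakeup %d, wait again\n",
		       wakeups);
		goto retry;
	}
	printf("completed after %d wakeups\n", wakeups);
	return 0;
}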
@@ -493,15 +511,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- int i;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
gvt_dbg_core("clean workload scheduler\n");
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- if (scheduler->thread[i]) {
- kthread_stop(scheduler->thread[i]);
- scheduler->thread[i] = NULL;
- }
+ for_each_engine(engine, gvt->dev_priv, i) {
+ atomic_notifier_chain_unregister(
+ &engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
+ kthread_stop(scheduler->thread[i]);
}
}
@@ -509,18 +528,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id i;
int ret;
- int i;
gvt_dbg_core("init workload scheduler\n");
init_waitqueue_head(&scheduler->workload_complete_wq);
- for (i = 0; i < I915_NUM_ENGINES; i++) {
- /* check ring mask at init time */
- if (!HAS_ENGINE(gvt->dev_priv, i))
- continue;
-
+ for_each_engine(engine, gvt->dev_priv, i) {
init_waitqueue_head(&scheduler->waitq[i]);
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,6 +555,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}
+
+ gvt->shadow_ctx_notifier_block[i].notifier_call =
+ shadow_context_status_change;
+ atomic_notifier_chain_register(&engine->context_status_notifier,
+ &gvt->shadow_ctx_notifier_block[i]);
}
return 0;
err:
@@ -550,9 +571,6 @@ err:
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
- atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
-
i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}
@@ -567,10 +585,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)
vgpu->shadow_ctx->engine[RCS].initialised = true;
- vgpu->shadow_ctx_notifier_block.notifier_call =
- shadow_context_status_change;
-
- atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
- &vgpu->shadow_ctx_notifier_block);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 95a97aa0051e..41cfa5ccae84 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -64,6 +64,20 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
+static struct {
+ unsigned int low_mm;
+ unsigned int high_mm;
+ unsigned int fence;
+ enum intel_vgpu_edid edid;
+ char *name;
+} vgpu_types[] = {
+/* Fixed vGPU type table */
+ { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" },
+ { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" },
+ { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" },
+ { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" },
+};
+
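The name suffix in the fixed table is the nominal instance count when the whole GPU is carved into that one type; the actual avail_instance computed below is the minimum of what the low and high GM pools allow. A standalone sketch of that arithmetic with illustrative pool sizes (512 MB low, 4096 MB high; real values depend on the SKU):

#include <stdio.h>

#define MB(x) ((unsigned long long)(x) << 20)

int main(void)
{
	/* illustrative pools; real values come from the SKU */
	unsigned long long low_avail = MB(512), high_avail = MB(4096);
	struct {
		unsigned long long low, high;
		const char *name;
	} t[] = {
		{ MB(64),  MB(512),  "GVTg_V4_8" },
		{ MB(128), MB(512),  "GVTg_V4_4" },
		{ MB(256), MB(1024), "GVTg_V4_2" },
		{ MB(512), MB(2048), "GVTg_V4_1" },
	};

	for (int i = 0; i < 4; i++) {
		unsigned long long n_low = low_avail / t[i].low;
		unsigned long long n_high = high_avail / t[i].high;

		/* avail_instance = min(low pool fit, high pool fit) */
		printf("%s: avail %llu\n", t[i].name,
		       n_low < n_high ? n_low : n_high);
	}
	return 0;
}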
/**
* intel_gvt_init_vgpu_types - initialize vGPU type list
* @gvt : GVT device
@@ -78,9 +92,8 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
- * physical GPU generation type and 'y' means maximum vGPU
- * instances user can create on one physical GPU for this
- * type.
+ * the physical GPU generation type (e.g. V4 for a BDW server,
+ * V5 for a SKL server).
*
* Depending on the physical SKU resource, one might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
@@ -92,7 +105,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
*/
low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE;
high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE;
- num_types = 4;
+ num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]);
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
GFP_KERNEL);
@@ -101,28 +114,29 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
min_low = MB_TO_BYTES(32);
for (i = 0; i < num_types; ++i) {
- if (low_avail / min_low == 0)
+ if (low_avail / vgpu_types[i].low_mm == 0)
break;
- gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
- gvt->types[i].fence = 4;
- gvt->types[i].max_instance = min(low_avail / min_low,
- high_avail / gvt->types[i].high_gm_size);
- gvt->types[i].avail_instance = gvt->types[i].max_instance;
+
+ gvt->types[i].low_gm_size = vgpu_types[i].low_mm;
+ gvt->types[i].high_gm_size = vgpu_types[i].high_mm;
+ gvt->types[i].fence = vgpu_types[i].fence;
+ gvt->types[i].resolution = vgpu_types[i].edid;
+ gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
+ high_avail / vgpu_types[i].high_mm);
if (IS_GEN8(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V4_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V4_%s",
+ vgpu_types[i].name);
else if (IS_GEN9(gvt->dev_priv))
- sprintf(gvt->types[i].name, "GVTg_V5_%u",
- gvt->types[i].max_instance);
+ sprintf(gvt->types[i].name, "GVTg_V5_%s",
+ vgpu_types[i].name);
- min_low <<= 1;
- gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
- gvt->types[i].high_gm_size, gvt->types[i].fence);
+ gvt->types[i].high_gm_size, gvt->types[i].fence,
+ vgpu_edid_str(gvt->types[i].resolution));
}
gvt->num_types = i;
@@ -138,7 +152,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
int i;
unsigned int low_gm_avail, high_gm_avail, fence_avail;
- unsigned int low_gm_min, high_gm_min, fence_min, total_min;
+ unsigned int low_gm_min, high_gm_min, fence_min;
/* Need to depend on maximum hw resource size but keep on
* static config for now.
@@ -154,12 +168,11 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
fence_min = fence_avail / gvt->types[i].fence;
- total_min = min(min(low_gm_min, high_gm_min), fence_min);
- gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
- total_min);
+ gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min),
+ fence_min);
- gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
- i, gvt->types[i].name, gvt->types[i].max_instance,
+ gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n",
+ i, gvt->types[i].name,
gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence);
}
@@ -248,7 +261,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_detach_hypervisor_vgpu;
- ret = intel_vgpu_init_display(vgpu);
+ ret = intel_vgpu_init_display(vgpu, param->resolution);
if (ret)
goto out_clean_gtt;
@@ -312,6 +325,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
+ param.resolution = type->resolution;
/* XXX current param based on MB */
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
@@ -387,8 +401,12 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
populate_pvinfo_page(vgpu);
intel_vgpu_reset_display(vgpu);
- if (dmlr)
+ if (dmlr) {
intel_vgpu_reset_cfg_space(vgpu);
+ /* only reset failsafe mode on a device-model-level reset (dmlr) */
+ vgpu->failsafe = false;
+ vgpu->pv_notified = false;
+ }
}
vgpu->resetting = false;
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 21b1cd917d81..7af100f84410 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1279,11 +1279,17 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
* space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + (batch_len / sizeof(*batch_end));
- while (cmd < batch_end) {
+ do {
u32 length;
- if (*cmd == MI_BATCH_BUFFER_END)
+ if (*cmd == MI_BATCH_BUFFER_END) {
+ if (needs_clflush_after) {
+ void *ptr = ptr_mask_bits(shadow_batch_obj->mm.mapping);
+ drm_clflush_virt_range(ptr,
+ (void *)(cmd + 1) - ptr);
+ }
break;
+ }
desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
@@ -1323,17 +1329,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}
cmd += length;
- }
-
- if (cmd >= batch_end) {
- DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
- ret = -EINVAL;
- }
+ if (cmd >= batch_end) {
+ DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
+ ret = -EINVAL;
+ break;
+ }
+ } while (1);
- if (ret == 0 && needs_clflush_after)
- drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);
-
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 7d7244798507..47e707d83c4d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -27,7 +27,7 @@
*/
#include <linux/debugfs.h>
-#include <linux/list_sort.h>
+#include <linux/sort.h>
#include "intel_drv.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -35,6 +35,23 @@ static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
return to_i915(node->minor->dev);
}
+static __always_inline void seq_print_param(struct seq_file *m,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ seq_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ seq_printf(m, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ seq_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ seq_printf(m, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
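seq_print_param() leans on the compiler: with string-literal operands, __builtin_strcmp() folds to a constant, so exactly one branch survives per parameter and the BUILD_BUG() in the others is compiled away; it only fires if a parameter's type string matches no branch. A user-space analogue of the trick (GCC/Clang; the macro and names are hypothetical):

#include <stdio.h>

/* With literal operands, __builtin_strcmp() is evaluated at compile
 * time, so the dead branches below vanish entirely -- the same
 * property that lets the kernel version put BUILD_BUG() in the
 * fallback branch. */
#define PRINT_TYPED(T, x) \
	do { \
		if (!__builtin_strcmp(#T, "int")) \
			printf("%s=%d\n", #x, *(const int *)&(x)); \
		else if (!__builtin_strcmp(#T, "unsigned int")) \
			printf("%s=%u\n", #x, *(const unsigned int *)&(x)); \
	} while (0)

int main(void)
{
	int a = -3;
	unsigned int b = 7;

	PRINT_TYPED(int, a);		/* prints a=-3 */
	PRINT_TYPED(unsigned int, b);	/* prints b=7 */
	return 0;
}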
static int i915_capabilities(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -43,10 +60,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv));
seq_printf(m, "platform: %s\n", intel_platform_name(info->platform));
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv));
+
#define PRINT_FLAG(x) seq_printf(m, #x ": %s\n", yesno(info->x))
DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG);
#undef PRINT_FLAG
+ kernel_param_lock(THIS_MODULE);
+#define PRINT_PARAM(T, x) seq_print_param(m, #x, #T, &i915.x);
+ I915_PARAMS_FOR_EACH(PRINT_PARAM);
+#undef PRINT_PARAM
+ kernel_param_unlock(THIS_MODULE);
+
return 0;
}
@@ -180,13 +204,12 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
}
-static int obj_rank_by_stolen(void *priv,
- struct list_head *A, struct list_head *B)
+static int obj_rank_by_stolen(const void *A, const void *B)
{
- struct drm_i915_gem_object *a =
- container_of(A, struct drm_i915_gem_object, obj_exec_link);
- struct drm_i915_gem_object *b =
- container_of(B, struct drm_i915_gem_object, obj_exec_link);
+ const struct drm_i915_gem_object *a =
+ *(const struct drm_i915_gem_object **)A;
+ const struct drm_i915_gem_object *b =
+ *(const struct drm_i915_gem_object **)B;
if (a->stolen->start < b->stolen->start)
return -1;
@@ -199,49 +222,60 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
+ struct drm_i915_gem_object **objects;
struct drm_i915_gem_object *obj;
u64 total_obj_size, total_gtt_size;
- LIST_HEAD(stolen);
- int count, ret;
+ unsigned long total, count, n;
+ int ret;
+
+ total = READ_ONCE(dev_priv->mm.object_count);
+ objects = drm_malloc_ab(total, sizeof(*objects));
+ if (!objects)
+ return -ENOMEM;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
total_obj_size = total_gtt_size = count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_link) {
+ if (count == total)
+ break;
+
if (obj->stolen == NULL)
continue;
- list_add(&obj->obj_exec_link, &stolen);
-
+ objects[count++] = obj;
total_obj_size += obj->base.size;
total_gtt_size += i915_gem_obj_total_ggtt_size(obj);
- count++;
+
}
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_link) {
+ if (count == total)
+ break;
+
if (obj->stolen == NULL)
continue;
- list_add(&obj->obj_exec_link, &stolen);
-
+ objects[count++] = obj;
total_obj_size += obj->base.size;
- count++;
}
- list_sort(NULL, &stolen, obj_rank_by_stolen);
+
+ sort(objects, count, sizeof(*objects), obj_rank_by_stolen, NULL);
+
seq_puts(m, "Stolen:\n");
- while (!list_empty(&stolen)) {
- obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+ for (n = 0; n < count; n++) {
seq_puts(m, " ");
- describe_obj(m, obj);
+ describe_obj(m, objects[n]);
seq_putc(m, '\n');
- list_del_init(&obj->obj_exec_link);
}
- mutex_unlock(&dev->struct_mutex);
-
- seq_printf(m, "Total %d objects, %llu bytes, %llu GTT size\n",
+ seq_printf(m, "Total %lu objects, %llu bytes, %llu GTT size\n",
count, total_obj_size, total_gtt_size);
- return 0;
+
+ mutex_unlock(&dev->struct_mutex);
+out:
+ drm_free_large(objects);
+ return ret;
}
struct file_stats {
@@ -428,7 +462,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size);
seq_printf(m, "%llu [%llu] gtt total\n",
- ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
+ ggtt->base.total, ggtt->mappable_end);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
@@ -456,7 +490,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
mutex_lock(&dev->struct_mutex);
request = list_first_entry_or_null(&file_priv->mm.request_list,
struct drm_i915_gem_request,
- client_list);
+ client_link);
rcu_read_lock();
task = pid_task(request && request->ctx->pid ?
request->ctx->pid : file->pid,
@@ -676,14 +710,14 @@ static void i915_ring_seqno_info(struct seq_file *m,
seq_printf(m, "Current sequence (%s): %x\n",
engine->name, intel_engine_get_seqno(engine));
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
@@ -827,10 +861,22 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
I915_READ(VLV_IIR_RW));
seq_printf(m, "Display IMR:\t%08x\n",
I915_READ(VLV_IMR));
- for_each_pipe(dev_priv, pipe)
+ for_each_pipe(dev_priv, pipe) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
+ seq_printf(m, "Pipe %c power disabled\n",
+ pipe_name(pipe));
+ continue;
+ }
+
seq_printf(m, "Pipe %c stat:\t%08x\n",
pipe_name(pipe),
I915_READ(PIPESTAT(pipe)));
+ intel_display_power_put(dev_priv, power_domain);
+ }
seq_printf(m, "Master IER:\t%08x\n",
I915_READ(VLV_MASTER_IER));
@@ -928,101 +974,96 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
}
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-static ssize_t
-i915_error_state_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
+static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *pos)
{
- struct i915_error_state_file_priv *error_priv = filp->private_data;
-
- DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(error_priv->i915);
+ struct i915_gpu_state *error = file->private_data;
+ struct drm_i915_error_state_buf str;
+ ssize_t ret;
+ loff_t tmp;
- return cnt;
-}
-
-static int i915_error_state_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
- struct i915_error_state_file_priv *error_priv;
+ if (!error)
+ return 0;
- error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
- if (!error_priv)
- return -ENOMEM;
+ ret = i915_error_state_buf_init(&str, error->i915, count, *pos);
+ if (ret)
+ return ret;
- error_priv->i915 = dev_priv;
+ ret = i915_error_state_to_str(&str, error);
+ if (ret)
+ goto out;
- i915_error_state_get(&dev_priv->drm, error_priv);
+ tmp = 0;
+ ret = simple_read_from_buffer(ubuf, count, &tmp, str.buf, str.bytes);
+ if (ret < 0)
+ goto out;
- file->private_data = error_priv;
+ *pos = str.start + ret;
+out:
+ i915_error_state_buf_release(&str);
+ return ret;
+}
+static int gpu_state_release(struct inode *inode, struct file *file)
+{
+ i915_gpu_state_put(file->private_data);
return 0;
}
-static int i915_error_state_release(struct inode *inode, struct file *file)
+static int i915_gpu_info_open(struct inode *inode, struct file *file)
{
- struct i915_error_state_file_priv *error_priv = file->private_data;
+ struct i915_gpu_state *gpu;
- i915_error_state_put(error_priv);
- kfree(error_priv);
+ gpu = i915_capture_gpu_state(inode->i_private);
+ if (!gpu)
+ return -ENOMEM;
+ file->private_data = gpu;
return 0;
}
-static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *pos)
+static const struct file_operations i915_gpu_info_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_gpu_info_open,
+ .read = gpu_state_read,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
{
- struct i915_error_state_file_priv *error_priv = file->private_data;
- struct drm_i915_error_state_buf error_str;
- loff_t tmp_pos = 0;
- ssize_t ret_count = 0;
- int ret;
+ struct i915_gpu_state *error = filp->private_data;
- ret = i915_error_state_buf_init(&error_str, error_priv->i915,
- count, *pos);
- if (ret)
- return ret;
+ if (!error)
+ return 0;
- ret = i915_error_state_to_str(&error_str, error_priv);
- if (ret)
- goto out;
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+ i915_reset_error_state(error->i915);
- ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
- error_str.buf,
- error_str.bytes);
+ return cnt;
+}
- if (ret_count < 0)
- ret = ret_count;
- else
- *pos = error_str.start + ret_count;
-out:
- i915_error_state_buf_release(&error_str);
- return ret ?: ret_count;
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ file->private_data = i915_first_error_state(inode->i_private);
+ return 0;
}
static const struct file_operations i915_error_state_fops = {
.owner = THIS_MODULE,
.open = i915_error_state_open,
- .read = i915_error_state_read,
+ .read = gpu_state_read,
.write = i915_error_state_write,
.llseek = default_llseek,
- .release = i915_error_state_release,
+ .release = gpu_state_release,
};
-
#endif
static int
-i915_next_seqno_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- *val = 1 + atomic_read(&dev_priv->gt.global_timeline.seqno);
- return 0;
-}
-
-static int
i915_next_seqno_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
@@ -1040,13 +1081,12 @@ i915_next_seqno_set(void *data, u64 val)
}
DEFINE_SIMPLE_ATTRIBUTE(i915_next_seqno_fops,
- i915_next_seqno_get, i915_next_seqno_set,
+ NULL, i915_next_seqno_set,
"0x%llx\n");
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
int ret = 0;
intel_runtime_pm_get(dev_priv);
@@ -1109,10 +1149,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
/* RPSTAT1 is in the GT power well */
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- goto out;
-
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
reqf = I915_READ(GEN6_RPNSWREQ);
@@ -1147,7 +1183,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
cagf = intel_gpu_freq(dev_priv, cagf);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- mutex_unlock(&dev->struct_mutex);
if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
pm_ier = I915_READ(GEN6_PMIER);
@@ -1164,7 +1199,8 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
}
seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
- seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
+ seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ dev_priv->rps.pm_intrmsk_mbz);
seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
seq_printf(m, "Render p-state ratio: %d\n",
(gt_perf_status & (IS_GEN9(dev_priv) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1198,21 +1234,18 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
- GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
- GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
- GEN9_FREQ_SCALER : 1);
+ max_freq *= (IS_GEN9_BC(dev_priv) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
seq_printf(m, "Max overclocked frequency: %dMHz\n",
@@ -1236,11 +1269,10 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_puts(m, "no P-state info available\n");
}
- seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq);
+ seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
-out:
intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -1283,16 +1315,18 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
enum intel_engine_id id;
if (test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
- seq_printf(m, "Wedged\n");
- if (test_bit(I915_RESET_IN_PROGRESS, &dev_priv->gpu_error.flags))
- seq_printf(m, "Reset in progress\n");
+ seq_puts(m, "Wedged\n");
+ if (test_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags))
+ seq_puts(m, "Reset in progress: struct_mutex backoff\n");
+ if (test_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags))
+ seq_puts(m, "Reset in progress: reset handoff to waiter\n");
if (waitqueue_active(&dev_priv->gpu_error.wait_queue))
- seq_printf(m, "Waiter holding struct mutex\n");
+ seq_puts(m, "Waiter holding struct mutex\n");
if (waitqueue_active(&dev_priv->gpu_error.reset_queue))
- seq_printf(m, "struct_mutex blocked for reset\n");
+ seq_puts(m, "struct_mutex blocked for reset\n");
if (!i915.enable_hangcheck) {
- seq_printf(m, "Hangcheck disabled\n");
+ seq_puts(m, "Hangcheck disabled\n");
return 0;
}
@@ -1307,35 +1341,40 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
intel_runtime_pm_put(dev_priv);
- if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work)) {
- seq_printf(m, "Hangcheck active, fires in %dms\n",
+ if (timer_pending(&dev_priv->gpu_error.hangcheck_work.timer))
+ seq_printf(m, "Hangcheck active, timer fires in %dms\n",
jiffies_to_msecs(dev_priv->gpu_error.hangcheck_work.timer.expires -
jiffies));
- } else
- seq_printf(m, "Hangcheck inactive\n");
+ else if (delayed_work_pending(&dev_priv->gpu_error.hangcheck_work))
+ seq_puts(m, "Hangcheck active, work pending\n");
+ else
+ seq_puts(m, "Hangcheck inactive\n");
+
+ seq_printf(m, "GT active? %s\n", yesno(dev_priv->gt.awake));
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *rb;
seq_printf(m, "%s:\n", engine->name);
- seq_printf(m, "\tseqno = %x [current %x, last %x]\n",
+ seq_printf(m, "\tseqno = %x [current %x, last %x], inflight %d\n",
engine->hangcheck.seqno, seqno[id],
- intel_engine_last_submit(engine));
+ intel_engine_last_submit(engine),
+ engine->timeline->inflight_seqnos);
seq_printf(m, "\twaiters? %s, fake irq active? %s, stalled? %s\n",
yesno(intel_engine_has_waiter(engine)),
yesno(test_bit(engine->id,
&dev_priv->gpu_error.missed_irq_rings)),
yesno(engine->hangcheck.stalled));
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
(long long)engine->hangcheck.acthd,
@@ -1367,14 +1406,10 @@ static int ironlake_drpc_info(struct seq_file *m)
u32 rgvmodectl, rstdbyctl;
u16 crstandvid;
- intel_runtime_pm_get(dev_priv);
-
rgvmodectl = I915_READ(MEMMODECTL);
rstdbyctl = I915_READ(RSTDBYCTL);
crstandvid = I915_READ16(CRSTANDVID);
- intel_runtime_pm_put(dev_priv);
-
seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
seq_printf(m, "Boost freq: %d\n",
(rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
@@ -1438,19 +1473,26 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
return 0;
}
+static void print_rc6_res(struct seq_file *m,
+ const char *title,
+ const i915_reg_t reg)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "%s %u (%llu us)\n",
+ title, I915_READ(reg),
+ intel_rc6_residency_us(dev_priv, reg));
+}
+
static int vlv_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
u32 rpmodectl1, rcctl1, pw_status;
- intel_runtime_pm_get(dev_priv);
-
pw_status = I915_READ(VLV_GTLC_PW_STATUS);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
- intel_runtime_pm_put(dev_priv);
-
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "Turbo enabled: %s\n",
@@ -1468,10 +1510,8 @@ static int vlv_drpc_info(struct seq_file *m)
seq_printf(m, "Media Power Well: %s\n",
(pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Render RC6 residency since boot: %u\n",
- I915_READ(VLV_GT_RENDER_RC6));
- seq_printf(m, "Media RC6 residency since boot: %u\n",
- I915_READ(VLV_GT_MEDIA_RC6));
+ print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
+ print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
return i915_forcewake_domains(m, NULL);
}
@@ -1479,21 +1519,12 @@ static int vlv_drpc_info(struct seq_file *m)
static int gen6_drpc_info(struct seq_file *m)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
unsigned forcewake_count;
- int count = 0, ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- intel_runtime_pm_get(dev_priv);
-
- spin_lock_irq(&dev_priv->uncore.lock);
- forcewake_count = dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count;
- spin_unlock_irq(&dev_priv->uncore.lock);
+ int count = 0;
+ forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count);
if (forcewake_count) {
seq_puts(m, "RC information inaccurate because somebody "
"holds a forcewake reference \n");
@@ -1513,13 +1544,11 @@ static int gen6_drpc_info(struct seq_file *m)
gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
}
- mutex_unlock(&dev->struct_mutex);
+
mutex_lock(&dev_priv->rps.hw_lock);
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
mutex_unlock(&dev_priv->rps.hw_lock);
- intel_runtime_pm_put(dev_priv);
-
seq_printf(m, "Video Turbo Mode: %s\n",
yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
seq_printf(m, "HW control enabled: %s\n",
@@ -1575,14 +1604,11 @@ static int gen6_drpc_info(struct seq_file *m)
}
/* Not exactly sure what this is */
- seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
- I915_READ(GEN6_GT_GFX_RC6_LOCKED));
- seq_printf(m, "RC6 residency since boot: %u\n",
- I915_READ(GEN6_GT_GFX_RC6));
- seq_printf(m, "RC6+ residency since boot: %u\n",
- I915_READ(GEN6_GT_GFX_RC6p));
- seq_printf(m, "RC6++ residency since boot: %u\n",
- I915_READ(GEN6_GT_GFX_RC6pp));
+ print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
+ GEN6_GT_GFX_RC6_LOCKED);
+ print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
+ print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
+ print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
seq_printf(m, "RC6 voltage: %dmV\n",
GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
@@ -1596,13 +1622,20 @@ static int gen6_drpc_info(struct seq_file *m)
static int i915_drpc_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ int err;
+
+ intel_runtime_pm_get(dev_priv);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_drpc_info(m);
+ err = vlv_drpc_info(m);
else if (INTEL_GEN(dev_priv) >= 6)
- return gen6_drpc_info(m);
+ err = gen6_drpc_info(m);
else
- return ironlake_drpc_info(m);
+ err = ironlake_drpc_info(m);
+
+ intel_runtime_pm_put(dev_priv);
+
+ return err;
}
static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
@@ -1723,7 +1756,9 @@ static int i915_sr_status(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
- if (HAS_PCH_SPLIT(dev_priv))
+ if (INTEL_GEN(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
IS_I945G(dev_priv) || IS_I945GM(dev_priv))
@@ -1788,7 +1823,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
goto out;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq =
dev_priv->rps.min_freq_softlimit / GEN9_FREQ_SCALER;
@@ -1808,8 +1843,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
&ia_freq);
seq_printf(m, "%d\t\t%d\t\t\t\t%d\n",
intel_gpu_freq(dev_priv, (gpu_freq *
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ?
- GEN9_FREQ_SCALER : 1))),
+ (IS_GEN9_BC(dev_priv) ?
+ GEN9_FREQ_SCALER : 1))),
((ia_freq >> 0) & 0xff) * 100,
((ia_freq >> 8) & 0xff) * 100);
}
@@ -2302,10 +2337,10 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n",
rps_power_to_str(dev_priv->rps.power));
seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n",
- 100 * rpup / rpupei,
+ rpup && rpupei ? 100 * rpup / rpupei : 0,
dev_priv->rps.up_threshold);
seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n",
- 100 * rpdown / rpdownei,
+ rpdown && rpdownei ? 100 * rpdown / rpdownei : 0,
dev_priv->rps.down_threshold);
} else {
seq_puts(m, "\nRPS Autotuning inactive\n");
@@ -2351,7 +2386,9 @@ static int i915_huc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
huc_fw->rsa_offset, huc_fw->rsa_size);
+ intel_runtime_pm_get(dev_priv);
seq_printf(m, "\nHuC status 0x%08x:\n", I915_READ(HUC_STATUS2));
+ intel_runtime_pm_put(dev_priv);
return 0;
}
@@ -2383,6 +2420,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
seq_printf(m, "\tRSA: offset is %d; size = %d\n",
guc_fw->rsa_offset, guc_fw->rsa_size);
+ intel_runtime_pm_get(dev_priv);
+
tmp = I915_READ(GUC_STATUS);
seq_printf(m, "\nGuC status 0x%08x:\n", tmp);
@@ -2396,6 +2435,8 @@ static int i915_guc_load_status_info(struct seq_file *m, void *data)
for (i = 0; i < 16; i++)
seq_printf(m, "\t%2d: \t0x%x\n", i, I915_READ(SOFT_SCRATCH(i)));
+ intel_runtime_pm_put(dev_priv);
+
return 0;
}
@@ -2677,12 +2718,14 @@ static int i915_sink_crc(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = node_to_i915(m->private);
struct drm_device *dev = &dev_priv->drm;
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp = NULL;
int ret;
u8 crc[6];
drm_modeset_lock_all(dev);
- for_each_intel_connector(dev, connector) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
struct drm_crtc *crtc;
if (!connector->base.state->best_encoder)
@@ -2708,6 +2751,7 @@ static int i915_sink_crc(struct seq_file *m, void *data)
}
ret = -ENODEV;
out:
+ drm_connector_list_iter_end(&conn_iter);
drm_modeset_unlock_all(dev);
return ret;
}
@@ -2777,15 +2821,10 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
seq_printf(m, "%-25s %d\n", power_well->name,
power_well->count);
- for (power_domain = 0; power_domain < POWER_DOMAIN_NUM;
- power_domain++) {
- if (!(BIT(power_domain) & power_well->domains))
- continue;
-
+ for_each_power_domain(power_domain, power_well->domains)
seq_printf(m, " %-23s %d\n",
intel_display_power_domain_str(power_domain),
power_domains->domain_use_count[power_domain]);
- }
}
mutex_unlock(&power_domains->lock);
@@ -3149,9 +3188,9 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct drm_device *dev = &dev_priv->drm;
struct intel_crtc *crtc;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
intel_runtime_pm_get(dev_priv);
- drm_modeset_lock_all(dev);
seq_printf(m, "CRTC info\n");
seq_printf(m, "---------\n");
for_each_intel_crtc(dev, crtc) {
@@ -3159,6 +3198,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
struct intel_crtc_state *pipe_config;
int x, y;
+ drm_modeset_lock(&crtc->base.mutex, NULL);
pipe_config = to_intel_crtc_state(crtc->base.state);
seq_printf(m, "CRTC %d: pipe: %c, active=%s, (size=%dx%d), dither=%s, bpp=%d\n",
@@ -3183,15 +3223,19 @@ static int i915_display_info(struct seq_file *m, void *unused)
seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s \n",
yesno(!crtc->cpu_fifo_underrun_disabled),
yesno(!crtc->pch_fifo_underrun_disabled));
+ drm_modeset_unlock(&crtc->base.mutex);
}
seq_printf(m, "\n");
seq_printf(m, "Connector info\n");
seq_printf(m, "--------------\n");
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ mutex_lock(&dev->mode_config.mutex);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
intel_connector_info(m, connector);
- }
- drm_modeset_unlock_all(dev);
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
+
intel_runtime_pm_put(dev_priv);
return 0;
@@ -3205,6 +3249,11 @@ static int i915_engine_info(struct seq_file *m, void *unused)
intel_runtime_pm_get(dev_priv);
+ seq_printf(m, "GT awake? %s\n",
+ yesno(dev_priv->gt.awake));
+ seq_printf(m, "Global active requests: %d\n",
+ dev_priv->gt.active_requests);
+
for_each_engine(engine, dev_priv, id) {
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct drm_i915_gem_request *rq;
@@ -3212,11 +3261,12 @@ static int i915_engine_info(struct seq_file *m, void *unused)
u64 addr;
seq_printf(m, "%s\n", engine->name);
- seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms]\n",
+ seq_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n",
intel_engine_get_seqno(engine),
intel_engine_last_submit(engine),
engine->hangcheck.seqno,
- jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+ jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp),
+ engine->timeline->inflight_seqnos);
rcu_read_lock();
@@ -3294,15 +3344,21 @@ static int i915_engine_info(struct seq_file *m, void *unused)
rcu_read_lock();
rq = READ_ONCE(engine->execlist_port[0].request);
- if (rq)
- print_request(m, rq, "\t\tELSP[0] ");
- else
+ if (rq) {
+ seq_printf(m, "\t\tELSP[0] count=%d, ",
+ engine->execlist_port[0].count);
+ print_request(m, rq, "rq: ");
+ } else {
seq_printf(m, "\t\tELSP[0] idle\n");
+ }
rq = READ_ONCE(engine->execlist_port[1].request);
- if (rq)
- print_request(m, rq, "\t\tELSP[1] ");
- else
+ if (rq) {
+ seq_printf(m, "\t\tELSP[1] count=%d, ",
+ engine->execlist_port[1].count);
+ print_request(m, rq, "rq: ");
+ } else {
seq_printf(m, "\t\tELSP[1] idle\n");
+ }
rcu_read_unlock();
spin_lock_irq(&engine->timeline->lock);
@@ -3320,14 +3376,14 @@ static int i915_engine_info(struct seq_file *m, void *unused)
I915_READ(RING_PP_DIR_DCLV(engine)));
}
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
struct intel_wait *w = rb_entry(rb, typeof(*w), node);
seq_printf(m, "\t%s [%d] waiting for %x\n",
w->tsk->comm, w->tsk->pid, w->seqno);
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
seq_puts(m, "\n");
}
@@ -3512,13 +3568,16 @@ static void drrs_status_per_crtc(struct seq_file *m,
struct i915_drrs *drrs = &dev_priv->drrs;
int vrefresh = 0;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->state->crtc != &intel_crtc->base)
continue;
seq_printf(m, "%s:\n", connector->name);
}
+ drm_connector_list_iter_end(&conn_iter);
if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
seq_puts(m, "\tVBT: DRRS_type: Static");
@@ -3604,9 +3663,10 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
struct intel_encoder *intel_encoder;
struct intel_digital_port *intel_dig_port;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- drm_modeset_lock_all(dev);
- drm_for_each_connector(connector, dev) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -3622,7 +3682,8 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
port_name(intel_dig_port->port));
drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
}
- drm_modeset_unlock_all(dev);
+ drm_connector_list_iter_end(&conn_iter);
+
return 0;
}
@@ -3634,14 +3695,12 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
int status = 0;
struct drm_device *dev;
struct drm_connector *connector;
- struct list_head *connector_list;
+ struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
int val = 0;
dev = ((struct seq_file *)file->private_data)->private;
- connector_list = &dev->mode_config.connector_list;
-
if (len == 0)
return 0;
@@ -3657,7 +3716,8 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
input_buffer[len] = '\0';
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
- list_for_each_entry(connector, connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -3667,7 +3727,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
intel_dp = enc_to_intel_dp(connector->encoder);
status = kstrtoint(input_buffer, 10, &val);
if (status < 0)
- goto out;
+ break;
DRM_DEBUG_DRIVER("Got %d for test active\n", val);
/* To prevent erroneous activation of the compliance
* testing code, only accept an actual value of 1 here
@@ -3678,6 +3738,7 @@ static ssize_t i915_displayport_test_active_write(struct file *file,
intel_dp->compliance.test_active = 0;
}
}
+ drm_connector_list_iter_end(&conn_iter);
out:
kfree(input_buffer);
if (status < 0)
@@ -3691,10 +3752,11 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
- struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- list_for_each_entry(connector, connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -3709,6 +3771,7 @@ static int i915_displayport_test_active_show(struct seq_file *m, void *data)
} else
seq_puts(m, "0");
}
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -3735,10 +3798,11 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
- struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- list_for_each_entry(connector, connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -3746,10 +3810,23 @@ static int i915_displayport_test_data_show(struct seq_file *m, void *data)
if (connector->status == connector_status_connected &&
connector->encoder != NULL) {
intel_dp = enc_to_intel_dp(connector->encoder);
- seq_printf(m, "%lx", intel_dp->compliance.test_data.edid);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ }
} else
seq_puts(m, "0");
}
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -3774,10 +3851,11 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
{
struct drm_device *dev = m->private;
struct drm_connector *connector;
- struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector_list_iter conn_iter;
struct intel_dp *intel_dp;
- list_for_each_entry(connector, connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type !=
DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -3789,6 +3867,7 @@ static int i915_displayport_test_type_show(struct seq_file *m, void *data)
} else
seq_puts(m, "0");
}
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -4060,12 +4139,16 @@ i915_wedged_set(void *data, u64 val)
* while it is writing to 'i915_wedged'
*/
- if (i915_reset_in_progress(&dev_priv->gpu_error))
+ if (i915_reset_backoff(&dev_priv->gpu_error))
return -EAGAIN;
i915_handle_error(dev_priv, val,
"Manually setting wedged to %llu", val);
+ wait_on_bit(&dev_priv->gpu_error.flags,
+ I915_RESET_HANDOFF,
+ TASK_UNINTERRUPTIBLE);
+
return 0;
}
@@ -4074,6 +4157,41 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
"%llu\n");
static int
+fault_irq_set(struct drm_i915_private *i915,
+ unsigned long *irq,
+ unsigned long val)
+{
+ int err;
+
+ err = mutex_lock_interruptible(&i915->drm.struct_mutex);
+ if (err)
+ return err;
+
+ err = i915_gem_wait_for_idle(i915,
+ I915_WAIT_LOCKED |
+ I915_WAIT_INTERRUPTIBLE);
+ if (err)
+ goto err_unlock;
+
+ /* Retire to kick idle work */
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests);
+
+ *irq = val;
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ /* Flush idle worker to disarm irq */
+ while (flush_delayed_work(&i915->gt.idle_work))
+ ;
+
+ return 0;
+
+err_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int
i915_ring_missed_irq_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
@@ -4085,18 +4203,9 @@ i915_ring_missed_irq_get(void *data, u64 *val)
static int
i915_ring_missed_irq_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
-
- /* Lock against concurrent debugfs callers */
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
- dev_priv->gpu_error.missed_irq_rings = val;
- mutex_unlock(&dev->struct_mutex);
+ struct drm_i915_private *i915 = data;
- return 0;
+ return fault_irq_set(i915, &i915->gpu_error.missed_irq_rings, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_missed_irq_fops,
@@ -4116,13 +4225,12 @@ i915_ring_test_irq_get(void *data, u64 *val)
static int
i915_ring_test_irq_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct drm_i915_private *i915 = data;
- val &= INTEL_INFO(dev_priv)->ring_mask;
+ val &= INTEL_INFO(i915)->ring_mask;
DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
- dev_priv->gpu_error.test_irq_rings = val;
- return 0;
+ return fault_irq_set(i915, &i915->gpu_error.test_irq_rings, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
@@ -4134,11 +4242,13 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_ring_test_irq_fops,
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_FREED 0x10
+#define DROP_SHRINK_ALL 0x20
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE | \
- DROP_FREED)
+ DROP_FREED | \
+ DROP_SHRINK_ALL)
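Assuming DROP_UNBOUND and DROP_BOUND keep their pre-existing values of 0x1 and 0x2 (they are defined just above this hunk), DROP_ALL now evaluates to 0x3f. A hypothetical compile-time check, which would have to sit in function scope since BUILD_BUG_ON expands to code:

	BUILD_BUG_ON(DROP_ALL != 0x3f); /* 0x1|0x2|0x4|0x8|0x10|0x20 */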
static int
i915_drop_caches_get(void *data, u64 *val)
{
@@ -4173,12 +4283,17 @@ i915_drop_caches_set(void *data, u64 val)
if (val & (DROP_RETIRE | DROP_ACTIVE))
i915_gem_retire_requests(dev_priv);
+ lockdep_set_current_reclaim_state(GFP_KERNEL);
if (val & DROP_BOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
if (val & DROP_UNBOUND)
i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_UNBOUND);
+ if (val & DROP_SHRINK_ALL)
+ i915_gem_shrink_all(dev_priv);
+ lockdep_clear_current_reclaim_state();
+
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -4237,7 +4352,8 @@ i915_max_freq_set(void *data, u64 val)
dev_priv->rps.max_freq_softlimit = val;
- intel_set_rps(dev_priv, val);
+ if (intel_set_rps(dev_priv, val))
+ DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4292,7 +4408,8 @@ i915_min_freq_set(void *data, u64 val)
dev_priv->rps.min_freq_softlimit = val;
- intel_set_rps(dev_priv, val);
+ if (intel_set_rps(dev_priv, val))
+ DRM_DEBUG_DRIVER("failed to update RPS to new softlimit\n");
mutex_unlock(&dev_priv->rps.hw_lock);
@@ -4418,7 +4535,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
sseu->slice_mask |= BIT(s);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
sseu->subslice_mask =
INTEL_INFO(dev_priv)->sseu.subslice_mask;
@@ -4567,6 +4684,81 @@ static const struct file_operations i915_forcewake_fops = {
.release = i915_forcewake_release,
};
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ yesno(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ DRM_DEBUG_KMS("Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_storm_ctl_write
+};
+
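A hypothetical userspace exercise of the new control; the debugfs path and card index are assumptions, and per the parser above a bare decimal sets the threshold, 0 disables detection, and the literal string "reset" restores HPD_STORM_DEFAULT_THRESHOLD:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_hpd_storm_ctl", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "8\n", 2) < 0)      /* raise the storm threshold to 8 */
		perror("write threshold");
	if (write(fd, "reset\n", 6) < 0)  /* back to the default of 5 */
		perror("write reset");
	close(fd);
	return 0;
}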
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4633,6 +4825,7 @@ static const struct i915_debugfs_files {
{"i915_gem_drop_caches", &i915_drop_caches_fops},
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
{"i915_error_state", &i915_error_state_fops},
+ {"i915_gpu_info", &i915_gpu_info_fops},
#endif
{"i915_next_seqno", &i915_next_seqno_fops},
{"i915_display_crc_ctl", &i915_display_crc_ctl_fops},
@@ -4643,7 +4836,8 @@ static const struct i915_debugfs_files {
{"i915_dp_test_data", &i915_displayport_test_data_fops},
{"i915_dp_test_type", &i915_displayport_test_type_fops},
{"i915_dp_test_active", &i915_displayport_test_active_fops},
- {"i915_guc_log_control", &i915_guc_log_control_fops}
+ {"i915_guc_log_control", &i915_guc_log_control_fops},
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 655d146e1126..03d9e45694c9 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -43,6 +43,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
@@ -248,6 +249,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_IRQ_ACTIVE:
case I915_PARAM_ALLOW_BATCHBUFFER:
case I915_PARAM_LAST_DISPATCH:
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
/* Reject all old ums/dri params. */
return -ENODEV;
case I915_PARAM_CHIPSET_ID:
@@ -274,9 +276,6 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BSD2:
value = !!dev_priv->engine[VCS2];
break;
- case I915_PARAM_HAS_EXEC_CONSTANTS:
- value = INTEL_GEN(dev_priv) >= 4;
- break;
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev_priv);
break;
@@ -318,10 +317,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
value = INTEL_INFO(dev_priv)->sseu.min_eu_in_pool;
break;
case I915_PARAM_HUC_STATUS:
- /* The register is already force-woken. We dont need
- * any rpm here
- */
+ intel_runtime_pm_get(dev_priv);
value = I915_READ(HUC_STATUS2) & HUC_FW_VERIFIED;
+ intel_runtime_pm_put(dev_priv);
break;
case I915_PARAM_MMAP_GTT_VERSION:
/* Though we've started our numbering from 1, and so class all
@@ -350,6 +348,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
case I915_PARAM_HAS_COHERENT_PHYS_GTT:
case I915_PARAM_HAS_EXEC_SOFTPIN:
+ case I915_PARAM_HAS_EXEC_ASYNC:
+ case I915_PARAM_HAS_EXEC_FENCE:
/* For the time being all of these are always true;
* if some supported hardware does not have one of these
* features this value needs to be provided from
@@ -567,9 +567,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (i915_inject_load_failure())
return -ENODEV;
- ret = intel_bios_init(dev_priv);
- if (ret)
- DRM_INFO("failed to find VBIOS tables\n");
+ intel_bios_init(dev_priv);
/* If we have > 1 VGA cards, then we need to arbitrate access
* to the common VGA resources.
@@ -607,8 +605,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
if (ret)
goto cleanup_irq;
- intel_huc_init(dev_priv);
- intel_guc_init(dev_priv);
+ intel_uc_init_fw(dev_priv);
ret = i915_gem_init(dev_priv);
if (ret)
@@ -756,6 +753,15 @@ out_err:
return -ENOMEM;
}
+static void i915_engines_cleanup(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ kfree(engine);
+}
+
static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
{
destroy_workqueue(dev_priv->hotplug.dp_wq);
@@ -769,10 +775,17 @@ static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
*/
static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
{
- if (IS_HSW_EARLY_SDV(dev_priv) ||
- IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
+ bool pre = false;
+
+ pre |= IS_HSW_EARLY_SDV(dev_priv);
+ pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
+ pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
+
+ if (pre) {
DRM_ERROR("This is a pre-production stepping. "
"It may not be fully functional.\n");
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
+ }
}
/**
@@ -808,9 +821,9 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
spin_lock_init(&dev_priv->gpu_error.lock);
mutex_init(&dev_priv->backlight_lock);
spin_lock_init(&dev_priv->uncore.lock);
+
spin_lock_init(&dev_priv->mm.object_stat_lock);
spin_lock_init(&dev_priv->mmio_flip_lock);
- spin_lock_init(&dev_priv->wm.dsparb_lock);
mutex_init(&dev_priv->sb_lock);
mutex_init(&dev_priv->modeset_restore_lock);
mutex_init(&dev_priv->av_mutex);
@@ -818,12 +831,15 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
mutex_init(&dev_priv->pps_mutex);
intel_uc_init_early(dev_priv);
-
i915_memcpy_init_early(dev_priv);
+ ret = intel_engines_init_early(dev_priv);
+ if (ret)
+ return ret;
+
ret = i915_workqueues_init(dev_priv);
if (ret < 0)
- return ret;
+ goto err_engines;
/* This must be called before any calls to HAS_PCH_* */
intel_detect_pch(dev_priv);
@@ -852,6 +868,8 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
err_workqueues:
i915_workqueues_cleanup(dev_priv);
+err_engines:
+ i915_engines_cleanup(dev_priv);
return ret;
}
@@ -864,6 +882,7 @@ static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
i915_perf_fini(dev_priv);
i915_gem_load_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
+ i915_engines_cleanup(dev_priv);
}
static int i915_mmio_setup(struct drm_i915_private *dev_priv)
@@ -930,6 +949,7 @@ static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
goto put_bridge;
intel_uncore_init(dev_priv);
+ i915_gem_init_mmio(dev_priv);
return 0;
@@ -967,7 +987,9 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
i915.semaphores = intel_sanitize_semaphores(dev_priv, i915.semaphores);
- DRM_DEBUG_DRIVER("use GPU sempahores? %s\n", yesno(i915.semaphores));
+ DRM_DEBUG_DRIVER("use GPU semaphores? %s\n", yesno(i915.semaphores));
+
+ intel_uc_sanitize_options(dev_priv);
}
/**
@@ -1185,11 +1207,15 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv)
*/
int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ const struct intel_device_info *match_info =
+ (struct intel_device_info *)ent->driver_data;
struct drm_i915_private *dev_priv;
int ret;
- if (i915.nuclear_pageflip)
- driver.driver_features |= DRIVER_ATOMIC;
+ /* Enable nuclear pageflip on ILK+, except vlv/chv */
+ if (!i915.nuclear_pageflip &&
+ (match_info->gen < 5 || match_info->has_gmch_display))
+ driver.driver_features &= ~DRIVER_ATOMIC;
ret = -ENOMEM;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
@@ -1197,8 +1223,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
if (ret) {
DRM_DEV_ERROR(&pdev->dev, "allocation failed\n");
- kfree(dev_priv);
- return ret;
+ goto out_free;
}
dev_priv->drm.pdev = pdev;
@@ -1206,7 +1231,7 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = pci_enable_device(pdev);
if (ret)
- goto out_free_priv;
+ goto out_fini;
pci_set_drvdata(pdev, &dev_priv->drm);
@@ -1270,9 +1295,11 @@ out_runtime_pm_put:
i915_driver_cleanup_early(dev_priv);
out_pci_disable:
pci_disable_device(pdev);
-out_free_priv:
+out_fini:
i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
- drm_dev_unref(&dev_priv->drm);
+ drm_dev_fini(&dev_priv->drm);
+out_free:
+ kfree(dev_priv);
return ret;
}
@@ -1280,6 +1307,8 @@ void i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_modeset_acquire_ctx ctx;
+ int ret;
intel_fbdev_fini(dev);
@@ -1288,6 +1317,24 @@ void i915_driver_unload(struct drm_device *dev)
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+ drm_modeset_acquire_init(&ctx, 0);
+ while (1) {
+ ret = drm_modeset_lock_all_ctx(dev, &ctx);
+ if (!ret)
+ ret = drm_atomic_helper_disable_all(dev, &ctx);
+
+ if (ret != -EDEADLK)
+ break;
+
+ drm_modeset_backoff(&ctx);
+ }
+
+ if (ret)
+ DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
intel_gvt_cleanup(dev_priv);
i915_driver_unregister(dev_priv);
@@ -1317,7 +1364,7 @@ void i915_driver_unload(struct drm_device *dev)
/* Free error state after interrupts are fully disabled. */
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
- i915_destroy_error_state(dev_priv);
+ i915_reset_error_state(dev_priv);
/* Flush any outstanding unpin_work. */
drain_workqueue(dev_priv->wq);
@@ -1333,8 +1380,16 @@ void i915_driver_unload(struct drm_device *dev)
i915_driver_cleanup_mmio(dev_priv);
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+static void i915_driver_release(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
i915_driver_cleanup_early(dev_priv);
+ drm_dev_fini(&dev_priv->drm);
+
+ kfree(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1366,17 +1421,14 @@ static void i915_driver_lastclose(struct drm_device *dev)
vga_switcheroo_process_delayed_switch();
}
-static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
+static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
+ struct drm_i915_file_private *file_priv = file->driver_priv;
+
mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file);
i915_gem_release(dev, file);
mutex_unlock(&dev->struct_mutex);
-}
-
-static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
-{
- struct drm_i915_file_private *file_priv = file->driver_priv;
kfree(file_priv);
}
@@ -1455,7 +1507,7 @@ static int i915_drm_suspend(struct drm_device *dev)
opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
intel_opregion_notify_adapter(dev_priv, opregion_target_state);
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
intel_opregion_unregister(dev_priv);
intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
@@ -1700,7 +1752,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
ret);
- intel_uncore_early_sanitize(dev_priv, true);
+ intel_uncore_resume_early(dev_priv);
if (IS_GEN9_LP(dev_priv)) {
if (!dev_priv->suspended_to_idle)
@@ -1716,6 +1768,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);
+ i915_gem_sanitize(dev_priv);
+
enable_rpm_wakeref_asserts(dev_priv);
out:
@@ -1761,12 +1815,15 @@ void i915_reset(struct drm_i915_private *dev_priv)
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
+ GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
- if (!test_and_clear_bit(I915_RESET_IN_PROGRESS, &error->flags))
+ if (!test_bit(I915_RESET_HANDOFF, &error->flags))
return;
/* Clear any previous failed attempts at recovery. Time to try again. */
- __clear_bit(I915_WEDGED, &error->flags);
+ if (!i915_gem_unset_wedged(dev_priv))
+ goto wakeup;
+
error->reset_count++;
pr_notice("drm/i915: Resetting chip after gpu hang\n");
@@ -1787,7 +1844,7 @@ void i915_reset(struct drm_i915_private *dev_priv)
goto error;
}
- i915_gem_reset_finish(dev_priv);
+ i915_gem_reset(dev_priv);
intel_overlay_reset(dev_priv);
/* Ok, now get things going again... */
@@ -1812,14 +1869,18 @@ void i915_reset(struct drm_i915_private *dev_priv)
i915_queue_hangcheck(dev_priv);
-wakeup:
+finish:
+ i915_gem_reset_finish(dev_priv);
enable_irq(dev_priv->drm.irq);
- wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS);
+
+wakeup:
+ clear_bit(I915_RESET_HANDOFF, &error->flags);
+ wake_up_bit(&error->flags, I915_RESET_HANDOFF);
return;
error:
i915_gem_set_wedged(dev_priv);
- goto wakeup;
+ goto finish;
}
static int i915_pm_suspend(struct device *kdev)
@@ -2342,7 +2403,7 @@ static int intel_runtime_suspend(struct device *kdev)
return ret;
}
- intel_uncore_forcewake_reset(dev_priv, false);
+ intel_uncore_suspend(dev_priv);
enable_rpm_wakeref_asserts(dev_priv);
WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -2532,7 +2593,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -2574,10 +2635,10 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
- DRIVER_RENDER | DRIVER_MODESET,
+ DRIVER_RENDER | DRIVER_MODESET | DRIVER_ATOMIC,
+ .release = i915_driver_release,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
- .preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.set_busid = drm_pci_set_busid,
@@ -2603,3 +2664,7 @@ static struct drm_driver driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_drm.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8df73751c367..a5947a496d0a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -79,8 +79,8 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20170123"
-#define DRIVER_TIMESTAMP 1485156432
+#define DRIVER_DATE "20170320"
+#define DRIVER_TIMESTAMP 1489994464
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
@@ -293,6 +293,7 @@ enum plane_id {
PLANE_PRIMARY,
PLANE_SPRITE0,
PLANE_SPRITE1,
+ PLANE_SPRITE2,
PLANE_CURSOR,
I915_MAX_PLANES,
};
@@ -343,6 +344,11 @@ enum intel_display_power_domain {
POWER_DOMAIN_PORT_DDI_C_LANES,
POWER_DOMAIN_PORT_DDI_D_LANES,
POWER_DOMAIN_PORT_DDI_E_LANES,
+ POWER_DOMAIN_PORT_DDI_A_IO,
+ POWER_DOMAIN_PORT_DDI_B_IO,
+ POWER_DOMAIN_PORT_DDI_C_IO,
+ POWER_DOMAIN_PORT_DDI_D_IO,
+ POWER_DOMAIN_PORT_DDI_E_IO,
POWER_DOMAIN_PORT_DSI,
POWER_DOMAIN_PORT_CRT,
POWER_DOMAIN_PORT_OTHER,
@@ -384,6 +390,8 @@ enum hpd_pin {
#define for_each_hpd_pin(__pin) \
for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+#define HPD_STORM_DEFAULT_THRESHOLD 5
+
struct i915_hotplug {
struct work_struct hotplug_work;
@@ -407,6 +415,8 @@ struct i915_hotplug {
struct work_struct poll_init_work;
bool poll_enabled;
+ unsigned int hpd_storm_threshold;
+
/*
* if we get a HPD irq from DP and a HPD irq from non-DP
* the non-DP HPD could block the workqueue on a mode config
@@ -479,10 +489,8 @@ struct i915_hotplug {
&(dev)->mode_config.encoder_list, \
base.head)
-#define for_each_intel_connector(dev, intel_connector) \
- list_for_each_entry(intel_connector, \
- &(dev)->mode_config.connector_list, \
- base.head)
+#define for_each_intel_connector_iter(intel_connector, iter) \
+ while ((intel_connector = to_intel_connector(drm_connector_list_iter_next(iter))))
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
@@ -494,7 +502,35 @@ struct i915_hotplug {
#define for_each_power_domain(domain, mask) \
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
- for_each_if ((1 << (domain)) & (mask))
+ for_each_if (BIT_ULL(domain) & (mask))
+
+#define for_each_power_well(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells < \
+ (__dev_priv)->power_domains.power_well_count; \
+ (__power_well)++)
+
+#define for_each_power_well_rev(__dev_priv, __power_well) \
+ for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+ (__dev_priv)->power_domains.power_well_count - 1; \
+ (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+ (__power_well)--)
+
+#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well(__dev_priv, __power_well) \
+ for_each_if ((__power_well)->domains & (__domain_mask))
+
+#define for_each_power_domain_well_rev(__dev_priv, __power_well, __domain_mask) \
+ for_each_power_well_rev(__dev_priv, __power_well) \
+ for_each_if ((__power_well)->domains & (__domain_mask))
+
+#define for_each_intel_plane_in_state(__state, plane, plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->base.dev->mode_config.num_total_plane && \
+ ((plane) = to_intel_plane((__state)->base.planes[__i].ptr), \
+ (plane_state) = to_intel_plane_state((__state)->base.planes[__i].state), 1); \
+ (__i)++) \
+ for_each_if (plane_state)
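A hypothetical caller of the new iterator, given a struct intel_atomic_state *state; intel_plane_foo() is made up purely to show the expected types:

	struct intel_plane *plane;
	struct intel_plane_state *plane_state;
	int i;

	for_each_intel_plane_in_state(state, plane, plane_state, i)
		intel_plane_foo(plane, plane_state); /* NULL states are skipped */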
struct drm_i915_private;
struct i915_mm_struct;
@@ -600,9 +636,13 @@ struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
+struct intel_cdclk_state;
struct drm_i915_display_funcs {
- int (*get_display_clock_speed)(struct drm_i915_private *dev_priv);
+ void (*get_cdclk)(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state);
+ void (*set_cdclk)(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state);
int (*get_fifo_size)(struct drm_i915_private *dev_priv, int plane);
int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
int (*compute_intermediate_wm)(struct drm_device *dev,
@@ -617,7 +657,6 @@ struct drm_i915_display_funcs {
int (*compute_global_watermarks)(struct drm_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
- void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
@@ -636,7 +675,8 @@ struct drm_i915_display_funcs {
struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode);
void (*audio_codec_disable)(struct intel_encoder *encoder);
- void (*fdi_link_train)(struct drm_crtc *crtc);
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
void (*init_clock_gating)(struct drm_i915_private *dev_priv);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -722,6 +762,7 @@ struct intel_uncore {
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
+ struct notifier_block pmic_bus_access_nb;
struct intel_uncore_funcs funcs;
unsigned fifo_count;
@@ -856,6 +897,7 @@ enum intel_platform {
INTEL_BROXTON,
INTEL_KABYLAKE,
INTEL_GEMINILAKE,
+ INTEL_MAX_PLATFORMS
};
struct intel_device_info {
@@ -890,7 +932,7 @@ struct intel_device_info {
struct intel_display_error_state;
-struct drm_i915_error_state {
+struct i915_gpu_state {
struct kref ref;
struct timeval time;
struct timeval boottime;
@@ -900,16 +942,20 @@ struct drm_i915_error_state {
char error_msg[128];
bool simulated;
+ bool awake;
+ bool wakelock;
+ bool suspended;
int iommu;
u32 reset_count;
u32 suspend_count;
struct intel_device_info device_info;
+ struct i915_params params;
/* Generic register state */
u32 eir;
u32 pgtbl_er;
u32 ier;
- u32 gtier[4];
+ u32 gtier[4], ngtier;
u32 ccid;
u32 derrmr;
u32 forcewake;
@@ -923,6 +969,7 @@ struct drm_i915_error_state {
u32 gab_ctl;
u32 gfx_mode;
+ u32 nfence;
u64 fence[I915_MAX_NUM_FENCES];
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
@@ -970,6 +1017,16 @@ struct drm_i915_error_state {
u32 semaphore_mboxes[I915_NUM_ENGINES - 1];
struct intel_instdone instdone;
+ struct drm_i915_error_context {
+ char comm[TASK_COMM_LEN];
+ pid_t pid;
+ u32 handle;
+ u32 hw_id;
+ int ban_score;
+ int active;
+ int guilty;
+ } context;
+
struct drm_i915_error_object {
u64 gtt_offset;
u64 gtt_size;
@@ -1003,10 +1060,6 @@ struct drm_i915_error_state {
u32 pp_dir_base;
};
} vm_info;
-
- pid_t pid;
- char comm[TASK_COMM_LEN];
- int context_bans;
} engine[I915_NUM_ENGINES];
struct drm_i915_error_buffer {
@@ -1270,7 +1323,7 @@ struct vlv_s0ix_state {
};
struct intel_rps_ei {
- u32 cz_clock;
+ ktime_t ktime;
u32 render_c0;
u32 media_c0;
};
@@ -1285,7 +1338,7 @@ struct intel_gen6_power_mgmt {
u32 pm_iir;
/* PM interrupt bits that should never be masked */
- u32 pm_intr_keep;
+ u32 pm_intrmsk_mbz;
/* Frequencies are stored in potentially platform dependent multiples.
* In other words, *_freq needs to be multiplied by X to be interesting.
@@ -1324,7 +1377,7 @@ struct intel_gen6_power_mgmt {
unsigned boosts;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
@@ -1395,7 +1448,7 @@ struct i915_power_well {
int count;
/* cached hw enabled state */
bool hw_enabled;
- unsigned long domains;
+ u64 domains;
/* unique identifier for this power well */
unsigned long id;
/*
@@ -1456,7 +1509,7 @@ struct i915_gem_mm {
struct work_struct free_work;
/** Usable portion of the GTT for GEM */
- phys_addr_t stolen_base; /* limited to low memory (32-bit) */
+ dma_addr_t stolen_base; /* limited to low memory (32-bit) */
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
@@ -1498,11 +1551,6 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
-struct i915_error_state_file_priv {
- struct drm_i915_private *i915;
- struct drm_i915_error_state *error;
-};
-
#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */
@@ -1519,7 +1567,7 @@ struct i915_gpu_error {
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
- struct drm_i915_error_state *first_error;
+ struct i915_gpu_state *first_error;
unsigned long missed_irq_rings;
@@ -1547,8 +1595,33 @@ struct i915_gpu_error {
*/
unsigned long reset_count;
+ /**
+ * flags: Control various stages of the GPU reset
+ *
+ * #I915_RESET_BACKOFF - When we start a reset, we want to stop any
+ * other users acquiring the struct_mutex. To do this we set the
+ * #I915_RESET_BACKOFF bit in the error flags when we detect a reset
+ * and then check for that bit before acquiring the struct_mutex (in
+ * i915_mutex_lock_interruptible()?). I915_RESET_BACKOFF serves a
+ * secondary role in preventing two concurrent global reset attempts.
+ *
+ * #I915_RESET_HANDOFF - To perform the actual GPU reset, we need the
+ * struct_mutex. We try to acquire the struct_mutex in the reset worker,
+ * but it may be held by some long running waiter (that we cannot
+ * interrupt without causing trouble). Once we are ready to do the GPU
+ * reset, we set the I915_RESET_HANDOFF bit and wake up any waiters. If
+ * they already hold the struct_mutex and want to participate they can
+ * inspect the bit and do the reset directly, otherwise the worker
+ * waits for the struct_mutex.
+ *
+ * #I915_WEDGED - If reset fails and we can no longer use the GPU,
+ * we set the #I915_WEDGED bit. Prior to command submission, e.g.
+ * i915_gem_request_alloc(), this bit is checked and the sequence
+ * aborted (with -EIO reported to userspace) if set.
+ */
unsigned long flags;
-#define I915_RESET_IN_PROGRESS 0
+#define I915_RESET_BACKOFF 0
+#define I915_RESET_HANDOFF 1
#define I915_WEDGED (BITS_PER_LONG - 1)
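To make the handoff concrete, a hypothetical participant built from the helpers added further down in this patch; i915_reset() asserts struct_mutex is held and, per its wakeup path, clears I915_RESET_HANDOFF and wakes anyone sleeping on it:

	/* Caller already holds struct_mutex, e.g. a long-running waiter */
	if (i915_reset_handoff(&dev_priv->gpu_error))
		i915_reset(dev_priv);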
/**
@@ -2053,6 +2126,10 @@ struct i915_oa_ops {
bool (*oa_buffer_is_empty)(struct drm_i915_private *dev_priv);
};
+struct intel_cdclk_state {
+ unsigned int cdclk, vco, ref;
+};
+
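A hypothetical read-back through the get_cdclk vfunc added above, showing how the three fields travel together (all three are assumed to be in kHz, matching the debugfs output earlier in this patch):

	struct intel_cdclk_state cdclk_state;

	dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
	DRM_DEBUG_KMS("cdclk %u kHz, vco %u kHz, ref %u kHz\n",
		      cdclk_state.cdclk, cdclk_state.vco, cdclk_state.ref);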
struct drm_i915_private {
struct drm_device drm;
@@ -2063,8 +2140,6 @@ struct drm_i915_private {
const struct intel_device_info info;
- int relative_constants_mode;
-
void __iomem *regs;
struct intel_uncore uncore;
@@ -2157,13 +2232,7 @@ struct drm_i915_private {
unsigned int fsb_freq, mem_freq, is_ddr3;
unsigned int skl_preferred_vco_freq;
- unsigned int cdclk_freq, max_cdclk_freq;
-
- /*
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
- */
- unsigned int atomic_cdclk_freq;
+ unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
unsigned int rawclk_freq;
@@ -2171,8 +2240,22 @@ struct drm_i915_private {
unsigned int czclk_freq;
struct {
- unsigned int vco, ref;
- } cdclk_pll;
+ /*
+ * The current logical cdclk state.
+ * See intel_atomic_state.cdclk.logical
+ *
+ * For reading holding any crtc lock is sufficient,
+ * for writing must hold all of them.
+ */
+ struct intel_cdclk_state logical;
+ /*
+ * The current actual cdclk state.
+ * See intel_atomic_state.cdclk.actual
+ */
+ struct intel_cdclk_state actual;
+ /* The current hardware cdclk state */
+ struct intel_cdclk_state hw;
+ } cdclk;
/**
* wq - Driver workqueue for GEM.
@@ -2317,9 +2400,6 @@ struct drm_i915_private {
} sagv_status;
struct {
- /* protects DSPARB registers on pre-g4x/vlv/chv */
- spinlock_t dsparb_lock;
-
/*
* Raw watermark latency values:
* in 0.1us units for WM0,
@@ -2486,6 +2566,11 @@ static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
return container_of(guc, struct drm_i915_private, guc);
}
+static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
+{
+ return container_of(huc, struct drm_i915_private, huc);
+}
+
/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
for ((id__) = 0; \
@@ -2752,6 +2837,12 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_KBL_REVID(dev_priv, since, until) \
(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+#define GLK_REVID_A0 0x0
+#define GLK_REVID_A1 0x1
+
+#define IS_GLK_REVID(dev_priv, since, until) \
+ (IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
+
/*
* The genX designation typically refers to the render engine, so render
* capability related checks should use IS_GEN, while display and other checks
@@ -2767,8 +2858,9 @@ intel_info(const struct drm_i915_private *dev_priv)
#define IS_GEN8(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(7)))
#define IS_GEN9(dev_priv) (!!((dev_priv)->info.gen_mask & BIT(8)))
-#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && INTEL_INFO(dev_priv)->is_lp)
#define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp)
+#define IS_GEN9_LP(dev_priv) (IS_GEN9(dev_priv) && IS_LP(dev_priv))
+#define IS_GEN9_BC(dev_priv) (IS_GEN9(dev_priv) && !IS_LP(dev_priv))
#define ENGINE_MASK(id) BIT(id)
#define RENDER_RING ENGINE_MASK(RCS)
@@ -2810,9 +2902,7 @@ intel_info(const struct drm_i915_private *dev_priv)
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
- (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
- IS_SKL_GT3(dev_priv) || \
- IS_SKL_GT4(dev_priv))
+ (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2952,6 +3042,9 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
+int intel_engines_init_early(struct drm_i915_private *dev_priv);
+int intel_engines_init(struct drm_i915_private *dev_priv);
+
/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
u32 pin_mask, u32 long_mask);
@@ -2990,14 +3083,12 @@ int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);
extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
-extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake);
extern void intel_uncore_init(struct drm_i915_private *dev_priv);
extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
-extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore);
+extern void intel_uncore_suspend(struct drm_i915_private *dev_priv);
+extern void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -3129,6 +3220,7 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_load_init(struct drm_i915_private *dev_priv);
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
@@ -3288,9 +3380,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
unsigned int *needs_clflush);
-#define CLFLUSH_BEFORE 0x1
-#define CLFLUSH_AFTER 0x2
-#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
+#define CLFLUSH_BEFORE BIT(0)
+#define CLFLUSH_AFTER BIT(1)
+#define CLFLUSH_FLAGS (CLFLUSH_BEFORE | CLFLUSH_AFTER)
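
The switch from literal 0x1/0x2 to BIT() is cosmetic; the flag values are unchanged. A self-contained sketch of how the flags combine and are tested (BIT() is redefined locally so the snippet compiles outside the kernel):

	#include <stdio.h>

	#define BIT(n)		(1UL << (n))
	#define CLFLUSH_BEFORE	BIT(0)
	#define CLFLUSH_AFTER	BIT(1)

	int main(void)
	{
		unsigned int needs_clflush = 0;

		needs_clflush |= CLFLUSH_BEFORE;	/* flush stale lines before reading */
		needs_clflush |= CLFLUSH_AFTER;		/* write back dirty lines afterwards */

		if (needs_clflush & CLFLUSH_AFTER)
			printf("flush after: yes\n");
		return 0;
	}
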
static inline void
i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
@@ -3320,9 +3412,14 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);
void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
+static inline bool i915_reset_backoff(struct i915_gpu_error *error)
+{
+ return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
+}
+
+static inline bool i915_reset_handoff(struct i915_gpu_error *error)
{
- return unlikely(test_bit(I915_RESET_IN_PROGRESS, &error->flags));
+ return unlikely(test_bit(I915_RESET_HANDOFF, &error->flags));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
@@ -3330,9 +3427,9 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
return unlikely(test_bit(I915_WEDGED, &error->flags));
}
-static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+static inline bool i915_reset_backoff_or_wedged(struct i915_gpu_error *error)
{
- return i915_reset_in_progress(error) | i915_terminally_wedged(error);
+ return i915_reset_backoff(error) | i915_terminally_wedged(error);
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
@@ -3341,15 +3438,18 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
}
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
+void i915_gem_reset(struct drm_i915_private *dev_priv);
void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
+bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
+
+void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
-int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
- unsigned int flags);
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
+ unsigned int flags);
int __must_check i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
int i915_gem_fault(struct vm_fault *vmf);
@@ -3543,7 +3643,7 @@ static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
- const struct i915_error_state_file_priv *error);
+ const struct i915_gpu_state *gpu);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
struct drm_i915_private *i915,
size_t count, loff_t pos);
@@ -3552,13 +3652,28 @@ static inline void i915_error_state_buf_release(
{
kfree(eb->buf);
}
+
+struct i915_gpu_state *i915_capture_gpu_state(struct drm_i915_private *i915);
void i915_capture_error_state(struct drm_i915_private *dev_priv,
u32 engine_mask,
const char *error_msg);
-void i915_error_state_get(struct drm_device *dev,
- struct i915_error_state_file_priv *error_priv);
-void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
-void i915_destroy_error_state(struct drm_i915_private *dev_priv);
+
+static inline struct i915_gpu_state *
+i915_gpu_state_get(struct i915_gpu_state *gpu)
+{
+ kref_get(&gpu->ref);
+ return gpu;
+}
+
+void __i915_gpu_state_free(struct kref *kref);
+static inline void i915_gpu_state_put(struct i915_gpu_state *gpu)
+{
+ if (gpu)
+ kref_put(&gpu->ref, __i915_gpu_state_free);
+}
+
+struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915);
+void i915_reset_error_state(struct drm_i915_private *i915);
#else
@@ -3568,7 +3683,13 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv,
{
}
-static inline void i915_destroy_error_state(struct drm_i915_private *dev_priv)
+static inline struct i915_gpu_state *
+i915_first_error_state(struct drm_i915_private *i915)
+{
+ return NULL;
+}
+
+static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
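
The i915_gpu_state_get()/i915_gpu_state_put() helpers above are thin wrappers over the kernel's kref API. A simplified, single-threaded user-space analogue of the same get/put lifetime rule (the real kref_put() is atomic and takes a release callback, here inlined as free()):

	#include <stdlib.h>

	struct gpu_state {
		int refcount;			/* stands in for struct kref */
		char payload[64];
	};

	static struct gpu_state *state_get(struct gpu_state *s)
	{
		s->refcount++;			/* kref_get() */
		return s;
	}

	static void state_put(struct gpu_state *s)
	{
		if (s && --s->refcount == 0)	/* kref_put() -> release */
			free(s);
	}

	int main(void)
	{
		struct gpu_state *s = calloc(1, sizeof(*s));

		s->refcount = 1;	/* creation owns the first reference */
		state_get(s);		/* a second user takes a reference */
		state_put(s);		/* ...and drops it */
		state_put(s);		/* the last put frees the object */
		return 0;
	}
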
@@ -3626,7 +3747,7 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
-int intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_init(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -3708,7 +3829,7 @@ extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
-extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
+extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
bool enable);
@@ -3724,7 +3845,6 @@ extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
- struct drm_i915_private *dev_priv,
struct intel_display_error_state *error);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
@@ -3734,7 +3854,7 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
-void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
+int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
@@ -3790,6 +3910,8 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder);
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg);
#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val) dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
@@ -3953,14 +4075,34 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
}
static inline bool
-__i915_request_irq_complete(struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct drm_i915_gem_request *req)
{
struct intel_engine_cs *engine = req->engine;
+ u32 seqno;
+
+ /* Note that the engine may have wrapped around the seqno, and
+ * so our request->global_seqno will be ahead of the hardware,
+ * even though it completed the request before wrapping. We catch
+ * this by kicking all the waiters before resetting the seqno
+ * in hardware, and also signal the fence.
+ */
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+ return true;
+
+ /* The request was dequeued before we were awoken. We check after
+ * inspecting the hw to confirm that this was the same request
+ * that generated the HWS update. The memory barriers within
+ * the request execution are sufficient to ensure that a check
+ * after reading the value from hw matches this request.
+ */
+ seqno = i915_gem_request_global_seqno(req);
+ if (!seqno)
+ return false;
/* Before we do the heavier coherent read of the seqno,
* check the value (hopefully) in the CPU cacheline.
*/
- if (__i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req, seqno))
return true;
/* Ensure our read of the seqno is coherent so that we
@@ -3975,9 +4117,8 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
* is woken.
*/
if (engine->irq_seqno_barrier &&
- rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh) == current &&
- cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
- struct task_struct *tsk;
+ test_and_clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted)) {
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The ordering of irq_posted versus applying the barrier
* is crucial. The clearing of the current irq_posted must
@@ -3999,19 +4140,18 @@ __i915_request_irq_complete(struct drm_i915_gem_request *req)
* the seqno before we believe it coherent since they see
* irq_posted == false but we are still running).
*/
- rcu_read_lock();
- tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
- if (tsk && tsk != current)
+ spin_lock_irq(&b->irq_lock);
+ if (b->irq_wait && b->irq_wait->tsk != current)
/* Note that if the bottom-half is changed as we
* are sending the wake-up, the new bottom-half will
* be woken by whomever made the change. We only have
* to worry about when we steal the irq-posted for
* ourself.
*/
- wake_up_process(tsk);
- rcu_read_unlock();
+ wake_up_process(b->irq_wait->tsk);
+ spin_unlock_irq(&b->irq_lock);
- if (__i915_gem_request_completed(req))
+ if (__i915_gem_request_completed(req, seqno))
return true;
}
@@ -4042,4 +4182,10 @@ int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
struct io_mapping *iomap);
+static inline bool i915_gem_object_is_coherent(struct drm_i915_gem_object *obj)
+{
+ return (obj->cache_level != I915_CACHE_NONE ||
+ HAS_LLC(to_i915(obj->base.dev)));
+}
+
#endif
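
A hedged illustration of how the new i915_gem_object_is_coherent() helper is used later in this patch: a CPU write only needs a manual cache flush when the object is neither snooped nor on an LLC platform.

	if (!i915_gem_object_is_coherent(obj))
		drm_clflush_sg(obj->mm.pages);	/* push CPU-cache dirt to memory */
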
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6908123162d1..58e1db77d70e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -29,12 +29,14 @@
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
+#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
@@ -47,18 +49,12 @@ static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static bool cpu_cache_is_coherent(struct drm_device *dev,
- enum i915_cache_level level)
-{
- return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
-}
-
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
return false;
- if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ if (!i915_gem_object_is_coherent(obj))
return true;
return obj->pin_display;
@@ -107,16 +103,13 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
might_sleep();
- if (!i915_reset_in_progress(error))
- return 0;
-
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
- !i915_reset_in_progress(error),
+ !i915_reset_backoff(error),
I915_RESET_TIMEOUT);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
@@ -254,7 +247,7 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
if (needs_clflush &&
(obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
- !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ !i915_gem_object_is_coherent(obj))
drm_clflush_sg(pages);
obj->base.read_domains = I915_GEM_DOMAIN_CPU;
@@ -312,6 +305,8 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
.release = i915_gem_object_release_phys,
};
+static const struct drm_i915_gem_object_ops i915_gem_object_ops;
+
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
@@ -399,7 +394,7 @@ out:
if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
i915_gem_request_retire_upto(rq);
- if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
+ if (rps && i915_gem_request_global_seqno(rq) == intel_engine_last_submit(rq->engine)) {
/* The GPU is now idle and this client has stalled.
* Since no other client has submitted a request in the
* meantime, assume that this client is the only one
@@ -424,7 +419,9 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
long timeout,
struct intel_rps_client *rps)
{
+ unsigned int seq = __read_seqcount_begin(&resv->seq);
struct dma_fence *excl;
+ bool prune_fences = false;
if (flags & I915_WAIT_ALL) {
struct dma_fence **shared;
@@ -449,15 +446,31 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
for (; i < count; i++)
dma_fence_put(shared[i]);
kfree(shared);
+
+ prune_fences = count && timeout >= 0;
} else {
excl = reservation_object_get_excl_rcu(resv);
}
- if (excl && timeout >= 0)
+ if (excl && timeout >= 0) {
timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+ prune_fences = timeout >= 0;
+ }
dma_fence_put(excl);
+	/* Opportunistically prune the fences iff we know they have *all* been
+ * signaled and that the reservation object has not been changed (i.e.
+ * no new fences have been added).
+ */
+ if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
+ if (reservation_object_trylock(resv)) {
+ if (!__read_seqcount_retry(&resv->seq, seq))
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
+ }
+ }
+
return timeout;
}
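
The __read_seqcount_begin()/__read_seqcount_retry() pair above lets the pruning step detect whether the reservation object changed while the waits ran. A single-threaded user-space sketch of the read/retry idiom, with all memory barriers omitted for brevity:

	#include <stdio.h>

	static unsigned int seq;	/* bumped twice around every write */
	static int shared_value;

	static unsigned int read_begin(void) { return seq; }
	static int read_retry(unsigned int s) { return seq != s; }

	static void write_value(int v)
	{
		seq++;			/* mark a write in progress */
		shared_value = v;
		seq++;			/* publish the update */
	}

	int main(void)
	{
		unsigned int s;
		int v;

		do {
			s = read_begin();
			v = shared_value;	/* speculative read */
		} while (read_retry(s));	/* redo if a write intervened */

		write_value(1);
		printf("read %d\n", v);
		return 0;
	}
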
@@ -585,9 +598,18 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
if (obj->mm.pages)
return -EBUSY;
+ GEM_BUG_ON(obj->ops != &i915_gem_object_ops);
obj->ops = &i915_gem_phys_ops;
- return i915_gem_object_pin_pages(obj);
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err_xfer;
+
+ return 0;
+
+err_xfer:
+ obj->ops = &i915_gem_object_ops;
+ return ret;
}
static int
@@ -608,7 +630,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(to_i915(obj->base.dev));
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
return 0;
}
@@ -763,6 +785,15 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ if (i915_gem_object_is_coherent(obj) ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, false);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu read domain, set ourself into the gtt
@@ -771,17 +802,9 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
* anyway again before the next pread happens.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
- obj->cache_level);
-
- if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, false);
- if (ret)
- goto err_unpin;
-
- *needs_clflush = 0;
- }
+ *needs_clflush = CLFLUSH_BEFORE;
+out:
/* return with the pages pinned */
return 0;
@@ -814,6 +837,15 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
if (ret)
return ret;
+ if (i915_gem_object_is_coherent(obj) ||
+ !static_cpu_has(X86_FEATURE_CLFLUSH)) {
+ ret = i915_gem_object_set_to_cpu_domain(obj, true);
+ if (ret)
+ goto err_unpin;
+ else
+ goto out;
+ }
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're not in the cpu write domain, set ourself into the
@@ -822,26 +854,15 @@ int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
* right away and we therefore have to clflush anyway.
*/
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
- *needs_clflush |= cpu_write_needs_clflush(obj) << 1;
+ *needs_clflush |= CLFLUSH_AFTER;
/* Same trick applies to invalidate partially written cachelines read
* before writing.
*/
if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
- *needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
- obj->cache_level);
-
- if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- goto err_unpin;
-
- *needs_clflush = 0;
- }
-
- if ((*needs_clflush & CLFLUSH_AFTER) == 0)
- obj->cache_dirty = true;
+ *needs_clflush |= CLFLUSH_BEFORE;
+out:
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
@@ -1257,7 +1278,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
mutex_lock(&i915->drm.struct_mutex);
out_unpin:
@@ -1393,7 +1414,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
i915_gem_obj_finish_shmem_access(obj);
return ret;
}
@@ -1434,6 +1455,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
trace_i915_gem_object_pwrite(obj, args->offset, args->size);
+ ret = -ENODEV;
+ if (obj->ops->pwrite)
+ ret = obj->ops->pwrite(obj, args);
+ if (ret != -ENODEV)
+ goto err;
+
ret = i915_gem_object_wait(obj,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_ALL,
@@ -1596,23 +1623,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_sw_finish *args = data;
struct drm_i915_gem_object *obj;
- int err = 0;
obj = i915_gem_object_lookup(file, args->handle);
if (!obj)
return -ENOENT;
/* Pinned buffers may be scanout, so flush the cache */
- if (READ_ONCE(obj->pin_display)) {
- err = i915_mutex_lock_interruptible(dev);
- if (!err) {
- i915_gem_object_flush_cpu_write_domain(obj);
- mutex_unlock(&dev->struct_mutex);
- }
- }
-
+ i915_gem_object_flush_if_display(obj);
i915_gem_object_put(obj);
- return err;
+
+ return 0;
}
/**
@@ -2119,6 +2139,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj)
*/
shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
obj->mm.madv = __I915_MADV_PURGED;
+ obj->mm.pages = ERR_PTR(-EFAULT);
}
/* Try to discard unwanted pages */
@@ -2218,22 +2239,24 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
__i915_gem_object_reset_page_iter(obj);
- obj->ops->put_pages(obj, pages);
+ if (!IS_ERR(pages))
+ obj->ops->put_pages(obj, pages);
+
unlock:
mutex_unlock(&obj->mm.lock);
}
-static void i915_sg_trim(struct sg_table *orig_st)
+static bool i915_sg_trim(struct sg_table *orig_st)
{
struct sg_table new_st;
struct scatterlist *sg, *new_sg;
unsigned int i;
if (orig_st->nents == orig_st->orig_nents)
- return;
+ return false;
if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
- return;
+ return false;
new_sg = new_st.sgl;
for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
@@ -2246,6 +2269,7 @@ static void i915_sg_trim(struct sg_table *orig_st)
sg_free_table(orig_st);
*orig_st = new_st;
+ return true;
}
static struct sg_table *
@@ -2437,7 +2461,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (err)
return err;
- if (unlikely(!obj->mm.pages)) {
+ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
err = ____i915_gem_object_get_pages(obj);
if (err)
goto unlock;
@@ -2515,7 +2539,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
pinned = true;
if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
- if (unlikely(!obj->mm.pages)) {
+ if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
ret = ____i915_gem_object_get_pages(obj);
if (ret)
goto err_unlock;
@@ -2563,6 +2587,75 @@ err_unlock:
goto out_unlock;
}
+static int
+i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
+ const struct drm_i915_gem_pwrite *arg)
+{
+ struct address_space *mapping = obj->base.filp->f_mapping;
+ char __user *user_data = u64_to_user_ptr(arg->data_ptr);
+ u64 remain, offset;
+ unsigned int pg;
+
+ /* Before we instantiate/pin the backing store for our use, we
+ * can prepopulate the shmemfs filp efficiently using a write into
+ * the pagecache. We avoid the penalty of instantiating all the
+ * pages, important if the user is just writing to a few and never
+ * uses the object on the GPU, and using a direct write into shmemfs
+ * allows it to avoid the cost of retrieving a page (either swapin
+ * or clearing-before-use) before it is overwritten.
+ */
+ if (READ_ONCE(obj->mm.pages))
+ return -ENODEV;
+
+ /* Before the pages are instantiated the object is treated as being
+ * in the CPU domain. The pages will be clflushed as required before
+ * use, and we can freely write into the pages directly. If userspace
+	 * races pwrite with any other operation, corruption will ensue -
+ * that is userspace's prerogative!
+ */
+
+ remain = arg->size;
+ offset = arg->offset;
+ pg = offset_in_page(offset);
+
+ do {
+ unsigned int len, unwritten;
+ struct page *page;
+ void *data, *vaddr;
+ int err;
+
+ len = PAGE_SIZE - pg;
+ if (len > remain)
+ len = remain;
+
+ err = pagecache_write_begin(obj->base.filp, mapping,
+ offset, len, 0,
+ &page, &data);
+ if (err < 0)
+ return err;
+
+ vaddr = kmap(page);
+ unwritten = copy_from_user(vaddr + pg, user_data, len);
+ kunmap(page);
+
+ err = pagecache_write_end(obj->base.filp, mapping,
+ offset, len, len - unwritten,
+ page, data);
+ if (err < 0)
+ return err;
+
+ if (unwritten)
+ return -EFAULT;
+
+ remain -= len;
+ user_data += len;
+ offset += len;
+ pg = 0;
+ } while (remain);
+
+ return 0;
+}
+
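
The fast path above streams user data into the shmemfs pagecache one page at a time; the first chunk is shortened so every later chunk starts page-aligned. The chunking arithmetic, reduced to a self-contained user-space loop (memcpy() stands in for the kmap()+copy_from_user() pair):

	#include <stdio.h>
	#include <string.h>

	#define PAGE_SIZE 4096u

	static void chunked_write(char *dst, const char *src,
				  unsigned long long offset, unsigned long long size)
	{
		unsigned int pg = offset % PAGE_SIZE;	/* offset_in_page() */

		while (size) {
			unsigned int len = PAGE_SIZE - pg;

			if (len > size)
				len = size;

			memcpy(dst + offset, src, len);	/* one page's worth */

			size -= len;
			src += len;
			offset += len;
			pg = 0;		/* later chunks are page-aligned */
		}
	}

	int main(void)
	{
		static char dst[3 * PAGE_SIZE];
		const char msg[] = "hello";

		chunked_write(dst, msg, PAGE_SIZE - 2, sizeof(msg));
		printf("%s\n", dst + PAGE_SIZE - 2);
		return 0;
	}
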
static bool ban_context(const struct i915_gem_context *ctx)
{
return (i915_gem_context_is_bannable(ctx) &&
@@ -2596,7 +2689,8 @@ static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
- struct drm_i915_gem_request *request;
+ struct drm_i915_gem_request *request, *active = NULL;
+ unsigned long flags;
/* We are called by the error capture and reset at a random
* point in time. In particular, note that neither is crucially
@@ -2606,15 +2700,22 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
* extra delay for a recent interrupt is pointless. Hence, we do
* not need an engine->irq_seqno_barrier() before the seqno reads.
*/
+ spin_lock_irqsave(&engine->timeline->lock, flags);
list_for_each_entry(request, &engine->timeline->requests, link) {
- if (__i915_gem_request_completed(request))
+ if (__i915_gem_request_completed(request,
+ request->global_seqno))
continue;
GEM_BUG_ON(request->engine != engine);
- return request;
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags));
+
+ active = request;
+ break;
}
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
- return NULL;
+ return active;
}
static bool engine_stalled(struct intel_engine_cs *engine)
@@ -2641,7 +2742,30 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id) {
struct drm_i915_gem_request *request;
+ /* Prevent the signaler thread from updating the request
+ * state (by calling dma_fence_signal) as we are processing
+ * the reset. The write from the GPU of the seqno is
+ * asynchronous and the signaler thread may see a different
+ * value to us and declare the request complete, even though
+ * the reset routine have picked that request as the active
+ * (incomplete) request. This conflict is not handled
+ * gracefully!
+ */
+ kthread_park(engine->breadcrumbs.signaler);
+
+ /* Prevent request submission to the hardware until we have
+ * completed the reset in i915_gem_reset_finish(). If a request
+ * is completed by one engine, it may then queue a request
+ * to a second via its engine->irq_tasklet *just* as we are
+ * calling engine->init_hw() and also writing the ELSP.
+ * Turning off the engine->irq_tasklet until the reset is over
+ * prevents the race.
+ */
tasklet_kill(&engine->irq_tasklet);
+ tasklet_disable(&engine->irq_tasklet);
+
+ if (engine->irq_seqno_barrier)
+ engine->irq_seqno_barrier(engine);
if (engine_stalled(engine)) {
request = i915_gem_find_active_request(engine);
@@ -2739,9 +2863,6 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request;
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
-
request = i915_gem_find_active_request(engine);
if (request && i915_gem_reset_request(request)) {
DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
@@ -2756,7 +2877,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine)
engine->reset_hw(engine, request);
}
-void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+void i915_gem_reset(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -2765,8 +2886,14 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
i915_gem_retire_requests(dev_priv);
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id) {
+ struct i915_gem_context *ctx;
+
i915_gem_reset_engine(engine);
+ ctx = fetch_and_zero(&engine->last_retired_context);
+ if (ctx)
+ engine->context_unpin(engine, ctx);
+ }
i915_gem_restore_fences(dev_priv);
@@ -2778,6 +2905,19 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
}
}
+void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ for_each_engine(engine, dev_priv, id) {
+ tasklet_enable(&engine->irq_tasklet);
+ kthread_unpark(engine->breadcrumbs.signaler);
+ }
+}
+
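
Taken together, the reset path is now a prepare/act/finish bracket: everything parked or disabled in prepare is undone, in reverse, in finish. Illustrative call order (the actual caller lives in the reset handler, outside these hunks):

	i915_gem_reset_prepare(dev_priv);	/* kthread_park() + tasklet_disable() */
	i915_gem_reset(dev_priv);		/* find and replay/cancel active requests */
	i915_gem_reset_finish(dev_priv);	/* tasklet_enable() + kthread_unpark() */
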
static void nop_submit_request(struct drm_i915_gem_request *request)
{
dma_fence_set_error(&request->fence, -EIO);
@@ -2857,6 +2997,65 @@ void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
}
+bool i915_gem_unset_wedged(struct drm_i915_private *i915)
+{
+ struct i915_gem_timeline *tl;
+ int i;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+ if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
+ return true;
+
+ /* Before unwedging, make sure that all pending operations
+ * are flushed and errored out - we may have requests waiting upon
+ * third party fences. We marked all inflight requests as EIO, and
+	 * every execbuf since has returned EIO; for consistency we want all
+ * the currently pending requests to also be marked as EIO, which
+ * is done inside our nop_submit_request - and so we must wait.
+ *
+ * No more can be submitted until we reset the wedged bit.
+ */
+ list_for_each_entry(tl, &i915->gt.timelines, link) {
+ for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
+ struct drm_i915_gem_request *rq;
+
+ rq = i915_gem_active_peek(&tl->engine[i].last_request,
+ &i915->drm.struct_mutex);
+ if (!rq)
+ continue;
+
+ /* We can't use our normal waiter as we want to
+ * avoid recursively trying to handle the current
+ * reset. The basic dma_fence_default_wait() installs
+ * a callback for dma_fence_signal(), which is
+ * triggered by our nop handler (indirectly, the
+ * callback enables the signaler thread which is
+		 * woken by the nop_submit_request() advancing the seqno;
+		 * when the seqno passes the fence, the signaler
+		 * then signals the fence, waking us up).
+ */
+ if (dma_fence_default_wait(&rq->fence, true,
+ MAX_SCHEDULE_TIMEOUT) < 0)
+ return false;
+ }
+ }
+
+ /* Undo nop_submit_request. We prevent all new i915 requests from
+ * being queued (by disallowing execbuf whilst wedged) so having
+ * waited for all active requests above, we know the system is idle
+ * and do not have to worry about a thread being inside
+ * engine->submit_request() as we swap over. So unlike installing
+ * the nop_submit_request on reset, we can do this from normal
+ * context and do not require stop_machine().
+ */
+ intel_engines_reset_default_submission(i915);
+
+ smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
+ clear_bit(I915_WEDGED, &i915->gpu_error.flags);
+
+ return true;
+}
+
static void
i915_gem_retire_work_handler(struct work_struct *work)
{
@@ -2900,8 +3099,8 @@ i915_gem_idle_work_handler(struct work_struct *work)
* new request is submitted.
*/
wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
- intel_execlists_idle(dev_priv), 10);
-
+ intel_engines_are_idle(dev_priv),
+ 10);
if (READ_ONCE(dev_priv->gt.active_requests))
return;
@@ -2926,11 +3125,13 @@ i915_gem_idle_work_handler(struct work_struct *work)
if (dev_priv->gt.active_requests)
goto out_unlock;
- if (wait_for(intel_execlists_idle(dev_priv), 10))
+ if (wait_for(intel_engines_are_idle(dev_priv), 10))
DRM_ERROR("Timeout waiting for engines to idle\n");
- for_each_engine(engine, dev_priv, id)
+ for_each_engine(engine, dev_priv, id) {
+ intel_engine_disarm_breadcrumbs(engine);
i915_gem_batch_pool_fini(&engine->batch_pool);
+ }
GEM_BUG_ON(!dev_priv->gt.awake);
dev_priv->gt.awake = false;
@@ -3029,6 +3230,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
if (args->timeout_ns < 0)
args->timeout_ns = 0;
+
+ /*
+ * Apparently ktime isn't accurate enough and occasionally has a
+ * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+ * things up to make the test happy. We allow up to 1 jiffy.
+ *
+ * This is a regression from the timespec->ktime conversion.
+ */
+ if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
+ args->timeout_ns = 0;
}
i915_gem_object_put(obj);
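
The sub-jiffy clamp in numbers, under the assumption HZ=1000 and with a simplified nsecs_to_jiffies() that ignores the kernel's rounding:

	#include <stdio.h>

	#define HZ 1000u	/* assumed config: 1 jiffy = 1e9 / HZ ns */

	static unsigned long nsecs_to_jiffies(unsigned long long ns)
	{
		return ns / (1000000000ull / HZ);
	}

	int main(void)
	{
		long long timeout_ns = 400000;	/* residue below one jiffy */

		if (nsecs_to_jiffies(timeout_ns) == 0)
			timeout_ns = 0;	/* report it as fully elapsed */

		printf("timeout_ns = %lld\n", timeout_ns);
		return 0;
	}
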
@@ -3071,41 +3282,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
return 0;
}
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
- bool force)
-{
- /* If we don't have a page list set up, then we're not pinned
- * to GPU, and we can ignore the cache flush because it'll happen
- * again at bind time.
- */
- if (!obj->mm.pages)
- return;
-
- /*
- * Stolen memory is always coherent with the GPU as it is explicitly
- * marked as wc by the system, or the system is cache-coherent.
- */
- if (obj->stolen || obj->phys_handle)
- return;
-
- /* If the GPU is snooping the contents of the CPU cache,
- * we do not need to manually clear the CPU cache lines. However,
- * the caches are only snooped when the render cache is
- * flushed/invalidated. As we always have to emit invalidations
- * and flushes when moving into and out of the RENDER domain, correct
- * snooping behaviour occurs naturally as the result of our domain
- * tracking.
- */
- if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
- obj->cache_dirty = true;
- return;
- }
-
- trace_i915_gem_object_clflush(obj);
- drm_clflush_sg(obj->mm.pages);
- obj->cache_dirty = false;
-}
-
/** Flushes the GTT write domain for the object if it's dirty. */
static void
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
@@ -3134,12 +3310,9 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
- intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
+ intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
obj->base.write_domain = 0;
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- I915_GEM_DOMAIN_GTT);
}
/** Flushes the CPU write domain for the object if it's dirty. */
@@ -3149,13 +3322,27 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- i915_gem_clflush_object(obj, obj->pin_display);
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
+ obj->base.write_domain = 0;
+}
+
+static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
+{
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU && !obj->cache_dirty)
+ return;
+ i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
obj->base.write_domain = 0;
- trace_i915_gem_object_change_domain(obj,
- obj->base.read_domains,
- I915_GEM_DOMAIN_CPU);
+}
+
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
+{
+ if (!READ_ONCE(obj->pin_display))
+ return;
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+ __i915_gem_object_flush_for_display(obj);
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
/**
@@ -3169,7 +3356,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- uint32_t old_write_domain, old_read_domains;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@@ -3207,9 +3393,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
- old_write_domain = obj->base.write_domain;
- old_read_domains = obj->base.read_domains;
-
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
@@ -3221,10 +3404,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
obj->mm.dirty = true;
}
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
-
i915_gem_object_unpin_pages(obj);
return 0;
}
@@ -3349,7 +3528,7 @@ restart:
}
if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
- cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ i915_gem_object_is_coherent(obj))
obj->cache_dirty = true;
list_for_each_entry(vma, &obj->vma_list, obj_link)
@@ -3461,7 +3640,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
struct i915_vma *vma;
- u32 old_read_domains, old_write_domain;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@@ -3521,24 +3699,14 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
- if (obj->cache_dirty || obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj, true);
- intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
- }
-
- old_write_domain = obj->base.write_domain;
- old_read_domains = obj->base.read_domains;
+ __i915_gem_object_flush_for_display(obj);
+ intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- obj->base.write_domain = 0;
obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
-
return vma;
err_unpin_display:
@@ -3574,7 +3742,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
- uint32_t old_write_domain, old_read_domains;
int ret;
lockdep_assert_held(&obj->base.dev->struct_mutex);
@@ -3593,13 +3760,9 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_gtt_write_domain(obj);
- old_write_domain = obj->base.write_domain;
- old_read_domains = obj->base.read_domains;
-
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj, false);
-
+ i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
@@ -3616,10 +3779,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- old_write_domain);
-
return 0;
}
@@ -3647,16 +3806,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return -EIO;
spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
+ list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
break;
- /*
- * Note that the request might not have been submitted yet.
- * In which case emitted_jiffies will be zero.
- */
- if (!request->emitted_jiffies)
- continue;
+ if (target) {
+ list_del(&target->client_link);
+ target->file_priv = NULL;
+ }
target = request;
}
@@ -3942,7 +4099,7 @@ frontbuffer_retire(struct i915_gem_active *active,
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
+ intel_fb_obj_flush(obj, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
@@ -3974,8 +4131,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE,
+
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
+
+ .pwrite = i915_gem_object_pwrite_gtt,
};
struct drm_i915_gem_object *
@@ -4203,11 +4363,29 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
!i915_gem_context_is_kernel(engine->last_retired_context));
}
+void i915_gem_sanitize(struct drm_i915_private *i915)
+{
+ /*
+ * If we inherit context state from the BIOS or earlier occupants
+ * of the GPU, the GPU may be in an inconsistent state when we
+ * try to take over. The only way to remove the earlier state
+ * is by resetting. However, resetting on earlier gen is tricky as
+ * it may impact the display and we are uncertain about the stability
+ * of the reset, so we only reset recent machines with logical
+ * context support (that must be reset to remove any stray contexts).
+ */
+ if (HAS_HW_CONTEXTS(i915)) {
+ int reset = intel_gpu_reset(i915, ALL_ENGINES);
+ WARN_ON(reset && reset != -ENODEV);
+ }
+}
+
int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
int ret;
+ intel_runtime_pm_get(dev_priv);
intel_suspend_gt_powersave(dev_priv);
mutex_lock(&dev->struct_mutex);
@@ -4222,13 +4400,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
*/
ret = i915_gem_switch_to_kernel_context(dev_priv);
if (ret)
- goto err;
+ goto err_unlock;
ret = i915_gem_wait_for_idle(dev_priv,
I915_WAIT_INTERRUPTIBLE |
I915_WAIT_LOCKED);
if (ret)
- goto err;
+ goto err_unlock;
i915_gem_retire_requests(dev_priv);
GEM_BUG_ON(dev_priv->gt.active_requests);
@@ -4252,7 +4430,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* reset the GPU back to its idle, low power state.
*/
WARN_ON(dev_priv->gt.awake);
- WARN_ON(!intel_execlists_idle(dev_priv));
+ WARN_ON(!intel_engines_are_idle(dev_priv));
/*
	 * Neither the BIOS, ourselves, nor any other kernel
@@ -4273,15 +4451,13 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
* machines is a good idea, we don't - just in case it leaves the
* machine in an unusable condition.
*/
- if (HAS_HW_CONTEXTS(dev_priv)) {
- int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
- WARN_ON(reset && reset != -ENODEV);
- }
-
- return 0;
+ i915_gem_sanitize(dev_priv);
+ goto out_rpm_put;
-err:
+err_unlock:
mutex_unlock(&dev->struct_mutex);
+out_rpm_put:
+ intel_runtime_pm_put(dev_priv);
return ret;
}
@@ -4351,11 +4527,24 @@ static void init_unused_rings(struct drm_i915_private *dev_priv)
}
}
-int
-i915_gem_init_hw(struct drm_i915_private *dev_priv)
+static int __i915_gem_restart_engines(void *data)
{
+ struct drm_i915_private *i915 = data;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ int err;
+
+ for_each_engine(engine, i915, id) {
+ err = engine->init_hw(engine);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int i915_gem_init_hw(struct drm_i915_private *dev_priv)
+{
int ret;
dev_priv->gt.last_init_time = ktime_get();
@@ -4401,16 +4590,14 @@ i915_gem_init_hw(struct drm_i915_private *dev_priv)
}
/* Need to do basic initialisation of all rings first: */
- for_each_engine(engine, dev_priv, id) {
- ret = engine->init_hw(engine);
- if (ret)
- goto out;
- }
+ ret = __i915_gem_restart_engines(dev_priv);
+ if (ret)
+ goto out;
intel_mocs_init_l3cc_table(dev_priv);
/* We can't enable contexts until all firmware is loaded */
- ret = intel_guc_setup(dev_priv);
+ ret = intel_uc_init_hw(dev_priv);
if (ret)
goto out;
@@ -4446,6 +4633,8 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_clflush_init(dev_priv);
+
if (!i915.enable_execlists) {
dev_priv->gt.resume = intel_legacy_submission_resume;
dev_priv->gt.cleanup_engine = intel_engine_cleanup;
@@ -4494,6 +4683,11 @@ out_unlock:
return ret;
}
+void i915_gem_init_mmio(struct drm_i915_private *i915)
+{
+ i915_gem_sanitize(i915);
+}
+
void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
@@ -4583,8 +4777,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
- dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
-
init_waitqueue_head(&dev_priv->pending_flip_queue);
dev_priv->mm.interruptible = true;
@@ -4609,7 +4801,9 @@ err_out:
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
+ i915_gem_drain_freed_objects(dev_priv);
WARN_ON(!llist_empty(&dev_priv->mm.free_list));
+ WARN_ON(dev_priv->mm.object_count);
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
@@ -4627,14 +4821,10 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
- intel_runtime_pm_get(dev_priv);
-
mutex_lock(&dev_priv->drm.struct_mutex);
i915_gem_shrink_all(dev_priv);
mutex_unlock(&dev_priv->drm.struct_mutex);
- intel_runtime_pm_put(dev_priv);
-
return 0;
}
@@ -4685,7 +4875,7 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
* file_priv.
*/
spin_lock(&file_priv->mm.lock);
- list_for_each_entry(request, &file_priv->mm.request_list, client_list)
+ list_for_each_entry(request, &file_priv->mm.request_list, client_link)
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
@@ -4763,38 +4953,49 @@ i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
const void *data, size_t size)
{
struct drm_i915_gem_object *obj;
- struct sg_table *sg;
- size_t bytes;
- int ret;
+ struct file *file;
+ size_t offset;
+ int err;
obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
if (IS_ERR(obj))
return obj;
- ret = i915_gem_object_set_to_cpu_domain(obj, true);
- if (ret)
- goto fail;
+ GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto fail;
+ file = obj->base.filp;
+ offset = 0;
+ do {
+ unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
+ struct page *page;
+ void *pgdata, *vaddr;
- sg = obj->mm.pages;
- bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
- obj->mm.dirty = true; /* Backing store is now out of date */
- i915_gem_object_unpin_pages(obj);
+ err = pagecache_write_begin(file, file->f_mapping,
+ offset, len, 0,
+ &page, &pgdata);
+ if (err < 0)
+ goto fail;
- if (WARN_ON(bytes != size)) {
- DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
- ret = -EFAULT;
- goto fail;
- }
+ vaddr = kmap(page);
+ memcpy(vaddr, data, len);
+ kunmap(page);
+
+ err = pagecache_write_end(file, file->f_mapping,
+ offset, len, len,
+ page, pgdata);
+ if (err < 0)
+ goto fail;
+
+ size -= len;
+ data += len;
+ offset += len;
+ } while (size);
return obj;
fail:
i915_gem_object_put(obj);
- return ERR_PTR(ret);
+ return ERR_PTR(err);
}
struct scatterlist *
@@ -4949,3 +5150,11 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
sg = i915_gem_object_get_sg(obj, n, &offset);
return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/scatterlist.c"
+#include "selftests/mock_gem_device.c"
+#include "selftests/huge_gem_object.c"
+#include "selftests/i915_gem_object.c"
+#include "selftests/i915_gem_coherency.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index a585d47c420a..5a49487368ca 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -28,9 +28,18 @@
#ifdef CONFIG_DRM_I915_DEBUG_GEM
#define GEM_BUG_ON(expr) BUG_ON(expr)
#define GEM_WARN_ON(expr) WARN_ON(expr)
+
+#define GEM_DEBUG_DECL(var) var
+#define GEM_DEBUG_EXEC(expr) expr
+#define GEM_DEBUG_BUG_ON(expr) GEM_BUG_ON(expr)
+
#else
#define GEM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#define GEM_WARN_ON(expr) (BUILD_BUG_ON_INVALID(expr), 0)
+
+#define GEM_DEBUG_DECL(var)
+#define GEM_DEBUG_EXEC(expr) do { } while (0)
+#define GEM_DEBUG_BUG_ON(expr)
#endif
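
A hedged example of the new GEM_DEBUG helpers: the declaration, the statement and the assert all compile away when CONFIG_DRM_I915_DEBUG_GEM is off. The function below is hypothetical, not taken from this patch:

	static void update_count(int new_count)
	{
		GEM_DEBUG_DECL(int old_count = new_count - 1);

		GEM_DEBUG_EXEC(pr_debug("count -> %d\n", new_count));
		GEM_DEBUG_BUG_ON(old_count >= new_count);
	}
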
#define I915_NUM_ENGINES 5
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index b3bc119ec1bb..41aa598c4f3b 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -96,8 +96,7 @@ struct drm_i915_gem_object *
i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
size_t size)
{
- struct drm_i915_gem_object *obj = NULL;
- struct drm_i915_gem_object *tmp;
+ struct drm_i915_gem_object *obj;
struct list_head *list;
int n, ret;
@@ -112,31 +111,29 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
n = ARRAY_SIZE(pool->cache_list) - 1;
list = &pool->cache_list[n];
- list_for_each_entry(tmp, list, batch_pool_link) {
+ list_for_each_entry(obj, list, batch_pool_link) {
/* The batches are strictly LRU ordered */
- if (i915_gem_object_is_active(tmp))
- break;
+ if (i915_gem_object_is_active(obj)) {
+ if (!reservation_object_test_signaled_rcu(obj->resv,
+ true))
+ break;
- GEM_BUG_ON(!reservation_object_test_signaled_rcu(tmp->resv,
- true));
+ i915_gem_retire_requests(pool->engine->i915);
+ GEM_BUG_ON(i915_gem_object_is_active(obj));
+ }
- if (tmp->base.size >= size) {
- /* Clear the set of shared fences early */
- ww_mutex_lock(&tmp->resv->lock, NULL);
- reservation_object_add_excl_fence(tmp->resv, NULL);
- ww_mutex_unlock(&tmp->resv->lock);
+ GEM_BUG_ON(!reservation_object_test_signaled_rcu(obj->resv,
+ true));
- obj = tmp;
- break;
- }
+ if (obj->base.size >= size)
+ goto found;
}
- if (obj == NULL) {
- obj = i915_gem_object_create_internal(pool->engine->i915, size);
- if (IS_ERR(obj))
- return obj;
- }
+ obj = i915_gem_object_create_internal(pool->engine->i915, size);
+ if (IS_ERR(obj))
+ return obj;
+found:
ret = i915_gem_object_pin_pages(obj);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
new file mode 100644
index 000000000000..d925fb582ba7
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+#include "intel_frontbuffer.h"
+#include "i915_gem_clflush.h"
+
+static DEFINE_SPINLOCK(clflush_lock);
+static u64 clflush_context;
+
+struct clflush {
+ struct dma_fence dma; /* Must be first for dma_fence_free() */
+ struct i915_sw_fence wait;
+ struct work_struct work;
+ struct drm_i915_gem_object *obj;
+};
+
+static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
+{
+ return DRIVER_NAME;
+}
+
+static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
+{
+ return "clflush";
+}
+
+static bool i915_clflush_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void i915_clflush_release(struct dma_fence *fence)
+{
+ struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
+
+ i915_sw_fence_fini(&clflush->wait);
+
+ BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
+ dma_fence_free(&clflush->dma);
+}
+
+static const struct dma_fence_ops i915_clflush_ops = {
+ .get_driver_name = i915_clflush_get_driver_name,
+ .get_timeline_name = i915_clflush_get_timeline_name,
+ .enable_signaling = i915_clflush_enable_signaling,
+ .wait = dma_fence_default_wait,
+ .release = i915_clflush_release,
+};
+
+static void __i915_do_clflush(struct drm_i915_gem_object *obj)
+{
+ drm_clflush_sg(obj->mm.pages);
+ obj->cache_dirty = false;
+
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
+}
+
+static void i915_clflush_work(struct work_struct *work)
+{
+ struct clflush *clflush = container_of(work, typeof(*clflush), work);
+ struct drm_i915_gem_object *obj = clflush->obj;
+
+ if (!obj->cache_dirty)
+ goto out;
+
+ if (i915_gem_object_pin_pages(obj)) {
+ DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
+ goto out;
+ }
+
+ __i915_do_clflush(obj);
+
+ i915_gem_object_unpin_pages(obj);
+
+out:
+ i915_gem_object_put(obj);
+
+ dma_fence_signal(&clflush->dma);
+ dma_fence_put(&clflush->dma);
+}
+
+static int __i915_sw_fence_call
+i915_clflush_notify(struct i915_sw_fence *fence,
+ enum i915_sw_fence_notify state)
+{
+ struct clflush *clflush = container_of(fence, typeof(*clflush), wait);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ schedule_work(&clflush->work);
+ break;
+
+ case FENCE_FREE:
+ dma_fence_put(&clflush->dma);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ unsigned int flags)
+{
+ struct clflush *clflush;
+
+ /*
+ * Stolen memory is always coherent with the GPU as it is explicitly
+ * marked as wc by the system, or the system is cache-coherent.
+ * Similarly, we only access struct pages through the CPU cache, so
+ * anything not backed by physical memory we consider to be always
+	 * coherent and not in need of clflushing.
+ */
+ if (!i915_gem_object_has_struct_page(obj))
+ return;
+
+ obj->cache_dirty = true;
+
+ /* If the GPU is snooping the contents of the CPU cache,
+ * we do not need to manually clear the CPU cache lines. However,
+ * the caches are only snooped when the render cache is
+ * flushed/invalidated. As we always have to emit invalidations
+ * and flushes when moving into and out of the RENDER domain, correct
+ * snooping behaviour occurs naturally as the result of our domain
+ * tracking.
+ */
+ if (!(flags & I915_CLFLUSH_FORCE) && i915_gem_object_is_coherent(obj))
+ return;
+
+ trace_i915_gem_object_clflush(obj);
+
+ clflush = NULL;
+ if (!(flags & I915_CLFLUSH_SYNC))
+ clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
+ if (clflush) {
+ dma_fence_init(&clflush->dma,
+ &i915_clflush_ops,
+ &clflush_lock,
+ clflush_context,
+ 0);
+ i915_sw_fence_init(&clflush->wait, i915_clflush_notify);
+
+ clflush->obj = i915_gem_object_get(obj);
+ INIT_WORK(&clflush->work, i915_clflush_work);
+
+ dma_fence_get(&clflush->dma);
+
+ i915_sw_fence_await_reservation(&clflush->wait,
+ obj->resv, NULL,
+ false, I915_FENCE_TIMEOUT,
+ GFP_KERNEL);
+
+ reservation_object_lock(obj->resv, NULL);
+ reservation_object_add_excl_fence(obj->resv, &clflush->dma);
+ reservation_object_unlock(obj->resv);
+
+ i915_sw_fence_commit(&clflush->wait);
+ } else if (obj->mm.pages) {
+ __i915_do_clflush(obj);
+ } else {
+ GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
+ }
+}
+
+void i915_gem_clflush_init(struct drm_i915_private *i915)
+{
+ clflush_context = dma_fence_context_alloc(1);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.h b/drivers/gpu/drm/i915/i915_gem_clflush.h
new file mode 100644
index 000000000000..b62d61a2d15f
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_CLFLUSH_H__
+#define __I915_GEM_CLFLUSH_H__
+
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+void i915_gem_clflush_init(struct drm_i915_private *i915);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ unsigned int flags);
+#define I915_CLFLUSH_FORCE BIT(0)
+#define I915_CLFLUSH_SYNC BIT(1)
+
+#endif /* __I915_GEM_CLFLUSH_H__ */
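
The two flag bits select between the flush modes implemented in i915_gem_clflush.c above. Illustrative call sites (the SYNC form appears in this patch's set-to-cpu-domain path, FORCE in the display-flush path):

	i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);	/* flush now, synchronously */
	i915_gem_clflush_object(obj, 0);			/* async work, fenced via obj->resv */
	i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);	/* flush even if coherent */
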
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 17f90c618208..486051ed681d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -92,21 +92,6 @@
#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
-/* This is a HW constraint. The value below is the largest known requirement
- * I've seen in a spec to date, and that was a workaround for a non-shipping
- * part. It should be safe to decrease this, but it's more future proof as is.
- */
-#define GEN6_CONTEXT_ALIGN (64<<10)
-#define GEN7_CONTEXT_ALIGN I915_GTT_MIN_ALIGNMENT
-
-static size_t get_context_alignment(struct drm_i915_private *dev_priv)
-{
- if (IS_GEN6(dev_priv))
- return GEN6_CONTEXT_ALIGN;
-
- return GEN7_CONTEXT_ALIGN;
-}
-
static int get_context_size(struct drm_i915_private *dev_priv)
{
int ret;
@@ -236,6 +221,30 @@ static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
return 0;
}
+static u32 default_desc_template(const struct drm_i915_private *i915,
+ const struct i915_hw_ppgtt *ppgtt)
+{
+ u32 address_mode;
+ u32 desc;
+
+ desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
+
+ address_mode = INTEL_LEGACY_32B_CONTEXT;
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->base))
+ address_mode = INTEL_LEGACY_64B_CONTEXT;
+ desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
+
+ if (IS_GEN8(i915))
+ desc |= GEN8_CTX_L3LLC_COHERENT;
+
+ /* TODO: WaDisableLiteRestore when we start using semaphore
+ * signalling between Command Streamers
+ * ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
+ */
+
+ return desc;
+}
+
static struct i915_gem_context *
__create_hw_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
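
default_desc_template() above folds the invariant descriptor bits together
once per context. As a worked example (values illustrative, names taken from
the hunk), a gen8 part with a 48-bit ppgtt ends up with:

    u32 desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
    desc |= INTEL_LEGACY_64B_CONTEXT << GEN8_CTX_ADDRESSING_MODE_SHIFT;
    desc |= GEN8_CTX_L3LLC_COHERENT;    /* set only when IS_GEN8() */
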
@@ -257,8 +266,6 @@ __create_hw_context(struct drm_i915_private *dev_priv,
list_add_tail(&ctx->link, &dev_priv->context_list);
ctx->i915 = dev_priv;
- ctx->ggtt_alignment = get_context_alignment(dev_priv);
-
if (dev_priv->hw_context_size) {
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
@@ -309,9 +316,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
i915_gem_context_set_bannable(ctx);
ctx->ring_size = 4 * PAGE_SIZE;
- ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
- GEN8_CTX_ADDRESSING_MODE_SHIFT;
- ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
+ ctx->desc_template =
+ default_desc_template(dev_priv, dev_priv->mm.aliasing_ppgtt);
/* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not
* present or not in use we still need a small bias as ring wraparound
@@ -332,6 +338,13 @@ err_out:
return ERR_PTR(ret);
}
+static void __destroy_hw_context(struct i915_gem_context *ctx,
+ struct drm_i915_file_private *file_priv)
+{
+ idr_remove(&file_priv->context_idr, ctx->user_handle);
+ context_close(ctx);
+}
+
/**
* The default context needs to exist per ring that uses contexts. It stores the
* context state of the GPU for applications that don't utilize HW contexts, as
@@ -356,12 +369,12 @@ i915_gem_create_context(struct drm_i915_private *dev_priv,
if (IS_ERR(ppgtt)) {
DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- context_close(ctx);
+ __destroy_hw_context(ctx, file_priv);
return ERR_CAST(ppgtt);
}
ctx->ppgtt = ppgtt;
+ ctx->desc_template = default_desc_template(dev_priv, ppgtt);
}
trace_i915_context_create(ctx);
@@ -400,7 +413,8 @@ i915_gem_context_create_gvt(struct drm_device *dev)
i915_gem_context_set_closed(ctx); /* not user accessible */
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_force_single_submission(ctx);
- ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
+ if (!i915.enable_guc_submission)
+ ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
GEM_BUG_ON(i915_gem_context_is_kernel(ctx));
out:
@@ -451,6 +465,11 @@ int i915_gem_context_init(struct drm_i915_private *dev_priv)
return PTR_ERR(ctx);
}
+	/* For easy recognisability, we want the kernel context to be 0 and then
+ * all user contexts will have non-zero hw_id.
+ */
+ GEM_BUG_ON(ctx->hw_id);
+
i915_gem_context_clear_bannable(ctx);
ctx->priority = I915_PRIORITY_MIN; /* lowest priority; idle task */
dev_priv->kernel_context = ctx;
@@ -560,27 +579,15 @@ static inline int
mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
{
struct drm_i915_private *dev_priv = req->i915;
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
enum intel_engine_id id;
- u32 flags = hw_flags | MI_MM_SPACE_GTT;
+ u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
const int num_rings =
/* Use an extended w/a on ivb+ if signalling from other rings */
i915.semaphores ?
INTEL_INFO(dev_priv)->num_rings - 1 :
0;
- int len, ret;
-
- /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
- * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
- * explicitly, so we rely on the value at ring init, stored in
- * itlb_before_ctx_switch.
- */
- if (IS_GEN6(dev_priv)) {
- ret = engine->emit_flush(req, EMIT_INVALIDATE);
- if (ret)
- return ret;
- }
+ int len;
/* These flags are for resource streamer on HSW+ */
if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
@@ -593,99 +600,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
if (INTEL_GEN(dev_priv) >= 7)
len += 2 + (num_rings ? 4*num_rings + 6 : 0);
- ret = intel_ring_begin(req, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
if (INTEL_GEN(dev_priv) >= 7) {
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
if (num_rings) {
struct intel_engine_cs *signaller;
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(num_rings));
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
- intel_ring_emit_reg(ring,
- RING_PSMI_CTL(signaller->mmio_base));
- intel_ring_emit(ring,
- _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ *cs++ = i915_mmio_reg_offset(
+ RING_PSMI_CTL(signaller->mmio_base));
+ *cs++ = _MASKED_BIT_ENABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring,
- i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_SET_CONTEXT;
+ *cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
*/
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
if (INTEL_GEN(dev_priv) >= 7) {
if (num_rings) {
struct intel_engine_cs *signaller;
i915_reg_t last_reg = {}; /* keep gcc quiet */
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(num_rings));
+ *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
for_each_engine(signaller, dev_priv, id) {
if (signaller == engine)
continue;
last_reg = RING_PSMI_CTL(signaller->mmio_base);
- intel_ring_emit_reg(ring, last_reg);
- intel_ring_emit(ring,
- _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = _MASKED_BIT_DISABLE(
+ GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
- intel_ring_emit(ring,
- MI_STORE_REGISTER_MEM |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, last_reg);
- intel_ring_emit(ring,
- i915_ggtt_offset(engine->scratch));
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(last_reg);
+ *cs++ = i915_ggtt_offset(engine->scratch);
+ *cs++ = MI_NOOP;
}
- intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
- return ret;
+ return 0;
}
static int remap_l3(struct drm_i915_gem_request *req, int slice)
{
- u32 *remap_info = req->i915->l3_parity.remap_info[slice];
- struct intel_ring *ring = req->ring;
- int i, ret;
+ u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
+ int i;
if (!remap_info)
return 0;
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/*
* Note: We do not worry about the concurrent register cacheline hang
* here because no other code should access these registers other than
* at initialization time.
*/
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
- intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
- intel_ring_emit(ring, remap_info[i]);
+ *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+ *cs++ = remap_info[i];
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
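
Both conversions above use the new emission style this series moves the
driver to: intel_ring_begin() now returns a pointer into the ring (or an
ERR_PTR on failure), the caller writes dwords through it directly, and
intel_ring_advance() is expected to check that exactly the reserved space
was consumed. A minimal sketch (the function name is hypothetical):

    static int emit_two_noops(struct drm_i915_gem_request *req)
    {
            u32 *cs;

            cs = intel_ring_begin(req, 2);
            if (IS_ERR(cs))
                    return PTR_ERR(cs);

            *cs++ = MI_NOOP;
            *cs++ = MI_NOOP;
            intel_ring_advance(req, cs);

            return 0;
    }
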
@@ -933,7 +933,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
}
ret = i915_switch_context(req);
- i915_add_request_no_flush(req);
+ i915_add_request(req);
if (ret)
return ret;
}
@@ -1014,8 +1014,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
return PTR_ERR(ctx);
}
- idr_remove(&file_priv->context_idr, ctx->user_handle);
- context_close(ctx);
+ __destroy_hw_context(ctx, file_priv);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("HW context %d destroyed\n", args->ctx_id);
@@ -1164,3 +1163,8 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
return 0;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_context.c"
+#include "selftests/i915_gem_context.c"
+#endif
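
The trailing #include block is the selftest convention used throughout this
series: with CONFIG_DRM_I915_SELFTEST enabled, the tests are compiled into
the same translation unit so they can exercise static functions directly.
Generically (file names hypothetical):

    #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
    #include "selftests/mock_foo.c"     /* mocks for the unit under test */
    #include "selftests/foo.c"          /* the tests themselves */
    #endif
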
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index 0ac750b90f3d..4af2ab94558b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -140,8 +140,6 @@ struct i915_gem_context {
*/
int priority;
- /** ggtt_alignment: alignment restriction for context objects */
- u32 ggtt_alignment;
/** ggtt_offset_bias: placement restriction for context objects */
u32 ggtt_offset_bias;
@@ -160,9 +158,6 @@ struct i915_gem_context {
/** desc_template: invariant fields for the HW context descriptor */
u32 desc_template;
- /** status_notifier: list of callbacks for context-switch changes */
- struct atomic_notifier_head status_notifier;
-
/** guilty_count: How many times this context has caused a GPU hang. */
unsigned int guilty_count;
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 29bb8011dbc4..11898cd97596 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -307,3 +307,8 @@ fail_detach:
return ERR_PTR(ret);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_dmabuf.c"
+#include "selftests/i915_gem_dmabuf.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c181b1bb3d2c..2da3a94fc9f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -258,6 +258,9 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
int ret = 0;
lockdep_assert_held(&vm->i915->drm.struct_mutex);
+ GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
+
trace_i915_gem_evict_node(vm, target, flags);
/* Retire before we search the active list. Although we have
@@ -271,11 +274,13 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
check_color = vm->mm.color_adjust;
if (check_color) {
/* Expand search to cover neighbouring guard pages (or lack!) */
- if (start > vm->start)
+ if (start)
start -= I915_GTT_PAGE_SIZE;
- if (end < vm->start + vm->total)
- end += I915_GTT_PAGE_SIZE;
+
+		/* Always look at the page afterwards to avoid special-casing the end of the GTT */
+ end += I915_GTT_PAGE_SIZE;
}
+ GEM_BUG_ON(start >= end);
drm_mm_for_each_node_in_range(node, &vm->mm, start, end) {
/* If we find any non-objects (!vma), we cannot evict them */
@@ -284,6 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
break;
}
+ GEM_BUG_ON(!node->allocated);
vma = container_of(node, typeof(*vma), node);
/* If we are using coloring to insert guard pages between
@@ -293,12 +299,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
* those as well to make room for our guard pages.
*/
if (check_color) {
- if (vma->node.start + vma->node.size == node->start) {
- if (vma->node.color == node->color)
+ if (node->start + node->size == target->start) {
+ if (node->color == target->color)
continue;
}
- if (vma->node.start == node->start + node->size) {
- if (vma->node.color == node->color)
+ if (node->start == target->start + target->size) {
+ if (node->color == target->color)
continue;
}
}
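
Note the fix folded into the hunk above: the neighbour checks now compare
the scanned node against the eviction target, where the old code compared
vma->node (i.e. the node itself) against node, so the colour test could
never trigger. As a standalone predicate, the intended test is (a sketch,
not driver code):

    static bool evict_for_guard_page(const struct drm_mm_node *node,
                                     const struct drm_mm_node *target)
    {
            /* a node abutting the target with a different colour must be
             * evicted so a guard page can sit between them */
            if (node->start + node->size == target->start)
                    return node->color != target->color;
            if (node->start == target->start + target->size)
                    return node->color != target->color;
            return false;
    }
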
@@ -387,3 +393,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
return 0;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_gem_evict.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d02cfaefe1c8..dd7181ed5eca 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -28,12 +28,14 @@
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
+#include <linux/sync_file.h>
#include <linux/uaccess.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_clflush.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
@@ -1110,13 +1112,18 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
+ if (vma->exec_entry->flags & EXEC_OBJECT_ASYNC)
+ continue;
+
+ if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) {
+ i915_gem_clflush_object(obj, 0);
+ obj->base.write_domain = 0;
+ }
+
ret = i915_gem_request_await_object
(req, obj, obj->base.pending_write_domain);
if (ret)
return ret;
-
- if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj, false);
}
/* Unconditionally flush any chipset caches (for streaming writes). */
@@ -1297,12 +1304,12 @@ static void eb_export_fence(struct drm_i915_gem_object *obj,
* handle an error right now. Worst case should be missed
* synchronisation leading to rendering corruption.
*/
- ww_mutex_lock(&resv->lock, NULL);
+ reservation_object_lock(resv, NULL);
if (flags & EXEC_OBJECT_WRITE)
reservation_object_add_excl_fence(resv, &req->fence);
else if (reservation_object_reserve_shared(resv) == 0)
reservation_object_add_shared_fence(resv, &req->fence);
- ww_mutex_unlock(&resv->lock);
+ reservation_object_unlock(resv);
}
static void
@@ -1313,8 +1320,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
list_for_each_entry(vma, vmas, exec_list) {
struct drm_i915_gem_object *obj = vma->obj;
- u32 old_read = obj->base.read_domains;
- u32 old_write = obj->base.write_domain;
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain)
@@ -1325,32 +1330,31 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
eb_export_fence(obj, req, vma->exec_entry->flags);
- trace_i915_gem_object_change_domain(obj, old_read, old_write);
}
}
static int
i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
- int ret, i;
+ u32 *cs;
+ int i;
if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
DRM_DEBUG("sol reset is gen7/rcs only\n");
return -EINVAL;
}
- ret = intel_ring_begin(req, 4 * 3);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4 * 3);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
for (i = 0; i < 4; i++) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
- intel_ring_emit(ring, 0);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+ *cs++ = 0;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1403,15 +1407,20 @@ out:
return vma;
}
+static void
+add_to_client(struct drm_i915_gem_request *req,
+ struct drm_file *file)
+{
+ req->file_priv = file->driver_priv;
+ list_add_tail(&req->client_link, &req->file_priv->mm.request_list);
+}
+
static int
execbuf_submit(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas)
{
- struct drm_i915_private *dev_priv = params->request->i915;
u64 exec_start, exec_len;
- int instp_mode;
- u32 instp_mask;
int ret;
ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1422,56 +1431,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;
- instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
- instp_mask = I915_EXEC_CONSTANTS_MASK;
- switch (instp_mode) {
- case I915_EXEC_CONSTANTS_REL_GENERAL:
- case I915_EXEC_CONSTANTS_ABSOLUTE:
- case I915_EXEC_CONSTANTS_REL_SURFACE:
- if (instp_mode != 0 && params->engine->id != RCS) {
- DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
- return -EINVAL;
- }
-
- if (instp_mode != dev_priv->relative_constants_mode) {
- if (INTEL_INFO(dev_priv)->gen < 4) {
- DRM_DEBUG("no rel constants on pre-gen4\n");
- return -EINVAL;
- }
-
- if (INTEL_INFO(dev_priv)->gen > 5 &&
- instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
- DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
- return -EINVAL;
- }
-
- /* The HW changed the meaning on this bit on gen6 */
- if (INTEL_INFO(dev_priv)->gen >= 6)
- instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
- }
- break;
- default:
- DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
+ if (args->flags & I915_EXEC_CONSTANTS_MASK) {
+ DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n");
return -EINVAL;
}
- if (params->engine->id == RCS &&
- instp_mode != dev_priv->relative_constants_mode) {
- struct intel_ring *ring = params->request->ring;
-
- ret = intel_ring_begin(params->request, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, INSTPM);
- intel_ring_emit(ring, instp_mask << 16 | instp_mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = instp_mode;
- }
-
if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
ret = i915_reset_gen7_sol_offsets(params->request);
if (ret)
@@ -1491,8 +1455,6 @@ execbuf_submit(struct i915_execbuffer_params *params,
if (ret)
return ret;
- trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
i915_gem_execbuffer_move_to_active(vmas, params->request);
return 0;
@@ -1591,6 +1553,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct i915_execbuffer_params *params = &params_master;
const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 dispatch_flags;
+ struct dma_fence *in_fence = NULL;
+ struct sync_file *out_fence = NULL;
+ int out_fence_fd = -1;
int ret;
bool need_relocs;
@@ -1634,6 +1599,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
dispatch_flags |= I915_DISPATCH_RS;
}
+ if (args->flags & I915_EXEC_FENCE_IN) {
+ in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
+ if (!in_fence)
+ return -EINVAL;
+ }
+
+ if (args->flags & I915_EXEC_FENCE_OUT) {
+ out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+ if (out_fence_fd < 0) {
+ ret = out_fence_fd;
+ goto err_in_fence;
+ }
+ }
+
/* Take a local wakeref for preparing to dispatch the execbuf as
* we expect to access the hardware fairly frequently in the
* process. Upon first dispatch, we acquire another prolonged
@@ -1778,6 +1757,21 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err_batch_unpin;
}
+ if (in_fence) {
+ ret = i915_gem_request_await_dma_fence(params->request,
+ in_fence);
+ if (ret < 0)
+ goto err_request;
+ }
+
+ if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&params->request->fence);
+ if (!out_fence) {
+ ret = -ENOMEM;
+ goto err_request;
+ }
+ }
+
/* Whilst this request exists, batch_obj will be on the
* active_list, and so will hold the active reference. Only when this
 * request is retired will the batch_obj be moved onto the
@@ -1786,10 +1780,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/
params->request->batch = params->batch;
- ret = i915_gem_request_add_to_client(params->request, file);
- if (ret)
- goto err_request;
-
/*
* Save assorted stuff away to pass through to *_submission().
* NB: This data should be 'persistent' and not local as it will
@@ -1802,9 +1792,23 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
params->dispatch_flags = dispatch_flags;
params->ctx = ctx;
+ trace_i915_gem_request_queue(params->request, dispatch_flags);
+
ret = execbuf_submit(params, args, &eb->vmas);
err_request:
__i915_add_request(params->request, ret == 0);
+ add_to_client(params->request, file);
+
+ if (out_fence) {
+ if (ret == 0) {
+ fd_install(out_fence_fd, out_fence->file);
+			args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */
+ args->rsvd2 |= (u64)out_fence_fd << 32;
+ out_fence_fd = -1;
+ } else {
+ fput(out_fence->file);
+ }
+ }
err_batch_unpin:
/*
@@ -1826,6 +1830,10 @@ pre_mutex_err:
/* intel_gpu_busy should also get a ref, so it will free when the device
* is really idle. */
intel_runtime_pm_put(dev_priv);
+ if (out_fence_fd != -1)
+ put_unused_fd(out_fence_fd);
+err_in_fence:
+ dma_fence_put(in_fence);
return ret;
}
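
Seen from userspace, the two new flags pack both fences into rsvd2: the
in-fence fd is passed in the low 32 bits and the out-fence fd is returned in
the high 32 bits. A hypothetical caller might look like the following (the
_WR ioctl define and the surrounding setup are assumptions, not shown in
this hunk):

    struct drm_i915_gem_execbuffer2 eb = {};
    /* ... fill in buffers_ptr, buffer_count, batch_len, etc ... */

    eb.flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
    eb.rsvd2 = (__u64)in_fence_fd;              /* low 32 bits */

    if (drmIoctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, &eb) == 0) {
            int out_fence_fd = (int)(eb.rsvd2 >> 32);
            /* out_fence_fd is a sync_file: poll it, pass it to another
             * driver, then close() it when done */
    }
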
@@ -1933,11 +1941,6 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (args->rsvd2 != 0) {
- DRM_DEBUG("dirty rvsd2 field\n");
- return -EINVAL;
- }
-
exec2_list = drm_malloc_gfp(args->buffer_count,
sizeof(*exec2_list),
GFP_TEMPORARY);
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index fadbe8f4c745..5fe2cd8c8f28 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -248,7 +248,14 @@ static int fence_update(struct drm_i915_fence_reg *fence,
list_move(&fence->link, &fence->i915->mm.fence_list);
}
- fence_write(fence, vma);
+ /* We only need to update the register itself if the device is awake.
+ * If the device is currently powered down, we will defer the write
+ * to the runtime resume, see i915_gem_restore_fences().
+ */
+ if (intel_runtime_pm_get_if_in_use(fence->i915)) {
+ fence_write(fence, vma);
+ intel_runtime_pm_put(fence->i915);
+ }
if (vma) {
if (fence->vma != vma) {
@@ -278,8 +285,6 @@ i915_vma_put_fence(struct i915_vma *vma)
{
struct drm_i915_fence_reg *fence = vma->fence;
- assert_rpm_wakelock_held(vma->vm->i915);
-
if (!fence)
return 0;
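
intel_runtime_pm_get_if_in_use() only takes a wakeref if the device is
already awake, which is what makes the deferral in fence_update() safe and
why the wakelock assert in i915_vma_put_fence() can be dropped. The general
shape of the idiom (write_hw_state() is a stand-in name):

    if (intel_runtime_pm_get_if_in_use(dev_priv)) {
            write_hw_state(dev_priv);       /* device awake: poke it now */
            intel_runtime_pm_put(dev_priv);
    }
    /* else: device is suspended and the runtime-resume path (here,
     * i915_gem_restore_fences()) will replay the state instead */
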
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2801a4d56324..cee9c4fec52a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -23,6 +23,9 @@
*
*/
+#include <linux/slab.h> /* fault-inject.h is not standalone! */
+
+#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
@@ -187,11 +190,17 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 unused)
{
- u32 pte_flags = 0;
+ u32 pte_flags;
+ int ret;
+
+ ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
+ if (ret)
+ return ret;
vma->pages = vma->obj->mm.pages;
/* Currently applicable only to VLV */
+ pte_flags = 0;
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
@@ -203,9 +212,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
static void ppgtt_unbind_vma(struct i915_vma *vma)
{
- vma->vm->clear_range(vma->vm,
- vma->node.start,
- vma->size);
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
@@ -340,268 +347,229 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
return pte;
}
-static int __setup_page_dma(struct drm_i915_private *dev_priv,
- struct i915_page_dma *p, gfp_t flags)
+static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
- struct device *kdev = &dev_priv->drm.pdev->dev;
+ struct page *page;
- p->page = alloc_page(flags);
- if (!p->page)
- return -ENOMEM;
+ if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
+ i915_gem_shrink_all(vm->i915);
- p->daddr = dma_map_page(kdev,
- p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (vm->free_pages.nr)
+ return vm->free_pages.pages[--vm->free_pages.nr];
- if (dma_mapping_error(kdev, p->daddr)) {
- __free_page(p->page);
- return -EINVAL;
- }
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
- return 0;
+ if (vm->pt_kmap_wc)
+ set_pages_array_wc(&page, 1);
+
+ return page;
}
-static int setup_page_dma(struct drm_i915_private *dev_priv,
- struct i915_page_dma *p)
+static void vm_free_pages_release(struct i915_address_space *vm)
{
- return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
+ GEM_BUG_ON(!pagevec_count(&vm->free_pages));
+
+ if (vm->pt_kmap_wc)
+ set_pages_array_wb(vm->free_pages.pages,
+ pagevec_count(&vm->free_pages));
+
+ __pagevec_release(&vm->free_pages);
}
-static void cleanup_page_dma(struct drm_i915_private *dev_priv,
- struct i915_page_dma *p)
+static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ if (!pagevec_add(&vm->free_pages, page))
+ vm_free_pages_release(vm);
+}
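
vm_alloc_page()/vm_free_page() above put a small pagevec-backed cache in
front of the page allocator. The win is twofold: less allocator traffic for
the frequently recycled page-table pages, and, when pt_kmap_wc is set,
batching the costly write-combining attribute changes (set_pages_array_wc()
and friends can involve TLB flushes) over a pagevec of pages at a time. A
rough sketch of the reuse path (vm assumed already set up):

    struct page *page = vm_alloc_page(vm, GFP_KERNEL | __GFP_NOWARN);
    if (page)
            vm_free_page(vm, page); /* cached in vm->free_pages; only
                                     * released in bulk once the pagevec
                                     * fills up */
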
- if (WARN_ON(!p->page))
- return;
+static int __setup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ gfp_t gfp)
+{
+ p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
+ if (unlikely(!p->page))
+ return -ENOMEM;
+
+ p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
+ vm_free_page(vm, p->page);
+ return -ENOMEM;
+ }
- dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- __free_page(p->page);
- memset(p, 0, sizeof(*p));
+ return 0;
}
-static void *kmap_page_dma(struct i915_page_dma *p)
+static int setup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p)
{
- return kmap_atomic(p->page);
+ return __setup_page_dma(vm, p, I915_GFP_DMA);
}
-/* We use the flushing unmap only with ppgtt structures:
- * page directories, page tables and scratch pages.
- */
-static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
+static void cleanup_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p)
{
- /* There are only few exceptions for gen >=6. chv and bxt.
- * And we are not sure about the latter so play safe for now.
- */
- if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
-
- kunmap_atomic(vaddr);
+ dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ vm_free_page(vm, p->page);
}
-#define kmap_px(px) kmap_page_dma(px_base(px))
-#define kunmap_px(ppgtt, vaddr) \
- kunmap_page_dma((ppgtt)->base.i915, (vaddr))
+#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
-#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
-#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
-#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
-#define fill32_px(dev_priv, px, v) \
- fill_page_dma_32((dev_priv), px_base(px), (v))
+#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
+#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
+#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
+#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
-static void fill_page_dma(struct drm_i915_private *dev_priv,
- struct i915_page_dma *p, const uint64_t val)
+static void fill_page_dma(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ const u64 val)
{
+ u64 * const vaddr = kmap_atomic(p->page);
int i;
- uint64_t * const vaddr = kmap_page_dma(p);
for (i = 0; i < 512; i++)
vaddr[i] = val;
- kunmap_page_dma(dev_priv, vaddr);
+ kunmap_atomic(vaddr);
}
-static void fill_page_dma_32(struct drm_i915_private *dev_priv,
- struct i915_page_dma *p, const uint32_t val32)
+static void fill_page_dma_32(struct i915_address_space *vm,
+ struct i915_page_dma *p,
+ const u32 v)
{
- uint64_t v = val32;
-
- v = v << 32 | val32;
-
- fill_page_dma(dev_priv, p, v);
+ fill_page_dma(vm, p, (u64)v << 32 | v);
}
static int
-setup_scratch_page(struct drm_i915_private *dev_priv,
- struct i915_page_dma *scratch,
- gfp_t gfp)
+setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
- return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
+ return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}
-static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
- struct i915_page_dma *scratch)
+static void cleanup_scratch_page(struct i915_address_space *vm)
{
- cleanup_page_dma(dev_priv, scratch);
+ cleanup_page_dma(vm, &vm->scratch_page);
}
-static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
+static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
struct i915_page_table *pt;
- const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
- int ret = -ENOMEM;
- pt = kzalloc(sizeof(*pt), GFP_KERNEL);
- if (!pt)
+ pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
+ if (unlikely(!pt))
return ERR_PTR(-ENOMEM);
- pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
- GFP_KERNEL);
-
- if (!pt->used_ptes)
- goto fail_bitmap;
-
- ret = setup_px(dev_priv, pt);
- if (ret)
- goto fail_page_m;
+ if (unlikely(setup_px(vm, pt))) {
+ kfree(pt);
+ return ERR_PTR(-ENOMEM);
+ }
+ pt->used_ptes = 0;
return pt;
-
-fail_page_m:
- kfree(pt->used_ptes);
-fail_bitmap:
- kfree(pt);
-
- return ERR_PTR(ret);
}
-static void free_pt(struct drm_i915_private *dev_priv,
- struct i915_page_table *pt)
+static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
- cleanup_px(dev_priv, pt);
- kfree(pt->used_ptes);
+ cleanup_px(vm, pt);
kfree(pt);
}
static void gen8_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- gen8_pte_t scratch_pte;
-
- scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC);
-
- fill_px(vm->i915, pt, scratch_pte);
+ fill_px(vm, pt,
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}
static void gen6_initialize_pt(struct i915_address_space *vm,
struct i915_page_table *pt)
{
- gen6_pte_t scratch_pte;
-
- WARN_ON(vm->scratch_page.daddr == 0);
-
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
-
- fill32_px(vm->i915, pt, scratch_pte);
+ fill32_px(vm, pt,
+ vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}
-static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
+static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
struct i915_page_directory *pd;
- int ret = -ENOMEM;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
+ if (unlikely(!pd))
return ERR_PTR(-ENOMEM);
- pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
- sizeof(*pd->used_pdes), GFP_KERNEL);
- if (!pd->used_pdes)
- goto fail_bitmap;
-
- ret = setup_px(dev_priv, pd);
- if (ret)
- goto fail_page_m;
+ if (unlikely(setup_px(vm, pd))) {
+ kfree(pd);
+ return ERR_PTR(-ENOMEM);
+ }
+ pd->used_pdes = 0;
return pd;
-
-fail_page_m:
- kfree(pd->used_pdes);
-fail_bitmap:
- kfree(pd);
-
- return ERR_PTR(ret);
}
-static void free_pd(struct drm_i915_private *dev_priv,
+static void free_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- if (px_page(pd)) {
- cleanup_px(dev_priv, pd);
- kfree(pd->used_pdes);
- kfree(pd);
- }
+ cleanup_px(vm, pd);
+ kfree(pd);
}
static void gen8_initialize_pd(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
- gen8_pde_t scratch_pde;
-
- scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
+ unsigned int i;
- fill_px(vm->i915, pd, scratch_pde);
+ fill_px(vm, pd,
+ gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
+ for (i = 0; i < I915_PDES; i++)
+ pd->page_table[i] = vm->scratch_pt;
}
-static int __pdp_init(struct drm_i915_private *dev_priv,
+static int __pdp_init(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
- size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
+ const unsigned int pdpes = i915_pdpes_per_pdp(vm);
+ unsigned int i;
- pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!pdp->used_pdpes)
+ pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (unlikely(!pdp->page_directory))
return -ENOMEM;
- pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
- GFP_KERNEL);
- if (!pdp->page_directory) {
- kfree(pdp->used_pdpes);
- /* the PDP might be the statically allocated top level. Keep it
- * as clean as possible */
- pdp->used_pdpes = NULL;
- return -ENOMEM;
- }
+ for (i = 0; i < pdpes; i++)
+ pdp->page_directory[i] = vm->scratch_pd;
return 0;
}
static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
- kfree(pdp->used_pdpes);
kfree(pdp->page_directory);
pdp->page_directory = NULL;
}
-static struct
-i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
+static inline bool use_4lvl(const struct i915_address_space *vm)
+{
+ return i915_vm_is_48bit(vm);
+}
+
+static struct i915_page_directory_pointer *
+alloc_pdp(struct i915_address_space *vm)
{
struct i915_page_directory_pointer *pdp;
int ret = -ENOMEM;
- WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
+ WARN_ON(!use_4lvl(vm));
pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
if (!pdp)
return ERR_PTR(-ENOMEM);
- ret = __pdp_init(dev_priv, pdp);
+ ret = __pdp_init(vm, pdp);
if (ret)
goto fail_bitmap;
- ret = setup_px(dev_priv, pdp);
+ ret = setup_px(vm, pdp);
if (ret)
goto fail_page_m;
@@ -615,14 +583,16 @@ fail_bitmap:
return ERR_PTR(ret);
}
-static void free_pdp(struct drm_i915_private *dev_priv,
+static void free_pdp(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
__pdp_fini(pdp);
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- cleanup_px(dev_priv, pdp);
- kfree(pdp);
- }
+
+ if (!use_4lvl(vm))
+ return;
+
+ cleanup_px(vm, pdp);
+ kfree(pdp);
}
static void gen8_initialize_pdp(struct i915_address_space *vm,
@@ -632,47 +602,18 @@ static void gen8_initialize_pdp(struct i915_address_space *vm,
scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
- fill_px(vm->i915, pdp, scratch_pdpe);
+ fill_px(vm, pdp, scratch_pdpe);
}
static void gen8_initialize_pml4(struct i915_address_space *vm,
struct i915_pml4 *pml4)
{
- gen8_ppgtt_pml4e_t scratch_pml4e;
-
- scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
- I915_CACHE_LLC);
-
- fill_px(vm->i915, pml4, scratch_pml4e);
-}
-
-static void
-gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
- struct i915_page_directory_pointer *pdp,
- struct i915_page_directory *pd,
- int index)
-{
- gen8_ppgtt_pdpe_t *page_directorypo;
+ unsigned int i;
- if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
- return;
-
- page_directorypo = kmap_px(pdp);
- page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
- kunmap_px(ppgtt, page_directorypo);
-}
-
-static void
-gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
- struct i915_pml4 *pml4,
- struct i915_page_directory_pointer *pdp,
- int index)
-{
- gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
-
- WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
- pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
- kunmap_px(ppgtt, pagemap);
+ fill_px(vm, pml4,
+ gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
+ for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
+ pml4->pdps[i] = vm->scratch_pdp;
}
/* Broadwell Page Directory Pointer Descriptors */
@@ -680,33 +621,32 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
unsigned entry,
dma_addr_t addr)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- int ret;
+ u32 *cs;
BUG_ON(entry >= 4);
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
- intel_ring_emit(ring, upper_32_bits(addr));
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
- intel_ring_emit(ring, lower_32_bits(addr));
- intel_ring_advance(ring);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+ *cs++ = upper_32_bits(addr);
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+ *cs++ = lower_32_bits(addr);
+ intel_ring_advance(req, cs);
return 0;
}
-static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_request *req)
{
int i, ret;
- for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
ret = gen8_write_pdp(req, i, pd_daddr);
@@ -717,8 +657,8 @@ static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
return 0;
}
-static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_gem_request *req)
+static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
+ struct drm_i915_gem_request *req)
{
return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}
@@ -738,70 +678,80 @@ static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
*/
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
struct i915_page_table *pt,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned int num_entries = gen8_pte_count(start, length);
unsigned int pte = gen8_pte_index(start);
unsigned int pte_end = pte + num_entries;
- gen8_pte_t *pt_vaddr;
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC);
+ const gen8_pte_t scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_t *vaddr;
- if (WARN_ON(!px_page(pt)))
- return false;
+ GEM_BUG_ON(num_entries > pt->used_ptes);
- GEM_BUG_ON(pte_end > GEN8_PTES);
+ pt->used_ptes -= num_entries;
+ if (!pt->used_ptes)
+ return true;
- bitmap_clear(pt->used_ptes, pte, num_entries);
- if (USES_FULL_PPGTT(vm->i915)) {
- if (bitmap_empty(pt->used_ptes, GEN8_PTES))
- return true;
- }
+ vaddr = kmap_atomic_px(pt);
+ while (pte < pte_end)
+ vaddr[pte++] = scratch_pte;
+ kunmap_atomic(vaddr);
- pt_vaddr = kmap_px(pt);
+ return false;
+}
- while (pte < pte_end)
- pt_vaddr[pte++] = scratch_pte;
+static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ struct i915_page_table *pt,
+ unsigned int pde)
+{
+ gen8_pde_t *vaddr;
- kunmap_px(ppgtt, pt_vaddr);
+ pd->page_table[pde] = pt;
- return false;
+ vaddr = kmap_atomic_px(pd);
+ vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
+ kunmap_atomic(vaddr);
}
-/* Removes entries from a single page dir, releasing it if it's empty.
- * Caller can use the return value to update higher-level entries
- */
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
struct i915_page_directory *pd,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
- uint64_t pde;
- gen8_pde_t *pde_vaddr;
- gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
- I915_CACHE_LLC);
+ u32 pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
- if (WARN_ON(!pd->page_table[pde]))
- break;
-
- if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
- __clear_bit(pde, pd->used_pdes);
- pde_vaddr = kmap_px(pd);
- pde_vaddr[pde] = scratch_pde;
- kunmap_px(ppgtt, pde_vaddr);
- free_pt(vm->i915, pt);
- }
+ GEM_BUG_ON(pt == vm->scratch_pt);
+
+ if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
+ continue;
+
+ gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
+ GEM_BUG_ON(!pd->used_pdes);
+ pd->used_pdes--;
+
+ free_pt(vm, pt);
}
- if (bitmap_empty(pd->used_pdes, I915_PDES))
- return true;
+ return !pd->used_pdes;
+}
- return false;
+static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ struct i915_page_directory *pd,
+ unsigned int pdpe)
+{
+ gen8_ppgtt_pdpe_t *vaddr;
+
+ pdp->page_directory[pdpe] = pd;
+ if (!use_4lvl(vm))
+ return;
+
+ vaddr = kmap_atomic_px(pdp);
+ vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
+ kunmap_atomic(vaddr);
}
/* Removes entries from a single page dir pointer, releasing it if it's empty.
@@ -809,138 +759,189 @@ static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
*/
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd;
- uint64_t pdpe;
+ unsigned int pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (WARN_ON(!pdp->page_directory[pdpe]))
- break;
+ GEM_BUG_ON(pd == vm->scratch_pd);
- if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
- __clear_bit(pdpe, pdp->used_pdpes);
- gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
- free_pd(vm->i915, pd);
- }
+ if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
+ continue;
+
+ gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ GEM_BUG_ON(!pdp->used_pdpes);
+ pdp->used_pdpes--;
+
+ free_pd(vm, pd);
}
- mark_tlbs_dirty(ppgtt);
+ return !pdp->used_pdpes;
+}
- if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
- return true;
+static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
+}
- return false;
+static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
+ struct i915_page_directory_pointer *pdp,
+ unsigned int pml4e)
+{
+ gen8_ppgtt_pml4e_t *vaddr;
+
+ pml4->pdps[pml4e] = pdp;
+
+ vaddr = kmap_atomic_px(pml4);
+ vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
+ kunmap_atomic(vaddr);
}
/* Removes entries from a single pml4.
* This is the top-level structure in 4-level page tables used on gen8+.
* Empty entries are always scratch pml4e.
*/
-static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
- struct i915_pml4 *pml4,
- uint64_t start,
- uint64_t length)
+static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
+ u64 start, u64 length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
- uint64_t pml4e;
+ unsigned int pml4e;
- GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
+ GEM_BUG_ON(!use_4lvl(vm));
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (WARN_ON(!pml4->pdps[pml4e]))
- break;
+ GEM_BUG_ON(pdp == vm->scratch_pdp);
- if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
- __clear_bit(pml4e, pml4->used_pml4es);
- gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
- free_pdp(vm->i915, pdp);
- }
+ if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
+ continue;
+
+ gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+
+ free_pdp(vm, pdp);
}
}
-static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
-{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+struct sgt_dma {
+ struct scatterlist *sg;
+ dma_addr_t dma, max;
+};
- if (USES_FULL_48BIT_PPGTT(vm->i915))
- gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
- else
- gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
+struct gen8_insert_pte {
+ u16 pml4e;
+ u16 pdpe;
+ u16 pde;
+ u16 pte;
+};
+
+static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
+{
+ return (struct gen8_insert_pte) {
+ gen8_pml4e_index(start),
+ gen8_pdpe_index(start),
+ gen8_pde_index(start),
+ gen8_pte_index(start),
+ };
}
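
gen8_insert_pte() simply splits a 48-bit GTT offset into the four level
indices, 9 bits per level above the 4K page offset. A worked example with an
illustrative address:

    /* bits [47:39]=pml4e, [38:30]=pdpe, [29:21]=pde, [20:12]=pte */
    struct gen8_insert_pte idx = gen8_insert_pte(0x8040201000ull);
    /* -> idx.pml4e == 1, idx.pdpe == 1, idx.pde == 1, idx.pte == 1 */
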
-static void
-gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
+static __always_inline bool
+gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
struct i915_page_directory_pointer *pdp,
- struct sg_page_iter *sg_iter,
- uint64_t start,
+ struct sgt_dma *iter,
+ struct gen8_insert_pte *idx,
enum i915_cache_level cache_level)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen8_pte_t *pt_vaddr;
- unsigned pdpe = gen8_pdpe_index(start);
- unsigned pde = gen8_pde_index(start);
- unsigned pte = gen8_pte_index(start);
-
- pt_vaddr = NULL;
-
- while (__sg_page_iter_next(sg_iter)) {
- if (pt_vaddr == NULL) {
- struct i915_page_directory *pd = pdp->page_directory[pdpe];
- struct i915_page_table *pt = pd->page_table[pde];
- pt_vaddr = kmap_px(pt);
+ struct i915_page_directory *pd;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+ gen8_pte_t *vaddr;
+ bool ret;
+
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ pd = pdp->page_directory[idx->pdpe];
+ vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
+ do {
+ vaddr[idx->pte] = pte_encode | iter->dma;
+
+ iter->dma += PAGE_SIZE;
+ if (iter->dma >= iter->max) {
+ iter->sg = __sg_next(iter->sg);
+ if (!iter->sg) {
+ ret = false;
+ break;
+ }
+
+ iter->dma = sg_dma_address(iter->sg);
+ iter->max = iter->dma + iter->sg->length;
}
- pt_vaddr[pte] =
- gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
- cache_level);
- if (++pte == GEN8_PTES) {
- kunmap_px(ppgtt, pt_vaddr);
- pt_vaddr = NULL;
- if (++pde == I915_PDES) {
- if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
+ if (++idx->pte == GEN8_PTES) {
+ idx->pte = 0;
+
+ if (++idx->pde == I915_PDES) {
+ idx->pde = 0;
+
+ /* Limited by sg length for 3lvl */
+ if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
+ idx->pdpe = 0;
+ ret = true;
break;
- pde = 0;
+ }
+
+ GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
+ pd = pdp->page_directory[idx->pdpe];
}
- pte = 0;
+
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
}
- }
+ } while (1);
+ kunmap_atomic(vaddr);
- if (pt_vaddr)
- kunmap_px(ppgtt, pt_vaddr);
+ return ret;
}
-static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
- struct sg_table *pages,
- uint64_t start,
- enum i915_cache_level cache_level,
- u32 unused)
+static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
+ struct sg_table *pages,
+ u64 start,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- struct sg_page_iter sg_iter;
+ struct sgt_dma iter = {
+ .sg = pages->sgl,
+ .dma = sg_dma_address(iter.sg),
+ .max = iter.dma + iter.sg->length,
+ };
+ struct gen8_insert_pte idx = gen8_insert_pte(start);
- __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
+ gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
+ cache_level);
+}
- if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
- gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
- cache_level);
- } else {
- struct i915_page_directory_pointer *pdp;
- uint64_t pml4e;
- uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
+static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
+ struct sg_table *pages,
+ u64 start,
+ enum i915_cache_level cache_level,
+ u32 unused)
+{
+ struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct sgt_dma iter = {
+ .sg = pages->sgl,
+ .dma = sg_dma_address(iter.sg),
+ .max = iter.dma + iter.sg->length,
+ };
+ struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
+ struct gen8_insert_pte idx = gen8_insert_pte(start);
- gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
- gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
- start, cache_level);
- }
- }
+ while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
+ &idx, cache_level))
+ GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}
-static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
+static void gen8_free_page_tables(struct i915_address_space *vm,
struct i915_page_directory *pd)
{
int i;
@@ -948,38 +949,34 @@ static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
if (!px_page(pd))
return;
- for_each_set_bit(i, pd->used_pdes, I915_PDES) {
- if (WARN_ON(!pd->page_table[i]))
- continue;
-
- free_pt(dev_priv, pd->page_table[i]);
- pd->page_table[i] = NULL;
+ for (i = 0; i < I915_PDES; i++) {
+ if (pd->page_table[i] != vm->scratch_pt)
+ free_pt(vm, pd->page_table[i]);
}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
int ret;
- ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev_priv);
+ vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
ret = PTR_ERR(vm->scratch_pt);
goto free_scratch_page;
}
- vm->scratch_pd = alloc_pd(dev_priv);
+ vm->scratch_pd = alloc_pd(vm);
if (IS_ERR(vm->scratch_pd)) {
ret = PTR_ERR(vm->scratch_pd);
goto free_pt;
}
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- vm->scratch_pdp = alloc_pdp(dev_priv);
+ if (use_4lvl(vm)) {
+ vm->scratch_pdp = alloc_pdp(vm);
if (IS_ERR(vm->scratch_pdp)) {
ret = PTR_ERR(vm->scratch_pdp);
goto free_pd;
@@ -988,29 +985,30 @@ static int gen8_init_scratch(struct i915_address_space *vm)
gen8_initialize_pt(vm, vm->scratch_pt);
gen8_initialize_pd(vm, vm->scratch_pd);
- if (USES_FULL_48BIT_PPGTT(dev_priv))
+ if (use_4lvl(vm))
gen8_initialize_pdp(vm, vm->scratch_pdp);
return 0;
free_pd:
- free_pd(dev_priv, vm->scratch_pd);
+ free_pd(vm, vm->scratch_pd);
free_pt:
- free_pt(dev_priv, vm->scratch_pt);
+ free_pt(vm, vm->scratch_pt);
free_scratch_page:
- cleanup_scratch_page(dev_priv, &vm->scratch_page);
+ cleanup_scratch_page(vm);
return ret;
}
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
+ struct i915_address_space *vm = &ppgtt->base;
+ struct drm_i915_private *dev_priv = vm->i915;
enum vgt_g2v_type msg;
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- u64 daddr = px_dma(&ppgtt->pml4);
+ if (use_4lvl(vm)) {
+ const u64 daddr = px_dma(&ppgtt->pml4);
I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
@@ -1018,8 +1016,8 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
} else {
- for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
- u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
+ for (i = 0; i < GEN8_3LVL_PDPES; i++) {
+ const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
@@ -1036,44 +1034,42 @@ static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
static void gen8_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
-
- if (USES_FULL_48BIT_PPGTT(dev_priv))
- free_pdp(dev_priv, vm->scratch_pdp);
- free_pd(dev_priv, vm->scratch_pd);
- free_pt(dev_priv, vm->scratch_pt);
- cleanup_scratch_page(dev_priv, &vm->scratch_page);
+ if (use_4lvl(vm))
+ free_pdp(vm, vm->scratch_pdp);
+ free_pd(vm, vm->scratch_pd);
+ free_pt(vm, vm->scratch_pt);
+ cleanup_scratch_page(vm);
}
-static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
+static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
struct i915_page_directory_pointer *pdp)
{
+ const unsigned int pdpes = i915_pdpes_per_pdp(vm);
int i;
- for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
- if (WARN_ON(!pdp->page_directory[i]))
+ for (i = 0; i < pdpes; i++) {
+ if (pdp->page_directory[i] == vm->scratch_pd)
continue;
- gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
- free_pd(dev_priv, pdp->page_directory[i]);
+ gen8_free_page_tables(vm, pdp->page_directory[i]);
+ free_pd(vm, pdp->page_directory[i]);
}
- free_pdp(dev_priv, pdp);
+ free_pdp(vm, pdp);
}
static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
int i;
- for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
- if (WARN_ON(!ppgtt->pml4.pdps[i]))
+ for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
+ if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
continue;
- gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
}
- cleanup_px(dev_priv, &ppgtt->pml4);
+ cleanup_px(&ppgtt->base, &ppgtt->pml4);
}
static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -1084,414 +1080,162 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, false);
- if (!USES_FULL_48BIT_PPGTT(dev_priv))
- gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
- else
+ if (use_4lvl(vm))
gen8_ppgtt_cleanup_4lvl(ppgtt);
+ else
+ gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
gen8_free_scratch(vm);
}
-/**
- * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
- * @vm: Master vm structure.
- * @pd: Page directory for this address range.
- * @start: Starting virtual address to begin allocations.
- * @length: Size of the allocations.
- * @new_pts: Bitmap set by function with new allocations. Likely used by the
- * caller to free on error.
- *
- * Allocate the required number of page tables. Extremely similar to
- * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
- * the page directory boundary (instead of the page directory pointer). That
- * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
- * possible, and likely that the caller will need to use multiple calls of this
- * function to achieve the appropriate allocation.
- *
- * Return: 0 if success; negative error code otherwise.
- */
-static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
- struct i915_page_directory *pd,
- uint64_t start,
- uint64_t length,
- unsigned long *new_pts)
+static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
+ struct i915_page_directory *pd,
+ u64 start, u64 length)
{
- struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
- uint32_t pde;
+ u64 from = start;
+ unsigned int pde;
gen8_for_each_pde(pt, pd, start, length, pde) {
- /* Don't reallocate page tables */
- if (test_bit(pde, pd->used_pdes)) {
- /* Scratch is never allocated this way */
- WARN_ON(pt == vm->scratch_pt);
- continue;
- }
-
- pt = alloc_pt(dev_priv);
- if (IS_ERR(pt))
- goto unwind_out;
-
- gen8_initialize_pt(vm, pt);
- pd->page_table[pde] = pt;
- __set_bit(pde, new_pts);
- trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
- }
-
- return 0;
-
-unwind_out:
- for_each_set_bit(pde, new_pts, I915_PDES)
- free_pt(dev_priv, pd->page_table[pde]);
-
- return -ENOMEM;
-}
-
-/**
- * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
- * @vm: Master vm structure.
- * @pdp: Page directory pointer for this address range.
- * @start: Starting virtual address to begin allocations.
- * @length: Size of the allocations.
- * @new_pds: Bitmap set by function with new allocations. Likely used by the
- * caller to free on error.
- *
- * Allocate the required number of page directories starting at the pde index of
- * @start, and ending at the pde index @start + @length. This function will skip
- * over already allocated page directories within the range, and only allocate
- * new ones, setting the appropriate pointer within the pdp as well as the
- * correct position in the bitmap @new_pds.
- *
- * The function will only allocate the pages within the range for a give page
- * directory pointer. In other words, if @start + @length straddles a virtually
- * addressed PDP boundary (512GB for 4k pages), there will be more allocations
- * required by the caller, This is not currently possible, and the BUG in the
- * code will prevent it.
- *
- * Return: 0 if success; negative error code otherwise.
- */
-static int
-gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length,
- unsigned long *new_pds)
-{
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_page_directory *pd;
- uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
-
- WARN_ON(!bitmap_empty(new_pds, pdpes));
-
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- if (test_bit(pdpe, pdp->used_pdpes))
- continue;
-
- pd = alloc_pd(dev_priv);
- if (IS_ERR(pd))
- goto unwind_out;
-
- gen8_initialize_pd(vm, pd);
- pdp->page_directory[pdpe] = pd;
- __set_bit(pdpe, new_pds);
- trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
- }
+ if (pt == vm->scratch_pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt))
+ goto unwind;
- return 0;
-
-unwind_out:
- for_each_set_bit(pdpe, new_pds, pdpes)
- free_pd(dev_priv, pdp->page_directory[pdpe]);
-
- return -ENOMEM;
-}
-
-/**
- * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
- * @vm: Master vm structure.
- * @pml4: Page map level 4 for this address range.
- * @start: Starting virtual address to begin allocations.
- * @length: Size of the allocations.
- * @new_pdps: Bitmap set by function with new allocations. Likely used by the
- * caller to free on error.
- *
- * Allocate the required number of page directory pointers. Extremely similar to
- * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
- * The main difference is here we are limited by the pml4 boundary (instead of
- * the page directory pointer).
- *
- * Return: 0 if success; negative error code otherwise.
- */
-static int
-gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
- struct i915_pml4 *pml4,
- uint64_t start,
- uint64_t length,
- unsigned long *new_pdps)
-{
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_page_directory_pointer *pdp;
- uint32_t pml4e;
-
- WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
-
- gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (!test_bit(pml4e, pml4->used_pml4es)) {
- pdp = alloc_pdp(dev_priv);
- if (IS_ERR(pdp))
- goto unwind_out;
+ gen8_initialize_pt(vm, pt);
- gen8_initialize_pdp(vm, pdp);
- pml4->pdps[pml4e] = pdp;
- __set_bit(pml4e, new_pdps);
- trace_i915_page_directory_pointer_entry_alloc(vm,
- pml4e,
- start,
- GEN8_PML4E_SHIFT);
+ gen8_ppgtt_set_pde(vm, pd, pt, pde);
+ pd->used_pdes++;
+ GEM_BUG_ON(pd->used_pdes > I915_PDES);
}
- }
-
- return 0;
-
-unwind_out:
- for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- free_pdp(dev_priv, pml4->pdps[pml4e]);
-
- return -ENOMEM;
-}
-
-static void
-free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
-{
- kfree(new_pts);
- kfree(new_pds);
-}
-
-/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
- * of these are based on the number of PDPEs in the system.
- */
-static
-int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
- unsigned long **new_pts,
- uint32_t pdpes)
-{
- unsigned long *pds;
- unsigned long *pts;
-
- pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
- if (!pds)
- return -ENOMEM;
-
- pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
- GFP_TEMPORARY);
- if (!pts)
- goto err_out;
-
- *new_pds = pds;
- *new_pts = pts;
+ pt->used_ptes += gen8_pte_count(start, length);
+ }
return 0;
-err_out:
- free_gen8_temp_bitmaps(pds, pts);
+unwind:
+ gen8_ppgtt_clear_pd(vm, pd, from, start - from);
return -ENOMEM;
}
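
The rewritten allocator replaces the temporary bitmaps with two simpler ideas: an entry that still points at the scratch table means "not yet allocated", and a used_ptes/used_pdes counter per level replaces the used_* bitmaps. A minimal, self-contained sketch of that pattern (all names below are illustrative, not the driver's API):

#include <errno.h>
#include <stdlib.h>

#define NENT 8

struct table { int used; };

static struct table scratch;			/* shared sentinel */
static struct table *dir[NENT];

/* Allocate tables for [first, first + count); on failure, tear down
 * only what this call populated, like the unwind: label above. */
static int alloc_range(int first, int count)
{
	int i;

	for (i = first; i < first + count; i++) {
		if (dir[i] == &scratch) {	/* sentinel: not allocated */
			struct table *t = malloc(sizeof(*t));

			if (!t)
				goto unwind;
			t->used = 0;
			dir[i] = t;
		}
		dir[i]->used++;			/* refcount replaces bitmap */
	}
	return 0;

unwind:
	while (i-- > first) {
		if (--dir[i]->used == 0) {
			free(dir[i]);
			dir[i] = &scratch;	/* restore the sentinel */
		}
	}
	return -ENOMEM;
}

int main(void)
{
	int i;

	for (i = 0; i < NENT; i++)
		dir[i] = &scratch;		/* everything starts unallocated */

	return alloc_range(2, 4) ? 1 : 0;
}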
-static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
- struct i915_page_directory_pointer *pdp,
- uint64_t start,
- uint64_t length)
+static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
+ struct i915_page_directory_pointer *pdp,
+ u64 start, u64 length)
{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- unsigned long *new_page_dirs, *new_page_tables;
- struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_directory *pd;
- const uint64_t orig_start = start;
- const uint64_t orig_length = length;
- uint32_t pdpe;
- uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
+ u64 from = start;
+ unsigned int pdpe;
int ret;
- ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
- if (ret)
- return ret;
-
- /* Do the allocations first so we can easily bail out */
- ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
- new_page_dirs);
- if (ret) {
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
- return ret;
- }
-
- /* For every page directory referenced, allocate page tables */
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
- new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
- if (ret)
- goto err_out;
- }
+ if (pd == vm->scratch_pd) {
+ pd = alloc_pd(vm);
+ if (IS_ERR(pd))
+ goto unwind;
- start = orig_start;
- length = orig_length;
+ gen8_initialize_pd(vm, pd);
+ gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ pdp->used_pdpes++;
+ GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
- /* Allocations have completed successfully, so set the bitmaps, and do
- * the mappings. */
- gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
- gen8_pde_t *const page_directory = kmap_px(pd);
- struct i915_page_table *pt;
- uint64_t pd_len = length;
- uint64_t pd_start = start;
- uint32_t pde;
-
- /* Every pd should be allocated, we just did that above. */
- WARN_ON(!pd);
-
- gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
- /* Same reasoning as pd */
- WARN_ON(!pt);
- WARN_ON(!pd_len);
- WARN_ON(!gen8_pte_count(pd_start, pd_len));
-
- /* Set our used ptes within the page table */
- bitmap_set(pt->used_ptes,
- gen8_pte_index(pd_start),
- gen8_pte_count(pd_start, pd_len));
-
- /* Our pde is now pointing to the pagetable, pt */
- __set_bit(pde, pd->used_pdes);
-
- /* Map the PDE to the page table */
- page_directory[pde] = gen8_pde_encode(px_dma(pt),
- I915_CACHE_LLC);
- trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
- gen8_pte_index(start),
- gen8_pte_count(start, length),
- GEN8_PTES);
-
- /* NB: We haven't yet mapped ptes to pages. At this
- * point we're still relying on insert_entries() */
+ mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
}
- kunmap_px(ppgtt, page_directory);
- __set_bit(pdpe, pdp->used_pdpes);
- gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
+ ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
+ if (unlikely(ret))
+ goto unwind_pd;
}
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
- mark_tlbs_dirty(ppgtt);
return 0;
-err_out:
- while (pdpe--) {
- unsigned long temp;
-
- for_each_set_bit(temp, new_page_tables + pdpe *
- BITS_TO_LONGS(I915_PDES), I915_PDES)
- free_pt(dev_priv,
- pdp->page_directory[pdpe]->page_table[temp]);
+unwind_pd:
+ if (!pd->used_pdes) {
+ gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ GEM_BUG_ON(!pdp->used_pdpes);
+ pdp->used_pdpes--;
+ free_pd(vm, pd);
}
+unwind:
+ gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
+ return -ENOMEM;
+}
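
Two unwind labels keep the error path exact: unwind_pd only releases the page directory when this call just installed it and it is still empty (pd->used_pdes == 0), putting the scratch pointer back and dropping the pdp count, while the fall-through to unwind clears everything populated since 'from', mirroring the single-label pattern in gen8_ppgtt_alloc_pd() above.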
- for_each_set_bit(pdpe, new_page_dirs, pdpes)
- free_pd(dev_priv, pdp->page_directory[pdpe]);
-
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
- mark_tlbs_dirty(ppgtt);
- return ret;
+static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+ return gen8_ppgtt_alloc_pdp(vm,
+ &i915_vm_to_ppgtt(vm)->pdp, start, length);
}
-static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
- struct i915_pml4 *pml4,
- uint64_t start,
- uint64_t length)
+static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
+ u64 start, u64 length)
{
- DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
- uint64_t pml4e;
- int ret = 0;
-
- /* Do the pml4 allocations first, so we don't need to track the newly
- * allocated tables below the pdp */
- bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
-
- /* The pagedirectory and pagetable allocations are done in the shared 3
- * and 4 level code. Just allocate the pdps.
- */
- ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
- new_pdps);
- if (ret)
- return ret;
-
- WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
- "The allocation has spanned more than 512GB. "
- "It is highly likely this is incorrect.");
+ u64 from = start;
+ u32 pml4e;
+ int ret;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- WARN_ON(!pdp);
+ if (pml4->pdps[pml4e] == vm->scratch_pdp) {
+ pdp = alloc_pdp(vm);
+ if (IS_ERR(pdp))
+ goto unwind;
- ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
- if (ret)
- goto err_out;
+ gen8_initialize_pdp(vm, pdp);
+ gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
+ }
- gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
+ ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
+ if (unlikely(ret))
+ goto unwind_pdp;
}
- bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
- GEN8_PML4ES_PER_PML4);
-
return 0;
-err_out:
- for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
- gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
-
- return ret;
-}
-
-static int gen8_alloc_va_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
-{
- struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
-
- if (USES_FULL_48BIT_PPGTT(vm->i915))
- return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
- else
- return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
+unwind_pdp:
+ if (!pdp->used_pdpes) {
+ gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
+ free_pdp(vm, pdp);
+ }
+unwind:
+ gen8_ppgtt_clear_4lvl(vm, from, start - from);
+ return -ENOMEM;
}
-static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
- uint64_t start, uint64_t length,
+static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
+ struct i915_page_directory_pointer *pdp,
+ u64 start, u64 length,
gen8_pte_t scratch_pte,
struct seq_file *m)
{
+ struct i915_address_space *vm = &ppgtt->base;
struct i915_page_directory *pd;
- uint32_t pdpe;
+ u32 pdpe;
gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
struct i915_page_table *pt;
- uint64_t pd_len = length;
- uint64_t pd_start = start;
- uint32_t pde;
+ u64 pd_len = length;
+ u64 pd_start = start;
+ u32 pde;
- if (!test_bit(pdpe, pdp->used_pdpes))
+ if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
continue;
seq_printf(m, "\tPDPE #%d\n", pdpe);
gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
- uint32_t pte;
+ u32 pte;
gen8_pte_t *pt_vaddr;
- if (!test_bit(pde, pd->used_pdes))
+ if (pd->page_table[pde] == ppgtt->base.scratch_pt)
continue;
- pt_vaddr = kmap_px(pt);
+ pt_vaddr = kmap_atomic_px(pt);
for (pte = 0; pte < GEN8_PTES; pte += 4) {
- uint64_t va =
- (pdpe << GEN8_PDPE_SHIFT) |
- (pde << GEN8_PDE_SHIFT) |
- (pte << GEN8_PTE_SHIFT);
+ u64 va = (pdpe << GEN8_PDPE_SHIFT |
+ pde << GEN8_PDE_SHIFT |
+ pte << GEN8_PTE_SHIFT);
int i;
bool found = false;
@@ -1510,9 +1254,6 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
}
seq_puts(m, "\n");
}
- /* don't use kunmap_px, it could trigger
- * an unnecessary flush.
- */
kunmap_atomic(pt_vaddr);
}
}
@@ -1521,53 +1262,57 @@ static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
struct i915_address_space *vm = &ppgtt->base;
- uint64_t start = ppgtt->base.start;
- uint64_t length = ppgtt->base.total;
- gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC);
+ const gen8_pte_t scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ u64 start = 0, length = ppgtt->base.total;
- if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
- gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
- } else {
- uint64_t pml4e;
+ if (use_4lvl(vm)) {
+ u64 pml4e;
struct i915_pml4 *pml4 = &ppgtt->pml4;
struct i915_page_directory_pointer *pdp;
gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
- if (!test_bit(pml4e, pml4->used_pml4es))
+ if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
continue;
seq_printf(m, " PML4E #%llu\n", pml4e);
- gen8_dump_pdp(pdp, start, length, scratch_pte, m);
+ gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
}
+ } else {
+ gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
}
}
-static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
+static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
- unsigned long *new_page_dirs, *new_page_tables;
- uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
- int ret;
+ struct i915_address_space *vm = &ppgtt->base;
+ struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
+ struct i915_page_directory *pd;
+ u64 start = 0, length = ppgtt->base.total;
+ u64 from = start;
+ unsigned int pdpe;
- /* We allocate temp bitmap for page tables for no gain
- * but as this is for init only, let's keep things simple
- */
- ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
- if (ret)
- return ret;
+ gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
+ pd = alloc_pd(vm);
+ if (IS_ERR(pd))
+ goto unwind;
- /* Allocate for all pdps regardless of how the ppgtt
- * was defined.
- */
- ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
- 0, 1ULL << 32,
- new_page_dirs);
- if (!ret)
- *ppgtt->pdp.used_pdpes = *new_page_dirs;
+ gen8_initialize_pd(vm, pd);
+ gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
+ pdp->used_pdpes++;
+ }
- free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
+ pdp->used_pdpes++; /* never remove */
+ return 0;
- return ret;
+unwind:
+ start -= from;
+ gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
+ gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
+ free_pd(vm, pd);
+ }
+ pdp->used_pdpes = 0;
+ return -ENOMEM;
}
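
The trailing pdp->used_pdpes++ marked "never remove" is a deliberate over-count: it pins the preallocated directories so no later teardown can bring the count to zero and free them while the vGPU still expects them, the same trick as holding an extra reference on a kref.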
/*
@@ -1579,52 +1324,64 @@ static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
*/
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->base.i915;
+ struct i915_address_space *vm = &ppgtt->base;
+ struct drm_i915_private *dev_priv = vm->i915;
int ret;
+ ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
+ 1ULL << 48 :
+ 1ULL << 32;
+
ret = gen8_init_scratch(&ppgtt->base);
- if (ret)
+ if (ret) {
+ ppgtt->base.total = 0;
return ret;
+ }
- ppgtt->base.start = 0;
- ppgtt->base.cleanup = gen8_ppgtt_cleanup;
- ppgtt->base.allocate_va_range = gen8_alloc_va_range;
- ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
- ppgtt->base.clear_range = gen8_ppgtt_clear_range;
- ppgtt->base.unbind_vma = ppgtt_unbind_vma;
- ppgtt->base.bind_vma = ppgtt_bind_vma;
- ppgtt->debug_dump = gen8_dump_ppgtt;
+	/* There are only a few exceptions for gen >= 6: chv and bxt.
+	 * We are not sure about the latter, so play it safe for now.
+ */
+ if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
+ ppgtt->base.pt_kmap_wc = true;
- if (USES_FULL_48BIT_PPGTT(dev_priv)) {
- ret = setup_px(dev_priv, &ppgtt->pml4);
+ if (use_4lvl(vm)) {
+ ret = setup_px(&ppgtt->base, &ppgtt->pml4);
if (ret)
goto free_scratch;
gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
- ppgtt->base.total = 1ULL << 48;
- ppgtt->switch_mm = gen8_48b_mm_switch;
+ ppgtt->switch_mm = gen8_mm_switch_4lvl;
+ ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
} else {
- ret = __pdp_init(dev_priv, &ppgtt->pdp);
+ ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
if (ret)
goto free_scratch;
- ppgtt->base.total = 1ULL << 32;
- ppgtt->switch_mm = gen8_legacy_mm_switch;
- trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
- 0, 0,
- GEN8_PML4E_SHIFT);
-
if (intel_vgpu_active(dev_priv)) {
- ret = gen8_preallocate_top_level_pdps(ppgtt);
- if (ret)
+ ret = gen8_preallocate_top_level_pdp(ppgtt);
+ if (ret) {
+ __pdp_fini(&ppgtt->pdp);
goto free_scratch;
+ }
}
+
+ ppgtt->switch_mm = gen8_mm_switch_3lvl;
+ ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
+ ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
+ ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
}
if (intel_vgpu_active(dev_priv))
gen8_ppgtt_notify_vgt(ppgtt, true);
+ ppgtt->base.cleanup = gen8_ppgtt_cleanup;
+ ppgtt->base.unbind_vma = ppgtt_unbind_vma;
+ ppgtt->base.bind_vma = ppgtt_bind_vma;
+ ppgtt->debug_dump = gen8_dump_ppgtt;
+
return 0;
free_scratch:
@@ -1637,9 +1394,8 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
struct i915_address_space *vm = &ppgtt->base;
struct i915_page_table *unused;
gen6_pte_t scratch_pte;
- uint32_t pd_entry;
- uint32_t pte, pde;
- uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
+ u32 pd_entry, pte, pde;
+ u32 start = 0, length = ppgtt->base.total;
scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
I915_CACHE_LLC, 0);
@@ -1658,7 +1414,7 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
expected);
seq_printf(m, "\tPDE: %x\n", pd_entry);
- pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
+ pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
for (pte = 0; pte < GEN6_PTES; pte+=4) {
unsigned long va =
@@ -1681,73 +1437,59 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
}
seq_puts(m, "\n");
}
- kunmap_px(ppgtt, pt_vaddr);
+ kunmap_atomic(pt_vaddr);
}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
-static void gen6_write_pde(struct i915_page_directory *pd,
- const int pde, struct i915_page_table *pt)
+static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
+ const unsigned int pde,
+ const struct i915_page_table *pt)
{
/* Caller needs to make sure the write completes if necessary */
- struct i915_hw_ppgtt *ppgtt =
- container_of(pd, struct i915_hw_ppgtt, pd);
- u32 pd_entry;
-
- pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
- pd_entry |= GEN6_PDE_VALID;
-
- writel(pd_entry, ppgtt->pd_addr + pde);
+ writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
+ ppgtt->pd_addr + pde);
}
/* Write all the page tables found in the ppgtt structure to incrementing page
* directories. */
-static void gen6_write_page_range(struct drm_i915_private *dev_priv,
- struct i915_page_directory *pd,
- uint32_t start, uint32_t length)
+static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
+ u32 start, u32 length)
{
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_page_table *pt;
- uint32_t pde;
+ unsigned int pde;
- gen6_for_each_pde(pt, pd, start, length, pde)
- gen6_write_pde(pd, pde, pt);
+ gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
+ gen6_write_pde(ppgtt, pde, pt);
- /* Make sure write is complete before other code can use this page
-	 * table. Also required for WC mapped PTEs */
- readl(ggtt->gsm);
+ mark_tlbs_dirty(ppgtt);
+ wmb();
}
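
gen6_write_pde() now issues relaxed MMIO stores, so writing a whole range of PDEs pays for ordering only once, with the single wmb() at the end of gen6_write_page_range(). A sketch of that batching pattern; mmio_write_relaxed() and mmio_barrier() are stand-ins for writel_relaxed() and wmb():

#include <stdint.h>

/* Stand-in for writel_relaxed(): a plain store, no barrier. */
static void mmio_write_relaxed(volatile uint32_t *addr, uint32_t val)
{
	*addr = val;
}

/* Stand-in for wmb(), using the GCC/Clang builtin fence. */
static void mmio_barrier(void)
{
	__atomic_thread_fence(__ATOMIC_SEQ_CST);
}

static void write_pde_range(volatile uint32_t *pd_addr,
			    const uint64_t *pt_dma, unsigned int count)
{
	unsigned int pde;

	/* Unordered stores are cheap; nothing may depend on them yet. */
	for (pde = 0; pde < count; pde++)
		mmio_write_relaxed(pd_addr + pde,
				   (uint32_t)pt_dma[pde] | 1 /* VALID */);

	mmio_barrier();				/* one barrier covers the batch */
}

int main(void)
{
	volatile uint32_t pd[4] = { 0 };
	const uint64_t pt_dma[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };

	write_pde_range(pd, pt_dma, 4);
	return 0;
}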
-static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
+static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
- BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
-
- return (ppgtt->pd.base.ggtt_offset / 64) << 16;
+ GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
+ return ppgtt->pd.base.ggtt_offset << 10;
}
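
The new encoding is the old one with the arithmetic folded: the GEM_BUG_ON guarantees the low six bits of ggtt_offset are zero, and for such values (offset / 64) << 16 equals offset << 10, since dividing by 64 is a right shift by 6 and 16 - 6 = 10. A quick check of the equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t off;

	/* Every 64-byte-aligned offset encodes identically both ways. */
	for (off = 0; off < (1u << 20); off += 64)
		assert(((off / 64) << 16) == (off << 10));
	return 0;
}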
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- int ret;
+ u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = get_pd_offset(ppgtt);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
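
intel_ring_begin() now hands back a pointer into the ring (or an ERR_PTR) and callers write dwords through it directly, instead of the old intel_ring_emit() calls. A sketch of the idiom on a plain buffer; the opcode and register values are stand-ins, not the real encodings:

#include <stdint.h>
#include <stdio.h>

#define MI_LOAD_REGISTER_IMM(n)	(0x22000000u | ((n) * 2 - 1))	/* illustrative */
#define MI_NOOP			0x00000000u

static uint32_t ring[64];

static uint32_t *ring_begin(unsigned int dwords)
{
	return dwords <= 64 ? ring : NULL;	/* NULL models ERR_PTR */
}

int main(void)
{
	uint32_t *cs = ring_begin(6);

	if (!cs)
		return 1;

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = 0x2220;			/* DCLV register offset (made up) */
	*cs++ = ~0u;			/* a PP_DIR_DCLV_2G-style value */
	*cs++ = 0x2228;			/* BASE register offset (made up) */
	*cs++ = 0x40 << 10;		/* page-directory offset */
	*cs++ = MI_NOOP;

	printf("emitted %td dwords\n", cs - ring);
	return 0;
}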
@@ -1755,33 +1497,21 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- int ret;
+ u32 *cs;
/* NB: TLBs must be flushed and invalidated before a switch */
- ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
- intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
- intel_ring_emit(ring, PP_DIR_DCLV_2G);
- intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
- intel_ring_emit(ring, get_pd_offset(ppgtt));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- /* XXX: RCS is the only one to auto invalidate the TLBs? */
- if (engine->id != RCS) {
- ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
- if (ret)
- return ret;
- }
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(2);
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+ *cs++ = PP_DIR_DCLV_2G;
+ *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+ *cs++ = get_pd_offset(ppgtt);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1813,7 +1543,7 @@ static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
struct intel_engine_cs *engine;
- uint32_t ecochk, ecobits;
+ u32 ecochk, ecobits;
enum intel_engine_id id;
ecobits = I915_READ(GAC_ECO_BITS);
@@ -1837,7 +1567,7 @@ static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
- uint32_t ecochk, gab_ctl, ecobits;
+ u32 ecochk, gab_ctl, ecobits;
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
@@ -1854,168 +1584,124 @@ static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
- gen6_pte_t *pt_vaddr, scratch_pte;
- unsigned first_entry = start >> PAGE_SHIFT;
- unsigned num_entries = length >> PAGE_SHIFT;
- unsigned act_pt = first_entry / GEN6_PTES;
- unsigned first_pte = first_entry % GEN6_PTES;
- unsigned last_pte, i;
-
- scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC, 0);
+ unsigned int first_entry = start >> PAGE_SHIFT;
+ unsigned int pde = first_entry / GEN6_PTES;
+ unsigned int pte = first_entry % GEN6_PTES;
+ unsigned int num_entries = length >> PAGE_SHIFT;
+ gen6_pte_t scratch_pte =
+ vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
while (num_entries) {
- last_pte = first_pte + num_entries;
- if (last_pte > GEN6_PTES)
- last_pte = GEN6_PTES;
+ struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
+ unsigned int end = min(pte + num_entries, GEN6_PTES);
+ gen6_pte_t *vaddr;
- pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+ num_entries -= end - pte;
- for (i = first_pte; i < last_pte; i++)
- pt_vaddr[i] = scratch_pte;
+		/* Note that the hw doesn't support removing PDEs on the fly
+ * (they are cached inside the context with no means to
+ * invalidate the cache), so we can only reset the PTE
+ * entries back to scratch.
+ */
- kunmap_px(ppgtt, pt_vaddr);
+ vaddr = kmap_atomic_px(pt);
+ do {
+ vaddr[pte++] = scratch_pte;
+ } while (pte < end);
+ kunmap_atomic(vaddr);
- num_entries -= last_pte - first_pte;
- first_pte = 0;
- act_pt++;
+ pte = 0;
}
}
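
The clear loop now advances one page table at a time, clamping each pass to the table boundary. A self-contained model of the same walk, with 512 entries per table standing in for GEN6_PTES:

#include <stdio.h>

#define PTES_PER_PT 512

static void clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int pde = first_entry / PTES_PER_PT;
	unsigned int pte = first_entry % PTES_PER_PT;

	while (num_entries) {
		unsigned int end = pte + num_entries;

		if (end > PTES_PER_PT)
			end = PTES_PER_PT;	/* clamp to this table */

		printf("pt %u: clear ptes [%u, %u)\n", pde, pte, end);

		num_entries -= end - pte;
		pde++;
		pte = 0;			/* later tables start at 0 */
	}
}

int main(void)
{
	clear_range(500, 40);			/* straddles one table edge */
	return 0;
}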
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- uint64_t start,
- enum i915_cache_level cache_level, u32 flags)
+ u64 start,
+ enum i915_cache_level cache_level,
+ u32 flags)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned act_pt = first_entry / GEN6_PTES;
unsigned act_pte = first_entry % GEN6_PTES;
- gen6_pte_t *pt_vaddr = NULL;
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
+ const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
+ struct sgt_dma iter;
+ gen6_pte_t *vaddr;
+
+ vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
+ iter.sg = pages->sgl;
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + iter.sg->length;
+ do {
+ vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
- for_each_sgt_dma(addr, sgt_iter, pages) {
- if (pt_vaddr == NULL)
- pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
+ iter.dma += PAGE_SIZE;
+ if (iter.dma == iter.max) {
+ iter.sg = __sg_next(iter.sg);
+ if (!iter.sg)
+ break;
- pt_vaddr[act_pte] =
- vm->pte_encode(addr, cache_level, flags);
+ iter.dma = sg_dma_address(iter.sg);
+ iter.max = iter.dma + iter.sg->length;
+ }
if (++act_pte == GEN6_PTES) {
- kunmap_px(ppgtt, pt_vaddr);
- pt_vaddr = NULL;
- act_pt++;
+ kunmap_atomic(vaddr);
+ vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
act_pte = 0;
}
- }
-
- if (pt_vaddr)
- kunmap_px(ppgtt, pt_vaddr);
+ } while (1);
+ kunmap_atomic(vaddr);
}
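
The new insert path drops for_each_sgt_dma() for a hand-rolled iterator that carries the current DMA address and the end of the current scatterlist segment, crossing into the next segment only when the current one is exhausted. Modelled here with plain (dma, length) segments; struct seg is illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg { uint64_t dma; uint32_t len; };	/* models one sg entry */

static void insert_pages(const struct seg *sg, unsigned int nsegs)
{
	uint64_t dma = sg->dma;
	uint64_t max = sg->dma + sg->len;

	for (;;) {
		printf("pte <- %#llx\n", (unsigned long long)dma);

		dma += PAGE_SIZE;
		if (dma == max) {		/* segment exhausted */
			if (--nsegs == 0)
				break;
			sg++;
			dma = sg->dma;
			max = sg->dma + sg->len;
		}
	}
}

int main(void)
{
	const struct seg table[] = {
		{ 0x10000, 2 * PAGE_SIZE },
		{ 0x40000, 1 * PAGE_SIZE },
	};

	insert_pages(table, 2);
	return 0;
}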
static int gen6_alloc_va_range(struct i915_address_space *vm,
- uint64_t start_in, uint64_t length_in)
+ u64 start, u64 length)
{
- DECLARE_BITMAP(new_page_tables, I915_PDES);
- struct drm_i915_private *dev_priv = vm->i915;
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_table *pt;
- uint32_t start, length, start_save, length_save;
- uint32_t pde;
- int ret;
+ u64 from = start;
+ unsigned int pde;
+ bool flush = false;
- start = start_save = start_in;
- length = length_save = length_in;
-
- bitmap_zero(new_page_tables, I915_PDES);
-
- /* The allocation is done in two stages so that we can bail out with
- * minimal amount of pain. The first stage finds new page tables that
- * need allocation. The second stage marks use ptes within the page
- * tables.
- */
gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
- if (pt != vm->scratch_pt) {
- WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
- continue;
- }
-
- /* We've already allocated a page table */
- WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
+ if (pt == vm->scratch_pt) {
+ pt = alloc_pt(vm);
+ if (IS_ERR(pt))
+ goto unwind_out;
- pt = alloc_pt(dev_priv);
- if (IS_ERR(pt)) {
- ret = PTR_ERR(pt);
- goto unwind_out;
+ gen6_initialize_pt(vm, pt);
+ ppgtt->pd.page_table[pde] = pt;
+ gen6_write_pde(ppgtt, pde, pt);
+ flush = true;
}
-
- gen6_initialize_pt(vm, pt);
-
- ppgtt->pd.page_table[pde] = pt;
- __set_bit(pde, new_page_tables);
- trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
}
- start = start_save;
- length = length_save;
-
- gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
- DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
-
- bitmap_zero(tmp_bitmap, GEN6_PTES);
- bitmap_set(tmp_bitmap, gen6_pte_index(start),
- gen6_pte_count(start, length));
-
- if (__test_and_clear_bit(pde, new_page_tables))
- gen6_write_pde(&ppgtt->pd, pde, pt);
-
- trace_i915_page_table_entry_map(vm, pde, pt,
- gen6_pte_index(start),
- gen6_pte_count(start, length),
- GEN6_PTES);
- bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
- GEN6_PTES);
+ if (flush) {
+ mark_tlbs_dirty(ppgtt);
+ wmb();
}
- WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
-
- /* Make sure write is complete before other code can use this page
-	 * table. Also required for WC mapped PTEs */
- readl(ggtt->gsm);
-
- mark_tlbs_dirty(ppgtt);
return 0;
unwind_out:
- for_each_set_bit(pde, new_page_tables, I915_PDES) {
- struct i915_page_table *pt = ppgtt->pd.page_table[pde];
-
- ppgtt->pd.page_table[pde] = vm->scratch_pt;
- free_pt(dev_priv, pt);
- }
-
- mark_tlbs_dirty(ppgtt);
- return ret;
+ gen6_ppgtt_clear_range(vm, from, start);
+ return -ENOMEM;
}
static int gen6_init_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
int ret;
- ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
+ ret = setup_scratch_page(vm, I915_GFP_DMA);
if (ret)
return ret;
- vm->scratch_pt = alloc_pt(dev_priv);
+ vm->scratch_pt = alloc_pt(vm);
if (IS_ERR(vm->scratch_pt)) {
- cleanup_scratch_page(dev_priv, &vm->scratch_page);
+ cleanup_scratch_page(vm);
return PTR_ERR(vm->scratch_pt);
}
@@ -2026,25 +1712,22 @@ static int gen6_init_scratch(struct i915_address_space *vm)
static void gen6_free_scratch(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = vm->i915;
-
- free_pt(dev_priv, vm->scratch_pt);
- cleanup_scratch_page(dev_priv, &vm->scratch_page);
+ free_pt(vm, vm->scratch_pt);
+ cleanup_scratch_page(vm);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
struct i915_page_directory *pd = &ppgtt->pd;
- struct drm_i915_private *dev_priv = vm->i915;
struct i915_page_table *pt;
- uint32_t pde;
+ u32 pde;
drm_mm_remove_node(&ppgtt->node);
gen6_for_all_pdes(pt, pd, pde)
if (pt != vm->scratch_pt)
- free_pt(dev_priv, pt);
+ free_pt(vm, pt);
gen6_free_scratch(vm);
}
@@ -2077,6 +1760,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
if (ppgtt->node.start < ggtt->mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
+ ppgtt->pd.base.ggtt_offset =
+ ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
+
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
+ ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
+
return 0;
err_out:
@@ -2090,10 +1779,10 @@ static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
}
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
- uint64_t start, uint64_t length)
+ u64 start, u64 length)
{
struct i915_page_table *unused;
- uint32_t pde;
+ u32 pde;
gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
@@ -2119,32 +1808,30 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
if (ret)
return ret;
- ppgtt->base.allocate_va_range = gen6_alloc_va_range;
+ ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
+
+ gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
+ gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
+
+ ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
+ if (ret) {
+ gen6_ppgtt_cleanup(&ppgtt->base);
+ return ret;
+ }
+
ppgtt->base.clear_range = gen6_ppgtt_clear_range;
ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
ppgtt->base.unbind_vma = ppgtt_unbind_vma;
ppgtt->base.bind_vma = ppgtt_bind_vma;
ppgtt->base.cleanup = gen6_ppgtt_cleanup;
- ppgtt->base.start = 0;
- ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
ppgtt->debug_dump = gen6_dump_ppgtt;
- ppgtt->pd.base.ggtt_offset =
- ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
-
- ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
- ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
-
- gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
-
- gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
-
DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
ppgtt->node.size >> 20,
ppgtt->node.start / PAGE_SIZE);
- DRM_DEBUG("Adding PPGTT at offset %x\n",
- ppgtt->pd.base.ggtt_offset << 10);
+ DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
+ ppgtt->pd.base.ggtt_offset << 10);
return 0;
}
@@ -2153,6 +1840,7 @@ static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_private *dev_priv)
{
ppgtt->base.i915 = dev_priv;
+ ppgtt->base.dma = &dev_priv->drm.pdev->dev;
if (INTEL_INFO(dev_priv)->gen < 8)
return gen6_ppgtt_init(ppgtt);
@@ -2165,15 +1853,23 @@ static void i915_address_space_init(struct i915_address_space *vm,
const char *name)
{
i915_gem_timeline_init(dev_priv, &vm->timeline, name);
- drm_mm_init(&vm->mm, vm->start, vm->total);
+
+ drm_mm_init(&vm->mm, 0, vm->total);
+ vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
+
INIT_LIST_HEAD(&vm->active_list);
INIT_LIST_HEAD(&vm->inactive_list);
INIT_LIST_HEAD(&vm->unbound_list);
+
list_add_tail(&vm->global_link, &dev_priv->vm_list);
+ pagevec_init(&vm->free_pages, false);
}
static void i915_address_space_fini(struct i915_address_space *vm)
{
+ if (pagevec_count(&vm->free_pages))
+ vm_free_pages_release(vm);
+
i915_gem_timeline_fini(&vm->timeline);
drm_mm_takedown(&vm->mm);
list_del(&vm->global_link);
@@ -2185,34 +1881,17 @@ static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
* called on driver load and after a GPU reset, so you can place
* workarounds here even if they get overwritten by GPU reset.
*/
- /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
+ /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
if (IS_BROADWELL(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
else if (IS_CHERRYVIEW(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
- else if (IS_SKYLAKE(dev_priv))
+ else if (IS_GEN9_BC(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
- else if (IS_BROXTON(dev_priv))
+ else if (IS_GEN9_LP(dev_priv))
I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
-static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
- struct drm_i915_private *dev_priv,
- struct drm_i915_file_private *file_priv,
- const char *name)
-{
- int ret;
-
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
- if (ret == 0) {
- kref_init(&ppgtt->ref);
- i915_address_space_init(&ppgtt->base, dev_priv, name);
- ppgtt->base.file = file_priv;
- }
-
- return ret;
-}
-
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
gtt_write_workarounds(dev_priv);
@@ -2250,12 +1929,16 @@ i915_ppgtt_create(struct drm_i915_private *dev_priv,
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
+ ret = __hw_ppgtt_init(ppgtt, dev_priv);
if (ret) {
kfree(ppgtt);
return ERR_PTR(ret);
}
+ kref_init(&ppgtt->ref);
+ i915_address_space_init(&ppgtt->base, dev_priv, name);
+ ppgtt->base.file = fpriv;
+
trace_i915_ppgtt_create(&ppgtt->base);
return ppgtt;
@@ -2294,9 +1977,8 @@ void i915_ppgtt_release(struct kref *kref)
WARN_ON(!list_empty(&ppgtt->base.inactive_list));
WARN_ON(!list_empty(&ppgtt->base.unbound_list));
- i915_address_space_fini(&ppgtt->base);
-
ppgtt->base.cleanup(&ppgtt->base);
+ i915_address_space_fini(&ppgtt->base);
kfree(ppgtt);
}
@@ -2358,7 +2040,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
+ ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
i915_ggtt_invalidate(dev_priv);
}
@@ -2395,7 +2077,7 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
- uint64_t offset,
+ u64 offset,
enum i915_cache_level level,
u32 unused)
{
@@ -2410,32 +2092,22 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- uint64_t start,
- enum i915_cache_level level, u32 unused)
+ u64 start,
+ enum i915_cache_level level,
+ u32 unused)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- gen8_pte_t gtt_entry;
+ const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
dma_addr_t addr;
- int i = 0;
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
+ gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
+ gtt_entries += start >> PAGE_SHIFT;
+ for_each_sgt_dma(addr, sgt_iter, st)
+ gen8_set_pte(gtt_entries++, pte_encode | addr);
- for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = gen8_pte_encode(addr, level);
- gen8_set_pte(&gtt_entries[i++], gtt_entry);
- }
-
- /*
- * XXX: This serves as a posting read to make sure that the PTE has
- * actually been updated. There is some concern that even though
- * registers and PTEs are within the same BAR that they are potentially
- * of NUMA access patterns. Therefore, even with the way we assume
- * hardware should work, we must keep this posting read for paranoia.
- */
- if (i != 0)
- WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
+ wmb();
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2444,35 +2116,9 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
ggtt->invalidate(vm->i915);
}
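
Note the posting-read paranoia is gone: rather than reading back the last PTE, the new code orders the write-combined stores with a single wmb() before the chipset invalidate, so the surviving comment about "the above posting read" now effectively describes that barrier.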
-struct insert_entries {
- struct i915_address_space *vm;
- struct sg_table *st;
- uint64_t start;
- enum i915_cache_level level;
- u32 flags;
-};
-
-static int gen8_ggtt_insert_entries__cb(void *_arg)
-{
- struct insert_entries *arg = _arg;
- gen8_ggtt_insert_entries(arg->vm, arg->st,
- arg->start, arg->level, arg->flags);
- return 0;
-}
-
-static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
- struct sg_table *st,
- uint64_t start,
- enum i915_cache_level level,
- u32 flags)
-{
- struct insert_entries arg = { vm, st, start, level, flags };
- stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
-}
-
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
- uint64_t offset,
+ u64 offset,
enum i915_cache_level level,
u32 flags)
{
@@ -2493,31 +2139,18 @@ static void gen6_ggtt_insert_page(struct i915_address_space *vm,
*/
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
- uint64_t start,
- enum i915_cache_level level, u32 flags)
+ u64 start,
+ enum i915_cache_level level,
+ u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen6_pte_t __iomem *gtt_entries;
- gen6_pte_t gtt_entry;
+ gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
+ unsigned int i = start >> PAGE_SHIFT;
+ struct sgt_iter iter;
dma_addr_t addr;
- int i = 0;
-
- gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
-
- for_each_sgt_dma(addr, sgt_iter, st) {
- gtt_entry = vm->pte_encode(addr, level, flags);
- iowrite32(gtt_entry, &gtt_entries[i++]);
- }
-
- /* XXX: This serves as a posting read to make sure that the PTE has
- * actually been updated. There is some concern that even though
- * registers and PTEs are within the same BAR that they are potentially
- * of NUMA access patterns. Therefore, even with the way we assume
- * hardware should work, we must keep this posting read for paranoia.
- */
- if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
+ for_each_sgt_dma(addr, iter, st)
+ iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+ wmb();
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -2527,17 +2160,19 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
}
static void nop_clear_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+ u64 start, u64 length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start, uint64_t length)
+ u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
- gen8_pte_t scratch_pte, __iomem *gtt_base =
+ const gen8_pte_t scratch_pte =
+ gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+ gen8_pte_t __iomem *gtt_base =
(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
const int max_entries = ggtt_total_entries(ggtt) - first_entry;
int i;
@@ -2547,16 +2182,12 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
- I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
gen8_set_pte(&gtt_base[i], scratch_pte);
- readl(gtt_base);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
unsigned first_entry = start >> PAGE_SHIFT;
@@ -2576,12 +2207,11 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
- readl(gtt_base);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
dma_addr_t addr,
- uint64_t offset,
+ u64 offset,
enum i915_cache_level cache_level,
u32 unused)
{
@@ -2593,19 +2223,18 @@ static void i915_ggtt_insert_page(struct i915_address_space *vm,
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
- uint64_t start,
- enum i915_cache_level cache_level, u32 unused)
+ u64 start,
+ enum i915_cache_level cache_level,
+ u32 unused)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
-
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length)
+ u64 start, u64 length)
{
intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
@@ -2616,14 +2245,16 @@ static int ggtt_bind_vma(struct i915_vma *vma,
{
struct drm_i915_private *i915 = vma->vm->i915;
struct drm_i915_gem_object *obj = vma->obj;
- u32 pte_flags = 0;
- int ret;
+ u32 pte_flags;
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (unlikely(!vma->pages)) {
+ int ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+ }
/* Currently applicable only to VLV */
+ pte_flags = 0;
if (obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
@@ -2642,6 +2273,15 @@ static int ggtt_bind_vma(struct i915_vma *vma,
return 0;
}
+static void ggtt_unbind_vma(struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+
+ intel_runtime_pm_get(i915);
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
+ intel_runtime_pm_put(i915);
+}
+
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags)
@@ -2650,15 +2290,32 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
u32 pte_flags;
int ret;
- ret = i915_get_ggtt_vma_pages(vma);
- if (ret)
- return ret;
+ if (unlikely(!vma->pages)) {
+ ret = i915_get_ggtt_vma_pages(vma);
+ if (ret)
+ return ret;
+ }
/* Currently applicable only to VLV */
pte_flags = 0;
if (vma->obj->gt_ro)
pte_flags |= PTE_READ_ONLY;
+ if (flags & I915_VMA_LOCAL_BIND) {
+ struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
+
+ if (appgtt->base.allocate_va_range) {
+ ret = appgtt->base.allocate_va_range(&appgtt->base,
+ vma->node.start,
+ vma->node.size);
+ if (ret)
+ goto err_pages;
+ }
+
+ appgtt->base.insert_entries(&appgtt->base,
+ vma->pages, vma->node.start,
+ cache_level, pte_flags);
+ }
if (flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
@@ -2668,32 +2325,35 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
intel_runtime_pm_put(i915);
}
- if (flags & I915_VMA_LOCAL_BIND) {
- struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- appgtt->base.insert_entries(&appgtt->base,
- vma->pages, vma->node.start,
- cache_level, pte_flags);
- }
-
return 0;
+
+err_pages:
+ if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
+ if (vma->pages != vma->obj->mm.pages) {
+ GEM_BUG_ON(!vma->pages);
+ sg_free_table(vma->pages);
+ kfree(vma->pages);
+ }
+ vma->pages = NULL;
+ }
+ return ret;
}
-static void ggtt_unbind_vma(struct i915_vma *vma)
+static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
struct drm_i915_private *i915 = vma->vm->i915;
- struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
- const u64 size = min(vma->size, vma->node.size);
if (vma->flags & I915_VMA_GLOBAL_BIND) {
intel_runtime_pm_get(i915);
- vma->vm->clear_range(vma->vm,
- vma->node.start, size);
+ vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
intel_runtime_pm_put(i915);
}
- if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
- appgtt->base.clear_range(&appgtt->base,
- vma->node.start, size);
+ if (vma->flags & I915_VMA_LOCAL_BIND) {
+ struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
+
+ vm->clear_range(vm, vma->node.start, vma->size);
+ }
}
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
@@ -2719,14 +2379,76 @@ static void i915_gtt_color_adjust(const struct drm_mm_node *node,
u64 *start,
u64 *end)
{
- if (node->color != color)
+ if (node->allocated && node->color != color)
*start += I915_GTT_PAGE_SIZE;
+ /* Also leave a space between the unallocated reserved node after the
+ * GTT and any objects within the GTT, i.e. we use the color adjustment
+ * to insert a guard page to prevent prefetches crossing over the
+ * GTT boundary.
+ */
node = list_next_entry(node, node_list);
- if (node->allocated && node->color != color)
+ if (node->color != color)
*end -= I915_GTT_PAGE_SIZE;
}
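
The color-adjust hook implements the guard page: a prospective hole is shrunk by one page at whichever end borders a node of a different color, and dropping the node->allocated test on the following node means even the unallocated reserved node past the end of the GTT forces a guard. A standalone model of the adjustment:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GTT_PAGE_SIZE 4096ull

struct node { bool allocated; unsigned long color; };

static void color_adjust(const struct node *prev, const struct node *next,
			 unsigned long color, uint64_t *start, uint64_t *end)
{
	if (prev->allocated && prev->color != color)
		*start += GTT_PAGE_SIZE;	/* guard after previous node */

	if (next->color != color)		/* allocated or not */
		*end -= GTT_PAGE_SIZE;		/* guard before next node */
}

int main(void)
{
	struct node prev = { true, 1 }, next = { false, 1 };
	uint64_t start = 0x10000, end = 0x20000;

	color_adjust(&prev, &next, 0, &start, &end);
	printf("usable hole [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}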
+int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_hw_ppgtt *ppgtt;
+ int err;
+
+ ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
+ if (IS_ERR(ppgtt))
+ return PTR_ERR(ppgtt);
+
+ if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
+ err = -ENODEV;
+ goto err_ppgtt;
+ }
+
+ if (ppgtt->base.allocate_va_range) {
+ /* Note we only pre-allocate as far as the end of the global
+ * GTT. On 48b / 4-level page-tables, the difference is very,
+ * very significant! We have to preallocate as GVT/vgpu does
+ * not like the page directory disappearing.
+ */
+ err = ppgtt->base.allocate_va_range(&ppgtt->base,
+ 0, ggtt->base.total);
+ if (err)
+ goto err_ppgtt;
+ }
+
+ i915->mm.aliasing_ppgtt = ppgtt;
+
+ WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+ ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+
+ WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
+ ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
+
+ return 0;
+
+err_ppgtt:
+ i915_ppgtt_put(ppgtt);
+ return err;
+}
+
+void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
+ if (!ppgtt)
+ return;
+
+ i915_ppgtt_put(ppgtt);
+
+ ggtt->base.bind_vma = ggtt_bind_vma;
+ ggtt->base.unbind_vma = ggtt_unbind_vma;
+}
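
fetch_and_zero() is what makes the teardown idempotent: it takes ownership of the pointer and leaves NULL behind, so a second call returns early. A sketch of the exchange idiom; the macro body below is modelled on the driver's helper (a GNU C statement expression), not quoted from it:

#include <stddef.h>
#include <stdio.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) tmp__ = *(ptr);	\
	*(ptr) = NULL;				\
	tmp__;					\
})

static void *aliasing_ppgtt = "ppgtt";		/* stands in for the real one */

static void fini_aliasing(void)
{
	void *ppgtt = fetch_and_zero(&aliasing_ppgtt);

	if (!ppgtt)				/* already torn down */
		return;

	printf("releasing %p\n", ppgtt);	/* i915_ppgtt_put() goes here */
}

int main(void)
{
	fini_aliasing();
	fini_aliasing();			/* second call is a no-op */
	return 0;
}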
+
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
/* Let GEM Manage all of the aperture.
@@ -2740,7 +2462,6 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
*/
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long hole_start, hole_end;
- struct i915_hw_ppgtt *ppgtt;
struct drm_mm_node *entry;
int ret;
@@ -2769,38 +2490,13 @@ int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
- ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
- if (!ppgtt) {
- ret = -ENOMEM;
- goto err;
- }
-
- ret = __hw_ppgtt_init(ppgtt, dev_priv);
+ ret = i915_gem_init_aliasing_ppgtt(dev_priv);
if (ret)
- goto err_ppgtt;
-
- if (ppgtt->base.allocate_va_range) {
- ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
- ppgtt->base.total);
- if (ret)
- goto err_ppgtt_cleanup;
- }
-
- ppgtt->base.clear_range(&ppgtt->base,
- ppgtt->base.start,
- ppgtt->base.total);
-
- dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
- ggtt->base.bind_vma = aliasing_gtt_bind_vma;
+ goto err;
}
return 0;
-err_ppgtt_cleanup:
- ppgtt->base.cleanup(&ppgtt->base);
-err_ppgtt:
- kfree(ppgtt);
err:
drm_mm_remove_node(&ggtt->error_capture);
return ret;
@@ -2813,27 +2509,31 @@ err:
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
+ struct i915_vma *vma, *vn;
- if (dev_priv->mm.aliasing_ppgtt) {
- struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- ppgtt->base.cleanup(&ppgtt->base);
- kfree(ppgtt);
- }
+ ggtt->base.closed = true;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ WARN_ON(!list_empty(&ggtt->base.active_list));
+ list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
+ WARN_ON(i915_vma_unbind(vma));
+ mutex_unlock(&dev_priv->drm.struct_mutex);
i915_gem_cleanup_stolen(&dev_priv->drm);
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ i915_gem_fini_aliasing_ppgtt(dev_priv);
+
if (drm_mm_node_allocated(&ggtt->error_capture))
drm_mm_remove_node(&ggtt->error_capture);
if (drm_mm_initialized(&ggtt->base.mm)) {
intel_vgt_deballoon(dev_priv);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
i915_address_space_fini(&ggtt->base);
- mutex_unlock(&dev_priv->drm.struct_mutex);
}
ggtt->base.cleanup(&ggtt->base);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
arch_phys_wc_del(ggtt->mtrr);
io_mapping_fini(&ggtt->mappable);
@@ -2943,7 +2643,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
return -ENOMEM;
}
- ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
+ ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
@@ -2959,7 +2659,7 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
* writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- uint64_t pat;
+ u64 pat;
pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
@@ -2994,7 +2694,7 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
- uint64_t pat;
+ u64 pat;
/*
* Map WB on BDW to snooped on CHV.
@@ -3032,7 +2732,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
iounmap(ggtt->gsm);
- cleanup_scratch_page(vm->i915, &vm->scratch_page);
+ cleanup_scratch_page(vm);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
@@ -3078,8 +2778,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->base.clear_range = gen8_ggtt_clear_range;
ggtt->base.insert_entries = gen8_ggtt_insert_entries;
- if (IS_CHERRYVIEW(dev_priv))
- ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
ggtt->invalidate = gen6_ggtt_invalidate;
@@ -3183,6 +2881,7 @@ int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
int ret;
ggtt->base.i915 = dev_priv;
+ ggtt->base.dma = &dev_priv->drm.pdev->dev;
if (INTEL_GEN(dev_priv) <= 5)
ret = i915_gmch_probe(ggtt);
@@ -3242,14 +2941,14 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->vm_list);
- /* Subtract the guard page before address space initialization to
- * shrink the range used by drm_mm.
+ /* Note that we use page colouring to enforce a guard page at the
+ * end of the address space. This is required as the CS may prefetch
+ * beyond the end of the batch buffer, across the page boundary,
+ * and beyond the end of the GTT if we do not provide a guard.
*/
mutex_lock(&dev_priv->drm.struct_mutex);
- ggtt->base.total -= PAGE_SIZE;
i915_address_space_init(&ggtt->base, dev_priv, "[global]");
- ggtt->base.total += PAGE_SIZE;
- if (!HAS_LLC(dev_priv))
+ if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -3303,7 +3002,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
i915_check_and_clear_faults(dev_priv);
/* First fill our portion of the GTT with scratch pages */
- ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
+ ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
@@ -3344,8 +3043,6 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
struct i915_address_space *vm;
list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
- /* TODO: Perhaps it shouldn't be gen6 specific */
-
struct i915_hw_ppgtt *ppgtt;
if (i915_is_ggtt(vm))
@@ -3353,8 +3050,7 @@ void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
else
ppgtt = i915_vm_to_ppgtt(vm);
- gen6_write_page_range(dev_priv, &ppgtt->pd,
- 0, ppgtt->base.total);
+ gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
}
}
@@ -3389,11 +3085,11 @@ rotate_pages(const dma_addr_t *in, unsigned int offset,
return sg;
}
-static struct sg_table *
-intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
- struct drm_i915_gem_object *obj)
+static noinline struct sg_table *
+intel_rotate_pages(struct intel_rotation_info *rot_info,
+ struct drm_i915_gem_object *obj)
{
- const size_t n_pages = obj->base.size / PAGE_SIZE;
+ const unsigned long n_pages = obj->base.size / PAGE_SIZE;
unsigned int size = intel_rotation_info_size(rot_info);
struct sgt_iter sgt_iter;
dma_addr_t dma_addr;
@@ -3452,7 +3148,7 @@ err_st_alloc:
return ERR_PTR(ret);
}
-static struct sg_table *
+static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
struct drm_i915_gem_object *obj)
{
@@ -3506,7 +3202,7 @@ err_st_alloc:
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
- int ret = 0;
+ int ret;
/* The vma->pages are only valid within the lifespan of the borrowed
* obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
@@ -3515,32 +3211,33 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
*/
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
- if (vma->pages)
+ switch (vma->ggtt_view.type) {
+ case I915_GGTT_VIEW_NORMAL:
+ vma->pages = vma->obj->mm.pages;
return 0;
- if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
- vma->pages = vma->obj->mm.pages;
- else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
+ case I915_GGTT_VIEW_ROTATED:
vma->pages =
- intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
- vma->obj);
- else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
+ intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
+ break;
+
+ case I915_GGTT_VIEW_PARTIAL:
vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
- else
+ break;
+
+ default:
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
+ return -EINVAL;
+ }
- if (!vma->pages) {
- DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
- vma->ggtt_view.type);
- ret = -EINVAL;
- } else if (IS_ERR(vma->pages)) {
+ ret = 0;
+ if (unlikely(IS_ERR(vma->pages))) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
vma->ggtt_view.type, ret);
}
-
return ret;
}
@@ -3743,3 +3440,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm,
size, alignment, color,
start, end, DRM_MM_INSERT_EVICT);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_gtt.c"
+#include "selftests/i915_gem_gtt.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 3c5ef5358cef..fb15684c1d83 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,9 +36,11 @@
#include <linux/io-mapping.h>
#include <linux/mm.h>
+#include <linux/pagevec.h>
#include "i915_gem_timeline.h"
#include "i915_gem_request.h"
+#include "i915_selftest.h"
#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
@@ -51,11 +53,11 @@
struct drm_i915_file_private;
struct drm_i915_fence_reg;
-typedef uint32_t gen6_pte_t;
-typedef uint64_t gen8_pte_t;
-typedef uint64_t gen8_pde_t;
-typedef uint64_t gen8_ppgtt_pdpe_t;
-typedef uint64_t gen8_ppgtt_pml4e_t;
+typedef u32 gen6_pte_t;
+typedef u64 gen8_pte_t;
+typedef u64 gen8_pde_t;
+typedef u64 gen8_ppgtt_pdpe_t;
+typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)
@@ -67,7 +69,7 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)
-#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
+#define I915_PTES(pte_len) ((unsigned int)(PAGE_SIZE / (pte_len)))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
@@ -99,13 +101,20 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-/* GEN8 legacy style address is defined as a 3 level page table:
+/* GEN8 32b style address is defined as a 3 level page table:
* 31:30 | 29:21 | 20:12 | 11:0
* PDPE | PDE | PTE | offset
* The difference as compared to normal x86 3 level page table is the PDPEs are
* programmed via register.
- *
- * GEN8 48b legacy style address is defined as a 4 level page table:
+ */
+#define GEN8_3LVL_PDPES 4
+#define GEN8_PDE_SHIFT 21
+#define GEN8_PDE_MASK 0x1ff
+#define GEN8_PTE_SHIFT 12
+#define GEN8_PTE_MASK 0x1ff
+#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
+
+/* GEN8 48b style address is defined as a 4 level page table:
* 47:39 | 38:30 | 29:21 | 20:12 | 11:0
* PML4E | PDPE | PDE | PTE | offset
*/
@@ -116,15 +125,6 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
* tables */
#define GEN8_PDPE_MASK 0x1ff
-#define GEN8_PDE_SHIFT 21
-#define GEN8_PDE_MASK 0x1ff
-#define GEN8_PTE_SHIFT 12
-#define GEN8_PTE_MASK 0x1ff
-#define GEN8_LEGACY_PDPES 4
-#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))
-
-#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
- GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
@@ -141,7 +141,7 @@ typedef uint64_t gen8_ppgtt_pml4e_t;
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
-#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))
+#define GEN8_PPAT(i, x) ((u64)(x) << ((i) * 8))
struct sg_table;
@@ -208,7 +208,7 @@ struct i915_page_dma {
/* For gen6/gen7 only. This is the offset in the GGTT
* where the page directory entries for PPGTT begin
*/
- uint32_t ggtt_offset;
+ u32 ggtt_offset;
};
};
@@ -218,28 +218,24 @@ struct i915_page_dma {
struct i915_page_table {
struct i915_page_dma base;
-
- unsigned long *used_ptes;
+ unsigned int used_ptes;
};
struct i915_page_directory {
struct i915_page_dma base;
- unsigned long *used_pdes;
struct i915_page_table *page_table[I915_PDES]; /* PDEs */
+ unsigned int used_pdes;
};
struct i915_page_directory_pointer {
struct i915_page_dma base;
-
- unsigned long *used_pdpes;
struct i915_page_directory **page_directory;
+ unsigned int used_pdpes;
};
struct i915_pml4 {
struct i915_page_dma base;
-
- DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};
@@ -247,6 +243,7 @@ struct i915_address_space {
struct drm_mm mm;
struct i915_gem_timeline timeline;
struct drm_i915_private *i915;
+ struct device *dma;
/* Every address space belongs to a struct file - except for the global
* GTT that is owned by the driver (and so @file is set to NULL). In
* principle, no information should leak from one context to another
@@ -257,7 +254,6 @@ struct i915_address_space {
*/
struct drm_i915_file_private *file;
struct list_head global_link;
- u64 start; /* Start offset always 0 for dri2 */
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
bool closed;
@@ -297,6 +293,9 @@ struct i915_address_space {
*/
struct list_head unbound_list;
+ struct pagevec free_pages;
+ bool pt_kmap_wc;
+
/* FIXME: Need a more generic return type */
gen6_pte_t (*pte_encode)(dma_addr_t addr,
enum i915_cache_level level,
@@ -304,20 +303,19 @@ struct i915_address_space {
/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
int (*allocate_va_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length);
+ u64 start, u64 length);
void (*clear_range)(struct i915_address_space *vm,
- uint64_t start,
- uint64_t length);
+ u64 start, u64 length);
void (*insert_page)(struct i915_address_space *vm,
dma_addr_t addr,
- uint64_t offset,
+ u64 offset,
enum i915_cache_level cache_level,
u32 flags);
void (*insert_entries)(struct i915_address_space *vm,
struct sg_table *st,
- uint64_t start,
- enum i915_cache_level cache_level, u32 flags);
+ u64 start,
+ enum i915_cache_level cache_level,
+ u32 flags);
void (*cleanup)(struct i915_address_space *vm);
/** Unmap an object from an address space. This usually consists of
* setting the valid PTE entries to a reserved scratch page. */
@@ -326,10 +324,18 @@ struct i915_address_space {
int (*bind_vma)(struct i915_vma *vma,
enum i915_cache_level cache_level,
u32 flags);
+
+ I915_SELFTEST_DECLARE(struct fault_attr fault_attr);
};
#define i915_is_ggtt(V) (!(V)->file)
+static inline bool
+i915_vm_is_48bit(const struct i915_address_space *vm)
+{
+ return (vm->total - 1) >> 32;
+}
+
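The test above hinges on a small identity: (total - 1) >> 32 is nonzero
exactly when the space holds addresses that need more than 32 bits. A minimal
standalone check:

#include <stdint.h>
#include <stdio.h>

static int is_48bit(uint64_t total)
{
	/* Nonzero iff the last valid offset (total - 1) needs more than
	 * 32 bits, i.e. the space is larger than 4GiB. */
	return (total - 1) >> 32 != 0;
}

int main(void)
{
	printf("%d\n", is_48bit(1ULL << 32));       /* 0: exactly 4GiB */
	printf("%d\n", is_48bit((1ULL << 32) + 1)); /* 1 */
	printf("%d\n", is_48bit(1ULL << 48));       /* 1: full 48b space */
	return 0;
}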
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -381,7 +387,6 @@ struct i915_hw_ppgtt {
gen6_pte_t __iomem *pd_addr;
- int (*enable)(struct i915_hw_ppgtt *ppgtt);
int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_request *req);
void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
@@ -409,9 +414,9 @@ struct i915_hw_ppgtt {
(pt = (pd)->page_table[iter], true); \
++iter)
-static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
+static inline u32 i915_pte_index(u64 address, unsigned int pde_shift)
{
- const uint32_t mask = NUM_PTE(pde_shift) - 1;
+ const u32 mask = NUM_PTE(pde_shift) - 1;
return (address >> PAGE_SHIFT) & mask;
}
@@ -420,11 +425,10 @@ static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
* does not cross a page table boundary, so the max value would be
* GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
*/
-static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
- uint32_t pde_shift)
+static inline u32 i915_pte_count(u64 addr, u64 length, unsigned int pde_shift)
{
- const uint64_t mask = ~((1ULL << pde_shift) - 1);
- uint64_t end;
+ const u64 mask = ~((1ULL << pde_shift) - 1);
+ u64 end;
WARN_ON(length == 0);
WARN_ON(offset_in_page(addr|length));
@@ -437,26 +441,35 @@ static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
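
The body of i915_pte_count() is mostly elided by the diff; the sketch below
re-derives the clamping the comment describes as a standalone program.
GEN8_PDE_SHIFT = 21 comes from the definitions added earlier in this header;
everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static uint32_t pte_count(uint64_t addr, uint64_t length, unsigned int pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	const uint32_t ptes_per_table = 1u << (pde_shift - PAGE_SHIFT);
	uint32_t first = (uint32_t)(addr >> PAGE_SHIFT) & (ptes_per_table - 1);
	uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))	/* crosses a PDE boundary */
		return ptes_per_table - first;	/* clamp at the boundary */

	return ((uint32_t)(end >> PAGE_SHIFT) & (ptes_per_table - 1)) - first;
}

int main(void)
{
	printf("%u\n", pte_count(0x000000, 0x4000, 21)); /* 4 pages, one table */
	printf("%u\n", pte_count(0x1ff000, 0x3000, 21)); /* clamped to 1 */
	return 0;
}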
-static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
+static inline u32 i915_pde_index(u64 addr, u32 shift)
{
return (addr >> shift) & I915_PDE_MASK;
}
-static inline uint32_t gen6_pte_index(uint32_t addr)
+static inline u32 gen6_pte_index(u32 addr)
{
return i915_pte_index(addr, GEN6_PDE_SHIFT);
}
-static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
+static inline u32 gen6_pte_count(u32 addr, u32 length)
{
return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}
-static inline uint32_t gen6_pde_index(uint32_t addr)
+static inline u32 gen6_pde_index(u32 addr)
{
return i915_pde_index(addr, GEN6_PDE_SHIFT);
}
+static inline unsigned int
+i915_pdpes_per_pdp(const struct i915_address_space *vm)
+{
+ if (i915_vm_is_48bit(vm))
+ return GEN8_PML4ES_PER_PML4;
+
+ return GEN8_3LVL_PDPES;
+}
+
/* Equivalent to the gen6 version: for each pde, iterate over every pde
 * between start and start + length. On gen8+ it simply iterates
 * over every page directory entry in a page directory.
@@ -471,7 +484,7 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
for (iter = gen8_pdpe_index(start); \
- length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
+ length > 0 && iter < i915_pdpes_per_pdp(vm) && \
(pd = (pdp)->page_directory[iter], true); \
({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
temp = min(temp - start, length); \
@@ -485,27 +498,27 @@ static inline uint32_t gen6_pde_index(uint32_t addr)
temp = min(temp - start, length); \
start += temp, length -= temp; }), ++iter)
-static inline uint32_t gen8_pte_index(uint64_t address)
+static inline u32 gen8_pte_index(u64 address)
{
return i915_pte_index(address, GEN8_PDE_SHIFT);
}
-static inline uint32_t gen8_pde_index(uint64_t address)
+static inline u32 gen8_pde_index(u64 address)
{
return i915_pde_index(address, GEN8_PDE_SHIFT);
}
-static inline uint32_t gen8_pdpe_index(uint64_t address)
+static inline u32 gen8_pdpe_index(u64 address)
{
return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}
-static inline uint32_t gen8_pml4e_index(uint64_t address)
+static inline u32 gen8_pml4e_index(u64 address)
{
return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}
-static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
+static inline u64 gen8_pte_count(u64 address, u64 length)
{
return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}
@@ -513,9 +526,7 @@ static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
- return test_bit(n, ppgtt->pdp.used_pdpes) ?
- px_dma(ppgtt->pdp.page_directory[n]) :
- px_dma(ppgtt->base.scratch_pd);
+ return px_dma(ppgtt->pdp.page_directory[n]);
}
static inline struct i915_ggtt *
@@ -525,6 +536,9 @@ i915_vm_to_ggtt(struct i915_address_space *vm)
return container_of(vm, struct i915_ggtt, base);
}
+int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915);
+void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915);
+
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_internal.c b/drivers/gpu/drm/i915/i915_gem_internal.c
index 933019e1b206..fc950abbe400 100644
--- a/drivers/gpu/drm/i915/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/i915_gem_internal.c
@@ -35,8 +35,10 @@ static void internal_free_pages(struct sg_table *st)
{
struct scatterlist *sg;
- for (sg = st->sgl; sg; sg = __sg_next(sg))
- __free_pages(sg_page(sg), get_order(sg->length));
+ for (sg = st->sgl; sg; sg = __sg_next(sg)) {
+ if (sg_page(sg))
+ __free_pages(sg_page(sg), get_order(sg->length));
+ }
sg_free_table(st);
kfree(st);
@@ -133,6 +135,7 @@ create_st:
return st;
err:
+ sg_set_page(sg, NULL, 0, 0);
sg_mark_end(sg);
internal_free_pages(st);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h
index bf90b07163d1..174cf923c236 100644
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -33,6 +33,8 @@
#include <drm/i915_drm.h>
+#include "i915_selftest.h"
+
struct drm_i915_gem_object_ops {
unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
@@ -54,6 +56,9 @@ struct drm_i915_gem_object_ops {
struct sg_table *(*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);
+ int (*pwrite)(struct drm_i915_gem_object *,
+ const struct drm_i915_gem_pwrite *);
+
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
@@ -84,6 +89,7 @@ struct drm_i915_gem_object {
struct list_head obj_exec_link;
struct list_head batch_pool_link;
+ I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
@@ -162,19 +168,23 @@ struct drm_i915_gem_object {
struct reservation_object *resv;
/** References from framebuffers, locks out tiling changes. */
- unsigned long framebuffer_references;
+ unsigned int framebuffer_references;
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
- struct i915_gem_userptr {
- uintptr_t ptr;
- unsigned read_only :1;
+ union {
+ struct i915_gem_userptr {
+ uintptr_t ptr;
+ unsigned read_only :1;
- struct i915_mm_struct *mm;
- struct i915_mmu_object *mmu_object;
- struct work_struct *work;
- } userptr;
+ struct i915_mm_struct *mm;
+ struct i915_mmu_object *mmu_object;
+ struct work_struct *work;
+ } userptr;
+
+ unsigned long scratch;
+ };
/** for phys allocated objects */
struct drm_dma_handle *phys_handle;
@@ -253,10 +263,14 @@ extern void drm_gem_object_unreference(struct drm_gem_object *);
__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);
-static inline bool
-i915_gem_object_is_dead(const struct drm_i915_gem_object *obj)
+static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
+{
+ reservation_object_lock(obj->resv, NULL);
+}
+
+static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
- return kref_read(&obj->base.refcount) == 0;
+ reservation_object_unlock(obj->resv);
}
static inline bool
@@ -299,6 +313,12 @@ i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+static inline bool
+i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
+{
+ return READ_ONCE(obj->framebuffer_references);
+}
+
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
@@ -357,5 +377,7 @@ i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
return engine;
}
+void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e7c3c0318ff6..0e8d1010cecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -72,7 +72,6 @@ static void i915_fence_release(struct dma_fence *fence)
* caught trying to reuse dead objects.
*/
i915_sw_fence_fini(&req->submit);
- i915_sw_fence_fini(&req->execute);
kmem_cache_free(req->i915->requests, req);
}
@@ -86,42 +85,20 @@ const struct dma_fence_ops i915_fence_ops = {
.release = i915_fence_release,
};
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
- struct drm_file *file)
-{
- struct drm_i915_private *dev_private;
- struct drm_i915_file_private *file_priv;
-
- WARN_ON(!req || !file || req->file_priv);
-
- if (!req || !file)
- return -EINVAL;
-
- if (req->file_priv)
- return -EINVAL;
-
- dev_private = req->i915;
- file_priv = file->driver_priv;
-
- spin_lock(&file_priv->mm.lock);
- req->file_priv = file_priv;
- list_add_tail(&req->client_list, &file_priv->mm.request_list);
- spin_unlock(&file_priv->mm.lock);
-
- return 0;
-}
-
static inline void
i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
{
- struct drm_i915_file_private *file_priv = request->file_priv;
+ struct drm_i915_file_private *file_priv;
+ file_priv = request->file_priv;
if (!file_priv)
return;
spin_lock(&file_priv->mm.lock);
- list_del(&request->client_list);
- request->file_priv = NULL;
+ if (request->file_priv) {
+ list_del(&request->client_link);
+ request->file_priv = NULL;
+ }
spin_unlock(&file_priv->mm.lock);
}
@@ -201,6 +178,92 @@ i915_priotree_init(struct i915_priotree *pt)
pt->priority = INT_MIN;
}
+static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
+{
+ struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int ret;
+
+ /* Carefully retire all requests without writing to the rings */
+ ret = i915_gem_wait_for_idle(i915,
+ I915_WAIT_INTERRUPTIBLE |
+ I915_WAIT_LOCKED);
+ if (ret)
+ return ret;
+
+ i915_gem_retire_requests(i915);
+ GEM_BUG_ON(i915->gt.active_requests > 1);
+
+ /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ if (wait_for(intel_engine_is_idle(engine), 50))
+ return -EBUSY;
+
+ if (!i915_seqno_passed(seqno, tl->seqno)) {
+ /* spin until threads are complete */
+ while (intel_breadcrumbs_busy(engine))
+ cond_resched();
+ }
+
+ /* Finally reset hw state */
+ tl->seqno = seqno;
+ intel_engine_init_global_seqno(engine, seqno);
+ }
+
+ list_for_each_entry(timeline, &i915->gt.timelines, link) {
+ for_each_engine(engine, i915, id) {
+ struct intel_timeline *tl = &timeline->engine[id];
+
+ memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
+ }
+ }
+
+ return 0;
+}
+
+int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (seqno == 0)
+ return -EINVAL;
+
+	/* The seqno in the HWS page needs to be set to less than what we
+	 * will inject into the ring
+	 */
+ return reset_all_global_seqno(dev_priv, seqno - 1);
+}
+
+static int reserve_seqno(struct intel_engine_cs *engine)
+{
+ u32 active = ++engine->timeline->inflight_seqnos;
+ u32 seqno = engine->timeline->seqno;
+ int ret;
+
+ /* Reservation is fine until we need to wrap around */
+ if (likely(!add_overflows(seqno, active)))
+ return 0;
+
+ ret = reset_all_global_seqno(engine->i915, 0);
+ if (ret) {
+ engine->timeline->inflight_seqnos--;
+ return ret;
+ }
+
+ return 0;
+}
+
+static void unreserve_seqno(struct intel_engine_cs *engine)
+{
+ GEM_BUG_ON(!engine->timeline->inflight_seqnos);
+ engine->timeline->inflight_seqnos--;
+}
+
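reserve_seqno() relies on add_overflows() to spot the coming wrap; a stand-in
using the usual unsigned identity is sketched below (for unsigned types,
a + b wraps exactly when the truncated sum is smaller than an operand; this
is illustrative, not the driver's definition).

#include <stdint.h>
#include <stdio.h>

static int add_overflows_u32(uint32_t a, uint32_t b)
{
	/* The sum wrapped iff it ended up below one of the addends. */
	return (uint32_t)(a + b) < a;
}

int main(void)
{
	printf("%d\n", add_overflows_u32(0xfffffff0u, 0x10u)); /* 1: wraps */
	printf("%d\n", add_overflows_u32(0xfffffff0u, 0x0fu)); /* 0: fits  */
	return 0;
}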
void i915_gem_retire_noop(struct i915_gem_active *active,
struct drm_i915_gem_request *request)
{
@@ -214,7 +277,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
lockdep_assert_held(&request->i915->drm.struct_mutex);
GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
- GEM_BUG_ON(!i915_sw_fence_signaled(&request->execute));
GEM_BUG_ON(!i915_gem_request_completed(request));
GEM_BUG_ON(!request->i915->gt.active_requests);
@@ -240,6 +302,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
&request->i915->gt.idle_work,
msecs_to_jiffies(100));
}
+ unreserve_seqno(request->engine);
/* Walk through the active list, calling retire on each. This allows
* objects to track their GPU activity and mark themselves as idle
@@ -310,88 +373,9 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
} while (tmp != req);
}
-static int i915_gem_init_global_seqno(struct drm_i915_private *i915, u32 seqno)
-{
- struct i915_gem_timeline *timeline = &i915->gt.global_timeline;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int ret;
-
- /* Carefully retire all requests without writing to the rings */
- ret = i915_gem_wait_for_idle(i915,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED);
- if (ret)
- return ret;
-
- i915_gem_retire_requests(i915);
- GEM_BUG_ON(i915->gt.active_requests > 1);
-
- /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
- if (!i915_seqno_passed(seqno, atomic_read(&timeline->seqno))) {
- while (intel_breadcrumbs_busy(i915))
- cond_resched(); /* spin until threads are complete */
- }
- atomic_set(&timeline->seqno, seqno);
-
- /* Finally reset hw state */
- for_each_engine(engine, i915, id)
- intel_engine_init_global_seqno(engine, seqno);
-
- list_for_each_entry(timeline, &i915->gt.timelines, link) {
- for_each_engine(engine, i915, id) {
- struct intel_timeline *tl = &timeline->engine[id];
-
- memset(tl->sync_seqno, 0, sizeof(tl->sync_seqno));
- }
- }
-
- return 0;
-}
-
-int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
+static u32 timeline_get_seqno(struct intel_timeline *tl)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
- if (seqno == 0)
- return -EINVAL;
-
- /* HWS page needs to be set less than what we
- * will inject to ring
- */
- return i915_gem_init_global_seqno(dev_priv, seqno - 1);
-}
-
-static int reserve_global_seqno(struct drm_i915_private *i915)
-{
- u32 active_requests = ++i915->gt.active_requests;
- u32 seqno = atomic_read(&i915->gt.global_timeline.seqno);
- int ret;
-
- /* Reservation is fine until we need to wrap around */
- if (likely(seqno + active_requests > seqno))
- return 0;
-
- ret = i915_gem_init_global_seqno(i915, 0);
- if (ret) {
- i915->gt.active_requests--;
- return ret;
- }
-
- return 0;
-}
-
-static u32 __timeline_get_seqno(struct i915_gem_timeline *tl)
-{
- /* seqno only incremented under a mutex */
- return ++tl->seqno.counter;
-}
-
-static u32 timeline_get_seqno(struct i915_gem_timeline *tl)
-{
- return atomic_inc_return(&tl->seqno);
+ return ++tl->seqno;
}
void __i915_gem_request_submit(struct drm_i915_gem_request *request)
@@ -400,19 +384,19 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
struct intel_timeline *timeline;
u32 seqno;
+ GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&engine->timeline->lock);
+
+ trace_i915_gem_request_execute(request);
+
/* Transfer from per-context onto the global per-engine timeline */
timeline = engine->timeline;
GEM_BUG_ON(timeline == request->timeline);
- assert_spin_locked(&timeline->lock);
- seqno = timeline_get_seqno(timeline->common);
+ seqno = timeline_get_seqno(timeline);
GEM_BUG_ON(!seqno);
GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno));
- GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno, seqno));
- request->previous_seqno = timeline->last_submitted_seqno;
- timeline->last_submitted_seqno = seqno;
-
/* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
request->global_seqno = seqno;
@@ -420,7 +404,6 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
intel_engine_enable_signaling(request);
spin_unlock(&request->lock);
- GEM_BUG_ON(!request->global_seqno);
engine->emit_breadcrumb(request,
request->ring->vaddr + request->postfix);
@@ -428,7 +411,7 @@ void __i915_gem_request_submit(struct drm_i915_gem_request *request)
list_move_tail(&request->link, &timeline->requests);
spin_unlock(&request->timeline->lock);
- i915_sw_fence_commit(&request->execute);
+ wake_up_all(&request->execute);
}
void i915_gem_request_submit(struct drm_i915_gem_request *request)
@@ -444,33 +427,66 @@ void i915_gem_request_submit(struct drm_i915_gem_request *request)
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
-static int __i915_sw_fence_call
-submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
{
- struct drm_i915_gem_request *request =
- container_of(fence, typeof(*request), submit);
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_timeline *timeline;
- switch (state) {
- case FENCE_COMPLETE:
- request->engine->submit_request(request);
- break;
+ GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&engine->timeline->lock);
- case FENCE_FREE:
- i915_gem_request_put(request);
- break;
- }
+ /* Only unwind in reverse order, required so that the per-context list
+ * is kept in seqno/ring order.
+ */
+ GEM_BUG_ON(request->global_seqno != engine->timeline->seqno);
+ engine->timeline->seqno--;
- return NOTIFY_DONE;
+ /* We may be recursing from the signal callback of another i915 fence */
+ spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+ request->global_seqno = 0;
+ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
+ intel_engine_cancel_signaling(request);
+ spin_unlock(&request->lock);
+
+ /* Transfer back from the global per-engine timeline to per-context */
+ timeline = request->timeline;
+ GEM_BUG_ON(timeline == engine->timeline);
+
+ spin_lock(&timeline->lock);
+ list_move(&request->link, &timeline->requests);
+ spin_unlock(&timeline->lock);
+
+	/* We don't need to wake_up any waiters on request->execute; they
+	 * will get woken by any other event or by us re-adding this request
+	 * to the engine timeline (__i915_gem_request_submit()). The waiters
+	 * should be quite adept at noticing that the request now has a
+	 * different global_seqno from the one they went to sleep on.
+	 */
+}
+
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ unsigned long flags;
+
+ /* Will be called from irq-context when using foreign fences. */
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+
+ __i915_gem_request_unsubmit(request);
+
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
static int __i915_sw_fence_call
-execute_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
struct drm_i915_gem_request *request =
- container_of(fence, typeof(*request), execute);
+ container_of(fence, typeof(*request), submit);
switch (state) {
case FENCE_COMPLETE:
+ trace_i915_gem_request_submit(request);
+ request->engine->submit_request(request);
break;
case FENCE_FREE:
@@ -517,14 +533,14 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
if (ret)
return ERR_PTR(ret);
- ret = reserve_global_seqno(dev_priv);
+ ret = reserve_seqno(engine);
if (ret)
goto err_unpin;
/* Move the oldest request to the slab-cache (if not in use!) */
req = list_first_entry_or_null(&engine->timeline->requests,
typeof(*req), link);
- if (req && __i915_gem_request_completed(req))
+ if (req && i915_gem_request_completed(req))
i915_gem_request_retire(req);
/* Beware: Dragons be flying overhead.
@@ -569,17 +585,11 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
&i915_fence_ops,
&req->lock,
req->timeline->fence_context,
- __timeline_get_seqno(req->timeline->common));
+ timeline_get_seqno(req->timeline));
/* We bump the ref for the fence chain */
i915_sw_fence_init(&i915_gem_request_get(req)->submit, submit_notify);
- i915_sw_fence_init(&i915_gem_request_get(req)->execute, execute_notify);
-
- /* Ensure that the execute fence completes after the submit fence -
- * as we complete the execute fence from within the submit fence
- * callback, its completion would otherwise be visible first.
- */
- i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
+ init_waitqueue_head(&req->execute);
i915_priotree_init(&req->priotree);
@@ -614,6 +624,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
*/
req->head = req->ring->tail;
+ /* Check that we didn't interrupt ourselves with a new request */
+ GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
return req;
err_ctx:
@@ -624,7 +636,7 @@ err_ctx:
kmem_cache_free(dev_priv->requests, req);
err_unreserve:
- dev_priv->gt.active_requests--;
+ unreserve_seqno(engine);
err_unpin:
engine->context_unpin(engine, ctx);
return ERR_PTR(ret);
@@ -634,6 +646,7 @@ static int
i915_gem_request_await_request(struct drm_i915_gem_request *to,
struct drm_i915_gem_request *from)
{
+ u32 seqno;
int ret;
GEM_BUG_ON(to == from);
@@ -656,14 +669,15 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret < 0 ? ret : 0;
}
- if (!from->global_seqno) {
+ seqno = i915_gem_request_global_seqno(from);
+ if (!seqno) {
ret = i915_sw_fence_await_dma_fence(&to->submit,
&from->fence, 0,
GFP_KERNEL);
return ret < 0 ? ret : 0;
}
- if (from->global_seqno <= to->timeline->sync_seqno[from->engine->id])
+ if (seqno <= to->timeline->sync_seqno[from->engine->id])
return 0;
trace_i915_gem_ring_sync_to(to, from);
@@ -681,7 +695,7 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
return ret;
}
- to->timeline->sync_seqno[from->engine->id] = from->global_seqno;
+ to->timeline->sync_seqno[from->engine->id] = seqno;
return 0;
}
@@ -827,6 +841,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
struct intel_ring *ring = request->ring;
struct intel_timeline *timeline = request->timeline;
struct drm_i915_gem_request *prev;
+ u32 *cs;
int err;
lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -836,8 +851,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* our i915_gem_request_alloc() and called __i915_add_request() before
* us, the timeline will hold its seqno which is later than ours.
*/
- GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
- request->fence.seqno));
+ GEM_BUG_ON(timeline->seqno != request->fence.seqno);
/*
* To ensure that this call will not fail, space for its emissions
@@ -865,10 +879,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
* GPU processing the request, we never over-estimate the
* position of the ring's HEAD.
*/
- err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
- GEM_BUG_ON(err);
- request->postfix = ring->tail;
- ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+ cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+ GEM_BUG_ON(IS_ERR(cs));
+ request->postfix = intel_ring_offset(request, cs);
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
@@ -892,16 +905,14 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
list_add_tail(&request->link, &timeline->requests);
spin_unlock_irq(&timeline->lock);
- GEM_BUG_ON(i915_seqno_passed(timeline->last_submitted_seqno,
- request->fence.seqno));
-
- timeline->last_submitted_seqno = request->fence.seqno;
+ GEM_BUG_ON(timeline->seqno != request->fence.seqno);
i915_gem_active_set(&timeline->last_request, request);
list_add_tail(&request->ring_link, &ring->request_list);
request->emitted_jiffies = jiffies;
- i915_gem_mark_busy(engine);
+ if (!request->i915->gt.active_requests++)
+ i915_gem_mark_busy(engine);
/* Let the backend know a new request has arrived that may need
* to adjust the existing execution schedule due to a high priority
@@ -921,16 +932,6 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
}
-static void reset_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&q->lock, flags);
- if (list_empty(&wait->task_list))
- __add_wait_queue(q, wait);
- spin_unlock_irqrestore(&q->lock, flags);
-}
-
static unsigned long local_clock_us(unsigned int *cpu)
{
unsigned long t;
@@ -964,9 +965,10 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
}
bool __i915_spin_request(const struct drm_i915_gem_request *req,
- int state, unsigned long timeout_us)
+ u32 seqno, int state, unsigned long timeout_us)
{
- unsigned int cpu;
+ struct intel_engine_cs *engine = req->engine;
+ unsigned int irq, cpu;
/* When waiting for high frequency requests, e.g. during synchronous
* rendering split between the CPU and GPU, the finite amount of time
@@ -978,11 +980,24 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
* takes to sleep on a request, on the order of a microsecond.
*/
+ irq = atomic_read(&engine->irq_count);
timeout_us += local_clock_us(&cpu);
do {
- if (__i915_gem_request_completed(req))
+ if (seqno != i915_gem_request_global_seqno(req))
+ break;
+
+ if (i915_seqno_passed(intel_engine_get_seqno(req->engine),
+ seqno))
return true;
+	/* Seqnos are meant to be ordered *before* the interrupt. If
+	 * we see an interrupt without a corresponding seqno advance,
+	 * assume we won't see one in the near future but require
+	 * the engine->seqno_barrier() to fix up coherency.
+	 */
+ if (atomic_read(&engine->irq_count) != irq)
+ break;
+
if (signal_pending_state(state, current))
break;
@@ -995,52 +1010,14 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
return false;
}
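
The interrupt-counter check above gives the spin loop an early exit: if an
interrupt arrives without the seqno advancing, there is little point
continuing to burn the CPU. A userspace sketch of the same shape, with C11
atomics and a monotonic clock standing in for the driver's primitives (names
are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static atomic_uint irq_count;

static bool spin_for(atomic_uint *done, unsigned long timeout_ns)
{
	unsigned int irq = atomic_load(&irq_count);	/* snapshot first */
	struct timespec t0, t;

	clock_gettime(CLOCK_MONOTONIC, &t0);
	do {
		if (atomic_load(done))
			return true;	/* condition met: success */
		if (atomic_load(&irq_count) != irq)
			return false;	/* irq fired, no advance: stop */
		clock_gettime(CLOCK_MONOTONIC, &t);
	} while ((unsigned long)((t.tv_sec - t0.tv_sec) * 1000000000L +
				 (t.tv_nsec - t0.tv_nsec)) < timeout_ns);

	return false;			/* timed out */
}

int main(void)
{
	atomic_uint done = 0;
	printf("%d\n", spin_for(&done, 1000000)); /* 1ms budget -> prints 0 */
	return 0;
}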
-static long
-__i915_request_wait_for_execute(struct drm_i915_gem_request *request,
- unsigned int flags,
- long timeout)
+static bool __i915_wait_request_check_and_reset(struct drm_i915_gem_request *request)
{
- const int state = flags & I915_WAIT_INTERRUPTIBLE ?
- TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- wait_queue_head_t *q = &request->i915->gpu_error.wait_queue;
- DEFINE_WAIT(reset);
- DEFINE_WAIT(wait);
-
- if (flags & I915_WAIT_LOCKED)
- add_wait_queue(q, &reset);
-
- do {
- prepare_to_wait(&request->execute.wait, &wait, state);
-
- if (i915_sw_fence_done(&request->execute))
- break;
-
- if (flags & I915_WAIT_LOCKED &&
- i915_reset_in_progress(&request->i915->gpu_error)) {
- __set_current_state(TASK_RUNNING);
- i915_reset(request->i915);
- reset_wait_queue(q, &reset);
- continue;
- }
-
- if (signal_pending_state(state, current)) {
- timeout = -ERESTARTSYS;
- break;
- }
-
- if (!timeout) {
- timeout = -ETIME;
- break;
- }
-
- timeout = io_schedule_timeout(timeout);
- } while (1);
- finish_wait(&request->execute.wait, &wait);
-
- if (flags & I915_WAIT_LOCKED)
- remove_wait_queue(q, &reset);
+ if (likely(!i915_reset_handoff(&request->i915->gpu_error)))
+ return false;
- return timeout;
+ __set_current_state(TASK_RUNNING);
+ i915_reset(request->i915);
+ return true;
}
/**
@@ -1068,7 +1045,9 @@ long i915_wait_request(struct drm_i915_gem_request *req,
{
const int state = flags & I915_WAIT_INTERRUPTIBLE ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
- DEFINE_WAIT(reset);
+ wait_queue_head_t *errq = &req->i915->gpu_error.wait_queue;
+ DEFINE_WAIT_FUNC(reset, default_wake_function);
+ DEFINE_WAIT_FUNC(exec, default_wake_function);
struct intel_wait wait;
might_sleep();
@@ -1085,27 +1064,45 @@ long i915_wait_request(struct drm_i915_gem_request *req,
if (!timeout)
return -ETIME;
- trace_i915_gem_request_wait_begin(req);
+ trace_i915_gem_request_wait_begin(req, flags);
+
+ add_wait_queue(&req->execute, &exec);
+ if (flags & I915_WAIT_LOCKED)
+ add_wait_queue(errq, &reset);
+
+ intel_wait_init(&wait, req);
+
+restart:
+ do {
+ set_current_state(state);
+ if (intel_wait_update_request(&wait, req))
+ break;
+
+ if (flags & I915_WAIT_LOCKED &&
+ __i915_wait_request_check_and_reset(req))
+ continue;
- if (!i915_sw_fence_done(&req->execute)) {
- timeout = __i915_request_wait_for_execute(req, flags, timeout);
- if (timeout < 0)
+ if (signal_pending_state(state, current)) {
+ timeout = -ERESTARTSYS;
goto complete;
+ }
- GEM_BUG_ON(!i915_sw_fence_done(&req->execute));
- }
- GEM_BUG_ON(!i915_sw_fence_done(&req->submit));
- GEM_BUG_ON(!req->global_seqno);
+ if (!timeout) {
+ timeout = -ETIME;
+ goto complete;
+ }
+
+ timeout = io_schedule_timeout(timeout);
+ } while (1);
+
+ GEM_BUG_ON(!intel_wait_has_seqno(&wait));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&req->submit));
/* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
goto complete;
set_current_state(state);
- if (flags & I915_WAIT_LOCKED)
- add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
-
- intel_wait_init(&wait, req->global_seqno);
if (intel_engine_add_wait(req->engine, &wait))
/* In order to check that we haven't missed the interrupt
* as we enabled it, we need to kick ourselves to do a
@@ -1113,6 +1110,9 @@ long i915_wait_request(struct drm_i915_gem_request *req,
*/
goto wakeup;
+ if (flags & I915_WAIT_LOCKED)
+ __i915_wait_request_check_and_reset(req);
+
for (;;) {
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
@@ -1126,7 +1126,8 @@ long i915_wait_request(struct drm_i915_gem_request *req,
timeout = io_schedule_timeout(timeout);
- if (intel_wait_complete(&wait))
+ if (intel_wait_complete(&wait) &&
+ intel_wait_check_request(&wait, req))
break;
set_current_state(state);
@@ -1151,25 +1152,25 @@ wakeup:
* itself, or indirectly by recovering the GPU).
*/
if (flags & I915_WAIT_LOCKED &&
- i915_reset_in_progress(&req->i915->gpu_error)) {
- __set_current_state(TASK_RUNNING);
- i915_reset(req->i915);
- reset_wait_queue(&req->i915->gpu_error.wait_queue,
- &reset);
+ __i915_wait_request_check_and_reset(req))
continue;
- }
/* Only spin if we know the GPU is processing this request */
if (i915_spin_request(req, state, 2))
break;
+
+ if (!intel_wait_check_request(&wait, req)) {
+ intel_engine_remove_wait(req->engine, &wait);
+ goto restart;
+ }
}
intel_engine_remove_wait(req->engine, &wait);
- if (flags & I915_WAIT_LOCKED)
- remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
- __set_current_state(TASK_RUNNING);
-
complete:
+ __set_current_state(TASK_RUNNING);
+ if (flags & I915_WAIT_LOCKED)
+ remove_wait_queue(errq, &reset);
+ remove_wait_queue(&req->execute, &exec);
trace_i915_gem_request_wait_end(req);
return timeout;
@@ -1178,14 +1179,21 @@ complete:
static void engine_retire_requests(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *request, *next;
+ u32 seqno = intel_engine_get_seqno(engine);
+ LIST_HEAD(retire);
+ spin_lock_irq(&engine->timeline->lock);
list_for_each_entry_safe(request, next,
&engine->timeline->requests, link) {
- if (!__i915_gem_request_completed(request))
- return;
+ if (!i915_seqno_passed(seqno, request->global_seqno))
+ break;
- i915_gem_request_retire(request);
+ list_move_tail(&request->link, &retire);
}
+ spin_unlock_irq(&engine->timeline->lock);
+
+ list_for_each_entry_safe(request, next, &retire, link)
+ i915_gem_request_retire(request);
}
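
The rewritten loop above splices completed requests onto a private list under
the spinlock and retires them only after dropping it, so the expensive work
runs lock-free. The same shape in miniature (a pthread mutex and a toy list
standing in for the timeline lock and request list):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;	/* ordered by id, like the timeline list */

static void retire_completed(int last_done)
{
	struct node *retire = NULL, **tail = &retire, *n;

	pthread_mutex_lock(&lock);
	while ((n = pending) && n->id <= last_done) {
		pending = n->next;	/* unlink under the lock ... */
		n->next = NULL;
		*tail = n;		/* ... appending keeps retire order */
		tail = &n->next;
	}
	pthread_mutex_unlock(&lock);

	while ((n = retire)) {		/* slow work outside the lock */
		retire = n->next;
		printf("retiring %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 3; i >= 1; i--) {	/* build 1 -> 2 -> 3 */
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = pending;
		pending = n;
	}
	retire_completed(2);		/* retires 1 and 2, keeps 3 */
	return 0;
}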
void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
@@ -1201,3 +1209,8 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
for_each_engine(engine, dev_priv, id)
engine_retire_requests(engine);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_request.c"
+#include "selftests/i915_gem_request.c"
+#endif
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index ea511f06efaf..a211c53c813f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -32,10 +32,12 @@
struct drm_file;
struct drm_i915_gem_object;
+struct drm_i915_gem_request;
struct intel_wait {
struct rb_node node;
struct task_struct *tsk;
+ struct drm_i915_gem_request *request;
u32 seqno;
};
@@ -119,18 +121,10 @@ struct drm_i915_gem_request {
* The submit fence is used to await upon all of the request's
* dependencies. When it is signaled, the request is ready to run.
* It is used by the driver to then queue the request for execution.
- *
- * The execute fence is used to signal when the request has been
- * sent to hardware.
- *
- * It is illegal for the submit fence of one request to wait upon the
- * execute fence of an earlier request. It should be sufficient to
- * wait upon the submit fence of the earlier request.
*/
struct i915_sw_fence submit;
- struct i915_sw_fence execute;
wait_queue_t submitq;
- wait_queue_t execq;
+ wait_queue_head_t execute;
/* A list of everyone we wait upon, and everyone who waits upon us.
* Even though we will not be submitted to the hardware before the
@@ -143,13 +137,12 @@ struct drm_i915_gem_request {
struct i915_priotree priotree;
struct i915_dependency dep;
- u32 global_seqno;
-
- /** GEM sequence number associated with the previous request,
- * when the HWS breadcrumb is equal to this the GPU is processing
- * this request.
+ /** GEM sequence number associated with this request on the
+ * global execution timeline. It is zero when the request is not
+ * on the HW queue (i.e. not on the engine timeline list).
+ * Its value is guarded by the timeline spinlock.
*/
- u32 previous_seqno;
+ u32 global_seqno;
/** Position in the ring of the start of the request */
u32 head;
@@ -187,7 +180,7 @@ struct drm_i915_gem_request {
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
- struct list_head client_list;
+ struct list_head client_link;
};
extern const struct dma_fence_ops i915_fence_ops;
@@ -200,8 +193,6 @@ static inline bool dma_fence_is_i915(const struct dma_fence *fence)
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
- struct drm_file *file);
void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
static inline struct drm_i915_gem_request *
@@ -243,6 +234,30 @@ static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
*pdst = src;
}
+/**
+ * i915_gem_request_global_seqno - report the current global seqno
+ * @request: the request
+ *
+ * A request is assigned a global seqno only when it is on the hardware
+ * execution queue. The global seqno can be used to maintain a list of
+ * requests on the same engine in retirement order, for example for
+ * constructing a priority queue for waiting. Prior to its execution, or
+ * if it is subsequently removed in the event of preemption, its global
+ * seqno is zero. As both insertion and removal from the execution queue
+ * may operate in IRQ context, it is not guarded by the usual struct_mutex
+ * BKL. Instead those relying on the global seqno must be prepared for its
+ * value to change between reads. Only when the request is complete can
+ * the global seqno be stable (due to the memory barriers on submitting
+ * the commands to the hardware to write the breadcrumb, if the HWS shows
+ * that it has passed the global seqno and the global seqno is unchanged
+ * after the read, it is indeed complete).
+ */
+static inline u32
+i915_gem_request_global_seqno(const struct drm_i915_gem_request *request)
+{
+ return READ_ONCE(request->global_seqno);
+}
+
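A minimal sketch of the stability rule the comment spells out, with C11
atomics standing in for READ_ONCE() (the names and types here are
illustrative, not the driver's):

#include <stdatomic.h>
#include <stdbool.h>

static bool request_completed(atomic_uint *engine_seqno,
			      atomic_uint *global_seqno)
{
	unsigned int seqno = atomic_load(global_seqno);

	if (!seqno)		/* not on the hardware queue at all */
		return false;

	if ((int)(atomic_load(engine_seqno) - seqno) < 0)
		return false;	/* the HWS has not passed it yet */

	/* Re-read: a change means the request was unsubmitted meanwhile. */
	return atomic_load(global_seqno) == seqno;
}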
int
i915_gem_request_await_object(struct drm_i915_gem_request *to,
struct drm_i915_gem_object *obj,
@@ -252,13 +267,14 @@ int i915_gem_request_await_dma_fence(struct drm_i915_gem_request *req,
void __i915_add_request(struct drm_i915_gem_request *req, bool flush_caches);
#define i915_add_request(req) \
- __i915_add_request(req, true)
-#define i915_add_request_no_flush(req) \
__i915_add_request(req, false)
void __i915_gem_request_submit(struct drm_i915_gem_request *request);
void i915_gem_request_submit(struct drm_i915_gem_request *request);
+void __i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+void i915_gem_request_unsubmit(struct drm_i915_gem_request *request);
+
struct intel_rps_client;
#define NO_WAITBOOST ERR_PTR(-1)
#define IS_RPS_CLIENT(p) (!IS_ERR(p))
@@ -283,46 +299,58 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
}
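
i915_seqno_passed() itself is elided by the diff; assuming the usual
wrap-safe serial-number comparison, a standalone demonstration:

#include <stdint.h>
#include <stdio.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* Signed interpretation of the difference tolerates u32 wrap. */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, 3));           /* 1 */
	printf("%d\n", seqno_passed(3, 5));           /* 0 */
	printf("%d\n", seqno_passed(2, 0xfffffffeu)); /* 1: across the wrap */
	return 0;
}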
static inline bool
-__i915_gem_request_started(const struct drm_i915_gem_request *req)
+__i915_gem_request_started(const struct drm_i915_gem_request *req, u32 seqno)
{
- GEM_BUG_ON(!req->global_seqno);
+ GEM_BUG_ON(!seqno);
return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->previous_seqno);
+ seqno - 1);
}
static inline bool
i915_gem_request_started(const struct drm_i915_gem_request *req)
{
- if (!req->global_seqno)
+ u32 seqno;
+
+ seqno = i915_gem_request_global_seqno(req);
+ if (!seqno)
return false;
- return __i915_gem_request_started(req);
+ return __i915_gem_request_started(req, seqno);
}
static inline bool
-__i915_gem_request_completed(const struct drm_i915_gem_request *req)
+__i915_gem_request_completed(const struct drm_i915_gem_request *req, u32 seqno)
{
- GEM_BUG_ON(!req->global_seqno);
- return i915_seqno_passed(intel_engine_get_seqno(req->engine),
- req->global_seqno);
+ GEM_BUG_ON(!seqno);
+ return i915_seqno_passed(intel_engine_get_seqno(req->engine), seqno) &&
+ seqno == i915_gem_request_global_seqno(req);
}
static inline bool
i915_gem_request_completed(const struct drm_i915_gem_request *req)
{
- if (!req->global_seqno)
+ u32 seqno;
+
+ seqno = i915_gem_request_global_seqno(req);
+ if (!seqno)
return false;
- return __i915_gem_request_completed(req);
+ return __i915_gem_request_completed(req, seqno);
}
bool __i915_spin_request(const struct drm_i915_gem_request *request,
- int state, unsigned long timeout_us);
+ u32 seqno, int state, unsigned long timeout_us);
static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
int state, unsigned long timeout_us)
{
- return (__i915_gem_request_started(request) &&
- __i915_spin_request(request, state, timeout_us));
+ u32 seqno;
+
+ seqno = i915_gem_request_global_seqno(request);
+ if (!seqno)
+ return 0;
+
+ return (__i915_gem_request_started(request, seqno) &&
+ __i915_spin_request(request, seqno, state, timeout_us));
}
/* We treat requests as fences. This is not to be confused with our
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 401006b4c6a3..2978acdd995e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -207,7 +207,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
if (!(flags & I915_SHRINK_ACTIVE) &&
(i915_gem_object_is_active(obj) ||
- obj->framebuffer_references))
+ i915_gem_object_is_framebuffer(obj)))
continue;
if (!can_release_pages(obj))
@@ -259,11 +259,14 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
unsigned long freed;
+ intel_runtime_pm_get(dev_priv);
freed = i915_gem_shrink(dev_priv, -1UL,
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_ACTIVE);
- rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+ intel_runtime_pm_put(dev_priv);
+
+ synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */
return freed;
}
@@ -380,9 +383,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
return NOTIFY_DONE;
- intel_runtime_pm_get(dev_priv);
freed_pages = i915_gem_shrink_all(dev_priv);
- intel_runtime_pm_put(dev_priv);
/* Because we may be allocating inside our own driver, we cannot
* assert that there are no objects with pinned pages that are not
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9673bcc3b6ad..f3abdc27c5dd 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -79,12 +79,12 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
mutex_unlock(&dev_priv->mm.stolen_lock);
}
-static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
+static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct resource *r;
- u32 base;
+ dma_addr_t base;
/* Almost universally we can find the Graphics Base of Stolen Memory
* at register BSM (0x5c) in the igfx configuration space. On a few
@@ -189,14 +189,14 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
base = tom - tseg_size - ggtt->stolen_size;
}
- if (base == 0)
+ if (base == 0 || add_overflows(base, ggtt->stolen_size))
return 0;
/* make sure we don't clobber the GTT if it's within stolen memory */
if (INTEL_GEN(dev_priv) <= 4 &&
!IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
struct {
- u32 start, end;
+ dma_addr_t start, end;
} stolen[2] = {
{ .start = base, .end = base + ggtt->stolen_size, },
{ .start = base, .end = base + ggtt->stolen_size, },
@@ -228,11 +228,13 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
+ dma_addr_t end = base + ggtt->stolen_size - 1;
+
DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
(unsigned long long)ggtt_start,
(unsigned long long)ggtt_end - 1);
- DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
- base, base + (u32)ggtt->stolen_size - 1);
+ DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
+ &base, &end);
}
}
@@ -261,8 +263,10 @@ static unsigned long i915_stolen_to_physical(struct drm_i915_private *dev_priv)
* range. Apparently this works.
*/
if (r == NULL && !IS_GEN3(dev_priv)) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)ggtt->stolen_size);
+ dma_addr_t end = base + ggtt->stolen_size;
+
+ DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
+ &base, &end);
base = 0;
}
}
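
The move to %pad is deliberate: dma_addr_t can be 64-bit on a 32-bit kernel,
and the kernel's %pa/%pad specifiers print through a pointer to the value,
which is why &base and &end are passed. A kernel-context fragment restating
the idiom (assumes the surrounding driver code; not standalone):

	/* %pad dereferences a dma_addr_t pointer, so the full value is
	 * printed regardless of the width of dma_addr_t. */
	dma_addr_t base = dev_priv->mm.stolen_base;
	dma_addr_t end = base + ggtt->stolen_size - 1;

	DRM_DEBUG_KMS("stolen region [%pad - %pad]\n", &base, &end);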
@@ -281,13 +285,13 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)
}
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
- phys_addr_t *base, u32 *size)
+ dma_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
- phys_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
+ dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -304,7 +308,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
- phys_addr_t *base, u32 *size)
+ dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -330,7 +334,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
- phys_addr_t *base, u32 *size)
+ dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -350,7 +354,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
- phys_addr_t *base, u32 *size)
+ dma_addr_t *base, u32 *size)
{
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
@@ -376,11 +380,11 @@ static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
}
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
- phys_addr_t *base, u32 *size)
+ dma_addr_t *base, u32 *size)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
- phys_addr_t stolen_top;
+ dma_addr_t stolen_top;
stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
@@ -399,7 +403,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- phys_addr_t reserved_base, stolen_top;
+ dma_addr_t reserved_base, stolen_top;
u32 reserved_total, reserved_size;
u32 stolen_usable_start;
@@ -420,7 +424,7 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (ggtt->stolen_size == 0)
return 0;
- dev_priv->mm.stolen_base = i915_stolen_to_physical(dev_priv);
+ dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
if (dev_priv->mm.stolen_base == 0)
return 0;
@@ -469,8 +473,8 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
if (reserved_base < dev_priv->mm.stolen_base ||
reserved_base + reserved_size > stolen_top) {
- phys_addr_t reserved_top = reserved_base + reserved_size;
- DRM_DEBUG_KMS("Stolen reserved area [%pa - %pa] outside stolen memory [%pa - %pa]\n",
+ dma_addr_t reserved_top = reserved_base + reserved_size;
+ DRM_DEBUG_KMS("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
&reserved_base, &reserved_top,
&dev_priv->mm.stolen_base, &stolen_top);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 974ac08df473..a0d6d4317a49 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -158,13 +158,8 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
if (stride > 8192)
return false;
- if (IS_GEN3(i915)) {
- if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 20)
- return false;
- } else {
- if (obj->base.size > I830_FENCE_MAX_SIZE_VAL << 19)
- return false;
- }
+ if (!is_power_of_2(stride))
+ return false;
}
if (IS_GEN2(i915) ||
@@ -176,12 +171,7 @@ i915_tiling_ok(struct drm_i915_gem_object *obj,
if (!stride || !IS_ALIGNED(stride, tile_width))
return false;
- /* 965+ just needs multiples of tile width */
- if (INTEL_GEN(i915) >= 4)
- return true;
-
- /* Pre-965 needs power of two tile widths */
- return is_power_of_2(stride);
+ return true;
}
static bool i915_vma_fence_prepare(struct i915_vma *vma,
@@ -248,7 +238,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if ((tiling | stride) == obj->tiling_and_stride)
return 0;
- if (obj->framebuffer_references)
+ if (i915_gem_object_is_framebuffer(obj))
return -EBUSY;
/* We need to rebind the object if its current allocation
@@ -268,6 +258,12 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
if (err)
return err;
+ i915_gem_object_lock(obj);
+ if (i915_gem_object_is_framebuffer(obj)) {
+ i915_gem_object_unlock(obj);
+ return -EBUSY;
+ }
+
/* If the memory has unknown (i.e. varying) swizzling, we pin the
* pages to prevent them being swapped out and causing corruption
* due to the change in swizzling.
@@ -304,6 +300,7 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
}
obj->tiling_and_stride = tiling | stride;
+ i915_gem_object_unlock(obj);
/* Force the fence to be reacquired for GTT access */
i915_gem_release_mmap(obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_timeline.h b/drivers/gpu/drm/i915/i915_gem_timeline.h
index f2e51f42cc2f..6c53e14cab2a 100644
--- a/drivers/gpu/drm/i915/i915_gem_timeline.h
+++ b/drivers/gpu/drm/i915/i915_gem_timeline.h
@@ -33,7 +33,13 @@ struct i915_gem_timeline;
struct intel_timeline {
u64 fence_context;
- u32 last_submitted_seqno;
+ u32 seqno;
+
+ /**
+ * Count of outstanding requests, from the time they are constructed
+ * to the moment they are retired. Loosely coupled to hardware.
+ */
+ u32 inflight_seqnos;
spinlock_t lock;
@@ -56,7 +62,6 @@ struct intel_timeline {
struct i915_gem_timeline {
struct list_head link;
- atomic_t seqno;
struct drm_i915_private *i915;
const char *name;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 22b46398831e..58ccf8b8ca1c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -66,13 +66,18 @@ static void cancel_userptr(struct work_struct *work)
{
struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
struct drm_i915_gem_object *obj = mo->obj;
- struct drm_device *dev = obj->base.dev;
+ struct work_struct *active;
+
+ /* Cancel any active worker and force us to re-evaluate gup */
+ mutex_lock(&obj->mm.lock);
+ active = fetch_and_zero(&obj->userptr.work);
+ mutex_unlock(&obj->mm.lock);
+ if (active)
+ goto out;
i915_gem_object_wait(obj, I915_WAIT_ALL, MAX_SCHEDULE_TIMEOUT, NULL);
- mutex_lock(&dev->struct_mutex);
- /* Cancel any active worker and force us to re-evaluate gup */
- obj->userptr.work = NULL;
+ mutex_lock(&obj->base.dev->struct_mutex);
/* We are inside a kthread context and can't be interrupted */
if (i915_gem_object_unbind(obj) == 0)
@@ -83,8 +88,10 @@ static void cancel_userptr(struct work_struct *work)
atomic_read(&obj->mm.pages_pin_count),
obj->pin_display);
+ mutex_unlock(&obj->base.dev->struct_mutex);
+
+out:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
}
static void add_object(struct i915_mmu_object *mo)
@@ -145,7 +152,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
del_object(mo);
spin_unlock(&mn->lock);
- flush_workqueue(mn->wq);
+ if (!list_empty(&cancelled))
+ flush_workqueue(mn->wq);
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -541,6 +549,8 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
obj->userptr.work = ERR_CAST(pages);
+ if (IS_ERR(pages))
+ __i915_gem_userptr_set_active(obj, false);
}
mutex_unlock(&obj->mm.lock);
@@ -553,8 +563,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
}
static struct sg_table *
-__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
- bool *active)
+__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
{
struct get_pages_work *work;
@@ -591,7 +600,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
schedule_work(&work->work);
- *active = true;
return ERR_PTR(-EAGAIN);
}
@@ -599,10 +607,11 @@ static struct sg_table *
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
const int num_pages = obj->base.size >> PAGE_SHIFT;
+ struct mm_struct *mm = obj->userptr.mm->mm;
struct page **pvec;
struct sg_table *pages;
- int pinned, ret;
bool active;
+ int pinned;
/* If userspace should engineer that these pages are replaced in
* the vma between us binding this page into the GTT and completion
@@ -629,37 +638,39 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
return ERR_PTR(-EAGAIN);
}
- /* Let the mmu-notifier know that we have begun and need cancellation */
- ret = __i915_gem_userptr_set_active(obj, true);
- if (ret)
- return ERR_PTR(ret);
-
pvec = NULL;
pinned = 0;
- if (obj->userptr.mm->mm == current->mm) {
- pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
- GFP_TEMPORARY);
- if (pvec == NULL) {
- __i915_gem_userptr_set_active(obj, false);
- return ERR_PTR(-ENOMEM);
- }
- pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
- !obj->userptr.read_only, pvec);
+ if (mm == current->mm) {
+ pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+ GFP_TEMPORARY |
+ __GFP_NORETRY |
+ __GFP_NOWARN);
+ if (pvec) /* defer to worker if malloc fails */
+ pinned = __get_user_pages_fast(obj->userptr.ptr,
+ num_pages,
+ !obj->userptr.read_only,
+ pvec);
}
active = false;
- if (pinned < 0)
- pages = ERR_PTR(pinned), pinned = 0;
- else if (pinned < num_pages)
- pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
- else
+ if (pinned < 0) {
+ pages = ERR_PTR(pinned);
+ pinned = 0;
+ } else if (pinned < num_pages) {
+ pages = __i915_gem_userptr_get_pages_schedule(obj);
+ active = pages == ERR_PTR(-EAGAIN);
+ } else {
pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
- if (IS_ERR(pages)) {
- __i915_gem_userptr_set_active(obj, active);
- release_pages(pvec, pinned, 0);
+ active = !IS_ERR(pages);
}
+ if (active)
+ __i915_gem_userptr_set_active(obj, true);
+
+ if (IS_ERR(pages))
+ release_pages(pvec, pinned, 0);
drm_free_large(pvec);
+
return pages;
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9cd22cda17af..8effc59f5cb5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -342,7 +342,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
}
static void error_print_instdone(struct drm_i915_error_state_buf *m,
- struct drm_i915_error_engine *ee)
+ const struct drm_i915_error_engine *ee)
{
int slice;
int subslice;
@@ -372,7 +372,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
static void error_print_request(struct drm_i915_error_state_buf *m,
const char *prefix,
- struct drm_i915_error_request *erq)
+ const struct drm_i915_error_request *erq)
{
if (!erq->seqno)
return;
@@ -384,8 +384,17 @@ static void error_print_request(struct drm_i915_error_state_buf *m,
erq->head, erq->tail);
}
+static void error_print_context(struct drm_i915_error_state_buf *m,
+ const char *header,
+ const struct drm_i915_error_context *ctx)
+{
+ err_printf(m, "%s%s[%d] user_handle %d hw_id %d, ban score %d guilty %d active %d\n",
+ header, ctx->comm, ctx->pid, ctx->handle, ctx->hw_id,
+ ctx->ban_score, ctx->guilty, ctx->active);
+}
+
static void error_print_engine(struct drm_i915_error_state_buf *m,
- struct drm_i915_error_engine *ee)
+ const struct drm_i915_error_engine *ee)
{
err_printf(m, "%s command stream:\n", engine_str(ee->engine_id));
err_printf(m, " START: 0x%08x\n", ee->start);
@@ -457,6 +466,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
error_print_request(m, " ELSP[0]: ", &ee->execlist[0]);
error_print_request(m, " ELSP[1]: ", &ee->execlist[1]);
+ error_print_context(m, " Active context: ", &ee->context);
}
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -536,21 +546,57 @@ static void err_print_capabilities(struct drm_i915_error_state_buf *m,
#undef PRINT_FLAG
}
+static __always_inline void err_print_param(struct drm_i915_error_state_buf *m,
+ const char *name,
+ const char *type,
+ const void *x)
+{
+ if (!__builtin_strcmp(type, "bool"))
+ err_printf(m, "i915.%s=%s\n", name, yesno(*(const bool *)x));
+ else if (!__builtin_strcmp(type, "int"))
+ err_printf(m, "i915.%s=%d\n", name, *(const int *)x);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ err_printf(m, "i915.%s=%u\n", name, *(const unsigned int *)x);
+ else if (!__builtin_strcmp(type, "char *"))
+ err_printf(m, "i915.%s=%s\n", name, *(const char **)x);
+ else
+ BUILD_BUG();
+}
+
+static void err_print_params(struct drm_i915_error_state_buf *m,
+ const struct i915_params *p)
+{
+#define PRINT(T, x) err_print_param(m, #x, #T, &p->x);
+ I915_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
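
err_print_param() relies on the compiler folding the __builtin_strcmp() on the stringised type so only one branch survives per parameter, with I915_PARAMS_FOR_EACH (added in i915_params.h later in this diff) supplying the (type, name) pairs. A small self-contained sketch of the same X-macro dispatch, using plain strcmp() and made-up parameters:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PARAMS_FOR_EACH(func) \
	func(int, level); \
	func(bool, verbose)

#define MEMBER(T, member) T member
struct params {
	PARAMS_FOR_EACH(MEMBER);
};
#undef MEMBER

/* dispatch on the stringised type, as err_print_param() does */
static void print_param(const char *name, const char *type, const void *x)
{
	if (!strcmp(type, "bool"))
		printf("demo.%s=%s\n", name, *(const bool *)x ? "yes" : "no");
	else if (!strcmp(type, "int"))
		printf("demo.%s=%d\n", name, *(const int *)x);
}

int main(void)
{
	struct params p = { .level = 3, .verbose = true };

#define PRINT(T, x) print_param(#x, #T, &p.x)
	PARAMS_FOR_EACH(PRINT);
#undef PRINT
	return 0;
}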
+
+static void err_print_pciid(struct drm_i915_error_state_buf *m,
+ struct drm_i915_private *i915)
+{
+ struct pci_dev *pdev = i915->drm.pdev;
+
+ err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
+ err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
+ err_printf(m, "PCI Subsystem: %04x:%04x\n",
+ pdev->subsystem_vendor,
+ pdev->subsystem_device);
+}
+
int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
- const struct i915_error_state_file_priv *error_priv)
+ const struct i915_gpu_state *error)
{
- struct drm_i915_private *dev_priv = error_priv->i915;
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_error_state *error = error_priv->error;
+ struct drm_i915_private *dev_priv = m->i915;
struct drm_i915_error_object *obj;
int i, j;
if (!error) {
- err_printf(m, "no error state collected\n");
- goto out;
+ err_printf(m, "No error state collected\n");
+ return 0;
}
- err_printf(m, "%s\n", error->error_msg);
+ if (*error->error_msg)
+ err_printf(m, "%s\n", error->error_msg);
err_printf(m, "Kernel: " UTS_RELEASE "\n");
err_printf(m, "Time: %ld s %ld us\n",
error->time.tv_sec, error->time.tv_usec);
@@ -558,26 +604,22 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->boottime.tv_sec, error->boottime.tv_usec);
err_printf(m, "Uptime: %ld s %ld us\n",
error->uptime.tv_sec, error->uptime.tv_usec);
- err_print_capabilities(m, &error->device_info);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
if (error->engine[i].hangcheck_stalled &&
- error->engine[i].pid != -1) {
- err_printf(m, "Active process (on ring %s): %s [%d], context bans %d\n",
+ error->engine[i].context.pid) {
+ err_printf(m, "Active process (on ring %s): %s [%d], score %d\n",
engine_str(i),
- error->engine[i].comm,
- error->engine[i].pid,
- error->engine[i].context_bans);
+ error->engine[i].context.comm,
+ error->engine[i].context.pid,
+ error->engine[i].context.ban_score);
}
}
err_printf(m, "Reset count: %u\n", error->reset_count);
err_printf(m, "Suspend count: %u\n", error->suspend_count);
err_printf(m, "Platform: %s\n", intel_platform_name(error->device_info.platform));
- err_printf(m, "PCI ID: 0x%04x\n", pdev->device);
- err_printf(m, "PCI Revision: 0x%02x\n", pdev->revision);
- err_printf(m, "PCI Subsystem: %04x:%04x\n",
- pdev->subsystem_vendor,
- pdev->subsystem_device);
+ err_print_pciid(m, error->i915);
+
err_printf(m, "IOMMU enabled?: %d\n", error->iommu);
if (HAS_CSR(dev_priv)) {
@@ -590,21 +632,20 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
CSR_VERSION_MINOR(csr->version));
}
+ err_printf(m, "GT awake: %s\n", yesno(error->awake));
+ err_printf(m, "RPM wakelock: %s\n", yesno(error->wakelock));
+ err_printf(m, "PM suspended: %s\n", yesno(error->suspended));
err_printf(m, "EIR: 0x%08x\n", error->eir);
err_printf(m, "IER: 0x%08x\n", error->ier);
- if (INTEL_GEN(dev_priv) >= 8) {
- for (i = 0; i < 4; i++)
- err_printf(m, "GTIER gt %d: 0x%08x\n", i,
- error->gtier[i]);
- } else if (HAS_PCH_SPLIT(dev_priv) || IS_VALLEYVIEW(dev_priv))
- err_printf(m, "GTIER: 0x%08x\n", error->gtier[0]);
+ for (i = 0; i < error->ngtier; i++)
+ err_printf(m, "GTIER[%d]: 0x%08x\n", i, error->gtier[i]);
err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
err_printf(m, "CCID: 0x%08x\n", error->ccid);
err_printf(m, "Missed interrupts: 0x%08lx\n", dev_priv->gpu_error.missed_irq_rings);
- for (i = 0; i < dev_priv->num_fence_regs; i++)
+ for (i = 0; i < error->nfence; i++)
err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
if (INTEL_GEN(dev_priv) >= 6) {
@@ -653,16 +694,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
- struct drm_i915_error_engine *ee = &error->engine[i];
+ const struct drm_i915_error_engine *ee = &error->engine[i];
obj = ee->batchbuffer;
if (obj) {
err_puts(m, dev_priv->engine[i]->name);
- if (ee->pid != -1)
- err_printf(m, " (submitted by %s [%d], bans %d)",
- ee->comm,
- ee->pid,
- ee->context_bans);
+ if (ee->context.pid)
+ err_printf(m, " (submitted by %s [%d], ctx %d [%d], score %d)",
+ ee->context.comm,
+ ee->context.pid,
+ ee->context.handle,
+ ee->context.hw_id,
+ ee->context.ban_score);
err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
upper_32_bits(obj->gtt_offset),
lower_32_bits(obj->gtt_offset));
@@ -716,9 +759,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
intel_overlay_print_error_state(m, error->overlay);
if (error->display)
- intel_display_print_error_state(m, dev_priv, error->display);
+ intel_display_print_error_state(m, error->display);
+
+ err_print_capabilities(m, &error->device_info);
+ err_print_params(m, &error->params);
-out:
if (m->bytes == 0 && m->err)
return m->err;
@@ -770,10 +815,16 @@ static void i915_error_object_free(struct drm_i915_error_object *obj)
kfree(obj);
}
-static void i915_error_state_free(struct kref *error_ref)
+static __always_inline void free_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *"))
+ kfree(*(void **)x);
+}
+
+void __i915_gpu_state_free(struct kref *error_ref)
{
- struct drm_i915_error_state *error = container_of(error_ref,
- typeof(*error), ref);
+ struct i915_gpu_state *error =
+ container_of(error_ref, typeof(*error), ref);
int i;
for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
@@ -800,6 +851,11 @@ static void i915_error_state_free(struct kref *error_ref)
kfree(error->overlay);
kfree(error->display);
+
+#define FREE(T, x) free_param(#T, &error->params.x);
+ I915_PARAMS_FOR_EACH(FREE);
+#undef FREE
+
kfree(error);
}
@@ -938,7 +994,7 @@ static u32 capture_error_bo(struct drm_i915_error_buffer *err,
* It's only a small step better than a random number in its current form.
*/
static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error,
+ struct i915_gpu_state *error,
int *engine_id)
{
uint32_t error_code = 0;
@@ -963,20 +1019,21 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
}
static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
int i;
- if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 6) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ(FENCE_REG(i));
- } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
+ error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+ } else if (INTEL_GEN(dev_priv) >= 4) {
for (i = 0; i < dev_priv->num_fence_regs; i++)
error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
- } else if (INTEL_GEN(dev_priv) >= 6) {
+ } else {
for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
+ error->fence[i] = I915_READ(FENCE_REG(i));
}
+ error->nfence = i;
}
static inline u32
@@ -1000,7 +1057,7 @@ gen8_engine_sync_index(struct intel_engine_cs *engine,
return idx;
}
-static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
+static void gen8_record_semaphore_state(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@@ -1054,7 +1111,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (RB_EMPTY_ROOT(&b->waiters))
return;
- if (!spin_trylock_irq(&b->lock)) {
+ if (!spin_trylock_irq(&b->rb_lock)) {
ee->waiters = ERR_PTR(-EDEADLK);
return;
}
@@ -1062,7 +1119,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
count = 0;
for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
count++;
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
waiter = NULL;
if (count)
@@ -1072,7 +1129,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (!waiter)
return;
- if (!spin_trylock_irq(&b->lock)) {
+ if (!spin_trylock_irq(&b->rb_lock)) {
kfree(waiter);
ee->waiters = ERR_PTR(-EDEADLK);
return;
@@ -1080,7 +1137,7 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
ee->waiters = waiter;
for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
- struct intel_wait *w = container_of(rb, typeof(*w), node);
+ struct intel_wait *w = rb_entry(rb, typeof(*w), node);
strcpy(waiter->comm, w->tsk->comm);
waiter->pid = w->tsk->pid;
@@ -1090,10 +1147,10 @@ static void error_record_engine_waiters(struct intel_engine_cs *engine,
if (++ee->num_waiters == count)
break;
}
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
}
-static void error_record_engine_registers(struct drm_i915_error_state *error,
+static void error_record_engine_registers(struct i915_gpu_state *error,
struct intel_engine_cs *engine,
struct drm_i915_error_engine *ee)
{
@@ -1267,8 +1324,30 @@ static void error_record_engine_execlists(struct intel_engine_cs *engine,
&ee->execlist[n]);
}
+static void record_context(struct drm_i915_error_context *e,
+ struct i915_gem_context *ctx)
+{
+ if (ctx->pid) {
+ struct task_struct *task;
+
+ rcu_read_lock();
+ task = pid_task(ctx->pid, PIDTYPE_PID);
+ if (task) {
+ strcpy(e->comm, task->comm);
+ e->pid = task->pid;
+ }
+ rcu_read_unlock();
+ }
+
+ e->handle = ctx->user_handle;
+ e->hw_id = ctx->hw_id;
+ e->ban_score = ctx->ban_score;
+ e->guilty = ctx->guilty_count;
+ e->active = ctx->active_count;
+}
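
record_context() must resolve the pid to a task name under rcu_read_lock(), since the owning task can exit while the hang is being captured. The locking pattern in isolation, as a kernel-style sketch (not standalone-compilable; pid is assumed to be a struct pid * held by the context):

struct task_struct *task;

rcu_read_lock();
task = pid_task(pid, PIDTYPE_PID);	/* NULL if the task already exited */
if (task)
	pr_info("context owned by %s [%d]\n", task->comm, task->pid);
rcu_read_unlock();
/* task must not be dereferenced after rcu_read_unlock() */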
+
static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
int i;
@@ -1281,7 +1360,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
struct drm_i915_error_engine *ee = &error->engine[i];
struct drm_i915_gem_request *request;
- ee->pid = -1;
ee->engine_id = -1;
if (!engine)
@@ -1296,11 +1374,12 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
request = i915_gem_find_active_request(engine);
if (request) {
struct intel_ring *ring;
- struct pid *pid;
ee->vm = request->ctx->ppgtt ?
&request->ctx->ppgtt->base : &ggtt->base;
+ record_context(&ee->context, request->ctx);
+
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
* by userspace.
@@ -1318,19 +1397,6 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
i915_error_object_create(dev_priv,
request->ctx->engine[i].state);
- pid = request->ctx->pid;
- if (pid) {
- struct task_struct *task;
-
- rcu_read_lock();
- task = pid_task(pid, PIDTYPE_PID);
- if (task) {
- strcpy(ee->comm, task->comm);
- ee->pid = task->pid;
- }
- rcu_read_unlock();
- }
-
error->simulated |=
i915_gem_context_no_error_capture(request->ctx);
@@ -1357,7 +1423,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
}
static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error,
+ struct i915_gpu_state *error,
struct i915_address_space *vm,
int idx)
{
@@ -1383,7 +1449,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
}
static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
int cnt = 0, i, j;
@@ -1408,7 +1474,7 @@ static void i915_capture_active_buffers(struct drm_i915_private *dev_priv,
}
static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_i915_error_buffer *bo;
@@ -1439,7 +1505,7 @@ static void i915_capture_pinned_buffers(struct drm_i915_private *dev_priv,
}
static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
/* Capturing log buf contents won't be useful if logging was disabled */
if (!dev_priv->guc.log.vma || (i915.guc_log_level < 0))
@@ -1451,7 +1517,7 @@ static void i915_gem_capture_guc_log_buffer(struct drm_i915_private *dev_priv,
/* Capture all registers which don't fit into another category. */
static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
int i;
@@ -1508,9 +1574,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
error->ier = I915_READ(GEN8_DE_MISC_IER);
for (i = 0; i < 4; i++)
error->gtier[i] = I915_READ(GEN8_GT_IER(i));
+ error->ngtier = 4;
} else if (HAS_PCH_SPLIT(dev_priv)) {
error->ier = I915_READ(DEIER);
error->gtier[0] = I915_READ(GTIER);
+ error->ngtier = 1;
} else if (IS_GEN2(dev_priv)) {
error->ier = I915_READ16(IER);
} else if (!IS_VALLEYVIEW(dev_priv)) {
@@ -1521,7 +1589,7 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
}
static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error,
+ struct i915_gpu_state *error,
u32 engine_mask,
const char *error_msg)
{
@@ -1534,12 +1602,12 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
"GPU HANG: ecode %d:%d:0x%08x",
INTEL_GEN(dev_priv), engine_id, ecode);
- if (engine_id != -1 && error->engine[engine_id].pid != -1)
+ if (engine_id != -1 && error->engine[engine_id].context.pid)
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
", in %s [%d]",
- error->engine[engine_id].comm,
- error->engine[engine_id].pid);
+ error->engine[engine_id].context.comm,
+ error->engine[engine_id].context.pid);
scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
", reason: %s, action: %s",
@@ -1548,8 +1616,12 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
}
static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
- struct drm_i915_error_state *error)
+ struct i915_gpu_state *error)
{
+ error->awake = dev_priv->gt.awake;
+ error->wakelock = atomic_read(&dev_priv->pm.wakeref_count);
+ error->suspended = dev_priv->pm.suspended;
+
error->iommu = -1;
#ifdef CONFIG_INTEL_IOMMU
error->iommu = intel_iommu_gfx_mapped;
@@ -1562,9 +1634,26 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
sizeof(error->device_info));
}
+static __always_inline void dup_param(const char *type, void *x)
+{
+ if (!__builtin_strcmp(type, "char *"))
+ *(void **)x = kstrdup(*(void **)x, GFP_ATOMIC);
+}
+
static int capture(void *data)
{
- struct drm_i915_error_state *error = data;
+ struct i915_gpu_state *error = data;
+
+ do_gettimeofday(&error->time);
+ error->boottime = ktime_to_timeval(ktime_get_boottime());
+ error->uptime =
+ ktime_to_timeval(ktime_sub(ktime_get(),
+ error->i915->gt.last_init_time));
+
+ error->params = i915;
+#define DUP(T, x) dup_param(#T, &error->params.x);
+ I915_PARAMS_FOR_EACH(DUP);
+#undef DUP
i915_capture_gen_state(error->i915, error);
i915_capture_reg_state(error->i915, error);
@@ -1574,12 +1663,6 @@ static int capture(void *data)
i915_capture_pinned_buffers(error->i915, error);
i915_gem_capture_guc_log_buffer(error->i915, error);
- do_gettimeofday(&error->time);
- error->boottime = ktime_to_timeval(ktime_get_boottime());
- error->uptime =
- ktime_to_timeval(ktime_sub(ktime_get(),
- error->i915->gt.last_init_time));
-
error->overlay = intel_overlay_capture_error_state(error->i915);
error->display = intel_display_capture_error_state(error->i915);
@@ -1588,6 +1671,23 @@ static int capture(void *data)
#define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x))
+struct i915_gpu_state *
+i915_capture_gpu_state(struct drm_i915_private *i915)
+{
+ struct i915_gpu_state *error;
+
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error)
+ return NULL;
+
+ kref_init(&error->ref);
+ error->i915 = i915;
+
+ stop_machine(capture, error, NULL);
+
+ return error;
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
 * @dev_priv: i915 device private
@@ -1602,7 +1702,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
const char *error_msg)
{
static bool warned;
- struct drm_i915_error_state *error;
+ struct i915_gpu_state *error;
unsigned long flags;
if (!i915.error_capture)
@@ -1611,18 +1711,12 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
if (READ_ONCE(dev_priv->gpu_error.first_error))
return;
- /* Account for pipe specific data like PIPE*STAT */
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ error = i915_capture_gpu_state(dev_priv);
if (!error) {
DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
return;
}
- kref_init(&error->ref);
- error->i915 = dev_priv;
-
- stop_machine(capture, error, NULL);
-
i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
DRM_INFO("%s\n", error->error_msg);
@@ -1636,7 +1730,7 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
if (error) {
- i915_error_state_free(&error->ref);
+ __i915_gpu_state_free(&error->ref);
return;
}
@@ -1652,33 +1746,28 @@ void i915_capture_error_state(struct drm_i915_private *dev_priv,
}
}
-void i915_error_state_get(struct drm_device *dev,
- struct i915_error_state_file_priv *error_priv)
+struct i915_gpu_state *
+i915_first_error_state(struct drm_i915_private *i915)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_gpu_state *error;
- spin_lock_irq(&dev_priv->gpu_error.lock);
- error_priv->error = dev_priv->gpu_error.first_error;
- if (error_priv->error)
- kref_get(&error_priv->error->ref);
- spin_unlock_irq(&dev_priv->gpu_error.lock);
-}
+ spin_lock_irq(&i915->gpu_error.lock);
+ error = i915->gpu_error.first_error;
+ if (error)
+ i915_gpu_state_get(error);
+ spin_unlock_irq(&i915->gpu_error.lock);
-void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
-{
- if (error_priv->error)
- kref_put(&error_priv->error->ref, i915_error_state_free);
+ return error;
}
-void i915_destroy_error_state(struct drm_i915_private *dev_priv)
+void i915_reset_error_state(struct drm_i915_private *i915)
{
- struct drm_i915_error_state *error;
+ struct i915_gpu_state *error;
- spin_lock_irq(&dev_priv->gpu_error.lock);
- error = dev_priv->gpu_error.first_error;
- dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irq(&dev_priv->gpu_error.lock);
+ spin_lock_irq(&i915->gpu_error.lock);
+ error = i915->gpu_error.first_error;
+ i915->gpu_error.first_error = NULL;
+ spin_unlock_irq(&i915->gpu_error.lock);
- if (error)
- kref_put(&error->ref, i915_error_state_free);
+ i915_gpu_state_put(error);
}
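
With the rename, readers obtain the first error state by reference and drop it through the kref release; a hedged usage sketch, assuming i915_gpu_state_get()/i915_gpu_state_put() are the kref_get()/kref_put(..., __i915_gpu_state_free) wrappers declared alongside these functions:

struct i915_gpu_state *error;

error = i915_first_error_state(i915);	/* takes a reference under the lock */
if (error) {
	/* ... format it via i915_error_state_to_str() ... */
	i915_gpu_state_put(error);	/* __i915_gpu_state_free() at refcount zero */
}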
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 8ced9e26f075..832ac9e45801 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -25,6 +25,8 @@
#include "i915_drv.h"
#include "intel_uc.h"
+#include <trace/events/dma_fence.h>
+
/**
* DOC: GuC-based command submission
*
@@ -348,7 +350,7 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
u32 freespace;
int ret;
- spin_lock(&client->wq_lock);
+ spin_lock_irq(&client->wq_lock);
freespace = CIRC_SPACE(client->wq_tail, desc->head, client->wq_size);
freespace -= client->wq_rsvd;
if (likely(freespace >= wqi_size)) {
@@ -358,21 +360,27 @@ int i915_guc_wq_reserve(struct drm_i915_gem_request *request)
client->no_wq_space++;
ret = -EAGAIN;
}
- spin_unlock(&client->wq_lock);
+ spin_unlock_irq(&client->wq_lock);
return ret;
}
+static void guc_client_update_wq_rsvd(struct i915_guc_client *client, int size)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&client->wq_lock, flags);
+ client->wq_rsvd += size;
+ spin_unlock_irqrestore(&client->wq_lock, flags);
+}
+
void i915_guc_wq_unreserve(struct drm_i915_gem_request *request)
{
- const size_t wqi_size = sizeof(struct guc_wq_item);
+ const int wqi_size = sizeof(struct guc_wq_item);
struct i915_guc_client *client = request->i915->guc.execbuf_client;
GEM_BUG_ON(READ_ONCE(client->wq_rsvd) < wqi_size);
-
- spin_lock(&client->wq_lock);
- client->wq_rsvd -= wqi_size;
- spin_unlock(&client->wq_lock);
+ guc_client_update_wq_rsvd(client, -wqi_size);
}
/* Construct a Work Item and append it to the GuC's Work Queue */
@@ -509,15 +517,16 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
unsigned int engine_id = engine->id;
struct intel_guc *guc = &rq->i915->guc;
struct i915_guc_client *client = guc->execbuf_client;
+ unsigned long flags;
int b_ret;
- spin_lock(&client->wq_lock);
- guc_wq_item_append(client, rq);
-
/* WA to flush out the pending GMADR writes to ring buffer. */
if (i915_vma_is_map_and_fenceable(rq->ring->vma))
POSTING_READ_FW(GUC_STATUS);
+ spin_lock_irqsave(&client->wq_lock, flags);
+
+ guc_wq_item_append(client, rq);
b_ret = guc_ring_doorbell(client);
client->submissions[engine_id] += 1;
@@ -527,15 +536,117 @@ static void __i915_guc_submit(struct drm_i915_gem_request *rq)
guc->submissions[engine_id] += 1;
guc->last_seqno[engine_id] = rq->global_seqno;
- spin_unlock(&client->wq_lock);
+
+ spin_unlock_irqrestore(&client->wq_lock, flags);
}
static void i915_guc_submit(struct drm_i915_gem_request *rq)
{
- i915_gem_request_submit(rq);
+ __i915_gem_request_submit(rq);
__i915_guc_submit(rq);
}
+static void nested_enable_signaling(struct drm_i915_gem_request *rq)
+{
+ /* If we use dma_fence_enable_sw_signaling() directly, lockdep
+ * detects an ordering issue between the fence lockclass and the
+ * global_timeline. This circular dependency can only occur via 2
+ * different fences (but same fence lockclass), so we use the nesting
+ * annotation here to prevent the warning, equivalent to the
+ * nesting inside i915_gem_request_submit() when we also enable
+ * the signaler.
+ */
+
+ if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &rq->fence.flags))
+ return;
+
+ GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags));
+ trace_dma_fence_enable_signal(&rq->fence);
+
+ spin_lock_nested(&rq->lock, SINGLE_DEPTH_NESTING);
+ intel_engine_enable_signaling(rq);
+ spin_unlock(&rq->lock);
+}
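
SINGLE_DEPTH_NESTING is the generic lockdep escape hatch for taking two locks of the same class in a known-safe order; in isolation the pattern looks like this (kernel-style sketch with hypothetical objects a and b whose locks share a lockclass):

spin_lock(&a->lock);
/* lockdep would flag this as a recursive acquisition of the same
 * class; the nesting subclass tells it the order is intentional */
spin_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
/* ... work under both locks ... */
spin_unlock(&b->lock);
spin_unlock(&a->lock);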
+
+static bool i915_guc_dequeue(struct intel_engine_cs *engine)
+{
+ struct execlist_port *port = engine->execlist_port;
+ struct drm_i915_gem_request *last = port[0].request;
+ unsigned long flags;
+ struct rb_node *rb;
+ bool submit = false;
+
+ /* After execlist_first is updated, the tasklet will be rescheduled.
+ *
+ * If we are currently running (inside the tasklet) and a third
+ * party queues a request and so updates engine->execlist_first under
+ * the spinlock (which we have elided), it will atomically set the
+ * TASKLET_SCHED flag causing us to be re-executed and pick up
+ * the change in state (the update to TASKLET_SCHED incurs a memory
+ * barrier making this cross-cpu checking safe).
+ */
+ if (!READ_ONCE(engine->execlist_first))
+ return false;
+
+ spin_lock_irqsave(&engine->timeline->lock, flags);
+ rb = engine->execlist_first;
+ while (rb) {
+ struct drm_i915_gem_request *rq =
+ rb_entry(rb, typeof(*rq), priotree.node);
+
+ if (last && rq->ctx != last->ctx) {
+ if (port != engine->execlist_port)
+ break;
+
+ i915_gem_request_assign(&port->request, last);
+ nested_enable_signaling(last);
+ port++;
+ }
+
+ rb = rb_next(rb);
+ rb_erase(&rq->priotree.node, &engine->execlist_queue);
+ RB_CLEAR_NODE(&rq->priotree.node);
+ rq->priotree.priority = INT_MAX;
+
+ trace_i915_gem_request_in(rq, port - engine->execlist_port);
+ i915_guc_submit(rq);
+ last = rq;
+ submit = true;
+ }
+ if (submit) {
+ i915_gem_request_assign(&port->request, last);
+ nested_enable_signaling(last);
+ engine->execlist_first = rb;
+ }
+ spin_unlock_irqrestore(&engine->timeline->lock, flags);
+
+ return submit;
+}
+
+static void i915_guc_irq_handler(unsigned long data)
+{
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct execlist_port *port = engine->execlist_port;
+ struct drm_i915_gem_request *rq;
+ bool submit;
+
+ do {
+ rq = port[0].request;
+ while (rq && i915_gem_request_completed(rq)) {
+ trace_i915_gem_request_out(rq);
+ i915_gem_request_put(rq);
+ port[0].request = port[1].request;
+ port[1].request = NULL;
+ rq = port[0].request;
+ }
+
+ submit = false;
+ if (!port[1].request)
+ submit = i915_guc_dequeue(engine);
+ } while (submit);
+}
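
i915_guc_irq_handler() mirrors the execlists model: two submission ports, with port[0] retiring first, port[1] shifting down, and a dequeue attempted only once a port is free. The shift in isolation, as a runnable userspace sketch with a fake seqno check:

#include <stdio.h>

struct req { int seqno; };

int main(void)
{
	struct req a = { .seqno = 1 }, b = { .seqno = 2 };
	struct req *port[2] = { &a, &b };
	int last_completed_seqno = 1;	/* pretend the hw finished seqno 1 */

	/* retire completed requests from port[0], shifting port[1] down,
	 * exactly as the irq handler above does */
	while (port[0] && port[0]->seqno <= last_completed_seqno) {
		printf("retiring seqno %d\n", port[0]->seqno);
		port[0] = port[1];
		port[1] = NULL;
	}

	/* port[1] is now free, so a dequeue would be attempted here */
	printf("port[0] holds seqno %d\n", port[0] ? port[0]->seqno : -1);
	return 0;
}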
+
/*
* Everything below here is concerned with setup & teardown, and is
* therefore not part of the somewhat time-critical batch-submission
@@ -800,22 +911,21 @@ static void guc_addon_create(struct intel_guc *guc)
{
struct drm_i915_private *dev_priv = guc_to_i915(guc);
struct i915_vma *vma;
- struct guc_ads *ads;
- struct guc_policies *policies;
- struct guc_mmio_reg_state *reg_state;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
struct page *page;
- u32 size;
-
/* The ads obj includes the struct itself and buffers passed to GuC */
- size = sizeof(struct guc_ads) + sizeof(struct guc_policies) +
- sizeof(struct guc_mmio_reg_state) +
- GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE;
+ struct {
+ struct guc_ads ads;
+ struct guc_policies policies;
+ struct guc_mmio_reg_state reg_state;
+ u8 reg_state_buffer[GUC_S3_SAVE_SPACE_PAGES * PAGE_SIZE];
+ } __packed *blob;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ u32 base;
vma = guc->ads_vma;
if (!vma) {
- vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(size));
+ vma = intel_guc_allocate_vma(guc, PAGE_ALIGN(sizeof(*blob)));
if (IS_ERR(vma))
return;
@@ -823,44 +933,38 @@ static void guc_addon_create(struct intel_guc *guc)
}
page = i915_vma_first_page(vma);
- ads = kmap(page);
-
- /*
- * The GuC requires a "Golden Context" when it reinitialises
- * engines after a reset. Here we use the Render ring default
- * context, which must already exist and be pinned in the GGTT,
- * so its address won't change after we've told the GuC where
- * to find it.
- */
- engine = dev_priv->engine[RCS];
- ads->golden_context_lrca = engine->status_page.ggtt_offset;
-
- for_each_engine(engine, dev_priv, id)
- ads->eng_state_size[engine->guc_id] = intel_lr_context_size(engine);
+ blob = kmap(page);
/* GuC scheduling policies */
- policies = (void *)ads + sizeof(struct guc_ads);
- guc_policies_init(policies);
-
- ads->scheduler_policies =
- guc_ggtt_offset(vma) + sizeof(struct guc_ads);
+ guc_policies_init(&blob->policies);
/* MMIO reg state */
- reg_state = (void *)policies + sizeof(struct guc_policies);
-
for_each_engine(engine, dev_priv, id) {
- reg_state->mmio_white_list[engine->guc_id].mmio_start =
+ blob->reg_state.mmio_white_list[engine->guc_id].mmio_start =
engine->mmio_base + GUC_MMIO_WHITE_LIST_START;
/* Nothing to be saved or restored for now. */
- reg_state->mmio_white_list[engine->guc_id].count = 0;
+ blob->reg_state.mmio_white_list[engine->guc_id].count = 0;
}
- ads->reg_state_addr = ads->scheduler_policies +
- sizeof(struct guc_policies);
+ /*
+ * The GuC requires a "Golden Context" when it reinitialises
+ * engines after a reset. Here we use the Render ring default
+ * context, which must already exist and be pinned in the GGTT,
+ * so its address won't change after we've told the GuC where
+ * to find it.
+ */
+ blob->ads.golden_context_lrca =
+ dev_priv->engine[RCS]->status_page.ggtt_offset;
+
+ for_each_engine(engine, dev_priv, id)
+ blob->ads.eng_state_size[engine->guc_id] =
+ intel_lr_context_size(engine);
- ads->reg_state_buffer = ads->reg_state_addr +
- sizeof(struct guc_mmio_reg_state);
+ base = guc_ggtt_offset(vma);
+ blob->ads.scheduler_policies = base + ptr_offset(blob, policies);
+ blob->ads.reg_state_buffer = base + ptr_offset(blob, reg_state_buffer);
+ blob->ads.reg_state_addr = base + ptr_offset(blob, reg_state);
kunmap(page);
}
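
Replacing the running-size arithmetic with one packed struct means each sub-buffer's GGTT address falls out of its member offset; ptr_offset() is assumed here to reduce to offsetof() on the pointed-to type. A compilable sketch of the layout trick with made-up member sizes:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct blob {
	uint32_t ads[4];
	uint32_t policies[8];
	uint8_t reg_state_buffer[64];
} __attribute__((packed));

int main(void)
{
	size_t base = 0x1000;	/* pretend GGTT offset of the backing vma */

	printf("policies at 0x%zx\n", base + offsetof(struct blob, policies));
	printf("reg_state_buffer at 0x%zx\n",
	       base + offsetof(struct blob, reg_state_buffer));
	return 0;
}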
@@ -926,6 +1030,48 @@ static void guc_reset_wq(struct i915_guc_client *client)
client->wq_tail = 0;
}
+static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int irqs;
+
+ /* tell all command streamers to forward interrupts (but not vblank) to GuC */
+ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+ /* route USER_INTERRUPT to Host, all others are sent to GuC. */
+ irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
+ GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+ /* These three registers have the same bit definitions */
+ I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
+ I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
+ I915_WRITE(GUC_WD_VECS_IER, ~irqs);
+
+ /*
+ * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
+ * (unmasked) PM interrupts to the GuC. All other bits of this
+ * register *disable* generation of a specific interrupt.
+ *
+ * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
+ * writing to the PM interrupt mask register, i.e. interrupts
+ * that must not be disabled.
+ *
+ * If the GuC is handling these interrupts, then we must not let
+ * the PM code disable ANY interrupt that the GuC is expecting.
+ * So for each ENABLED (0) bit in this register, we must SET the
+ * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
+ * The GuC needs the ARAT-expired interrupt unmasked, hence it is
+ * set in pm_intrmsk_mbz.
+ *
+ * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
+ * result in the register bit being left SET!
+ */
+ dev_priv->rps.pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
+ dev_priv->rps.pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+}
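
The invariant the comment describes is enforced wherever PMINTRMSK is written: the requested mask is ANDed with ~pm_intrmsk_mbz so the GuC's interrupts can never be masked. A tiny sketch of that sanitising step with illustrative bit positions (not the real register layout):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ARAT_EXPIRED = 1u << 9;	/* illustrative bit */
	uint32_t pm_intrmsk_mbz = ARAT_EXPIRED;	/* bits that must stay 0 */

	uint32_t requested = 0xffffffffu;	/* "mask everything" */
	uint32_t written = requested & ~pm_intrmsk_mbz;

	printf("PMINTRMSK write: 0x%08x (ARAT left unmasked: %s)\n",
	       written, (written & ARAT_EXPIRED) ? "no" : "yes");
	return 0;
}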
+
int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
@@ -942,31 +1088,67 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
guc_init_doorbell_hw(guc);
/* Take over from manual control of ELSP (execlists) */
+ guc_interrupts_capture(dev_priv);
+
for_each_engine(engine, dev_priv, id) {
+ const int wqi_size = sizeof(struct guc_wq_item);
struct drm_i915_gem_request *rq;
- engine->submit_request = i915_guc_submit;
- engine->schedule = NULL;
+ /* The tasklet was initialised by execlists and may be in a
+ * state of flux (across a reset), so we just take over the
+ * callback without changing any other state in the tasklet.
+ */
+ engine->irq_tasklet.func = i915_guc_irq_handler;
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
/* Replay the current set of previously submitted requests */
+ spin_lock_irq(&engine->timeline->lock);
list_for_each_entry(rq, &engine->timeline->requests, link) {
- client->wq_rsvd += sizeof(struct guc_wq_item);
+ guc_client_update_wq_rsvd(client, wqi_size);
__i915_guc_submit(rq);
}
+ spin_unlock_irq(&engine->timeline->lock);
}
return 0;
}
+static void guc_interrupts_release(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int irqs;
+
+ /*
+ * tell all command streamers NOT to forward interrupts or vblank
+ * to GuC.
+ */
+ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
+ irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
+ for_each_engine(engine, dev_priv, id)
+ I915_WRITE(RING_MODE_GEN7(engine), irqs);
+
+ /* route all GT interrupts to the host */
+ I915_WRITE(GUC_BCS_RCS_IER, 0);
+ I915_WRITE(GUC_VCS2_VCS1_IER, 0);
+ I915_WRITE(GUC_WD_VECS_IER, 0);
+
+ dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
+ dev_priv->rps.pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
+}
+
void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
{
struct intel_guc *guc = &dev_priv->guc;
+ guc_interrupts_release(dev_priv);
+
if (!guc->execbuf_client)
return;
/* Revert back to manual ELSP submission */
- intel_execlists_enable_submission(dev_priv);
+ intel_engines_reset_default_submission(dev_priv);
}
void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a62feb686895..cb20c9408b12 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -180,7 +180,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
{
uint32_t val;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
@@ -222,7 +222,7 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -250,7 +250,7 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
uint32_t interrupt_mask,
uint32_t enabled_irq_mask)
{
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -302,7 +302,7 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
new_val = dev_priv->pm_imr;
new_val &= ~interrupt_mask;
@@ -340,7 +340,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
i915_reg_t reg = gen6_pm_iir(dev_priv);
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
I915_WRITE(reg, reset_mask);
I915_WRITE(reg, reset_mask);
@@ -349,7 +349,7 @@ void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier |= enable_mask;
I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
@@ -359,7 +359,7 @@ void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
dev_priv->pm_ier &= ~disable_mask;
__gen6_mask_pm_irq(dev_priv, disable_mask);
@@ -389,11 +389,6 @@ void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
spin_unlock_irq(&dev_priv->irq_lock);
}
-u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
-{
- return (mask & ~dev_priv->rps.pm_intr_keep);
-}
-
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
if (!READ_ONCE(dev_priv->rps.interrupts_enabled))
@@ -463,7 +458,7 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
uint32_t new_val;
uint32_t old_val;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -496,7 +491,7 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
{
uint32_t new_val;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(enabled_irq_mask & ~interrupt_mask);
@@ -530,7 +525,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
WARN_ON(enabled_irq_mask & ~interrupt_mask);
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if (WARN_ON(!intel_irqs_enabled(dev_priv)))
return;
@@ -546,7 +541,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -573,7 +568,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
WARN_ON(!intel_irqs_enabled(dev_priv));
if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
@@ -728,6 +723,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
pipe);
const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
+ unsigned long irqflags;
htotal = mode->crtc_htotal;
hsync_start = mode->crtc_hsync_start;
@@ -744,17 +740,21 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
high_frame = PIPEFRAME(pipe);
low_frame = PIPEFRAMEPIXEL(pipe);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
/*
* High & low register fields aren't synchronized, so make sure
* we get a low value that's stable across two reads of the high
* register.
*/
do {
- high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ(low_frame);
- high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+ high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = I915_READ_FW(low_frame);
+ high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
high1 >>= PIPE_FRAME_HIGH_SHIFT;
pixel = low & PIPE_PIXEL_MASK;
low >>= PIPE_FRAME_LOW_SHIFT;
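
The high/low read loop above is the standard technique for sampling a wide counter through two narrow registers that cannot be latched together: re-read the high half until it is stable around the low read. A runnable sketch with a simulated counter that ticks between reads (the tick forces one retry across the 32-bit carry):

#include <stdio.h>
#include <stdint.h>

/* simulated free-running frame counter split across two 32-bit
 * registers; the low read advances it to model the hardware
 * ticking between register accesses */
static uint64_t counter = 0x1ffffffffull;

static uint32_t read_high(void) { return counter >> 32; }
static uint32_t read_low(void)  { return (uint32_t)++counter; }

int main(void)
{
	uint32_t high1, high2, low;

	/* re-read the high half until it is stable around the low read,
	 * exactly like the PIPEFRAME loop above */
	do {
		high1 = read_high();
		low = read_low();
		high2 = read_high();
	} while (high1 != high2);

	printf("frame counter: 0x%llx\n",
	       ((unsigned long long)high1 << 32) | low);
	return 0;
}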
@@ -783,6 +783,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
int position, vtotal;
+ if (!crtc->active)
+ return -1;
+
vtotal = mode->crtc_vtotal;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
vtotal /= 2;
@@ -809,8 +812,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
for (i = 0; i < 100; i++) {
udelay(1);
- temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
- DSL_LINEMASK_GEN3;
+ temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
@@ -1033,81 +1035,94 @@ static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
static void notify_ring(struct intel_engine_cs *engine)
{
- smp_store_mb(engine->breadcrumbs.irq_posted, true);
- if (intel_engine_wakeup(engine))
- trace_i915_gem_request_notify(engine);
+ struct drm_i915_gem_request *rq = NULL;
+ struct intel_wait *wait;
+
+ atomic_inc(&engine->irq_count);
+ set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+
+ spin_lock(&engine->breadcrumbs.irq_lock);
+ wait = engine->breadcrumbs.irq_wait;
+ if (wait) {
+ /* We use a callback from the dma-fence to submit
+ * requests after waiting on our own requests. To
+ * ensure minimum delay in queuing the next request to
+ * hardware, signal the fence now rather than wait for
+ * the signaler to be woken up. We still wake up the
+ * waiter in order to handle the irq-seqno coherency
+ * issues (we may receive the interrupt before the
+ * seqno is written, see __i915_request_irq_complete())
+ * and to handle coalescing of multiple seqno updates
+ * and many waiters.
+ */
+ if (i915_seqno_passed(intel_engine_get_seqno(engine),
+ wait->seqno) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &wait->request->fence.flags))
+ rq = i915_gem_request_get(wait->request);
+
+ wake_up_process(wait->tsk);
+ } else {
+ __intel_engine_disarm_breadcrumbs(engine);
+ }
+ spin_unlock(&engine->breadcrumbs.irq_lock);
+
+ if (rq) {
+ dma_fence_signal(&rq->fence);
+ i915_gem_request_put(rq);
+ }
+
+ trace_intel_engine_notify(engine, wait);
}
static void vlv_c0_read(struct drm_i915_private *dev_priv,
struct intel_rps_ei *ei)
{
- ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
+ ei->ktime = ktime_get_raw();
ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
- if (now.cz_clock == 0)
- return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
- }
+ if (prev->ktime) {
+ u64 time, c0;
+ u32 render, media;
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
+ time = ktime_us_delta(now.ktime, prev->ktime);
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ render = now.render_c0 - prev->render_c0;
+ media = now.media_c0 - prev->media_c0;
+ c0 = max(render, media);
+ c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
+ dev_priv->rps.ei = now;
return events;
}
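
The rewritten vlv_wa_c0_ei() boils down to comparing C0 residency against the elapsed window scaled by the up/down thresholds, taking the busier of the render and media counters. Illustrative arithmetic with made-up tick counts (not real counter semantics):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t window_ticks = 4000000;	/* clock ticks in the EI window */
	uint64_t render_c0 = 3900000, media_c0 = 1200000;
	unsigned int up = 95, down = 85;	/* thresholds in percent */

	/* combine render + media by taking the busier of the two, as the
	 * handler above does with max(render, media) */
	uint64_t busy = render_c0 > media_c0 ? render_c0 : media_c0;

	if (busy * 100 > window_ticks * up)
		printf("above %u%%: raise the GPU frequency\n", up);
	else if (busy * 100 < window_ticks * down)
		printf("below %u%%: lower the GPU frequency\n", down);
	else
		printf("in band: hold frequency\n");
	return 0;
}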
@@ -1127,30 +1142,21 @@ static void gen6_pm_rps_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, rps.work);
- bool client_boost;
+ bool client_boost = false;
int new_delay, adj, min, max;
- u32 pm_iir;
+ u32 pm_iir = 0;
spin_lock_irq(&dev_priv->irq_lock);
- /* Speed up work cancelation during disabling rps interrupts. */
- if (!dev_priv->rps.interrupts_enabled) {
- spin_unlock_irq(&dev_priv->irq_lock);
- return;
+ if (dev_priv->rps.interrupts_enabled) {
+ pm_iir = fetch_and_zero(&dev_priv->rps.pm_iir);
+ client_boost = fetch_and_zero(&dev_priv->rps.client_boost);
}
-
- pm_iir = dev_priv->rps.pm_iir;
- dev_priv->rps.pm_iir = 0;
- /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
- gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
- client_boost = dev_priv->rps.client_boost;
- dev_priv->rps.client_boost = false;
spin_unlock_irq(&dev_priv->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
-
if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
- return;
+ goto out;
mutex_lock(&dev_priv->rps.hw_lock);
@@ -1173,20 +1179,12 @@ static void gen6_pm_rps_work(struct work_struct *work)
if (new_delay >= dev_priv->rps.max_freq_softlimit)
adj = 0;
- /*
- * For better performance, jump directly
- * to RPe if we're below it.
- */
- if (new_delay < dev_priv->rps.efficient_freq - adj) {
- new_delay = dev_priv->rps.efficient_freq;
- adj = 0;
- }
} else if (client_boost || any_waiters(dev_priv)) {
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
new_delay = dev_priv->rps.efficient_freq;
- else
+ else if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
new_delay = dev_priv->rps.min_freq_softlimit;
adj = 0;
} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
@@ -1209,9 +1207,19 @@ static void gen6_pm_rps_work(struct work_struct *work)
new_delay += adj;
new_delay = clamp_t(int, new_delay, min, max);
- intel_set_rps(dev_priv, new_delay);
+ if (intel_set_rps(dev_priv, new_delay)) {
+ DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
+ dev_priv->rps.last_adj = 0;
+ }
mutex_unlock(&dev_priv->rps.hw_lock);
+
+out:
+ /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->rps.interrupts_enabled)
+ gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
+ spin_unlock_irq(&dev_priv->irq_lock);
}
@@ -1347,10 +1355,20 @@ static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
- if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
+ bool tasklet = false;
+
+ if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) {
+ set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ tasklet = true;
+ }
+
+ if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) {
notify_ring(engine);
- if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
- tasklet_schedule(&engine->irq_tasklet);
+ tasklet |= i915.enable_guc_submission;
+ }
+
+ if (tasklet)
+ tasklet_hi_schedule(&engine->irq_tasklet);
}
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
@@ -2613,22 +2631,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return ret;
}
-static void i915_error_wake_up(struct drm_i915_private *dev_priv)
-{
- /*
- * Notify all waiters for GPU completion events that reset state has
- * been changed, and that they need to restart their wait after
- * checking for potential errors (and bail out to drop locks if there is
- * a gpu reset pending so that i915_error_work_func can acquire them).
- */
-
- /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
- wake_up_all(&dev_priv->gpu_error.wait_queue);
-
- /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
- wake_up_all(&dev_priv->pending_flip_queue);
-}
-
/**
* i915_reset_and_wakeup - do process context error handling work
* @dev_priv: i915 device private
@@ -2648,16 +2650,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
- /*
- * In most cases it's guaranteed that we get here with an RPM
- * reference held, for example because there is a pending GPU
- * request that won't finish until the reset is done. This
- * isn't the case at least when we get here by doing a
- * simulated reset via debugs, so get an RPM reference.
- */
- intel_runtime_pm_get(dev_priv);
intel_prepare_reset(dev_priv);
+ set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags);
+ wake_up_all(&dev_priv->gpu_error.wait_queue);
+
do {
/*
* All state reset _must_ be completed before we update the
@@ -2672,12 +2669,11 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
/* We need to wait for anyone holding the lock to wakeup */
} while (wait_on_bit_timeout(&dev_priv->gpu_error.flags,
- I915_RESET_IN_PROGRESS,
+ I915_RESET_HANDOFF,
TASK_UNINTERRUPTIBLE,
HZ));
intel_finish_reset(dev_priv);
- intel_runtime_pm_put(dev_priv);
if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags))
kobject_uevent_env(kobj,
@@ -2687,6 +2683,7 @@ static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
* Note: The wake_up also serves as a memory barrier so that
* waiters see the updated value of the dev_priv->gpu_error.
*/
+ clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags);
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
@@ -2764,31 +2761,29 @@ void i915_handle_error(struct drm_i915_private *dev_priv,
vscnprintf(error_msg, sizeof(error_msg), fmt, args);
va_end(args);
+ /*
+ * In most cases it's guaranteed that we get here with an RPM
+ * reference held, for example because there is a pending GPU
+ * request that won't finish until the reset is done. This
+ * isn't the case at least when we get here by doing a
+ * simulated reset via debugfs, so get an RPM reference.
+ */
+ intel_runtime_pm_get(dev_priv);
+
i915_capture_error_state(dev_priv, engine_mask, error_msg);
i915_clear_error_registers(dev_priv);
if (!engine_mask)
- return;
+ goto out;
- if (test_and_set_bit(I915_RESET_IN_PROGRESS,
+ if (test_and_set_bit(I915_RESET_BACKOFF,
&dev_priv->gpu_error.flags))
- return;
-
- /*
- * Wakeup waiting processes so that the reset function
- * i915_reset_and_wakeup doesn't deadlock trying to grab
- * various locks. By bumping the reset counter first, the woken
- * processes will see a reset in progress and back off,
- * releasing their locks and then wait for the reset completion.
- * We must do this for _all_ gpu waiters that might hold locks
- * that the reset work needs to acquire.
- *
- * Note: The wake_up also provides a memory barrier to ensure that the
- * waiters see the updated value of the reset flags.
- */
- i915_error_wake_up(dev_priv);
+ goto out;
i915_reset_and_wakeup(dev_priv);
+
+out:
+ intel_runtime_pm_put(dev_priv);
}
/* Called from drm generic code, passed 'crtc' which
@@ -3106,19 +3101,9 @@ static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
return enabled_irqs;
}
-static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
-
- if (HAS_PCH_IBX(dev_priv)) {
- hotplug_irqs = SDE_HOTPLUG_MASK;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
- } else {
- hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
- enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
- }
-
- ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+ u32 hotplug;
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -3126,10 +3111,12 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
* The pulse duration bits are reserved on LPT+.
*/
hotplug = I915_READ(PCH_PORT_HOTPLUG);
- hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
- hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
- hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug &= ~(PORTB_PULSE_DURATION_MASK |
+ PORTC_PULSE_DURATION_MASK |
+ PORTD_PULSE_DURATION_MASK);
hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+ hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+ hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
/*
* When CPU and PCH are on the same package, port A
* HPD must be enabled in both north and south.
@@ -3139,6 +3126,23 @@ static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
+static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug_irqs, enabled_irqs;
+
+ if (HAS_PCH_IBX(dev_priv)) {
+ hotplug_irqs = SDE_HOTPLUG_MASK;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
+ } else {
+ hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
+ enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
+ }
+
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
+
+ ibx_hpd_detection_setup(dev_priv);
+}
+
static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug;
@@ -3168,9 +3172,25 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
spt_hpd_detection_setup(dev_priv);
}
+static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
+{
+ u32 hotplug;
+
+ /*
+ * Enable digital hotplug on the CPU, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ * The pulse duration bits are reserved on HSW+.
+ */
+ hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
+ hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
+ hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
+ DIGITAL_PORTA_PULSE_DURATION_2ms;
+ I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+}
+
static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
- u32 hotplug_irqs, hotplug, enabled_irqs;
+ u32 hotplug_irqs, enabled_irqs;
if (INTEL_GEN(dev_priv) >= 8) {
hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
@@ -3189,15 +3209,7 @@ static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
}
- /*
- * Enable digital hotplug on the CPU, and configure the DP short pulse
- * duration to 2ms (which is the minimum in the Display Port spec)
- * The pulse duration bits are reserved on HSW+.
- */
- hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
- hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
- hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
- I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
+ ilk_hpd_detection_setup(dev_priv);
ibx_hpd_irq_setup(dev_priv);
}
@@ -3268,7 +3280,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
HAS_PCH_LPT(dev_priv))
- ; /* TODO: Enable HPD detection on older PCH platforms too */
+ ibx_hpd_detection_setup(dev_priv);
else
spt_hpd_detection_setup(dev_priv);
}
@@ -3345,6 +3357,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
gen5_gt_irq_postinstall(dev);
+ ilk_hpd_detection_setup(dev_priv);
+
ibx_irq_postinstall(dev);
if (IS_IRONLAKE_M(dev_priv)) {
@@ -3363,7 +3377,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if (dev_priv->display_irqs_enabled)
return;
@@ -3378,7 +3392,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if (!dev_priv->display_irqs_enabled)
return;
@@ -3485,6 +3499,8 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
if (IS_GEN9_LP(dev_priv))
bxt_hpd_detection_setup(dev_priv);
+ else if (IS_BROADWELL(dev_priv))
+ ilk_hpd_detection_setup(dev_priv);
}
static int gen8_irq_postinstall(struct drm_device *dev)
@@ -4052,7 +4068,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
u32 hotplug_en;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
/* Note HDMI and DP share hotplug bits */
/* enable bits are the same for all generations */
@@ -4228,11 +4244,11 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
- dev_priv->rps.pm_intr_keep = 0;
+ dev_priv->rps.pm_intrmsk_mbz = 0;
/*
* SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
@@ -4241,10 +4257,10 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
* TODO: verify if this can be reproduced on VLV,CHV.
*/
if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
- dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->rps.pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;
if (INTEL_INFO(dev_priv)->gen >= 8)
- dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_GUC;
+ dev_priv->rps.pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
if (IS_GEN2(dev_priv)) {
/* Gen2 doesn't have a hardware frame counter */
@@ -4265,6 +4281,18 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (!IS_GEN2(dev_priv))
dev->vblank_disable_immediate = true;
+ /* Most platforms treat the display irq block as an always-on
+ * power domain. vlv/chv can disable it at runtime and need
+ * special care to avoid writing any of the display block registers
+ * outside of the power domain. We defer setting up the display irqs
+ * in this case to the runtime pm.
+ */
+ dev_priv->display_irqs_enabled = true;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display_irqs_enabled = false;
+
+ dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+
dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 0e280fbd52f1..b6a7e363d076 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -59,6 +59,8 @@ struct i915_params i915 __read_mostly = {
.enable_guc_loading = 0,
.enable_guc_submission = 0,
.guc_log_level = -1,
+ .guc_firmware_path = NULL,
+ .huc_firmware_path = NULL,
.enable_dp_mst = true,
.inject_load_failure = 0,
.enable_dpcd_backlight = false,
@@ -145,7 +147,7 @@ MODULE_PARM_DESC(enable_psr, "Enable PSR "
"(0=disabled, 1=enabled - link mode chosen per-platform, 2=force link-standby mode, 3=force link-off mode) "
"Default: -1 (use per-chip default)");
-module_param_named_unsafe(alpha_support, i915.alpha_support, int, 0400);
+module_param_named_unsafe(alpha_support, i915.alpha_support, bool, 0400);
MODULE_PARM_DESC(alpha_support,
"Enable alpha quality driver support for latest hardware. "
"See also CONFIG_DRM_I915_ALPHA_SUPPORT.");
@@ -205,9 +207,9 @@ module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600);
MODULE_PARM_DESC(verbose_state_checks,
"Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
+module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0400);
MODULE_PARM_DESC(nuclear_pageflip,
- "Force atomic modeset functionality; asynchronous mode is not yet supported. (default: false).");
+ "Force enable atomic functionality on platforms that don't have full support yet.");
/* WA to get away with the default setting in VBT for early platforms. Will be removed */
module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
@@ -230,6 +232,14 @@ module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
MODULE_PARM_DESC(guc_log_level,
"GuC firmware logging level (-1:disabled (default), 0-3:enabled)");
+module_param_named_unsafe(guc_firmware_path, i915.guc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(guc_firmware_path,
+ "GuC firmware path to use instead of the default one");
+
+module_param_named_unsafe(huc_firmware_path, i915.huc_firmware_path, charp, 0400);
+MODULE_PARM_DESC(huc_firmware_path,
+ "HuC firmware path to use instead of the default one");
+
module_param_named_unsafe(enable_dp_mst, i915.enable_dp_mst, bool, 0600);
MODULE_PARM_DESC(enable_dp_mst,
"Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 8e433de04679..34148cc8637c 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -27,46 +27,53 @@
#include <linux/cache.h> /* for __read_mostly */
+#define I915_PARAMS_FOR_EACH(func) \
+ func(int, modeset); \
+ func(int, panel_ignore_lid); \
+ func(int, semaphores); \
+ func(int, lvds_channel_mode); \
+ func(int, panel_use_ssc); \
+ func(int, vbt_sdvo_panel_type); \
+ func(int, enable_rc6); \
+ func(int, enable_dc); \
+ func(int, enable_fbc); \
+ func(int, enable_ppgtt); \
+ func(int, enable_execlists); \
+ func(int, enable_psr); \
+ func(int, disable_power_well); \
+ func(int, enable_ips); \
+ func(int, invert_brightness); \
+ func(int, enable_guc_loading); \
+ func(int, enable_guc_submission); \
+ func(int, guc_log_level); \
+ func(char *, guc_firmware_path); \
+ func(char *, huc_firmware_path); \
+ func(int, use_mmio_flip); \
+ func(int, mmio_debug); \
+ func(int, edp_vswing); \
+ func(unsigned int, inject_load_failure); \
+ /* leave bools at the end to not create holes */ \
+ func(bool, alpha_support); \
+ func(bool, enable_cmd_parser); \
+ func(bool, enable_hangcheck); \
+ func(bool, fastboot); \
+ func(bool, prefault_disable); \
+ func(bool, load_detect_test); \
+ func(bool, force_reset_modeset_test); \
+ func(bool, reset); \
+ func(bool, error_capture); \
+ func(bool, disable_display); \
+ func(bool, verbose_state_checks); \
+ func(bool, nuclear_pageflip); \
+ func(bool, enable_dp_mst); \
+ func(bool, enable_dpcd_backlight); \
+ func(bool, enable_gvt)
+
+#define MEMBER(T, member) T member
struct i915_params {
- int modeset;
- int panel_ignore_lid;
- int semaphores;
- int lvds_channel_mode;
- int panel_use_ssc;
- int vbt_sdvo_panel_type;
- int enable_rc6;
- int enable_dc;
- int enable_fbc;
- int enable_ppgtt;
- int enable_execlists;
- int enable_psr;
- unsigned int alpha_support;
- int disable_power_well;
- int enable_ips;
- int invert_brightness;
- int enable_guc_loading;
- int enable_guc_submission;
- int guc_log_level;
- int use_mmio_flip;
- int mmio_debug;
- int edp_vswing;
- unsigned int inject_load_failure;
- /* leave bools at the end to not create holes */
- bool enable_cmd_parser;
- bool enable_hangcheck;
- bool fastboot;
- bool prefault_disable;
- bool load_detect_test;
- bool force_reset_modeset_test;
- bool reset;
- bool error_capture;
- bool disable_display;
- bool verbose_state_checks;
- bool nuclear_pageflip;
- bool enable_dp_mst;
- bool enable_dpcd_backlight;
- bool enable_gvt;
+ I915_PARAMS_FOR_EACH(MEMBER);
};
+#undef MEMBER
extern struct i915_params i915 __read_mostly;
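
This is the classic X-macro refactor: a single I915_PARAMS_FOR_EACH list now drives the struct layout, and later users can reuse the same list to generate dumpers, dup/free helpers and so on, without the field list ever drifting out of sync. The technique in miniature (illustrative names):

        #include <linux/printk.h>

        #define PARAMS_FOR_EACH(func) \
                func(int, verbosity); \
                func(bool, enabled)

        #define MEMBER(T, member) T member
        struct params {
                PARAMS_FOR_EACH(MEMBER);
        };
        #undef MEMBER

        /* Reuse the same list to print every field name. */
        #define PRINT(T, member) pr_info(#member "\n")
        static void dump_params(void)
        {
                PARAMS_FOR_EACH(PRINT);
        }
        #undef PRINT
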
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index ecb487b5356f..732101ed57fb 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -27,6 +27,7 @@
#include <linux/vga_switcheroo.h>
#include "i915_drv.h"
+#include "i915_selftest.h"
#define GEN_DEFAULT_PIPEOFFSETS \
.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
@@ -403,6 +404,7 @@ static const struct intel_device_info intel_geminilake_info = {
.platform = INTEL_GEMINILAKE,
.is_alpha_support = 1,
.ddb_size = 1024,
+ .color = { .degamma_lut_size = 0, .gamma_lut_size = 1024 }
};
static const struct intel_device_info intel_kabylake_info = {
@@ -472,10 +474,19 @@ static const struct pci_device_id pciidlist[] = {
};
MODULE_DEVICE_TABLE(pci, pciidlist);
+static void i915_pci_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+
+ i915_driver_unload(dev);
+ drm_dev_unref(dev);
+}
+
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
+ int err;
if (IS_ALPHA_SUPPORT(intel_info) && !i915.alpha_support) {
DRM_INFO("The driver support for your hardware in this kernel version is alpha quality\n"
@@ -499,15 +510,17 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (vga_switcheroo_client_probe_defer(pdev))
return -EPROBE_DEFER;
- return i915_driver_load(pdev, ent);
-}
+ err = i915_driver_load(pdev, ent);
+ if (err)
+ return err;
-static void i915_pci_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
+ err = i915_live_selftests(pdev);
+ if (err) {
+ i915_pci_remove(pdev);
+ return err > 0 ? -ENOTTY : err;
+ }
- i915_driver_unload(dev);
- drm_dev_unref(dev);
+ return 0;
}
static struct pci_driver i915_pci_driver = {
@@ -521,6 +534,11 @@ static struct pci_driver i915_pci_driver = {
static int __init i915_init(void)
{
bool use_kms = true;
+ int err;
+
+ err = i915_mock_selftests();
+ if (err)
+ return err > 0 ? 0 : err;
/*
 * Enable KMS by default, unless explicitly overridden by
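
Note the tri-state return convention the selftest hooks appear to use: negative is a real error to propagate, zero means no selftests were requested, and a positive value means selftests ran and the normal load should be skipped. That explains the two asymmetric translations above - i915_init() turns a positive result into 0 (module load "succeeds" but registers nothing), while i915_pci_probe() turns it into -ENOTTY so the device is released again. A sketch of the shape, with hypothetical helpers:

        static int module_entry(void)
        {
                int err;

                err = run_mock_selftests();     /* hypothetical helper */
                if (err)
                        return err > 0 ? 0 : err;

                return register_pci_driver();   /* hypothetical helper */
        }
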
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index a1b7eec58be2..8c121187ff39 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1008,7 +1008,7 @@ static void hsw_disable_metric_set(struct drm_i915_private *dev_priv)
static void gen7_update_oacontrol_locked(struct drm_i915_private *dev_priv)
{
- assert_spin_locked(&dev_priv->perf.hook_lock);
+ lockdep_assert_held(&dev_priv->perf.hook_lock);
if (dev_priv->perf.oa.exclusive_stream->enabled) {
struct i915_gem_context *ctx =
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 1c8f5b9a7fcd..04c8f69fcc62 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -48,6 +48,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG);
}
+#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
+
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _MMIO_PIPE(pipe, a, b) _MMIO(_PIPE(pipe, a, b))
#define _PLANE(plane, a, b) _PIPE(plane, a, b)
@@ -56,14 +58,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MMIO_TRANS(tran, a, b) _MMIO(_TRANS(tran, a, b))
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
#define _MMIO_PORT(port, a, b) _MMIO(_PORT(port, a, b))
-#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
- (pipe) == PIPE_B ? (b) : (c))
+#define _PIPE3(pipe, ...) _PICK(pipe, __VA_ARGS__)
#define _MMIO_PIPE3(pipe, a, b, c) _MMIO(_PIPE3(pipe, a, b, c))
-#define _PORT3(port, a, b, c) ((port) == PORT_A ? (a) : \
- (port) == PORT_B ? (b) : (c))
+#define _PORT3(port, ...) _PICK(port, __VA_ARGS__)
#define _MMIO_PORT3(pipe, a, b, c) _MMIO(_PORT3(pipe, a, b, c))
-#define _PHY3(phy, a, b, c) ((phy) == DPIO_PHY0 ? (a) : \
- (phy) == DPIO_PHY1 ? (b) : (c))
+#define _PHY3(phy, ...) _PICK(phy, __VA_ARGS__)
#define _MMIO_PHY3(phy, a, b, c) _MMIO(_PHY3(phy, a, b, c))
#define _MASKED_FIELD(mask, value) ({ \
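
_PICK exploits a C99 compound literal: the variadic arguments populate an anonymous const u32 array which is indexed in place, so _PIPE3/_PORT3/_PHY3 no longer need a hand-written ternary chain per arity. A standalone illustration:

        #include <stdio.h>

        /* Out-of-range indices are undefined behaviour - callers must
         * pass a valid selector, just as with the macros above.
         */
        #define PICK(idx, ...) (((const unsigned int []){ __VA_ARGS__ })[idx])

        int main(void)
        {
                printf("0x%x\n", PICK(2, 0x100, 0x200, 0x300)); /* prints 0x300 */
                return 0;
        }
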
@@ -78,7 +77,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
+/* Engine ID */
+#define RCS_HW 0
+#define VCS_HW 1
+#define BCS_HW 2
+#define VECS_HW 3
+#define VCS2_HW 4
/* PCI config space */
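
For readers new to these headers, the _MASKED_* helpers visible in the context encode a common i915 hardware convention: the top 16 bits of a masked register are per-bit write enables for the bottom 16, so individual bits can be flipped without a read-modify-write cycle. Reduced to its essence:

        #define MASKED_FIELD(mask, value)  (((mask) << 16) | (value))
        #define MASKED_BIT_ENABLE(a)       MASKED_FIELD((a), (a))
        #define MASKED_BIT_DISABLE(a)      MASKED_FIELD((a), 0)
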
@@ -120,7 +125,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
-#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4)
#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
@@ -1135,8 +1140,6 @@ enum skl_disp_power_wells {
#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
-#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
-
/* vlv2 north clock has */
#define CCK_FUSE_REG 0x8
#define CCK_FUSE_HPLL_FREQ_MASK 0x3
@@ -1553,6 +1556,7 @@ enum skl_disp_power_wells {
_MMIO(_BXT_PHY_CH(phy, ch, reg_ch0, reg_ch1))
#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
+#define MIPIO_RST_CTRL (1 << 2)
#define _BXT_PHY_CTL_DDI_A 0x64C00
#define _BXT_PHY_CTL_DDI_B 0x64C10
@@ -3376,10 +3380,22 @@ enum {
INTEL_LEGACY_64B_CONTEXT
};
+enum {
+ FAULT_AND_HANG = 0,
+ FAULT_AND_HALT, /* Debug only */
+ FAULT_AND_STREAM,
+ FAULT_AND_CONTINUE /* Unsupported */
+};
+
+#define GEN8_CTX_VALID (1<<0)
+#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
+#define GEN8_CTX_FORCE_RESTORE (1<<2)
+#define GEN8_CTX_L3LLC_COHERENT (1<<5)
+#define GEN8_CTX_PRIVILEGE (1<<8)
#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
-#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
- INTEL_LEGACY_64B_CONTEXT : \
- INTEL_LEGACY_32B_CONTEXT)
+
+#define GEN8_CTX_ID_SHIFT 32
+#define GEN8_CTX_ID_WIDTH 21
#define CHV_CLK_CTL1 _MMIO(0x101100)
#define VLV_CLK_CTL2 _MMIO(0x101104)
@@ -5887,11 +5903,18 @@ enum {
#define _PLANE_KEYMSK_2_A 0x70298
#define _PLANE_KEYMAX_1_A 0x701a0
#define _PLANE_KEYMAX_2_A 0x702a0
+#define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */
+#define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */
+#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
+#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30)
+#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23)
+#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13)
#define _PLANE_BUF_CFG_1_A 0x7027c
#define _PLANE_BUF_CFG_2_A 0x7037c
#define _PLANE_NV12_BUF_CFG_1_A 0x70278
#define _PLANE_NV12_BUF_CFG_2_A 0x70378
+
#define _PLANE_CTL_1_B 0x71180
#define _PLANE_CTL_2_B 0x71280
#define _PLANE_CTL_3_B 0x71380
@@ -5986,7 +6009,17 @@ enum {
#define PLANE_NV12_BUF_CFG(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_NV12_BUF_CFG_1(pipe), _PLANE_NV12_BUF_CFG_2(pipe))
-/* SKL new cursor registers */
+#define _PLANE_COLOR_CTL_1_B 0x711CC
+#define _PLANE_COLOR_CTL_2_B 0x712CC
+#define _PLANE_COLOR_CTL_3_B 0x713CC
+#define _PLANE_COLOR_CTL_1(pipe) \
+ _PIPE(pipe, _PLANE_COLOR_CTL_1_A, _PLANE_COLOR_CTL_1_B)
+#define _PLANE_COLOR_CTL_2(pipe) \
+ _PIPE(pipe, _PLANE_COLOR_CTL_2_A, _PLANE_COLOR_CTL_2_B)
+#define PLANE_COLOR_CTL(pipe, plane) \
+ _MMIO_PLANE(plane, _PLANE_COLOR_CTL_1(pipe), _PLANE_COLOR_CTL_2(pipe))
+
+/* SKL new cursor registers */
#define _CUR_BUF_CFG_A 0x7017c
#define _CUR_BUF_CFG_B 0x7117c
#define CUR_BUF_CFG(pipe) _MMIO_PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
@@ -6466,6 +6499,11 @@ enum {
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
+#define CHICKEN_MISC_2 _MMIO(0x42084)
+#define GLK_CL0_PWR_DOWN (1 << 10)
+#define GLK_CL1_PWR_DOWN (1 << 11)
+#define GLK_CL2_PWR_DOWN (1 << 12)
+
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
#define HSW_FBCQ_DIS (1 << 22)
@@ -7413,7 +7451,8 @@ enum {
#define VLV_RCEDATA _MMIO(0xA0BC)
#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
#define GEN6_PMINTRMSK _MMIO(0xA168)
-#define GEN8_PMINTR_REDIRECT_TO_GUC (1<<31)
+#define GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC (1<<31)
+#define ARAT_EXPIRED_INTRMSK (1<<9)
#define GEN8_MISC_CTRL0 _MMIO(0xA180)
#define VLV_PWRDWNUPCTL _MMIO(0xA294)
#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
@@ -8167,6 +8206,7 @@ enum {
#define PAL_PREC_10_12_BIT (0 << 31)
#define PAL_PREC_SPLIT_MODE (1 << 31)
#define PAL_PREC_AUTO_INCREMENT (1 << 15)
+#define PAL_PREC_INDEX_VALUE_MASK (0x3ff << 0)
#define _PAL_PREC_DATA_A 0x4A404
#define _PAL_PREC_DATA_B 0x4AC04
#define _PAL_PREC_DATA_C 0x4B404
@@ -8176,12 +8216,26 @@ enum {
#define _PAL_PREC_EXT_GC_MAX_A 0x4A420
#define _PAL_PREC_EXT_GC_MAX_B 0x4AC20
#define _PAL_PREC_EXT_GC_MAX_C 0x4B420
+#define _PAL_PREC_EXT2_GC_MAX_A 0x4A430
+#define _PAL_PREC_EXT2_GC_MAX_B 0x4AC30
+#define _PAL_PREC_EXT2_GC_MAX_C 0x4B430
#define PREC_PAL_INDEX(pipe) _MMIO_PIPE(pipe, _PAL_PREC_INDEX_A, _PAL_PREC_INDEX_B)
#define PREC_PAL_DATA(pipe) _MMIO_PIPE(pipe, _PAL_PREC_DATA_A, _PAL_PREC_DATA_B)
#define PREC_PAL_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_GC_MAX_A, _PAL_PREC_GC_MAX_B) + (i) * 4)
#define PREC_PAL_EXT_GC_MAX(pipe, i) _MMIO(_PIPE(pipe, _PAL_PREC_EXT_GC_MAX_A, _PAL_PREC_EXT_GC_MAX_B) + (i) * 4)
+#define _PRE_CSC_GAMC_INDEX_A 0x4A484
+#define _PRE_CSC_GAMC_INDEX_B 0x4AC84
+#define _PRE_CSC_GAMC_INDEX_C 0x4B484
+#define PRE_CSC_GAMC_AUTO_INCREMENT (1 << 10)
+#define _PRE_CSC_GAMC_DATA_A 0x4A488
+#define _PRE_CSC_GAMC_DATA_B 0x4AC88
+#define _PRE_CSC_GAMC_DATA_C 0x4B488
+
+#define PRE_CSC_GAMC_INDEX(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_INDEX_A, _PRE_CSC_GAMC_INDEX_B)
+#define PRE_CSC_GAMC_DATA(pipe) _MMIO_PIPE(pipe, _PRE_CSC_GAMC_DATA_A, _PRE_CSC_GAMC_DATA_B)
+
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
@@ -8215,9 +8269,14 @@ enum {
/* MIPI DSI registers */
-#define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */
+#define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */
#define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c))
+#define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004)
+#define GLK_TX_ESC_CLK_DIV1_MASK 0x3FF
+#define MIPIO_TXESC_CLK_DIV2 _MMIO(0x160008)
+#define GLK_TX_ESC_CLK_DIV2_MASK 0x3FF
+
/* BXT MIPI clock controls */
#define BXT_MAX_VAR_OUTPUT_KHZ 39500
@@ -8304,10 +8363,12 @@ enum {
#define BXT_DSI_PLL_PVD_RATIO_SHIFT 16
#define BXT_DSI_PLL_PVD_RATIO_MASK (3 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
#define BXT_DSI_PLL_PVD_RATIO_1 (1 << BXT_DSI_PLL_PVD_RATIO_SHIFT)
+#define BXT_DSIC_16X_BY1 (0 << 10)
#define BXT_DSIC_16X_BY2 (1 << 10)
#define BXT_DSIC_16X_BY3 (2 << 10)
#define BXT_DSIC_16X_BY4 (3 << 10)
#define BXT_DSIC_16X_MASK (3 << 10)
+#define BXT_DSIA_16X_BY1 (0 << 8)
#define BXT_DSIA_16X_BY2 (1 << 8)
#define BXT_DSIA_16X_BY3 (2 << 8)
#define BXT_DSIA_16X_BY4 (3 << 8)
@@ -8317,6 +8378,8 @@ enum {
#define BXT_DSI_PLL_RATIO_MAX 0x7D
#define BXT_DSI_PLL_RATIO_MIN 0x22
+#define GLK_DSI_PLL_RATIO_MAX 0x6F
+#define GLK_DSI_PLL_RATIO_MIN 0x22
#define BXT_DSI_PLL_RATIO_MASK 0xFF
#define BXT_REF_CLOCK_KHZ 19200
@@ -8333,6 +8396,12 @@ enum {
#define _BXT_MIPIC_PORT_CTRL 0x6B8C0
#define BXT_MIPI_PORT_CTRL(tc) _MMIO_MIPI(tc, _BXT_MIPIA_PORT_CTRL, _BXT_MIPIC_PORT_CTRL)
+#define BXT_P_DSI_REGULATOR_CFG _MMIO(0x160020)
+#define STAP_SELECT (1 << 0)
+
+#define BXT_P_DSI_REGULATOR_TX_CTRL _MMIO(0x160054)
+#define HS_IO_CTRL_SELECT (1 << 0)
+
#define DPI_ENABLE (1 << 31) /* A + C */
#define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27
#define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
@@ -8586,6 +8655,14 @@ enum {
#define LP_BYTECLK_SHIFT 0
#define LP_BYTECLK_MASK (0xffff << 0)
+#define _MIPIA_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb0a4)
+#define _MIPIC_TLPX_TIME_COUNT (dev_priv->mipi_mmio_base + 0xb8a4)
+#define MIPI_TLPX_TIME_COUNT(port) _MMIO_MIPI(port, _MIPIA_TLPX_TIME_COUNT, _MIPIC_TLPX_TIME_COUNT)
+
+#define _MIPIA_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb098)
+#define _MIPIC_CLK_LANE_TIMING (dev_priv->mipi_mmio_base + 0xb898)
+#define MIPI_CLK_LANE_TIMING(port) _MMIO_MIPI(port, _MIPIA_CLK_LANE_TIMING, _MIPIC_CLK_LANE_TIMING)
+
/* bits 31:0 */
#define _MIPIA_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb064)
#define _MIPIC_LP_GEN_DATA (dev_priv->mipi_mmio_base + 0xb864)
diff --git a/drivers/gpu/drm/i915/i915_selftest.h b/drivers/gpu/drm/i915/i915_selftest.h
new file mode 100644
index 000000000000..9d7d86f1733d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_selftest.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __I915_SELFTEST_H__
+#define __I915_SELFTEST_H__
+
+struct pci_dev;
+struct drm_i915_private;
+
+struct i915_selftest {
+ unsigned long timeout_jiffies;
+ unsigned int timeout_ms;
+ unsigned int random_seed;
+ int mock;
+ int live;
+};
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include <linux/fault-inject.h>
+
+extern struct i915_selftest i915_selftest;
+
+int i915_mock_selftests(void);
+int i915_live_selftests(struct pci_dev *pdev);
+
+/* We extract the function declarations from i915_mock_selftests.h and
+ * i915_live_selftests.h. Add your unit test declarations there!
+ *
+ * Mock unit tests are run very early upon module load, before the driver
+ * is probed. All hardware interactions, as well as other subsystems, must
+ * be "mocked".
+ *
+ * Live unit tests are run after the driver is loaded - all hardware
+ * interactions are real.
+ */
+#define selftest(name, func) int func(void);
+#include "selftests/i915_mock_selftests.h"
+#undef selftest
+#define selftest(name, func) int func(struct drm_i915_private *i915);
+#include "selftests/i915_live_selftests.h"
+#undef selftest
+
+struct i915_subtest {
+ int (*func)(void *data);
+ const char *name;
+};
+
+int __i915_subtests(const char *caller,
+ const struct i915_subtest *st,
+ unsigned int count,
+ void *data);
+#define i915_subtests(T, data) \
+ __i915_subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#define I915_SELFTEST_DECLARE(x) x
+#define I915_SELFTEST_ONLY(x) unlikely(x)
+
+#else /* !IS_ENABLED(CONFIG_DRM_I915_SELFTEST) */
+
+static inline int i915_mock_selftests(void) { return 0; }
+static inline int i915_live_selftests(struct pci_dev *pdev) { return 0; }
+
+#define I915_SELFTEST_DECLARE(x)
+#define I915_SELFTEST_ONLY(x) 0
+
+#endif
+
+/* Using the i915_selftest_ prefix becomes a little unwieldy with the helpers.
+ * Instead we use the igt_ shorthand, in reference to the intel-gpu-tools
+ * suite of uabi test cases (which includes a test runner for our selftests).
+ */
+
+#define IGT_TIMEOUT(name__) \
+ unsigned long name__ = jiffies + i915_selftest.timeout_jiffies
+
+__printf(2, 3)
+bool __igt_timeout(unsigned long timeout, const char *fmt, ...);
+
+#define igt_timeout(t, fmt, ...) \
+ __igt_timeout((t), KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+
+#define igt_can_mi_store_dword_imm(D) (INTEL_GEN(D) > 2)
+
+#endif /* !__I915_SELFTEST_H__ */
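
A hypothetical selftest built on this harness would look roughly like the following (not a real test from the tree); its entry point would also need a matching selftest(name, func) line in selftests/i915_mock_selftests.h to be picked up:

        static int igt_sanity(void *data)
        {
                return 0;       /* 0 = pass, negative errno = failure */
        }

        int igt_dummy_selftests(void)
        {
                static const struct i915_subtest tests[] = {
                        SUBTEST(igt_sanity),
                };

                return i915_subtests(tests, NULL);
        }
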
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 376ac957cd1c..f3fdfda5e558 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -42,32 +42,8 @@ static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
static u32 calc_residency(struct drm_i915_private *dev_priv,
i915_reg_t reg)
{
- u64 raw_time; /* 32b value may overflow during fixed point math */
- u64 units = 128ULL, div = 100000ULL;
- u32 ret;
-
- if (!intel_enable_rc6())
- return 0;
-
- intel_runtime_pm_get(dev_priv);
-
- /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- units = 1;
- div = dev_priv->czclk_freq;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- units <<= 8;
- } else if (IS_GEN9_LP(dev_priv)) {
- units = 1;
- div = 1200; /* 833.33ns */
- }
-
- raw_time = I915_READ(reg) * units;
- ret = DIV_ROUND_UP_ULL(raw_time, div);
-
- intel_runtime_pm_put(dev_priv);
- return ret;
+ return DIV_ROUND_CLOSEST_ULL(intel_rc6_residency_us(dev_priv, reg),
+ 1000);
}
static ssize_t
@@ -395,13 +371,13 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new max_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev_priv, val);
+ ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
- return count;
+ return ret ?: count;
}
static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
@@ -448,14 +424,13 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
/* We still need *_set_rps to process the new min_delay and
* update the interrupt limits and PMINTRMSK even though
* frequency request may be unchanged. */
- intel_set_rps(dev_priv, val);
+ ret = intel_set_rps(dev_priv, val);
mutex_unlock(&dev_priv->rps.hw_lock);
intel_runtime_pm_put(dev_priv);
- return count;
-
+ return ret ?: count;
}
static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
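
The `return ret ?: count;` form is the GNU "elvis" extension (x ?: y is x ? x : y with x evaluated once), used freely in the kernel: the store now propagates a failure from intel_set_rps() instead of silently claiming the write succeeded. The long-hand equivalent, as a sketch:

        #include <linux/types.h>

        static ssize_t finish_store(int ret, size_t count)
        {
                /* what "return ret ?: count;" expands to */
                if (ret)
                        return ret;     /* negative errno from the setter */
                return count;           /* success: all bytes consumed */
        }
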
@@ -523,33 +498,27 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- struct drm_device *dev = &dev_priv->drm;
- struct i915_error_state_file_priv error_priv;
struct drm_i915_error_state_buf error_str;
- ssize_t ret_count = 0;
- int ret;
-
- memset(&error_priv, 0, sizeof(error_priv));
+ struct i915_gpu_state *gpu;
+ ssize_t ret;
- ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
+ ret = i915_error_state_buf_init(&error_str, dev_priv, count, off);
if (ret)
return ret;
- error_priv.i915 = dev_priv;
- i915_error_state_get(dev, &error_priv);
-
- ret = i915_error_state_to_str(&error_str, &error_priv);
+ gpu = i915_first_error_state(dev_priv);
+ ret = i915_error_state_to_str(&error_str, gpu);
if (ret)
goto out;
- ret_count = count < error_str.bytes ? count : error_str.bytes;
+ ret = count < error_str.bytes ? count : error_str.bytes;
+ memcpy(buf, error_str.buf, ret);
- memcpy(buf, error_str.buf, ret_count);
out:
- i915_error_state_put(&error_priv);
+ i915_gpu_state_put(gpu);
i915_error_state_buf_release(&error_str);
- return ret ?: ret_count;
+ return ret;
}
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
@@ -560,7 +529,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
DRM_DEBUG_DRIVER("Resetting error state\n");
- i915_destroy_error_state(dev_priv);
+ i915_reset_error_state(dev_priv);
return count;
}
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 4461df5a94fe..66404c5aee82 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -14,6 +14,206 @@
#define TRACE_SYSTEM i915
#define TRACE_INCLUDE_FILE i915_trace
+/* watermark/fifo updates */
+
+TRACE_EVENT(intel_cpu_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum pipe pipe),
+ TP_ARGS(dev_priv, pipe),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = pipe;
+ __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
+ __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_pch_fifo_underrun,
+ TP_PROTO(struct drm_i915_private *dev_priv, enum transcoder pch_transcoder),
+ TP_ARGS(dev_priv, pch_transcoder),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ enum pipe pipe = (enum pipe)pch_transcoder;
+ __entry->pipe = pipe;
+ __entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
+ __entry->scanline = intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ ),
+
+ TP_printk("pch transcoder %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe),
+ __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_memory_cxsr,
+ TP_PROTO(struct drm_i915_private *dev_priv, bool old, bool new),
+ TP_ARGS(dev_priv, old, new),
+
+ TP_STRUCT__entry(
+ __array(u32, frame, 3)
+ __array(u32, scanline, 3)
+ __field(bool, old)
+ __field(bool, new)
+ ),
+
+ TP_fast_assign(
+ enum pipe pipe;
+ for_each_pipe(dev_priv, pipe) {
+ __entry->frame[pipe] =
+ dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm, pipe);
+ __entry->scanline[pipe] =
+ intel_get_crtc_scanline(intel_get_crtc_for_pipe(dev_priv, pipe));
+ }
+ __entry->old = old;
+ __entry->new = new;
+ ),
+
+ TP_printk("%s->%s, pipe A: frame=%u, scanline=%u, pipe B: frame=%u, scanline=%u, pipe C: frame=%u, scanline=%u",
+ onoff(__entry->old), onoff(__entry->new),
+ __entry->frame[PIPE_A], __entry->scanline[PIPE_A],
+ __entry->frame[PIPE_B], __entry->scanline[PIPE_B],
+ __entry->frame[PIPE_C], __entry->scanline[PIPE_C])
+);
+
+TRACE_EVENT(vlv_wm,
+ TP_PROTO(struct intel_crtc *crtc, const struct vlv_wm_values *wm),
+ TP_ARGS(crtc, wm),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, level)
+ __field(u32, cxsr)
+ __field(u32, primary)
+ __field(u32, sprite0)
+ __field(u32, sprite1)
+ __field(u32, cursor)
+ __field(u32, sr_plane)
+ __field(u32, sr_cursor)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->level = wm->level;
+ __entry->cxsr = wm->cxsr;
+ __entry->primary = wm->pipe[crtc->pipe].plane[PLANE_PRIMARY];
+ __entry->sprite0 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE0];
+ __entry->sprite1 = wm->pipe[crtc->pipe].plane[PLANE_SPRITE1];
+ __entry->cursor = wm->pipe[crtc->pipe].plane[PLANE_CURSOR];
+ __entry->sr_plane = wm->sr.plane;
+ __entry->sr_cursor = wm->sr.cursor;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, level=%d, cxsr=%d, wm %d/%d/%d/%d, sr %d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->level, __entry->cxsr,
+ __entry->primary, __entry->sprite0, __entry->sprite1, __entry->cursor,
+ __entry->sr_plane, __entry->sr_cursor)
+);
+
+TRACE_EVENT(vlv_fifo_size,
+ TP_PROTO(struct intel_crtc *crtc, u32 sprite0_start, u32 sprite1_start, u32 fifo_size),
+ TP_ARGS(crtc, sprite0_start, sprite1_start, fifo_size),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __field(u32, sprite0_start)
+ __field(u32, sprite1_start)
+ __field(u32, fifo_size)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ __entry->sprite0_start = sprite0_start;
+ __entry->sprite1_start = sprite1_start;
+ __entry->fifo_size = fifo_size;
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u, %d/%d/%d",
+ pipe_name(__entry->pipe), __entry->frame,
+ __entry->scanline, __entry->sprite0_start,
+ __entry->sprite1_start, __entry->fifo_size)
+);
+
+/* plane updates */
+
+TRACE_EVENT(intel_update_plane,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(const char *, name)
+ __field(u32, frame)
+ __field(u32, scanline)
+ __array(int, src, 4)
+ __array(int, dst, 4)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->name = plane->name;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ memcpy(__entry->src, &plane->state->src, sizeof(__entry->src));
+ memcpy(__entry->dst, &plane->state->dst, sizeof(__entry->dst));
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u, " DRM_RECT_FP_FMT " -> " DRM_RECT_FMT,
+ pipe_name(__entry->pipe), __entry->name,
+ __entry->frame, __entry->scanline,
+ DRM_RECT_FP_ARG((const struct drm_rect *)__entry->src),
+ DRM_RECT_ARG((const struct drm_rect *)__entry->dst))
+);
+
+TRACE_EVENT(intel_disable_plane,
+ TP_PROTO(struct drm_plane *plane, struct intel_crtc *crtc),
+ TP_ARGS(plane, crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(const char *, name)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->name = plane->name;
+ __entry->frame = crtc->base.dev->driver->get_vblank_counter(crtc->base.dev,
+ crtc->pipe);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, plane %s, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->name,
+ __entry->frame, __entry->scanline)
+);
+
/* pipe updates */
TRACE_EVENT(i915_pipe_update_start,
@@ -175,134 +375,6 @@ TRACE_EVENT(i915_vma_unbind,
__entry->obj, __entry->offset, __entry->size, __entry->vm)
);
-TRACE_EVENT(i915_va_alloc,
- TP_PROTO(struct i915_vma *vma),
- TP_ARGS(vma),
-
- TP_STRUCT__entry(
- __field(struct i915_address_space *, vm)
- __field(u64, start)
- __field(u64, end)
- ),
-
- TP_fast_assign(
- __entry->vm = vma->vm;
- __entry->start = vma->node.start;
- __entry->end = vma->node.start + vma->node.size - 1;
- ),
-
- TP_printk("vm=%p (%c), 0x%llx-0x%llx",
- __entry->vm, i915_is_ggtt(__entry->vm) ? 'G' : 'P', __entry->start, __entry->end)
-);
-
-DECLARE_EVENT_CLASS(i915_px_entry,
- TP_PROTO(struct i915_address_space *vm, u32 px, u64 start, u64 px_shift),
- TP_ARGS(vm, px, start, px_shift),
-
- TP_STRUCT__entry(
- __field(struct i915_address_space *, vm)
- __field(u32, px)
- __field(u64, start)
- __field(u64, end)
- ),
-
- TP_fast_assign(
- __entry->vm = vm;
- __entry->px = px;
- __entry->start = start;
- __entry->end = ((start + (1ULL << px_shift)) & ~((1ULL << px_shift)-1)) - 1;
- ),
-
- TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
- __entry->vm, __entry->px, __entry->start, __entry->end)
-);
-
-DEFINE_EVENT(i915_px_entry, i915_page_table_entry_alloc,
- TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
- TP_ARGS(vm, pde, start, pde_shift)
-);
-
-DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_entry_alloc,
- TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
- TP_ARGS(vm, pdpe, start, pdpe_shift),
-
- TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
- __entry->vm, __entry->px, __entry->start, __entry->end)
-);
-
-DEFINE_EVENT_PRINT(i915_px_entry, i915_page_directory_pointer_entry_alloc,
- TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
- TP_ARGS(vm, pml4e, start, pml4e_shift),
-
- TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
- __entry->vm, __entry->px, __entry->start, __entry->end)
-);
-
-/* Avoid extra math because we only support two sizes. The format is defined by
- * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
-#define TRACE_PT_SIZE(bits) \
- ((((bits) == 1024) ? 288 : 144) + 1)
-
-DECLARE_EVENT_CLASS(i915_page_table_entry_update,
- TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table *pt, u32 first, u32 count, u32 bits),
- TP_ARGS(vm, pde, pt, first, count, bits),
-
- TP_STRUCT__entry(
- __field(struct i915_address_space *, vm)
- __field(u32, pde)
- __field(u32, first)
- __field(u32, last)
- __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits))
- ),
-
- TP_fast_assign(
- __entry->vm = vm;
- __entry->pde = pde;
- __entry->first = first;
- __entry->last = first + count - 1;
- scnprintf(__get_str(cur_ptes),
- TRACE_PT_SIZE(bits),
- "%*pb",
- bits,
- pt->used_ptes);
- ),
-
- TP_printk("vm=%p, pde=%d, updating %u:%u\t%s",
- __entry->vm, __entry->pde, __entry->last, __entry->first,
- __get_str(cur_ptes))
-);
-
-DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map,
- TP_PROTO(struct i915_address_space *vm, u32 pde,
- struct i915_page_table *pt, u32 first, u32 count, u32 bits),
- TP_ARGS(vm, pde, pt, first, count, bits)
-);
-
-TRACE_EVENT(i915_gem_object_change_domain,
- TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write),
- TP_ARGS(obj, old_read, old_write),
-
- TP_STRUCT__entry(
- __field(struct drm_i915_gem_object *, obj)
- __field(u32, read_domains)
- __field(u32, write_domain)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- __entry->read_domains = obj->base.read_domains | (old_read << 16);
- __entry->write_domain = obj->base.write_domain | (old_write << 16);
- ),
-
- TP_printk("obj=%p, read=%02x=>%02x, write=%02x=>%02x",
- __entry->obj,
- __entry->read_domains >> 16,
- __entry->read_domains & 0xffff,
- __entry->write_domain >> 16,
- __entry->write_domain & 0xffff)
-);
-
TRACE_EVENT(i915_gem_object_pwrite,
TP_PROTO(struct drm_i915_gem_object *obj, u32 offset, u32 len),
TP_ARGS(obj, offset, len),
@@ -503,13 +575,14 @@ TRACE_EVENT(i915_gem_ring_sync_to,
__entry->seqno)
);
-TRACE_EVENT(i915_gem_ring_dispatch,
+TRACE_EVENT(i915_gem_request_queue,
TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
__field(u32, flags)
),
@@ -517,13 +590,14 @@ TRACE_EVENT(i915_gem_ring_dispatch,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->global_seqno;
+ __entry->ctx = req->fence.context;
+ __entry->seqno = req->fence.seqno;
__entry->flags = flags;
- dma_fence_enable_sw_signaling(&req->fence);
),
- TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
- __entry->dev, __entry->ring, __entry->seqno, __entry->flags)
+ TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, flags=0x%x",
+ __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
+ __entry->flags)
);
TRACE_EVENT(i915_gem_ring_flush,
@@ -555,18 +629,23 @@ DECLARE_EVENT_CLASS(i915_gem_request,
TP_STRUCT__entry(
__field(u32, dev)
+ __field(u32, ctx)
__field(u32, ring)
__field(u32, seqno)
+ __field(u32, global)
),
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->global_seqno;
+ __entry->ctx = req->fence.context;
+ __entry->seqno = req->fence.seqno;
+ __entry->global = req->global_seqno;
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u",
+ __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
+ __entry->global)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
@@ -574,24 +653,100 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
TP_ARGS(req)
);
-TRACE_EVENT(i915_gem_request_notify,
- TP_PROTO(struct intel_engine_cs *engine),
- TP_ARGS(engine),
+#if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+DEFINE_EVENT(i915_gem_request, i915_gem_request_submit,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_execute,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(i915_gem_request_hw,
+ TP_PROTO(struct drm_i915_gem_request *req,
+ unsigned int port),
+ TP_ARGS(req, port),
+
+ TP_STRUCT__entry(
+ __field(u32, dev)
+ __field(u32, ring)
+ __field(u32, seqno)
+ __field(u32, global_seqno)
+ __field(u32, ctx)
+ __field(u32, port)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = req->i915->drm.primary->index;
+ __entry->ring = req->engine->id;
+ __entry->ctx = req->fence.context;
+ __entry->seqno = req->fence.seqno;
+ __entry->global_seqno = req->global_seqno;
+ __entry->port = port;
+ ),
+
+ TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, port=%u",
+ __entry->dev, __entry->ring, __entry->ctx,
+ __entry->seqno, __entry->global_seqno,
+ __entry->port)
+);
+
+DEFINE_EVENT(i915_gem_request_hw, i915_gem_request_in,
+ TP_PROTO(struct drm_i915_gem_request *req, unsigned int port),
+ TP_ARGS(req, port)
+);
+
+DEFINE_EVENT(i915_gem_request, i915_gem_request_out,
+ TP_PROTO(struct drm_i915_gem_request *req),
+ TP_ARGS(req)
+);
+#else
+#if !defined(TRACE_HEADER_MULTI_READ)
+static inline void
+trace_i915_gem_request_submit(struct drm_i915_gem_request *req)
+{
+}
+
+static inline void
+trace_i915_gem_request_execute(struct drm_i915_gem_request *req)
+{
+}
+
+static inline void
+trace_i915_gem_request_in(struct drm_i915_gem_request *req, unsigned int port)
+{
+}
+
+static inline void
+trace_i915_gem_request_out(struct drm_i915_gem_request *req)
+{
+}
+#endif
+#endif
+
+TRACE_EVENT(intel_engine_notify,
+ TP_PROTO(struct intel_engine_cs *engine, bool waiters),
+ TP_ARGS(engine, waiters),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
__field(u32, seqno)
+ __field(bool, waiters)
),
TP_fast_assign(
__entry->dev = engine->i915->drm.primary->index;
__entry->ring = engine->id;
__entry->seqno = intel_engine_get_seqno(engine);
+ __entry->waiters = waiters;
),
- TP_printk("dev=%u, ring=%u, seqno=%u",
- __entry->dev, __entry->ring, __entry->seqno)
+ TP_printk("dev=%u, ring=%u, seqno=%u, waiters=%u",
+ __entry->dev, __entry->ring, __entry->seqno,
+ __entry->waiters)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
@@ -599,20 +754,17 @@ DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
TP_ARGS(req)
);
-DEFINE_EVENT(i915_gem_request, i915_gem_request_complete,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req)
-);
-
TRACE_EVENT(i915_gem_request_wait_begin,
- TP_PROTO(struct drm_i915_gem_request *req),
- TP_ARGS(req),
+ TP_PROTO(struct drm_i915_gem_request *req, unsigned int flags),
+ TP_ARGS(req, flags),
TP_STRUCT__entry(
__field(u32, dev)
__field(u32, ring)
+ __field(u32, ctx)
__field(u32, seqno)
- __field(bool, blocking)
+ __field(u32, global)
+ __field(unsigned int, flags)
),
/* NB: the blocking information is racy since mutex_is_locked
@@ -624,14 +776,16 @@ TRACE_EVENT(i915_gem_request_wait_begin,
TP_fast_assign(
__entry->dev = req->i915->drm.primary->index;
__entry->ring = req->engine->id;
- __entry->seqno = req->global_seqno;
- __entry->blocking =
- mutex_is_locked(&req->i915->drm.struct_mutex);
+ __entry->ctx = req->fence.context;
+ __entry->seqno = req->fence.seqno;
+ __entry->global = req->global_seqno;
+ __entry->flags = flags;
),
- TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
- __entry->dev, __entry->ring,
- __entry->seqno, __entry->blocking ? "yes (NB)" : "no")
+ TP_printk("dev=%u, ring=%u, ctx=%u, seqno=%u, global=%u, blocking=%u, flags=0x%x",
+ __entry->dev, __entry->ring, __entry->ctx, __entry->seqno,
+ __entry->global, !!(__entry->flags & I915_WAIT_LOCKED),
+ __entry->flags)
);
DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
@@ -769,17 +923,19 @@ DECLARE_EVENT_CLASS(i915_context,
TP_STRUCT__entry(
__field(u32, dev)
__field(struct i915_gem_context *, ctx)
+ __field(u32, hw_id)
__field(struct i915_address_space *, vm)
),
TP_fast_assign(
+ __entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
+ __entry->hw_id = ctx->hw_id;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
- __entry->dev = ctx->i915->drm.primary->index;
),
- TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
- __entry->dev, __entry->ctx, __entry->vm)
+ TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
+ __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id)
)
DEFINE_EVENT(i915_context, i915_context_create,
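
Two things worth noting about the tracepoint rework above. First, the new events land in the usual tracefs hierarchy (TRACE_SYSTEM is i915), so e.g. intel_update_plane and intel_cpu_fifo_underrun can be enabled under events/i915/ at runtime. Second, the low-level request events are gated behind CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS with empty inline stubs, so call sites compile unchanged either way. The stub pattern in miniature (illustrative names):

        #if defined(CONFIG_MY_LOW_LEVEL_TRACEPOINTS)
        DEFINE_EVENT(my_request_class, my_request_submit,
                TP_PROTO(struct my_request *rq),
                TP_ARGS(rq)
        );
        #else
        /* Same name the TRACE_EVENT machinery would generate, but empty. */
        static inline void trace_my_request_submit(struct my_request *rq)
        {
        }
        #endif
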
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 34020873e1f6..94a3a3299910 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -25,6 +25,17 @@
#ifndef __I915_UTILS_H
#define __I915_UTILS_H
+#if GCC_VERSION >= 70000
+#define add_overflows(A, B) \
+ __builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
+#else
+#define add_overflows(A, B) ({ \
+ typeof(A) a = (A); \
+ typeof(B) b = (B); \
+ a + b < a; \
+})
+#endif
+
#define range_overflows(start, size, max) ({ \
typeof(start) start__ = (start); \
typeof(size) size__ = (size); \
@@ -55,6 +66,8 @@
#define ptr_pack_bits(ptr, bits) \
((typeof(ptr))((unsigned long)(ptr) | (bits)))
+#define ptr_offset(ptr, member) offsetof(typeof(*(ptr)), member)
+
#define fetch_and_zero(ptr) ({ \
typeof(*ptr) __T = *(ptr); \
*(ptr) = (typeof(*ptr))0; \
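
The fallback branch of add_overflows relies on unsigned wrap-around being well defined: if a + b wrapped, the result is necessarily smaller than either operand. On GCC >= 7 the builtin gives the same answer without double-evaluating its operands. A standalone check of the fallback logic:

        #include <assert.h>
        #include <limits.h>

        static int add_overflows_uint(unsigned int a, unsigned int b)
        {
                return a + b < a;       /* true iff the sum wrapped */
        }

        int main(void)
        {
                assert(!add_overflows_uint(1, 2));
                assert(add_overflows_uint(UINT_MAX, 1));
                return 0;
        }
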
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d0abfd08a01c..4ab8a973b61f 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -179,7 +179,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{
struct i915_ggtt *ggtt = &dev_priv->ggtt;
- unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
+ unsigned long ggtt_end = ggtt->base.total;
unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,8 +202,7 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
- if (mappable_base < ggtt->base.start ||
- mappable_end > ggtt->mappable_end ||
+ if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
@@ -219,21 +218,17 @@ int intel_vgt_balloon(struct drm_i915_private *dev_priv)
goto err;
}
- /*
- * No need to partition out the last physical page,
- * because it is reserved to the guard page.
- */
- if (unmappable_end < ggtt_end - PAGE_SIZE) {
+ if (unmappable_end < ggtt_end) {
ret = vgt_balloon_space(ggtt, &bl_info.space[3],
- unmappable_end, ggtt_end - PAGE_SIZE);
+ unmappable_end, ggtt_end);
if (ret)
goto err;
}
/* Mappable graphic memory ballooning */
- if (mappable_base > ggtt->base.start) {
+ if (mappable_base) {
ret = vgt_balloon_space(ggtt, &bl_info.space[0],
- ggtt->base.start, mappable_base);
+ 0, mappable_base);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 155906e84812..1aba47024656 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -78,6 +78,9 @@ vma_create(struct drm_i915_gem_object *obj,
struct rb_node *rb, **p;
int i;
+ /* The aliasing_ppgtt should never be used directly! */
+ GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
+
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL)
return ERR_PTR(-ENOMEM);
@@ -238,7 +241,15 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 vma_flags;
int ret;
- if (WARN_ON(flags == 0))
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->size > vma->node.size);
+
+ if (GEM_WARN_ON(range_overflows(vma->node.start,
+ vma->node.size,
+ vma->vm->total)))
+ return -ENODEV;
+
+ if (GEM_WARN_ON(!flags))
return -EINVAL;
bind_flags = 0;
@@ -255,20 +266,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
if (bind_flags == 0)
return 0;
- if (GEM_WARN_ON(range_overflows(vma->node.start,
- vma->node.size,
- vma->vm->total)))
- return -ENODEV;
-
- if (vma_flags == 0 && vma->vm->allocate_va_range) {
- trace_i915_va_alloc(vma);
- ret = vma->vm->allocate_va_range(vma->vm,
- vma->node.start,
- vma->node.size);
- if (ret)
- return ret;
- }
-
trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
@@ -324,8 +321,8 @@ void i915_vma_unpin_and_release(struct i915_vma **p_vma)
__i915_gem_object_release_unless_active(obj);
}
-bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+bool i915_vma_misplaced(const struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags)
{
if (!drm_mm_node_allocated(&vma->node))
return false;
@@ -512,10 +509,36 @@ err_unpin:
return ret;
}
+static void
+i915_vma_remove(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
+
+ drm_mm_remove_node(&vma->node);
+ list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ */
+ if (--obj->bind_count == 0)
+ list_move_tail(&obj->global_link,
+ &to_i915(obj->base.dev)->mm.unbound_list);
+
+ /* And finally now the object is completely decoupled from this vma,
+ * we can drop its hold on the backing storage and allow it to be
+ * reaped by the shrinker.
+ */
+ i915_gem_object_unpin_pages(obj);
+ GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+}
+
int __i915_vma_do_pin(struct i915_vma *vma,
u64 size, u64 alignment, u64 flags)
{
- unsigned int bound = vma->flags;
+ const unsigned int bound = vma->flags;
int ret;
lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
@@ -524,18 +547,18 @@ int __i915_vma_do_pin(struct i915_vma *vma,
if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
ret = -EBUSY;
- goto err;
+ goto err_unpin;
}
if ((bound & I915_VMA_BIND_MASK) == 0) {
ret = i915_vma_insert(vma, size, alignment, flags);
if (ret)
- goto err;
+ goto err_unpin;
}
ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
if (ret)
- goto err;
+ goto err_remove;
if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
__i915_vma_set_map_and_fenceable(vma);
@@ -544,7 +567,12 @@ int __i915_vma_do_pin(struct i915_vma *vma,
GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
return 0;
-err:
+err_remove:
+ if ((bound & I915_VMA_BIND_MASK) == 0) {
+ GEM_BUG_ON(vma->pages);
+ i915_vma_remove(vma);
+ }
+err_unpin:
__i915_vma_unpin(vma);
return ret;
}
@@ -657,9 +685,6 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
- drm_mm_remove_node(&vma->node);
- list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
-
if (vma->pages != obj->mm.pages) {
GEM_BUG_ON(!vma->pages);
sg_free_table(vma->pages);
@@ -667,18 +692,7 @@ int i915_vma_unbind(struct i915_vma *vma)
}
vma->pages = NULL;
- /* Since the unbound list is global, only move to that list if
- * no more VMAs exist. */
- if (--obj->bind_count == 0)
- list_move_tail(&obj->global_link,
- &to_i915(obj->base.dev)->mm.unbound_list);
-
- /* And finally now the object is completely decoupled from this vma,
- * we can drop its hold on the backing storage and allow it to be
- * reaped by the shrinker.
- */
- i915_gem_object_unpin_pages(obj);
- GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count);
+ i915_vma_remove(vma);
destroy:
if (unlikely(i915_vma_is_closed(vma)))
@@ -687,3 +701,6 @@ destroy:
return 0;
}
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/i915_vma.c"
+#endif
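
The reshuffled error path in __i915_vma_do_pin() is the kernel's layered-goto idiom: each label undoes only the steps that had already succeeded, in reverse order, and the node is only removed when this call was the one that inserted it. Simplified to its shape (hypothetical helpers, with the failure hard-coded for illustration):

        #include <errno.h>

        struct obj { int inserted; };

        static int insert_node(struct obj *o)  { o->inserted = 1; return 0; }
        static int bind_node(struct obj *o)    { return -ENODEV; /* simulate failure */ }
        static void remove_node(struct obj *o) { o->inserted = 0; }
        static void unpin(struct obj *o)       { (void)o; }

        static int do_pin(struct obj *o)
        {
                int ret;

                ret = insert_node(o);
                if (ret)
                        goto err_unpin;

                ret = bind_node(o);
                if (ret)
                        goto err_remove;        /* undo the insert, then the pin */

                return 0;

        err_remove:
                remove_node(o);
        err_unpin:
                unpin(o);
                return ret;
        }
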
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index e39d922cfb6f..2e03f81dddbe 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -228,8 +228,8 @@ i915_vma_compare(struct i915_vma *vma,
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level);
-bool
-i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+bool i915_vma_misplaced(const struct i915_vma *vma,
+ u64 size, u64 alignment, u64 flags);
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
void i915_vma_close(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index aa9160e7f1d8..50fb1f76cc5f 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -99,6 +99,7 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state->update_wm_pre = false;
crtc_state->update_wm_post = false;
crtc_state->fb_changed = false;
+ crtc_state->fifo_changed = false;
crtc_state->wm.need_postvbl_update = false;
crtc_state->fb_bits = 0;
@@ -121,7 +122,7 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
/**
* intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
- * @dev: DRM device
+ * @dev_priv: i915 device
* @crtc: intel crtc
* @crtc_state: incoming crtc_state to validate and setup scalers
*
@@ -136,9 +137,9 @@ intel_crtc_destroy_state(struct drm_crtc *crtc,
 * 0 - scalers were setup successfully
* error code - otherwise
*/
-int intel_atomic_setup_scalers(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state)
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_plane *plane = NULL;
struct intel_plane *intel_plane;
@@ -199,7 +200,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
*/
if (!plane) {
struct drm_plane_state *state;
- plane = drm_plane_from_index(dev, i);
+ plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
@@ -247,7 +248,9 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
}
/* set scaler mode */
- if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
+ if (IS_GEMINILAKE(dev_priv)) {
+ scaler_state->scalers[*scaler_id].mode = 0;
+ } else if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
/*
* when only 1 scaler is in use on either pipe A or B,
* scaler 0 operates in high quality (HQ) mode.
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index 41fd94e62d3c..cfb47293fd53 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -189,6 +189,12 @@ int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
+ /* FIXME pre-g4x don't work like this */
+ if (intel_state->base.visible)
+ crtc_state->active_planes |= BIT(intel_plane->id);
+ else
+ crtc_state->active_planes &= ~BIT(intel_plane->id);
+
return intel_plane_atomic_calc_changes(&crtc_state->base, state);
}
@@ -225,12 +231,19 @@ static void intel_plane_atomic_update(struct drm_plane *plane,
to_intel_plane_state(plane->state);
struct drm_crtc *crtc = plane->state->crtc ?: old_state->crtc;
- if (intel_state->base.visible)
+ if (intel_state->base.visible) {
+ trace_intel_update_plane(plane,
+ to_intel_crtc(crtc));
+
intel_plane->update_plane(plane,
to_intel_crtc_state(crtc->state),
intel_state);
- else
+ } else {
+ trace_intel_disable_plane(plane,
+ to_intel_crtc(crtc));
+
intel_plane->disable_plane(plane, crtc);
+ }
}
const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
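
Tracking plane visibility as a per-crtc bitmask, as the new active_planes handling does, is cheap both to update and to test. The operation in isolation (illustrative, userspace-flavoured):

        #include <stdint.h>

        #define BIT(n) (1u << (n))

        static void track_plane(uint32_t *active_planes, unsigned int plane_id,
                                int visible)
        {
                if (visible)
                        *active_planes |= BIT(plane_id);
                else
                        *active_planes &= ~BIT(plane_id);
        }
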
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d76f3033e890..52c207e81f41 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -720,7 +720,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
u32 tmp;
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
+ if (!IS_GEN9_BC(dev_priv))
return;
i915_audio_component_get_power(kdev);
@@ -752,7 +752,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
return -ENODEV;
- return dev_priv->cdclk_freq;
+ return dev_priv->cdclk.hw.cdclk;
}
/*
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e144f033f4b5..639d45c1dd2e 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1341,6 +1341,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
return;
}
+/* Common defaults which may be overridden by VBT. */
static void
init_vbt_defaults(struct drm_i915_private *dev_priv)
{
@@ -1377,6 +1378,18 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
&dev_priv->vbt.ddi_port_info[port];
info->hdmi_level_shift = HDMI_LEVEL_SHIFT_UNKNOWN;
+ }
+}
+
+/* Defaults to initialize only if there is no VBT. */
+static void
+init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
+{
+ enum port port;
+
+ for (port = PORT_A; port < I915_MAX_PORTS; port++) {
+ struct ddi_vbt_port_info *info =
+ &dev_priv->vbt.ddi_port_info[port];
info->supports_dvi = (port != PORT_A && port != PORT_E);
info->supports_hdmi = info->supports_dvi;
@@ -1462,36 +1475,35 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
* intel_bios_init - find VBT and initialize settings from the BIOS
* @dev_priv: i915 device instance
*
- * Loads the Video BIOS and checks that the VBT exists. Sets scratch registers
- * to appropriate values.
- *
- * Returns 0 on success, nonzero on failure.
+ * Parse and initialize settings from the Video BIOS Tables (VBT). If the VBT
+ * was not found in ACPI OpRegion, try to find it in PCI ROM first. Also
+ * initialize some defaults if the VBT is not present at all.
*/
-int
-intel_bios_init(struct drm_i915_private *dev_priv)
+void intel_bios_init(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
const struct vbt_header *vbt = dev_priv->opregion.vbt;
const struct bdb_header *bdb;
u8 __iomem *bios = NULL;
- if (HAS_PCH_NOP(dev_priv))
- return -ENODEV;
+ if (HAS_PCH_NOP(dev_priv)) {
+ DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+ return;
+ }
init_vbt_defaults(dev_priv);
+ /* If the OpRegion does not have VBT, look in PCI ROM. */
if (!vbt) {
size_t size;
bios = pci_map_rom(pdev, &size);
if (!bios)
- return -1;
+ goto out;
vbt = find_vbt(bios, size);
- if (!vbt) {
- pci_unmap_rom(pdev, bios);
- return -1;
- }
+ if (!vbt)
+ goto out;
DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
}
@@ -1516,10 +1528,14 @@ intel_bios_init(struct drm_i915_private *dev_priv)
parse_mipi_sequence(dev_priv, bdb);
parse_ddi_ports(dev_priv, bdb);
+out:
+ if (!vbt) {
+ DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+ init_vbt_missing_defaults(dev_priv);
+ }
+
if (bios)
pci_unmap_rom(pdev, bios);
-
- return 0;
}
/**
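
The rewritten intel_bios_init() now degrades gracefully instead of failing the load: try the VBT from the ACPI OpRegion, fall back to scanning the PCI ROM, and if neither yields a VBT, install port defaults so the display code still has sane data. The control flow reduced to a sketch (hypothetical helpers):

        static void bios_init(struct ctx *c)
        {
                const void *vbt = opregion_vbt(c);      /* 1. ACPI OpRegion */

                init_defaults(c);

                if (!vbt)
                        vbt = find_vbt_in_rom(c);       /* 2. PCI ROM */

                if (vbt)
                        parse_vbt(c, vbt);
                else
                        init_missing_defaults(c);       /* 3. no VBT found */
        }
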
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index 7044e9a6abf7..ba986edee312 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -27,22 +27,104 @@
#include "i915_drv.h"
+static unsigned int __intel_breadcrumbs_wakeup(struct intel_breadcrumbs *b)
+{
+ struct intel_wait *wait;
+ unsigned int result = 0;
+
+ lockdep_assert_held(&b->irq_lock);
+
+ wait = b->irq_wait;
+ if (wait) {
+ result = ENGINE_WAKEUP_WAITER;
+ if (wake_up_process(wait->tsk))
+ result |= ENGINE_WAKEUP_ASLEEP;
+ }
+
+ return result;
+}
+
+unsigned int intel_engine_wakeup(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ unsigned int result;
+
+ spin_lock_irq(&b->irq_lock);
+ result = __intel_breadcrumbs_wakeup(b);
+ spin_unlock_irq(&b->irq_lock);
+
+ return result;
+}
+
+static unsigned long wait_timeout(void)
+{
+ return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
+}
+
+static noinline void missed_breadcrumb(struct intel_engine_cs *engine)
+{
+ DRM_DEBUG_DRIVER("%s missed breadcrumb at %pF, irq posted? %s\n",
+ engine->name, __builtin_return_address(0),
+ yesno(test_bit(ENGINE_IRQ_BREADCRUMB,
+ &engine->irq_posted)));
+
+ set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
+}
+
static void intel_breadcrumbs_hangcheck(unsigned long data)
{
struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- if (!b->irq_enabled)
+ if (!b->irq_armed)
return;
- if (time_before(jiffies, b->timeout)) {
- mod_timer(&b->hangcheck, b->timeout);
+ if (b->hangcheck_interrupts != atomic_read(&engine->irq_count)) {
+ b->hangcheck_interrupts = atomic_read(&engine->irq_count);
+ mod_timer(&b->hangcheck, wait_timeout());
return;
}
- DRM_DEBUG("Hangcheck timer elapsed... %s idle\n", engine->name);
- set_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings);
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ /* We keep the hangcheck timer alive until we disarm the irq, even
+ * if there are no waiters at present.
+ *
+ * If the waiter was currently running, assume it hasn't had a chance
+ * to process the pending interrupt (e.g, low priority task on a loaded
+ * system) and wait until it sleeps before declaring a missed interrupt.
+ *
+ * If the waiter was asleep (and not even pending a wakeup), then we
+ * must have missed an interrupt as the GPU has stopped advancing
+ * but we still have a waiter. We assume all batches complete within
+ * DRM_I915_HANGCHECK_JIFFIES [1.5s]!
+ */
+ if (intel_engine_wakeup(engine) & ENGINE_WAKEUP_ASLEEP) {
+ missed_breadcrumb(engine);
+ mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
+ } else {
+ mod_timer(&b->hangcheck, wait_timeout());
+ }
+}
+
+static void intel_breadcrumbs_fake_irq(unsigned long data)
+{
+ struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ /* The timer persists in case we cannot enable interrupts,
+ * or if we have previously seen seqno/interrupt incoherency
+ * ("missed interrupt" syndrome, better known as a "missed breadcrumb").
+ * Here the worker will wake up every jiffie in order to kick the
+ * oldest waiter to do the coherent seqno check.
+ */
+
+ spin_lock_irq(&b->irq_lock);
+ if (!__intel_breadcrumbs_wakeup(b))
+ __intel_engine_disarm_breadcrumbs(engine);
+ spin_unlock_irq(&b->irq_lock);
+ if (!b->irq_armed)
+ return;
+
+ mod_timer(&b->fake_irq, jiffies + 1);
/* Ensure that even if the GPU hangs, we get woken up.
*
@@ -56,33 +138,13 @@ static void intel_breadcrumbs_hangcheck(unsigned long data)
i915_queue_hangcheck(engine->i915);
}
-static unsigned long wait_timeout(void)
-{
- return round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
-}
-
-static void intel_breadcrumbs_fake_irq(unsigned long data)
-{
- struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
-
- /*
- * The timer persists in case we cannot enable interrupts,
- * or if we have previously seen seqno/interrupt incoherency
- * ("missed interrupt" syndrome). Here the worker will wake up
- * every jiffie in order to kick the oldest waiter to do the
- * coherent seqno check.
- */
- if (intel_engine_wakeup(engine))
- mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
-}
-
static void irq_enable(struct intel_engine_cs *engine)
{
/* Enabling the IRQ may miss the generation of the interrupt, but
* we still need to force the barrier before reading the seqno,
* just in case.
*/
- engine->breadcrumbs.irq_posted = true;
+ set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
/* Caller disables interrupts */
spin_lock(&engine->i915->irq_lock);
@@ -96,61 +158,123 @@ static void irq_disable(struct intel_engine_cs *engine)
spin_lock(&engine->i915->irq_lock);
engine->irq_disable(engine);
spin_unlock(&engine->i915->irq_lock);
+}
+
+void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ lockdep_assert_held(&b->irq_lock);
+ GEM_BUG_ON(b->irq_wait);
+
+ if (b->irq_enabled) {
+ irq_disable(engine);
+ b->irq_enabled = false;
+ }
- engine->breadcrumbs.irq_posted = false;
+ b->irq_armed = false;
}
-static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine =
- container_of(b, struct intel_engine_cs, breadcrumbs);
- struct drm_i915_private *i915 = engine->i915;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_wait *wait, *n, *first;
- assert_spin_locked(&b->lock);
- if (b->rpm_wakelock)
+ if (!b->irq_armed)
return;
- /* Since we are waiting on a request, the GPU should be busy
- * and should have its own rpm reference. For completeness,
- * record an rpm reference for ourselves to cover the
- * interrupt we unmask.
+ /* We only disarm the irq when we are idle (all requests completed),
+ * so if the bottom-half remains asleep, it missed the request
+ * completion.
*/
- intel_runtime_pm_get_noresume(i915);
- b->rpm_wakelock = true;
- /* No interrupts? Kick the waiter every jiffie! */
- if (intel_irqs_enabled(i915)) {
- if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
- irq_enable(engine);
- b->irq_enabled = true;
+ spin_lock_irq(&b->rb_lock);
+
+ spin_lock(&b->irq_lock);
+ first = fetch_and_zero(&b->irq_wait);
+ __intel_engine_disarm_breadcrumbs(engine);
+ spin_unlock(&b->irq_lock);
+
+ rbtree_postorder_for_each_entry_safe(wait, n, &b->waiters, node) {
+ RB_CLEAR_NODE(&wait->node);
+ if (wake_up_process(wait->tsk) && wait == first)
+ missed_breadcrumb(engine);
}
+ b->waiters = RB_ROOT;
- if (!b->irq_enabled ||
- test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
+ spin_unlock_irq(&b->rb_lock);
+}
+
+static bool use_fake_irq(const struct intel_breadcrumbs *b)
+{
+ const struct intel_engine_cs *engine =
+ container_of(b, struct intel_engine_cs, breadcrumbs);
+
+ if (!test_bit(engine->id, &engine->i915->gpu_error.missed_irq_rings))
+ return false;
+
+ /* Only start with the heavyweight fake irq timer if we have not
+ * seen any interrupts since enabling it the first time. If the
+ * interrupts are still arriving, it means we made a mistake in our
+ * engine->seqno_barrier(), a timing error that should be transient
+ * and unlikely to reoccur.
+ */
+ return atomic_read(&engine->irq_count) == b->hangcheck_interrupts;
+}
+
+static void enable_fake_irq(struct intel_breadcrumbs *b)
+{
+ /* Ensure we never sleep indefinitely */
+ if (!b->irq_enabled || use_fake_irq(b))
mod_timer(&b->fake_irq, jiffies + 1);
- } else {
- /* Ensure we never sleep indefinitely */
- GEM_BUG_ON(!time_after(b->timeout, jiffies));
- mod_timer(&b->hangcheck, b->timeout);
- }
+ else
+ mod_timer(&b->hangcheck, wait_timeout());
}
-static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
+static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
{
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
+ struct drm_i915_private *i915 = engine->i915;
- assert_spin_locked(&b->lock);
- if (!b->rpm_wakelock)
+ lockdep_assert_held(&b->irq_lock);
+ if (b->irq_armed)
return;
- if (b->irq_enabled) {
- irq_disable(engine);
- b->irq_enabled = false;
+ /* The breadcrumb irq will be disarmed on the interrupt after the
+ * waiters are signaled. This gives us a single interrupt window in
+ * which we can add a new waiter and avoid the cost of re-enabling
+ * the irq.
+ */
+ b->irq_armed = true;
+ GEM_BUG_ON(b->irq_enabled);
+
+ if (I915_SELFTEST_ONLY(b->mock)) {
+ /* For our mock objects we want to avoid interaction
+ * with the real hardware (which is not set up). So
+ * we simply pretend we have enabled the powerwell
+ * and the irq, and leave it up to the mock
+ * implementation to call intel_engine_wakeup()
+ * itself when it wants to simulate a user interrupt.
+ */
+ return;
}
- intel_runtime_pm_put(engine->i915);
- b->rpm_wakelock = false;
+ /* Since we are waiting on a request, the GPU should be busy
+ * and should have its own rpm reference. This is tracked by
+ * i915->gt.awake, so we can forgo holding our own wakref for the
+ * interrupt: before i915->gt.awake is released (when the driver is
+ * idle) we disarm the breadcrumbs.
+ */
+
+ /* No interrupts? Kick the waiter every jiffie! */
+ if (intel_irqs_enabled(i915)) {
+ if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
+ irq_enable(engine);
+ b->irq_enabled = true;
+ }
+
+ enable_fake_irq(b);
}
static inline struct intel_wait *to_wait(struct rb_node *node)
@@ -161,10 +285,16 @@ static inline struct intel_wait *to_wait(struct rb_node *node)
static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
struct intel_wait *wait)
{
- assert_spin_locked(&b->lock);
+ lockdep_assert_held(&b->rb_lock);
+ GEM_BUG_ON(b->irq_wait == wait);
/* This request is completed, so remove it from the tree, mark it as
- * complete, and *then* wake up the associated task.
+ * complete, and *then* wake up the associated task. N.B. when the
+ * task wakes up, it will find the empty rb_node, discern that it
+ * has already been removed from the tree and skip the serialisation
+ * of the b->rb_lock and b->irq_lock. This means that the destruction
+ * of the intel_wait is not serialised with the interrupt handler
+ * by the waiter - it must instead be serialised by the caller.
*/
rb_erase(&wait->node, &b->waiters);
RB_CLEAR_NODE(&wait->node);
@@ -172,6 +302,25 @@ static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
wake_up_process(wait->tsk); /* implicit smp_wmb() */
}
+static inline void __intel_breadcrumbs_next(struct intel_engine_cs *engine,
+ struct rb_node *next)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ spin_lock(&b->irq_lock);
+ GEM_BUG_ON(!b->irq_armed);
+ GEM_BUG_ON(!b->irq_wait);
+ b->irq_wait = to_wait(next);
+ spin_unlock(&b->irq_lock);
+
+ /* We always wake up the next waiter that takes over as the bottom-half
+ * as we may delegate not only the irq-seqno barrier to the next waiter
+ * but also the task of waking up concurrent waiters.
+ */
+ if (next)
+ wake_up_process(to_wait(next)->tsk);
+}
+
static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_wait *wait)
{
@@ -235,44 +384,10 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
}
rb_link_node(&wait->node, parent, p);
rb_insert_color(&wait->node, &b->waiters);
- GEM_BUG_ON(!first && !rcu_access_pointer(b->irq_seqno_bh));
-
- if (completed) {
- struct rb_node *next = rb_next(completed);
-
- GEM_BUG_ON(!next && !first);
- if (next && next != &wait->node) {
- GEM_BUG_ON(first);
- b->timeout = wait_timeout();
- b->first_wait = to_wait(next);
- rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
- /* As there is a delay between reading the current
- * seqno, processing the completed tasks and selecting
- * the next waiter, we may have missed the interrupt
- * and so need for the next bottom-half to wakeup.
- *
- * Also as we enable the IRQ, we may miss the
- * interrupt for that seqno, so we have to wake up
- * the next bottom-half in order to do a coherent check
- * in case the seqno passed.
- */
- __intel_breadcrumbs_enable_irq(b);
- if (READ_ONCE(b->irq_posted))
- wake_up_process(to_wait(next)->tsk);
- }
-
- do {
- struct intel_wait *crumb = to_wait(completed);
- completed = rb_prev(completed);
- __intel_breadcrumbs_finish(b, crumb);
- } while (completed);
- }
if (first) {
- GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
- b->timeout = wait_timeout();
- b->first_wait = wait;
- rcu_assign_pointer(b->irq_seqno_bh, wait->tsk);
+ spin_lock(&b->irq_lock);
+ b->irq_wait = wait;
/* After assigning ourselves as the new bottom-half, we must
* perform a cursory check to prevent a missed interrupt.
* Either we miss the interrupt whilst programming the hardware,
@@ -282,10 +397,31 @@ static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
* and so we miss the wake up.
*/
__intel_breadcrumbs_enable_irq(b);
+ spin_unlock(&b->irq_lock);
}
- GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh));
- GEM_BUG_ON(!b->first_wait);
- GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
+
+ if (completed) {
+ /* Advance the bottom-half (b->irq_wait) before we wake up
+ * the waiters who may scribble over their intel_wait
+ * just as the interrupt handler is dereferencing it via
+ * b->irq_wait.
+ */
+ if (!first) {
+ struct rb_node *next = rb_next(completed);
+ GEM_BUG_ON(next == &wait->node);
+ __intel_breadcrumbs_next(engine, next);
+ }
+
+ do {
+ struct intel_wait *crumb = to_wait(completed);
+ completed = rb_prev(completed);
+ __intel_breadcrumbs_finish(b, crumb);
+ } while (completed);
+ }
+
+ GEM_BUG_ON(!b->irq_wait);
+ GEM_BUG_ON(!b->irq_armed);
+ GEM_BUG_ON(rb_first(&b->waiters) != &b->irq_wait->node);
return first;
}
@@ -296,9 +432,9 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
struct intel_breadcrumbs *b = &engine->breadcrumbs;
bool first;
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
first = __intel_engine_add_wait(engine, wait);
- spin_unlock_irq(&b->lock);
+ spin_unlock_irq(&b->rb_lock);
return first;
}
@@ -317,29 +453,20 @@ static inline int wakeup_priority(struct intel_breadcrumbs *b,
return tsk->prio;
}
-void intel_engine_remove_wait(struct intel_engine_cs *engine,
- struct intel_wait *wait)
+static void __intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
- /* Quick check to see if this waiter was already decoupled from
- * the tree by the bottom-half to avoid contention on the spinlock
- * by the herd.
- */
- if (RB_EMPTY_NODE(&wait->node))
- return;
-
- spin_lock_irq(&b->lock);
+ lockdep_assert_held(&b->rb_lock);
if (RB_EMPTY_NODE(&wait->node))
- goto out_unlock;
+ goto out;
- if (b->first_wait == wait) {
+ if (b->irq_wait == wait) {
const int priority = wakeup_priority(b, wait->tsk);
struct rb_node *next;
- GEM_BUG_ON(rcu_access_pointer(b->irq_seqno_bh) != wait->tsk);
-
/* We are the current bottom-half. Find the next candidate,
* the first waiter in the queue on the remaining oldest
* request. As multiple seqnos may complete in the time it
@@ -372,25 +499,7 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
}
}
- if (next) {
- /* In our haste, we may have completed the first waiter
- * before we enabled the interrupt. Do so now as we
- * have a second waiter for a future seqno. Afterwards,
- * we have to wake up that waiter in case we missed
- * the interrupt, or if we have to handle an
- * exception rather than a seqno completion.
- */
- b->timeout = wait_timeout();
- b->first_wait = to_wait(next);
- rcu_assign_pointer(b->irq_seqno_bh, b->first_wait->tsk);
- if (b->first_wait->seqno != wait->seqno)
- __intel_breadcrumbs_enable_irq(b);
- wake_up_process(b->first_wait->tsk);
- } else {
- b->first_wait = NULL;
- rcu_assign_pointer(b->irq_seqno_bh, NULL);
- __intel_breadcrumbs_disable_irq(b);
- }
+ __intel_breadcrumbs_next(engine, next);
} else {
GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
}
@@ -398,15 +507,37 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
-out_unlock:
- GEM_BUG_ON(b->first_wait == wait);
+out:
+ GEM_BUG_ON(b->irq_wait == wait);
GEM_BUG_ON(rb_first(&b->waiters) !=
- (b->first_wait ? &b->first_wait->node : NULL));
- GEM_BUG_ON(!rcu_access_pointer(b->irq_seqno_bh) ^ RB_EMPTY_ROOT(&b->waiters));
- spin_unlock_irq(&b->lock);
+ (b->irq_wait ? &b->irq_wait->node : NULL));
+}
+
+void intel_engine_remove_wait(struct intel_engine_cs *engine,
+ struct intel_wait *wait)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ /* Quick check to see if this waiter was already decoupled from
+ * the tree by the bottom-half to avoid contention on the spinlock
+ * by the herd.
+ */
+ if (RB_EMPTY_NODE(&wait->node)) {
+ GEM_BUG_ON(READ_ONCE(b->irq_wait) == wait);
+ return;
+ }
+
+ spin_lock_irq(&b->rb_lock);
+ __intel_engine_remove_wait(engine, wait);
+ spin_unlock_irq(&b->rb_lock);
+}
+
+static bool signal_valid(const struct drm_i915_gem_request *request)
+{
+ return intel_wait_check_request(&request->signaling.wait, request);
}
-static bool signal_complete(struct drm_i915_gem_request *request)
+static bool signal_complete(const struct drm_i915_gem_request *request)
{
if (!request)
return false;
@@ -415,7 +546,7 @@ static bool signal_complete(struct drm_i915_gem_request *request)
* signalled that this wait is already completed.
*/
if (intel_wait_complete(&request->signaling.wait))
- return true;
+ return signal_valid(request);
/* Carefully check if the request is complete, giving time for the
* seqno to be visible or if the GPU hung.
@@ -458,40 +589,62 @@ static int intel_breadcrumbs_signaler(void *arg)
* need to wait for a new interrupt from the GPU or for
* a new client.
*/
- request = READ_ONCE(b->first_signal);
+ rcu_read_lock();
+ request = rcu_dereference(b->first_signal);
+ if (request)
+ request = i915_gem_request_get_rcu(request);
+ rcu_read_unlock();
if (signal_complete(request)) {
- /* Wake up all other completed waiters and select the
- * next bottom-half for the next user interrupt.
- */
- intel_engine_remove_wait(engine,
- &request->signaling.wait);
-
local_bh_disable();
dma_fence_signal(&request->fence);
local_bh_enable(); /* kick start the tasklets */
+ spin_lock_irq(&b->rb_lock);
+
+ /* Wake up all other completed waiters and select the
+ * next bottom-half for the next user interrupt.
+ */
+ __intel_engine_remove_wait(engine,
+ &request->signaling.wait);
+
/* Find the next oldest signal. Note that as we have
* not been holding the lock, another client may
* have installed an even older signal than the one
* we just completed - so double check we are still
* the oldest before picking the next one.
*/
- spin_lock_irq(&b->lock);
- if (request == b->first_signal) {
+ if (request == rcu_access_pointer(b->first_signal)) {
struct rb_node *rb =
rb_next(&request->signaling.node);
- b->first_signal = rb ? to_signaler(rb) : NULL;
+ rcu_assign_pointer(b->first_signal,
+ rb ? to_signaler(rb) : NULL);
}
rb_erase(&request->signaling.node, &b->signals);
- spin_unlock_irq(&b->lock);
+ RB_CLEAR_NODE(&request->signaling.node);
+
+ spin_unlock_irq(&b->rb_lock);
i915_gem_request_put(request);
} else {
- if (kthread_should_stop())
+ DEFINE_WAIT(exec);
+
+ if (kthread_should_stop()) {
+ GEM_BUG_ON(request);
break;
+ }
+
+ if (request)
+ add_wait_queue(&request->execute, &exec);
schedule();
+
+ if (request)
+ remove_wait_queue(&request->execute, &exec);
+
+ if (kthread_should_park())
+ kthread_parkme();
}
+ i915_gem_request_put(request);
} while (1);
__set_current_state(TASK_RUNNING);
@@ -504,24 +657,29 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct rb_node *parent, **p;
bool first, wakeup;
+ u32 seqno;
/* Note that we may be called from an interrupt handler on another
* device (e.g. nouveau signaling a fence completion causing us
* to submit a request, and so enable signaling). As such,
- * we need to make sure that all other users of b->lock protect
+ * we need to make sure that all other users of b->rb_lock protect
* against interrupts, i.e. use spin_lock_irqsave.
*/
/* locked by dma_fence_enable_sw_signaling() (irqsafe fence->lock) */
- assert_spin_locked(&request->lock);
- if (!request->global_seqno)
+ GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&request->lock);
+
+ seqno = i915_gem_request_global_seqno(request);
+ if (!seqno)
return;
request->signaling.wait.tsk = b->signaler;
- request->signaling.wait.seqno = request->global_seqno;
+ request->signaling.wait.request = request;
+ request->signaling.wait.seqno = seqno;
i915_gem_request_get(request);
- spin_lock(&b->lock);
+ spin_lock(&b->rb_lock);
/* First add ourselves into the list of waiters, but register our
* bottom-half as the signaller thread. As per usual, only the oldest
@@ -542,8 +700,8 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
p = &b->signals.rb_node;
while (*p) {
parent = *p;
- if (i915_seqno_passed(request->global_seqno,
- to_signaler(parent)->global_seqno)) {
+ if (i915_seqno_passed(seqno,
+ to_signaler(parent)->signaling.wait.seqno)) {
p = &parent->rb_right;
first = false;
} else {
@@ -553,20 +711,52 @@ void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
rb_link_node(&request->signaling.node, parent, p);
rb_insert_color(&request->signaling.node, &b->signals);
if (first)
- smp_store_mb(b->first_signal, request);
+ rcu_assign_pointer(b->first_signal, request);
- spin_unlock(&b->lock);
+ spin_unlock(&b->rb_lock);
if (wakeup)
wake_up_process(b->signaler);
}
+void intel_engine_cancel_signaling(struct drm_i915_gem_request *request)
+{
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ GEM_BUG_ON(!irqs_disabled());
+ lockdep_assert_held(&request->lock);
+ GEM_BUG_ON(!request->signaling.wait.seqno);
+
+ spin_lock(&b->rb_lock);
+
+ if (!RB_EMPTY_NODE(&request->signaling.node)) {
+ if (request == rcu_access_pointer(b->first_signal)) {
+ struct rb_node *rb =
+ rb_next(&request->signaling.node);
+ rcu_assign_pointer(b->first_signal,
+ rb ? to_signaler(rb) : NULL);
+ }
+ rb_erase(&request->signaling.node, &b->signals);
+ RB_CLEAR_NODE(&request->signaling.node);
+ i915_gem_request_put(request);
+ }
+
+ __intel_engine_remove_wait(engine, &request->signaling.wait);
+
+ spin_unlock(&b->rb_lock);
+
+ request->signaling.wait.seqno = 0;
+}
+
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
{
struct intel_breadcrumbs *b = &engine->breadcrumbs;
struct task_struct *tsk;
- spin_lock_init(&b->lock);
+ spin_lock_init(&b->rb_lock);
+ spin_lock_init(&b->irq_lock);
+
setup_timer(&b->fake_irq,
intel_breadcrumbs_fake_irq,
(unsigned long)engine);
@@ -604,20 +794,26 @@ void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
cancel_fake_irq(engine);
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->irq_lock);
- __intel_breadcrumbs_disable_irq(b);
- if (intel_engine_has_waiter(engine)) {
- b->timeout = wait_timeout();
- __intel_breadcrumbs_enable_irq(b);
- if (READ_ONCE(b->irq_posted))
- wake_up_process(b->first_wait->tsk);
- } else {
- /* sanitize the IMR and unmask any auxiliary interrupts */
+ if (b->irq_enabled)
+ irq_enable(engine);
+ else
irq_disable(engine);
- }
- spin_unlock_irq(&b->lock);
+ /* We set the IRQ_BREADCRUMB bit when we enable the irq presuming the
+ * GPU is active and may have already executed the MI_USER_INTERRUPT
+ * before the CPU is ready to receive. However, the engine is currently
+ * idle (we haven't started it yet), so there is no possibility of a
+ * missed interrupt as we enabled the irq, and we can therefore clear
+ * the immediate wakeup (until a real interrupt arrives for the waiter).
+ */
+ clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
+
+ if (b->irq_armed)
+ enable_fake_irq(b);
+
+ spin_unlock_irq(&b->irq_lock);
}
void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
@@ -625,9 +821,9 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
struct intel_breadcrumbs *b = &engine->breadcrumbs;
/* The engines should be idle and all requests accounted for! */
- WARN_ON(READ_ONCE(b->first_wait));
+ WARN_ON(READ_ONCE(b->irq_wait));
WARN_ON(!RB_EMPTY_ROOT(&b->waiters));
- WARN_ON(READ_ONCE(b->first_signal));
+ WARN_ON(rcu_access_pointer(b->first_signal));
WARN_ON(!RB_EMPTY_ROOT(&b->signals));
if (!IS_ERR_OR_NULL(b->signaler))
@@ -636,29 +832,28 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
cancel_fake_irq(engine);
}
-unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915)
+bool intel_breadcrumbs_busy(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- unsigned int mask = 0;
-
- for_each_engine(engine, i915, id) {
- struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ bool busy = false;
- spin_lock_irq(&b->lock);
+ spin_lock_irq(&b->rb_lock);
- if (b->first_wait) {
- wake_up_process(b->first_wait->tsk);
- mask |= intel_engine_flag(engine);
- }
-
- if (b->first_signal) {
- wake_up_process(b->signaler);
- mask |= intel_engine_flag(engine);
- }
+ if (b->irq_wait) {
+ wake_up_process(b->irq_wait->tsk);
+ busy = true;
+ }
- spin_unlock_irq(&b->lock);
+ if (rcu_access_pointer(b->first_signal)) {
+ wake_up_process(b->signaler);
+ busy = true;
}
- return mask;
+ spin_unlock_irq(&b->rb_lock);
+
+ return busy;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_breadcrumbs.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
new file mode 100644
index 000000000000..c2cc33f3d888
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -0,0 +1,1891 @@
+/*
+ * Copyright © 2006-2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "intel_drv.h"
+
+/**
+ * DOC: CDCLK / RAWCLK
+ *
+ * The display engine uses several different clocks to do its work. There
+ * are two main clocks involved that aren't directly related to the actual
+ * pixel clock or any symbol/bit clock of the actual output port. These
+ * are the core display clock (CDCLK) and RAWCLK.
+ *
+ * CDCLK clocks most of the display pipe logic, and thus its frequency
+ * must be high enough to support the rate at which pixels are flowing
+ * through the pipes. Downscaling must also be accounted as that increases
+ * the effective pixel rate.
+ *
+ * On several platforms the CDCLK frequency can be changed dynamically
+ * to minimize power consumption for a given display configuration.
+ * Typically changes to the CDCLK frequency require all the display pipes
+ * to be shut down while the frequency is being changed.
+ *
+ * On SKL+ the DMC will toggle the CDCLK off/on during DC5/6 entry/exit.
+ * DMC will not change the active CDCLK frequency however, so that part
+ * will still be performed by the driver directly.
+ *
+ * RAWCLK is a fixed frequency clock, often used by various auxiliary
+ * blocks such as AUX CH or backlight PWM. Hence the only thing we
+ * really need to know about RAWCLK is its frequency so that various
+ * dividers can be programmed correctly.
+ */
+
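/*
 * Worked example (illustrative only, not part of the patch): a 3840x2160@60
 * mode has a pixel rate of roughly 3840 * 2160 * 60 / 1000 ~= 498000 kHz,
 * so CDCLK must be chosen at least that high (platform-specific guardbands
 * apply on top). Downscaling by a factor of two in one direction roughly
 * doubles the effective pixel rate and raises the minimum CDCLK accordingly.
 */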
+static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 133333;
+}
+
+static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 200000;
+}
+
+static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 266667;
+}
+
+static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 333333;
+}
+
+static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 400000;
+}
+
+static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->cdclk = 450000;
+}
+
+static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u16 hpllcc = 0;
+
+ /*
+ * 852GM/852GMV only supports 133 MHz and the HPLLCC
+ * encoding is different :(
+ * FIXME is this the right way to detect 852GM/852GMV?
+ */
+ if (pdev->revision == 0x1) {
+ cdclk_state->cdclk = 133333;
+ return;
+ }
+
+ pci_bus_read_config_word(pdev->bus,
+ PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
+
+ /* Assume that the hardware is in the high speed state. This
+ * should be the default.
+ */
+ switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
+ case GC_CLOCK_133_200:
+ case GC_CLOCK_133_200_2:
+ case GC_CLOCK_100_200:
+ cdclk_state->cdclk = 200000;
+ break;
+ case GC_CLOCK_166_250:
+ cdclk_state->cdclk = 250000;
+ break;
+ case GC_CLOCK_100_133:
+ cdclk_state->cdclk = 133333;
+ break;
+ case GC_CLOCK_133_266:
+ case GC_CLOCK_133_266_2:
+ case GC_CLOCK_166_266:
+ cdclk_state->cdclk = 266667;
+ break;
+ }
+}
+
+static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+ cdclk_state->cdclk = 133333;
+ return;
+ }
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_320_MHZ:
+ cdclk_state->cdclk = 333333;
+ break;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ cdclk_state->cdclk = 190000;
+ break;
+ }
+}
+
+static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
+ cdclk_state->cdclk = 133333;
+ return;
+ }
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_333_320_MHZ:
+ cdclk_state->cdclk = 320000;
+ break;
+ default:
+ case GC_DISPLAY_CLOCK_190_200_MHZ:
+ cdclk_state->cdclk = 200000;
+ break;
+ }
+}
+
+static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
+{
+ static const unsigned int blb_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 6400000,
+ };
+ static const unsigned int pnv_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ [4] = 2666667,
+ };
+ static const unsigned int cl_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 3333333,
+ [5] = 3566667,
+ [6] = 4266667,
+ };
+ static const unsigned int elk_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 4800000,
+ };
+ static const unsigned int ctg_vco[8] = {
+ [0] = 3200000,
+ [1] = 4000000,
+ [2] = 5333333,
+ [3] = 6400000,
+ [4] = 2666667,
+ [5] = 4266667,
+ };
+ const unsigned int *vco_table;
+ unsigned int vco;
+ uint8_t tmp = 0;
+
+ /* FIXME other chipsets? */
+ if (IS_GM45(dev_priv))
+ vco_table = ctg_vco;
+ else if (IS_G45(dev_priv))
+ vco_table = elk_vco;
+ else if (IS_I965GM(dev_priv))
+ vco_table = cl_vco;
+ else if (IS_PINEVIEW(dev_priv))
+ vco_table = pnv_vco;
+ else if (IS_G33(dev_priv))
+ vco_table = blb_vco;
+ else
+ return 0;
+
+ tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
+
+ vco = vco_table[tmp & 0x7];
+ if (vco == 0)
+ DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ else
+ DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+
+ return vco;
+}
+
+static void g33_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
+ static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
+ static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
+ static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel;
+ uint16_t tmp = 0;
+
+ cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 4) & 0x7;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (cdclk_state->vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 4800000:
+ div_table = div_4800;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
+ div_table[cdclk_sel]);
+ return;
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+ cdclk_state->vco, tmp);
+ cdclk_state->cdclk = 190476;
+}
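/*
 * Worked example (illustrative): with an HPLL VCO of 4000000 kHz and
 * cdclk_sel == 1, div_4000[1] == 12, giving
 * DIV_ROUND_CLOSEST(4000000, 12) == 333333 kHz.
 */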
+
+static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ u16 gcfgc = 0;
+
+ pci_read_config_word(pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ cdclk_state->cdclk = 266667;
+ break;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ cdclk_state->cdclk = 333333;
+ break;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ cdclk_state->cdclk = 444444;
+ break;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ cdclk_state->cdclk = 200000;
+ break;
+ default:
+ DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ cdclk_state->cdclk = 133333;
+ break;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ cdclk_state->cdclk = 166667;
+ break;
+ }
+}
+
+static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ static const uint8_t div_3200[] = { 16, 10, 8 };
+ static const uint8_t div_4000[] = { 20, 12, 10 };
+ static const uint8_t div_5333[] = { 24, 16, 14 };
+ const uint8_t *div_table;
+ unsigned int cdclk_sel;
+ uint16_t tmp = 0;
+
+ cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
+
+ if (cdclk_sel >= ARRAY_SIZE(div_3200))
+ goto fail;
+
+ switch (cdclk_state->vco) {
+ case 3200000:
+ div_table = div_3200;
+ break;
+ case 4000000:
+ div_table = div_4000;
+ break;
+ case 5333333:
+ div_table = div_5333;
+ break;
+ default:
+ goto fail;
+ }
+
+ cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
+ div_table[cdclk_sel]);
+ return;
+
+fail:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+ cdclk_state->vco, tmp);
+ cdclk_state->cdclk = 200000;
+}
+
+static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ unsigned int cdclk_sel;
+ uint16_t tmp = 0;
+
+ cdclk_state->vco = intel_hpll_vco(dev_priv);
+
+ pci_read_config_word(pdev, GCFGC, &tmp);
+
+ cdclk_sel = (tmp >> 12) & 0x1;
+
+ switch (cdclk_state->vco) {
+ case 2666667:
+ case 4000000:
+ case 5333333:
+ cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+ break;
+ case 3200000:
+ cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+ break;
+ default:
+ DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+ cdclk_state->vco, tmp);
+ cdclk_state->cdclk = 222222;
+ break;
+ }
+}
+
+static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ cdclk_state->cdclk = 800000;
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_state->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ cdclk_state->cdclk = 450000;
+ else if (IS_HSW_ULT(dev_priv))
+ cdclk_state->cdclk = 337500;
+ else
+ cdclk_state->cdclk = 540000;
+}
+
+static int vlv_calc_cdclk(struct drm_i915_private *dev_priv,
+ int max_pixclk)
+{
+ int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
+ 333333 : 320000;
+ int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
+
+ /*
+ * We seem to get an unstable or solid color picture at 200MHz.
+ * Not sure what's wrong. For now use 200MHz only when all pipes
+ * are off.
+ */
+ if (!IS_CHERRYVIEW(dev_priv) &&
+ max_pixclk > freq_320*limit/100)
+ return 400000;
+ else if (max_pixclk > 266667*limit/100)
+ return freq_320;
+ else if (max_pixclk > 0)
+ return 266667;
+ else
+ return 200000;
+}
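/*
 * Worked example (illustrative): on VLV (limit == 90) with freq_320 ==
 * 320000, a max_pixclk of 300000 kHz exceeds 320000 * 90 / 100 == 288000
 * and so selects 400000 kHz; a max_pixclk of 250000 kHz only exceeds
 * 266667 * 90 / 100 == 240000 and so selects freq_320.
 */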
+
+static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
+ cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ CCK_DISPLAY_CLOCK_CONTROL,
+ cdclk_state->vco);
+}
+
+static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
+{
+ unsigned int credits, default_credits;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ default_credits = PFI_CREDIT(12);
+ else
+ default_credits = PFI_CREDIT(8);
+
+ if (dev_priv->cdclk.hw.cdclk >= dev_priv->czclk_freq) {
+ /* CHV suggested value is 31 or 63 */
+ if (IS_CHERRYVIEW(dev_priv))
+ credits = PFI_CREDIT_63;
+ else
+ credits = PFI_CREDIT(15);
+ } else {
+ credits = default_credits;
+ }
+
+ /*
+ * WA - write default credits before re-programming
+ * FIXME: should we also set the resend bit here?
+ */
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ default_credits);
+
+ I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
+ credits | PFI_CREDIT_RESEND);
+
+ /*
+ * FIXME is this guaranteed to clear
+ * immediately or should we poll for it?
+ */
+ WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+}
+
+static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ int cdclk = cdclk_state->cdclk;
+ u32 val, cmd;
+
+ if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
+ cmd = 2;
+ else if (cdclk == 266667)
+ cmd = 1;
+ else
+ cmd = 0;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK;
+ val |= (cmd << DSPFREQGUAR_SHIFT);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ mutex_lock(&dev_priv->sb_lock);
+
+ if (cdclk == 400000) {
+ u32 divider;
+
+ divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1,
+ cdclk) - 1;
+
+ /* adjust cdclk divider */
+ val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
+ val &= ~CCK_FREQUENCY_VALUES;
+ val |= divider;
+ vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
+
+ if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
+ CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ 50))
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+
+ /* adjust self-refresh exit latency value */
+ val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
+ val &= ~0x7f;
+
+ /*
+ * For high bandwidth configs, we set a higher latency in the bunit
+ * so that the core display fetch happens in time to avoid underruns.
+ */
+ if (cdclk == 400000)
+ val |= 4500 / 250; /* 4.5 usec */
+ else
+ val |= 3000 / 250; /* 3.0 usec */
+ vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
+
+ mutex_unlock(&dev_priv->sb_lock);
+
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
+}
+
+static void chv_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ int cdclk = cdclk_state->cdclk;
+ u32 val, cmd;
+
+ switch (cdclk) {
+ case 333333:
+ case 320000:
+ case 266667:
+ case 200000:
+ break;
+ default:
+ MISSING_CASE(cdclk);
+ return;
+ }
+
+ /*
+ * Specs are full of misinformation, but testing on actual
+ * hardware has shown that we just need to write the desired
+ * CCK divider into the Punit register.
+ */
+ cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+ val &= ~DSPFREQGUAR_MASK_CHV;
+ val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
+ vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
+ DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
+ 50)) {
+ DRM_ERROR("timed out waiting for CDclk change\n");
+ }
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev_priv);
+
+ vlv_program_pfi_credits(dev_priv);
+}
+
+static int bdw_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+}
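/*
 * Worked example (illustrative): bdw_calc_cdclk(400000) falls in the
 * (337500, 450000] bucket and returns 450000 kHz; anything at or below
 * 337500 kHz returns the 337500 kHz minimum.
 */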
+
+static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+ uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ cdclk_state->cdclk = 800000;
+ else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_state->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_450)
+ cdclk_state->cdclk = 450000;
+ else if (freq == LCPLL_CLK_FREQ_54O_BDW)
+ cdclk_state->cdclk = 540000;
+ else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
+ cdclk_state->cdclk = 337500;
+ else
+ cdclk_state->cdclk = 675000;
+}
+
+static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ int cdclk = cdclk_state->cdclk;
+ uint32_t val, data;
+ int ret;
+
+ if (WARN((I915_READ(LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
+ return;
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv,
+ BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("failed to inform pcode about cdclk change\n");
+ return;
+ }
+
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CLK_FREQ_MASK;
+
+ switch (cdclk) {
+ case 450000:
+ val |= LCPLL_CLK_FREQ_450;
+ data = 0;
+ break;
+ case 540000:
+ val |= LCPLL_CLK_FREQ_54O_BDW;
+ data = 1;
+ break;
+ case 337500:
+ val |= LCPLL_CLK_FREQ_337_5_BDW;
+ data = 2;
+ break;
+ case 675000:
+ val |= LCPLL_CLK_FREQ_675_BDW;
+ data = 3;
+ break;
+ default:
+ WARN(1, "invalid cdclk frequency\n");
+ return;
+ }
+
+ I915_WRITE(LCPLL_CTL, val);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
+ intel_update_cdclk(dev_priv);
+
+ WARN(cdclk != dev_priv->cdclk.hw.cdclk,
+ "cdclk requested %d kHz but got %d kHz\n",
+ cdclk, dev_priv->cdclk.hw.cdclk);
+}
+
+static int skl_calc_cdclk(int max_pixclk, int vco)
+{
+ if (vco == 8640000) {
+ if (max_pixclk > 540000)
+ return 617143;
+ else if (max_pixclk > 432000)
+ return 540000;
+ else if (max_pixclk > 308571)
+ return 432000;
+ else
+ return 308571;
+ } else {
+ if (max_pixclk > 540000)
+ return 675000;
+ else if (max_pixclk > 450000)
+ return 540000;
+ else if (max_pixclk > 337500)
+ return 450000;
+ else
+ return 337500;
+ }
+}
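/*
 * Worked example (illustrative): with the 8640 MHz VCO, a 500000 kHz
 * max_pixclk is above 432000 but not above 540000, so 540000 kHz is
 * selected; with the 8100 MHz VCO the same pixel clock lands in the
 * (450000, 540000] bucket and likewise selects 540000 kHz.
 */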
+
+static void skl_dpll0_update(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 val;
+
+ cdclk_state->ref = 24000;
+ cdclk_state->vco = 0;
+
+ val = I915_READ(LCPLL1_CTL);
+ if ((val & LCPLL_PLL_ENABLE) == 0)
+ return;
+
+ if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ return;
+
+ val = I915_READ(DPLL_CTRL1);
+
+ if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ return;
+
+ switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
+ cdclk_state->vco = 8100000;
+ break;
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
+ case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
+ cdclk_state->vco = 8640000;
+ break;
+ default:
+ MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ break;
+ }
+}
+
+static void skl_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 cdctl;
+
+ skl_dpll0_update(dev_priv, cdclk_state);
+
+ cdclk_state->cdclk = cdclk_state->ref;
+
+ if (cdclk_state->vco == 0)
+ return;
+
+ cdctl = I915_READ(CDCLK_CTL);
+
+ if (cdclk_state->vco == 8640000) {
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ cdclk_state->cdclk = 432000;
+ break;
+ case CDCLK_FREQ_337_308:
+ cdclk_state->cdclk = 308571;
+ break;
+ case CDCLK_FREQ_540:
+ cdclk_state->cdclk = 540000;
+ break;
+ case CDCLK_FREQ_675_617:
+ cdclk_state->cdclk = 617143;
+ break;
+ default:
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+ break;
+ }
+ } else {
+ switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+ case CDCLK_FREQ_450_432:
+ cdclk_state->cdclk = 450000;
+ break;
+ case CDCLK_FREQ_337_308:
+ cdclk_state->cdclk = 337500;
+ break;
+ case CDCLK_FREQ_540:
+ cdclk_state->cdclk = 540000;
+ break;
+ case CDCLK_FREQ_675_617:
+ cdclk_state->cdclk = 675000;
+ break;
+ default:
+ MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
+ break;
+ }
+ }
+}
+
+/* convert from kHz to x.1 binary fixed point MHz (0.5 MHz units) with a -1 MHz offset */
+static int skl_cdclk_decimal(int cdclk)
+{
+ return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
+}
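/*
 * Worked example (illustrative): skl_cdclk_decimal(337500) ==
 * DIV_ROUND_CLOSEST(336500, 500) == 673, i.e. 336.5 MHz (cdclk - 1 MHz)
 * expressed in 0.5 MHz units, matching the register's x.1 fixed point
 * encoding.
 */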
+
+static void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv,
+ int vco)
+{
+ bool changed = dev_priv->skl_preferred_vco_freq != vco;
+
+ dev_priv->skl_preferred_vco_freq = vco;
+
+ if (changed)
+ intel_update_max_cdclk(dev_priv);
+}
+
+static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int min_cdclk = skl_calc_cdclk(0, vco);
+ u32 val;
+
+ WARN_ON(vco != 8100000 && vco != 8640000);
+
+ /* select the minimum CDCLK before enabling DPLL 0 */
+ val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
+ I915_WRITE(CDCLK_CTL, val);
+ POSTING_READ(CDCLK_CTL);
+
+ /*
+ * We always enable DPLL0 with the lowest link rate possible, but still
+ * taking into account the VCO required to operate the eDP panel at the
+ * desired frequency. The usual DP link rates operate with a VCO of
+ * 8100 MHz while the eDP 1.4 alternate link rates need a VCO of 8640 MHz.
+ * The modeset code is responsible for the selection of the exact link
+ * rate later on, with the constraint of choosing a frequency that
+ * works with vco.
+ */
+ val = I915_READ(DPLL_CTRL1);
+
+ val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
+ val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
+ if (vco == 8640000)
+ val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
+ SKL_DPLL0);
+ else
+ val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
+ SKL_DPLL0);
+
+ I915_WRITE(DPLL_CTRL1, val);
+ POSTING_READ(DPLL_CTRL1);
+
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
+ 5))
+ DRM_ERROR("DPLL0 not locked\n");
+
+ dev_priv->cdclk.hw.vco = vco;
+
+ /* We'll want to keep using the current vco from now on. */
+ skl_set_preferred_cdclk_vco(dev_priv, vco);
+}
+
+static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ if (intel_wait_for_register(dev_priv,
+ LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("Couldn't disable DPLL0\n");
+
+ dev_priv->cdclk.hw.vco = 0;
+}
+
+static void skl_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ int cdclk = cdclk_state->cdclk;
+ int vco = cdclk_state->vco;
+ u32 freq_select, pcu_ack;
+ int ret;
+
+ WARN_ON((cdclk == 24000) != (vco == 0));
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ if (ret) {
+ DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+ ret);
+ return;
+ }
+
+ /* set CDCLK_CTL */
+ switch (cdclk) {
+ case 450000:
+ case 432000:
+ freq_select = CDCLK_FREQ_450_432;
+ pcu_ack = 1;
+ break;
+ case 540000:
+ freq_select = CDCLK_FREQ_540;
+ pcu_ack = 2;
+ break;
+ case 308571:
+ case 337500:
+ default:
+ freq_select = CDCLK_FREQ_337_308;
+ pcu_ack = 0;
+ break;
+ case 617143:
+ case 675000:
+ freq_select = CDCLK_FREQ_675_617;
+ pcu_ack = 3;
+ break;
+ }
+
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ skl_dpll0_disable(dev_priv);
+
+ if (dev_priv->cdclk.hw.vco != vco)
+ skl_dpll0_enable(dev_priv, vco);
+
+ I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
+ POSTING_READ(CDCLK_CTL);
+
+ /* inform PCU of the change */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ intel_update_cdclk(dev_priv);
+}
+
+static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+ uint32_t cdctl, expected;
+
+ /*
+ * Check if the pre-os initialized the display.
+ * The SWF18 scratchpad register is set by the pre-os and can be used
+ * by the OS drivers to check the status.
+ */
+ if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ goto sanitize;
+
+ intel_update_cdclk(dev_priv);
+ /* Is PLL enabled and locked ? */
+ if (dev_priv->cdclk.hw.vco == 0 ||
+ dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
+ goto sanitize;
+
+ /* DPLL okay; verify the cdclock
+ *
+ * We have noticed in some instances that the frequency selection is
+ * correct but the decimal part is programmed wrong by the BIOS when
+ * the pre-os does not enable the display. Verify that as well.
+ */
+ cdctl = I915_READ(CDCLK_CTL);
+ expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
+
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
+}
+
+/**
+ * skl_init_cdclk - Initialize CDCLK on SKL
+ * @dev_priv: i915 device
+ *
+ * Initialize CDCLK for SKL and derivatives. This is generally
+ * done only during the display core initialization sequence,
+ * after which the DMC will take care of turning CDCLK off/on
+ * as needed.
+ */
+void skl_init_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state;
+
+ skl_sanitize_cdclk(dev_priv);
+
+ if (dev_priv->cdclk.hw.cdclk != 0 &&
+ dev_priv->cdclk.hw.vco != 0) {
+ /*
+ * Use the current vco as our initial
+ * guess as to what the preferred vco is.
+ */
+ if (dev_priv->skl_preferred_vco_freq == 0)
+ skl_set_preferred_cdclk_vco(dev_priv,
+ dev_priv->cdclk.hw.vco);
+ return;
+ }
+
+ cdclk_state = dev_priv->cdclk.hw;
+
+ cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
+ if (cdclk_state.vco == 0)
+ cdclk_state.vco = 8100000;
+ cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
+
+ skl_set_cdclk(dev_priv, &cdclk_state);
+}
+
+/**
+ * skl_uninit_cdclk - Uninitialize CDCLK on SKL
+ * @dev_priv: i915 device
+ *
+ * Uninitialize CDCLK for SKL and derivatives. This is done only
+ * during the display core uninitialization sequence.
+ */
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+ cdclk_state.cdclk = cdclk_state.ref;
+ cdclk_state.vco = 0;
+
+ skl_set_cdclk(dev_priv, &cdclk_state);
+}
+
+static int bxt_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 576000)
+ return 624000;
+ else if (max_pixclk > 384000)
+ return 576000;
+ else if (max_pixclk > 288000)
+ return 384000;
+ else if (max_pixclk > 144000)
+ return 288000;
+ else
+ return 144000;
+}
+
+static int glk_calc_cdclk(int max_pixclk)
+{
+ if (max_pixclk > 2 * 158400)
+ return 316800;
+ else if (max_pixclk > 2 * 79200)
+ return 158400;
+ else
+ return 79200;
+}
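/*
 * Worked examples (illustrative): bxt_calc_cdclk(300000) is above 288000
 * and returns 384000 kHz. glk_calc_cdclk(200000) is above 2 * 79200 but
 * not above 2 * 158400 and returns 158400 kHz; the factor of two reflects
 * the two-pixels-per-CDCLK capability assumed for GLK here.
 */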
+
+static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk.hw.ref)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ case 144000:
+ case 288000:
+ case 384000:
+ case 576000:
+ ratio = 60;
+ break;
+ case 624000:
+ ratio = 65;
+ break;
+ }
+
+ return dev_priv->cdclk.hw.ref * ratio;
+}
+
+static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
+{
+ int ratio;
+
+ if (cdclk == dev_priv->cdclk.hw.ref)
+ return 0;
+
+ switch (cdclk) {
+ default:
+ MISSING_CASE(cdclk);
+ case 79200:
+ case 158400:
+ case 316800:
+ ratio = 33;
+ break;
+ }
+
+ return dev_priv->cdclk.hw.ref * ratio;
+}
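/*
 * Worked example (illustrative): with the 19200 kHz reference, a BXT ratio
 * of 60 gives a VCO of 19200 * 60 == 1152000 kHz, from which
 * cdclk == vco / 2 / div yields e.g. 576000 kHz (div 1) or 288000 kHz
 * (div 2). The GLK ratio of 33 gives 633600 kHz, i.e. 316800 kHz at div 1.
 */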
+
+static void bxt_de_pll_update(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 val;
+
+ cdclk_state->ref = 19200;
+ cdclk_state->vco = 0;
+
+ val = I915_READ(BXT_DE_PLL_ENABLE);
+ if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
+ return;
+
+ if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
+ return;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref;
+}
+
+static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_state)
+{
+ u32 divider;
+ int div;
+
+ bxt_de_pll_update(dev_priv, cdclk_state);
+
+ cdclk_state->cdclk = cdclk_state->ref;
+
+ if (cdclk_state->vco == 0)
+ return;
+
+ divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+
+ switch (divider) {
+ case BXT_CDCLK_CD2X_DIV_SEL_1:
+ div = 2;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_1_5:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ div = 3;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_2:
+ div = 4;
+ break;
+ case BXT_CDCLK_CD2X_DIV_SEL_4:
+ div = 8;
+ break;
+ default:
+ MISSING_CASE(divider);
+ return;
+ }
+
+ cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+}
+
+static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL unlock\n");
+
+ dev_priv->cdclk.hw.vco = 0;
+}
+
+static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
+{
+ int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
+ u32 val;
+
+ val = I915_READ(BXT_DE_PLL_CTL);
+ val &= ~BXT_DE_PLL_RATIO_MASK;
+ val |= BXT_DE_PLL_RATIO(ratio);
+ I915_WRITE(BXT_DE_PLL_CTL, val);
+
+ I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+
+ /* Timeout 200us */
+ if (intel_wait_for_register(dev_priv,
+ BXT_DE_PLL_ENABLE,
+ BXT_DE_PLL_LOCK,
+ BXT_DE_PLL_LOCK,
+ 1))
+ DRM_ERROR("timeout waiting for DE PLL lock\n");
+
+ dev_priv->cdclk.hw.vco = vco;
+}
+
+static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ int cdclk = cdclk_state->cdclk;
+ int vco = cdclk_state->vco;
+ u32 val, divider;
+ int ret;
+
+ /* cdclk = vco / 2 / div{1,1.5,2,4} */
+ switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
+ case 8:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_4;
+ break;
+ case 4:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_2;
+ break;
+ case 3:
+ WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
+ break;
+ case 2:
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ break;
+ default:
+ WARN_ON(cdclk != dev_priv->cdclk.hw.ref);
+ WARN_ON(vco != 0);
+
+ divider = BXT_CDCLK_CD2X_DIV_SEL_1;
+ break;
+ }
+
+ /* Inform power controller of upcoming frequency change */
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ if (dev_priv->cdclk.hw.vco != 0 &&
+ dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_disable(dev_priv);
+
+ if (dev_priv->cdclk.hw.vco != vco)
+ bxt_de_pll_enable(dev_priv, vco);
+
+ val = divider | skl_cdclk_decimal(cdclk);
+ /*
+ * FIXME if only the cd2x divider needs changing, it could be done
+ * without shutting off the pipe (if only one pipe is active).
+ */
+ val |= BXT_CDCLK_CD2X_PIPE_NONE;
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (cdclk >= 500000)
+ val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+ I915_WRITE(CDCLK_CTL, val);
+
+ mutex_lock(&dev_priv->rps.hw_lock);
+ ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ DIV_ROUND_UP(cdclk, 25000));
+ mutex_unlock(&dev_priv->rps.hw_lock);
+
+ if (ret) {
+ DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
+ return;
+ }
+
+ intel_update_cdclk(dev_priv);
+}
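+
+/*
+ * A worked example of the sequence above: for cdclk = 288000 kHz with
+ * vco = 1152000 kHz, DIV_ROUND_CLOSEST(vco, cdclk) = 4 selects
+ * BXT_CDCLK_CD2X_DIV_SEL_2, and the final pcode write requests
+ * DIV_ROUND_UP(288000, 25000) = 12, i.e. the new frequency in 25 MHz
+ * units.
+ */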
+
+static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
+{
+ u32 cdctl, expected;
+
+ intel_update_cdclk(dev_priv);
+
+ if (dev_priv->cdclk.hw.vco == 0 ||
+ dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.ref)
+ goto sanitize;
+
+	/* DPLL okay; verify the cdclk
+	 *
+	 * Some BIOS versions leave an incorrect decimal frequency value and
+	 * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
+ * so sanitize this register.
+ */
+ cdctl = I915_READ(CDCLK_CTL);
+ /*
+	 * Let's ignore the pipe field, since the BIOS could have configured
+	 * the divider either synced to an active pipe or asynchronously
+	 * (PIPE_NONE).
+ */
+ cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
+
+ expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
+ skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
+ /*
+ * Disable SSA Precharge when CD clock frequency < 500 MHz,
+ * enable otherwise.
+ */
+ if (dev_priv->cdclk.hw.cdclk >= 500000)
+ expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
+
+ if (cdctl == expected)
+ /* All well; nothing to sanitize */
+ return;
+
+sanitize:
+ DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+
+ /* force cdclk programming */
+ dev_priv->cdclk.hw.cdclk = 0;
+
+ /* force full PLL disable + enable */
+ dev_priv->cdclk.hw.vco = -1;
+}
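+
+/*
+ * Note the two sentinels above: cdclk = 0 never matches a real divider
+ * decode, so bxt_set_cdclk() is guaranteed to rewrite CDCLK_CTL, and
+ * vco = -1 matches neither 0 nor any valid PLL frequency, forcing the
+ * full DE PLL disable + enable path.
+ */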
+
+/**
+ * bxt_init_cdclk - Initialize CDCLK on BXT
+ * @dev_priv: i915 device
+ *
+ * Initialize CDCLK for BXT and derivatives. This is generally
+ * done only during the display core initialization sequence,
+ * after which the DMC will take care of turning CDCLK off/on
+ * as needed.
+ */
+void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state;
+
+ bxt_sanitize_cdclk(dev_priv);
+
+ if (dev_priv->cdclk.hw.cdclk != 0 &&
+ dev_priv->cdclk.hw.vco != 0)
+ return;
+
+ cdclk_state = dev_priv->cdclk.hw;
+
+ /*
+ * FIXME:
+ * - The initial CDCLK needs to be read from VBT.
+	 *   Need to make this change once the VBT changes for BXT land.
+ */
+ if (IS_GEMINILAKE(dev_priv)) {
+ cdclk_state.cdclk = glk_calc_cdclk(0);
+ cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk);
+ } else {
+ cdclk_state.cdclk = bxt_calc_cdclk(0);
+ cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk);
+ }
+
+ bxt_set_cdclk(dev_priv, &cdclk_state);
+}
+
+/**
+ * bxt_uninit_cdclk - Uninitialize CDCLK on BXT
+ * @dev_priv: i915 device
+ *
+ * Uninitialize CDCLK for BXT and derivatives. This is done only
+ * during the display core uninitialization sequence.
+ */
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+
+ cdclk_state.cdclk = cdclk_state.ref;
+ cdclk_state.vco = 0;
+
+ bxt_set_cdclk(dev_priv, &cdclk_state);
+}
+
+/**
+ * intel_cdclk_state_compare - Determine if two CDCLK states differ
+ * @a: first CDCLK state
+ * @b: second CDCLK state
+ *
+ * Returns:
+ * True if the CDCLK states are identical, false if they differ.
+ */
+bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b)
+{
+ return memcmp(a, b, sizeof(*a)) == 0;
+}
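+
+/*
+ * A minimal usage sketch (hypothetical caller), assuming two state
+ * snapshots are at hand:
+ *
+ *	if (!intel_cdclk_state_compare(&old_state, &new_state))
+ *		intel_set_cdclk(dev_priv, &new_state);
+ *
+ * intel_set_cdclk() below performs the same comparison against the
+ * current hardware state, so redundant calls are harmless.
+ */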
+
+/**
+ * intel_set_cdclk - Push the CDCLK state to the hardware
+ * @dev_priv: i915 device
+ * @cdclk_state: new CDCLK state
+ *
+ * Program the hardware based on the passed in CDCLK state,
+ * if necessary.
+ */
+void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state)
+{
+ if (intel_cdclk_state_compare(&dev_priv->cdclk.hw, cdclk_state))
+ return;
+
+ if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+ return;
+
+ DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz, VCO %d kHz, ref %d kHz\n",
+ cdclk_state->cdclk, cdclk_state->vco,
+ cdclk_state->ref);
+
+ dev_priv->display.set_cdclk(dev_priv, cdclk_state);
+}
+
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+ int pixel_rate)
+{
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+ pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+ /* BSpec says "Do not use DisplayPort with CDCLK less than
+ * 432 MHz, audio enabled, port width x4, and link rate
+ * HBR2 (5.4 GHz), or else there may be audio corruption or
+ * screen corruption."
+ */
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ crtc_state->has_audio &&
+ crtc_state->port_clock >= 540000 &&
+ crtc_state->lane_count == 4)
+ pixel_rate = max(432000, pixel_rate);
+
+ return pixel_rate;
+}
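+
+/*
+ * For instance, a 400000 kHz pixel rate with IPS enabled on BDW becomes
+ * DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz, and the DP audio rule
+ * then raises any result below 432000 kHz to 432000 kHz when all the
+ * listed conditions (audio, x4, HBR2) hold.
+ */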
+
+/* compute the max pixel rate for the new configuration */
+static int intel_max_pixel_rate(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *cstate;
+ struct intel_crtc_state *crtc_state;
+ unsigned int max_pixel_rate = 0, i;
+ enum pipe pipe;
+
+ memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
+ sizeof(intel_state->min_pixclk));
+
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
+ int pixel_rate;
+
+ crtc_state = to_intel_crtc_state(cstate);
+ if (!crtc_state->base.enable) {
+ intel_state->min_pixclk[i] = 0;
+ continue;
+ }
+
+ pixel_rate = crtc_state->pixel_rate;
+
+ if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+ pixel_rate =
+ bdw_adjust_min_pipe_pixel_rate(crtc_state,
+ pixel_rate);
+
+ intel_state->min_pixclk[i] = pixel_rate;
+ }
+
+ for_each_pipe(dev_priv, pipe)
+ max_pixel_rate = max(intel_state->min_pixclk[pipe],
+ max_pixel_rate);
+
+ return max_pixel_rate;
+}
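+
+/*
+ * Note that min_pixclk[] is seeded from the device copy, so pipes not
+ * part of this atomic state keep their previous values; the maximum is
+ * then taken over all pipes, not just the ones in the update.
+ */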
+
+static int vlv_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ int max_pixclk = intel_max_pixel_rate(state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ int cdclk;
+
+ cdclk = vlv_calc_cdclk(dev_priv, max_pixclk);
+
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
+ }
+
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ cdclk = vlv_calc_cdclk(dev_priv, 0);
+
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual =
+ intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
+static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ int max_pixclk = intel_max_pixel_rate(state);
+ int cdclk;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = bdw_calc_cdclk(max_pixclk);
+
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
+ }
+
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ cdclk = bdw_calc_cdclk(0);
+
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual =
+ intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
+static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ const int max_pixclk = intel_max_pixel_rate(state);
+ int cdclk, vco;
+
+ vco = intel_state->cdclk.logical.vco;
+ if (!vco)
+ vco = dev_priv->skl_preferred_vco_freq;
+
+ /*
+ * FIXME should also account for plane ratio
+ * once 64bpp pixel formats are supported.
+ */
+ cdclk = skl_calc_cdclk(max_pixclk, vco);
+
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
+ }
+
+ intel_state->cdclk.logical.vco = vco;
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ cdclk = skl_calc_cdclk(0, vco);
+
+ intel_state->cdclk.actual.vco = vco;
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual =
+ intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
+static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->dev);
+ int max_pixclk = intel_max_pixel_rate(state);
+ struct intel_atomic_state *intel_state =
+ to_intel_atomic_state(state);
+ int cdclk, vco;
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ cdclk = glk_calc_cdclk(max_pixclk);
+ vco = glk_de_pll_vco(dev_priv, cdclk);
+ } else {
+ cdclk = bxt_calc_cdclk(max_pixclk);
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
+ }
+
+ if (cdclk > dev_priv->max_cdclk_freq) {
+ DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
+ cdclk, dev_priv->max_cdclk_freq);
+ return -EINVAL;
+ }
+
+ intel_state->cdclk.logical.vco = vco;
+ intel_state->cdclk.logical.cdclk = cdclk;
+
+ if (!intel_state->active_crtcs) {
+ if (IS_GEMINILAKE(dev_priv)) {
+ cdclk = glk_calc_cdclk(0);
+ vco = glk_de_pll_vco(dev_priv, cdclk);
+ } else {
+ cdclk = bxt_calc_cdclk(0);
+ vco = bxt_de_pll_vco(dev_priv, cdclk);
+ }
+
+ intel_state->cdclk.actual.vco = vco;
+ intel_state->cdclk.actual.cdclk = cdclk;
+ } else {
+ intel_state->cdclk.actual =
+ intel_state->cdclk.logical;
+ }
+
+ return 0;
+}
+
+static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
+{
+ int max_cdclk_freq = dev_priv->max_cdclk_freq;
+
+ if (IS_GEMINILAKE(dev_priv))
+ return 2 * max_cdclk_freq;
+ else if (INTEL_INFO(dev_priv)->gen >= 9 ||
+ IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+ return max_cdclk_freq;
+ else if (IS_CHERRYVIEW(dev_priv))
+ return max_cdclk_freq*95/100;
+ else if (INTEL_INFO(dev_priv)->gen < 4)
+ return 2*max_cdclk_freq*90/100;
+ else
+ return max_cdclk_freq*90/100;
+}
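+
+/*
+ * For example, Geminilake with a 316800 kHz max cdclk yields a
+ * 633600 kHz max dotclk (two pixels per CD clock), while Cherryview
+ * with 320000 kHz is derated to 320000 * 95 / 100 = 304000 kHz.
+ */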
+
+/**
+ * intel_update_max_cdclk - Determine the maximum supported CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the maximum CDCLK frequency the platform supports, and also
+ * derive the maximum dot clock frequency it allows.
+ */
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
+{
+ if (IS_GEN9_BC(dev_priv)) {
+ u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ int max_cdclk, vco;
+
+ vco = dev_priv->skl_preferred_vco_freq;
+ WARN_ON(vco != 8100000 && vco != 8640000);
+
+ /*
+ * Use the lower (vco 8640) cdclk values as a
+ * first guess. skl_calc_cdclk() will correct it
+ * if the preferred vco is 8100 instead.
+ */
+ if (limit == SKL_DFSM_CDCLK_LIMIT_675)
+ max_cdclk = 617143;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
+ max_cdclk = 540000;
+ else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
+ max_cdclk = 432000;
+ else
+ max_cdclk = 308571;
+
+ dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->max_cdclk_freq = 316800;
+ } else if (IS_BROXTON(dev_priv)) {
+ dev_priv->max_cdclk_freq = 624000;
+ } else if (IS_BROADWELL(dev_priv)) {
+ /*
+ * FIXME with extra cooling we can allow
+		 * 540 MHz for ULX and 675 MHz for ULT.
+		 * How can we know if extra cooling is
+		 * available? PCI ID, VBT, something else?
+ */
+ if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULX(dev_priv))
+ dev_priv->max_cdclk_freq = 450000;
+ else if (IS_BDW_ULT(dev_priv))
+ dev_priv->max_cdclk_freq = 540000;
+ else
+ dev_priv->max_cdclk_freq = 675000;
+ } else if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->max_cdclk_freq = 320000;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->max_cdclk_freq = 400000;
+ } else {
+ /* otherwise assume cdclk is fixed */
+ dev_priv->max_cdclk_freq = dev_priv->cdclk.hw.cdclk;
+ }
+
+ dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
+
+ DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
+
+ DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
+}
+
+/**
+ * intel_update_cdclk - Determine the current CDCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current CDCLK frequency.
+ */
+void intel_update_cdclk(struct drm_i915_private *dev_priv)
+{
+ dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+
+ DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
+ dev_priv->cdclk.hw.cdclk, dev_priv->cdclk.hw.vco,
+ dev_priv->cdclk.hw.ref);
+
+ /*
+ * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
+ * Programmng [sic] note: bit[9:2] should be programmed to the number
+ * of cdclk that generates 4MHz reference clock freq which is used to
+ * generate GMBus clock. This will vary with the cdclk freq.
+ */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ I915_WRITE(GMBUSFREQ_VLV,
+ DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+}
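+
+/*
+ * For example, with cdclk = 400000 kHz the write above programs
+ * GMBUSFREQ_VLV to DIV_ROUND_UP(400000, 1000) = 400, i.e. the CD clock
+ * in MHz, from which the hardware derives the 4 MHz GMBus reference.
+ */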
+
+static int pch_rawclk(struct drm_i915_private *dev_priv)
+{
+ return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+}
+
+static int vlv_hrawclk(struct drm_i915_private *dev_priv)
+{
+ /* RAWCLK_FREQ_VLV register updated from power well code */
+ return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
+ CCK_DISPLAY_REF_CLOCK_CONTROL);
+}
+
+static int g4x_hrawclk(struct drm_i915_private *dev_priv)
+{
+ uint32_t clkcfg;
+
+ /* hrawclock is 1/4 the FSB frequency */
+ clkcfg = I915_READ(CLKCFG);
+ switch (clkcfg & CLKCFG_FSB_MASK) {
+ case CLKCFG_FSB_400:
+ return 100000;
+ case CLKCFG_FSB_533:
+ return 133333;
+ case CLKCFG_FSB_667:
+ return 166667;
+ case CLKCFG_FSB_800:
+ return 200000;
+ case CLKCFG_FSB_1067:
+ return 266667;
+ case CLKCFG_FSB_1333:
+ return 333333;
+ /* these two are just a guess; one of them might be right */
+ case CLKCFG_FSB_1600:
+ case CLKCFG_FSB_1600_ALT:
+ return 400000;
+ default:
+ return 133333;
+ }
+}
+
+/**
+ * intel_update_rawclk - Determine the current RAWCLK frequency
+ * @dev_priv: i915 device
+ *
+ * Determine the current RAWCLK frequency. RAWCLK is a fixed-frequency
+ * clock, so this needs to be done only once.
+ */
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
+{
+ if (HAS_PCH_SPLIT(dev_priv))
+ dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+ else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
+ dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+ else
+ /* no rawclk on other platforms, or no need to know it */
+ return;
+
+ DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+}
+
+/**
+ * intel_init_cdclk_hooks - Initialize CDCLK-related modesetting hooks
+ * @dev_priv: i915 device
+ */
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
+{
+ if (IS_CHERRYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = chv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
+ } else if (IS_VALLEYVIEW(dev_priv)) {
+ dev_priv->display.set_cdclk = vlv_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ vlv_modeset_calc_cdclk;
+ } else if (IS_BROADWELL(dev_priv)) {
+ dev_priv->display.set_cdclk = bdw_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ bdw_modeset_calc_cdclk;
+ } else if (IS_GEN9_LP(dev_priv)) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ bxt_modeset_calc_cdclk;
+ } else if (IS_GEN9_BC(dev_priv)) {
+ dev_priv->display.set_cdclk = skl_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk =
+ skl_modeset_calc_cdclk;
+ }
+
+ if (IS_GEN9_BC(dev_priv))
+ dev_priv->display.get_cdclk = skl_get_cdclk;
+ else if (IS_GEN9_LP(dev_priv))
+ dev_priv->display.get_cdclk = bxt_get_cdclk;
+ else if (IS_BROADWELL(dev_priv))
+ dev_priv->display.get_cdclk = bdw_get_cdclk;
+ else if (IS_HASWELL(dev_priv))
+ dev_priv->display.get_cdclk = hsw_get_cdclk;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ dev_priv->display.get_cdclk = vlv_get_cdclk;
+ else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
+ dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+ else if (IS_GEN5(dev_priv))
+ dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
+ else if (IS_GM45(dev_priv))
+ dev_priv->display.get_cdclk = gm45_get_cdclk;
+ else if (IS_G45(dev_priv))
+ dev_priv->display.get_cdclk = g33_get_cdclk;
+ else if (IS_I965GM(dev_priv))
+ dev_priv->display.get_cdclk = i965gm_get_cdclk;
+ else if (IS_I965G(dev_priv))
+ dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+ else if (IS_PINEVIEW(dev_priv))
+ dev_priv->display.get_cdclk = pnv_get_cdclk;
+ else if (IS_G33(dev_priv))
+ dev_priv->display.get_cdclk = g33_get_cdclk;
+ else if (IS_I945GM(dev_priv))
+ dev_priv->display.get_cdclk = i945gm_get_cdclk;
+ else if (IS_I945G(dev_priv))
+ dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
+ else if (IS_I915GM(dev_priv))
+ dev_priv->display.get_cdclk = i915gm_get_cdclk;
+ else if (IS_I915G(dev_priv))
+ dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
+ else if (IS_I865G(dev_priv))
+ dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
+ else if (IS_I85X(dev_priv))
+ dev_priv->display.get_cdclk = i85x_get_cdclk;
+ else if (IS_I845G(dev_priv))
+ dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
+ else { /* 830 */
+ WARN(!IS_I830(dev_priv),
+ "Unknown platform. Assuming 133 MHz CDCLK\n");
+ dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+ }
+}
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index d81232b79f00..306c6b06b330 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -340,20 +340,12 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
hsw_enable_ips(intel_crtc);
}
-/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
-static void broadwell_load_luts(struct drm_crtc_state *state)
+static void bdw_load_degamma_lut(struct drm_crtc_state *state)
{
- struct drm_crtc *crtc = state->crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
+ struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
+ enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- if (crtc_state_is_legacy(state)) {
- haswell_load_luts(state);
- return;
- }
-
I915_WRITE(PREC_PAL_INDEX(pipe),
PAL_PREC_SPLIT_MODE | PAL_PREC_AUTO_INCREMENT);
@@ -377,6 +369,20 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
(v << 20) | (v << 10) | v);
}
}
+}
+
+static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
+ enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+ uint32_t i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+
+ WARN_ON(offset & ~PAL_PREC_INDEX_VALUE_MASK);
+
+ I915_WRITE(PREC_PAL_INDEX(pipe),
+ (offset ? PAL_PREC_SPLIT_MODE : 0) |
+ PAL_PREC_AUTO_INCREMENT |
+ offset);
if (state->gamma_lut) {
struct drm_color_lut *lut =
@@ -410,6 +416,23 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), (1 << 16) - 1);
I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), (1 << 16) - 1);
}
+}
+
+/* Loads the palette/gamma unit for the CRTC on Broadwell+. */
+static void broadwell_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
+ struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+ enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+
+ if (crtc_state_is_legacy(state)) {
+ haswell_load_luts(state);
+ return;
+ }
+
+ bdw_load_degamma_lut(state);
+ bdw_load_gamma_lut(state,
+ INTEL_INFO(dev_priv)->color.degamma_lut_size);
intel_state->gamma_mode = GAMMA_MODE_MODE_SPLIT;
I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_SPLIT);
@@ -422,6 +445,58 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
I915_WRITE(PREC_PAL_INDEX(pipe), 0);
}
+static void glk_load_degamma_lut(struct drm_crtc_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->crtc->dev);
+ enum pipe pipe = to_intel_crtc(state->crtc)->pipe;
+ const uint32_t lut_size = 33;
+ uint32_t i;
+
+ /*
+ * When setting the auto-increment bit, the hardware seems to
+ * ignore the index bits, so we need to reset it to index 0
+ * separately.
+ */
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
+ I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+
+ /*
+ * FIXME: The pipe degamma table in geminilake doesn't support
+ * different values per channel, so this just loads a linear table.
+ */
+ for (i = 0; i < lut_size; i++) {
+ uint32_t v = (i * (1 << 16)) / (lut_size - 1);
+
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+ }
+
+ /* Clamp values > 1.0. */
+ while (i++ < 35)
+ I915_WRITE(PRE_CSC_GAMC_DATA(pipe), (1 << 16));
+}
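+
+/*
+ * A worked example of the linear ramp above: with lut_size = 33, entry
+ * i = 16 is programmed to 16 * 65536 / 32 = 32768, i.e. 0.5 assuming
+ * the hardware's 0.16 fixed-point format, and entry 32 is exactly 1.0
+ * (65536), matching the clamp value written for the remaining entries.
+ */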
+
+static void glk_load_luts(struct drm_crtc_state *state)
+{
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
+ enum pipe pipe = to_intel_crtc(crtc)->pipe;
+
+ glk_load_degamma_lut(state);
+
+ if (crtc_state_is_legacy(state)) {
+ haswell_load_luts(state);
+ return;
+ }
+
+ bdw_load_gamma_lut(state, 0);
+
+ intel_state->gamma_mode = GAMMA_MODE_MODE_10BIT;
+ I915_WRITE(GAMMA_MODE(pipe), GAMMA_MODE_MODE_10BIT);
+ POSTING_READ(GAMMA_MODE(pipe));
+}
+
/* Loads the palette/gamma unit for the CRTC on CherryView. */
static void cherryview_load_luts(struct drm_crtc_state *state)
{
@@ -536,10 +611,13 @@ void intel_color_init(struct drm_crtc *crtc)
} else if (IS_HASWELL(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = haswell_load_luts;
- } else if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv) ||
- IS_BROXTON(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ } else if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv) ||
+ IS_BROXTON(dev_priv)) {
dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
dev_priv->display.load_luts = broadwell_load_luts;
+ } else if (IS_GEMINILAKE(dev_priv)) {
+ dev_priv->display.load_csc_matrix = i9xx_load_csc_matrix;
+ dev_priv->display.load_luts = glk_load_luts;
} else {
dev_priv->display.load_luts = i9xx_load_luts;
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 2bf5aca6e37c..8c82607294c6 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -69,12 +69,11 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_encoder_to_crt(encoder);
- enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
ret = false;
@@ -91,7 +90,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@@ -676,7 +675,6 @@ intel_crt_detect(struct drm_connector *connector, bool force)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
- enum intel_display_power_domain power_domain;
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
struct drm_modeset_acquire_ctx ctx;
@@ -689,8 +687,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (dmi_check_system(intel_spurious_crt_detect))
return connector_status_disconnected;
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_encoder->power_domain);
if (I915_HAS_HOTPLUG(dev_priv)) {
/* We can not rely on the HPD pin always being correctly wired
@@ -745,7 +742,7 @@ intel_crt_detect(struct drm_connector *connector, bool force)
drm_modeset_acquire_fini(&ctx);
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain);
return status;
}
@@ -761,12 +758,10 @@ static int intel_crt_get_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crt *crt = intel_attached_crt(connector);
struct intel_encoder *intel_encoder = &crt->base;
- enum intel_display_power_domain power_domain;
int ret;
struct i2c_adapter *i2c;
- power_domain = intel_display_port_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_encoder->power_domain);
i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->vbt.crt_ddc_pin);
ret = intel_crt_ddc_get_modes(connector, i2c);
@@ -778,7 +773,7 @@ static int intel_crt_get_modes(struct drm_connector *connector)
ret = intel_crt_ddc_get_modes(connector, i2c);
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_encoder->power_domain);
return ret;
}
@@ -904,6 +899,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
crt->adpa_reg = adpa_reg;
+ crt->base.power_domain = POWER_DOMAIN_PORT_CRT;
+
crt->base.compute_config = intel_crt_compute_config;
if (HAS_PCH_SPLIT(dev_priv)) {
crt->base.disable = pch_disable_crt;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 0085bc745f6a..36832257cc9b 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -34,9 +34,8 @@
* low-power state and comes back to normal.
*/
-#define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin"
-MODULE_FIRMWARE(I915_CSR_GLK);
-#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1)
+#define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+#define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
#define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin"
MODULE_FIRMWARE(I915_CSR_KBL);
@@ -396,13 +395,11 @@ static void csr_load_work_fn(struct work_struct *work)
struct drm_i915_private *dev_priv;
struct intel_csr *csr;
const struct firmware *fw = NULL;
- int ret;
dev_priv = container_of(work, typeof(*dev_priv), csr.work);
csr = &dev_priv->csr;
- ret = request_firmware(&fw, dev_priv->csr.fw_path,
- &dev_priv->drm.pdev->dev);
+ request_firmware(&fw, dev_priv->csr.fw_path, &dev_priv->drm.pdev->dev);
if (fw)
dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 66b367d0771a..d8214ba8da14 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -34,6 +34,19 @@ struct ddi_buf_trans {
u8 i_boost; /* SKL: I_boost; valid: 0x0, 0x1, 0x3, 0x7 */
};
+static const u8 index_to_dp_signal_levels[] = {
+ [0] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [1] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [2] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2,
+ [3] = DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3,
+ [4] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [5] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [6] = DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2,
+ [7] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+ [8] = DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1,
+ [9] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0,
+};
+
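+/*
+ * The array index doubles as the buffer translation entry number:
+ * e.g. DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2 sits
+ * at index 6, which is what translate_signal_level() further below
+ * recovers from the raw signal levels.
+ */
+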
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
@@ -445,7 +458,7 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
if (IS_GEN9_LP(dev_priv))
return hdmi_level;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
hdmi_default_entry = 8;
} else if (IS_BROADWELL(dev_priv)) {
@@ -468,6 +481,59 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
return hdmi_level;
}
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_dp(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_KABYLAKE(dev_priv)) {
+ return kbl_get_buf_trans_dp(dev_priv, n_entries);
+ } else if (IS_SKYLAKE(dev_priv)) {
+ return skl_get_buf_trans_dp(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+ return bdw_ddi_translations_dp;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_edp(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_KABYLAKE(dev_priv) || IS_SKYLAKE(dev_priv)) {
+ return skl_get_buf_trans_edp(dev_priv, n_entries);
+ } else if (IS_BROADWELL(dev_priv)) {
+ return bdw_get_buf_trans_edp(dev_priv, n_entries);
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
+ return hsw_ddi_translations_dp;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
+static const struct ddi_buf_trans *
+intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv,
+ int *n_entries)
+{
+ if (IS_BROADWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+ return hsw_ddi_translations_fdi;
+ } else if (IS_HASWELL(dev_priv)) {
+ *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi);
+ return hsw_ddi_translations_fdi;
+ }
+
+ *n_entries = 0;
+ return NULL;
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@@ -477,76 +543,43 @@ void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 iboost_bit = 0;
- int i, n_dp_entries, n_edp_entries, size;
+ int i, n_entries;
enum port port = intel_ddi_get_encoder_port(encoder);
- const struct ddi_buf_trans *ddi_translations_fdi;
- const struct ddi_buf_trans *ddi_translations_dp;
- const struct ddi_buf_trans *ddi_translations_edp;
const struct ddi_buf_trans *ddi_translations;
if (IS_GEN9_LP(dev_priv))
return;
- if (IS_KABYLAKE(dev_priv)) {
- ddi_translations_fdi = NULL;
- ddi_translations_dp =
- kbl_get_buf_trans_dp(dev_priv, &n_dp_entries);
- ddi_translations_edp =
- skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
- } else if (IS_SKYLAKE(dev_priv)) {
- ddi_translations_fdi = NULL;
- ddi_translations_dp =
- skl_get_buf_trans_dp(dev_priv, &n_dp_entries);
- ddi_translations_edp =
- skl_get_buf_trans_edp(dev_priv, &n_edp_entries);
- } else if (IS_BROADWELL(dev_priv)) {
- ddi_translations_fdi = bdw_ddi_translations_fdi;
- ddi_translations_dp = bdw_ddi_translations_dp;
- ddi_translations_edp = bdw_get_buf_trans_edp(dev_priv, &n_edp_entries);
- n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- } else if (IS_HASWELL(dev_priv)) {
- ddi_translations_fdi = hsw_ddi_translations_fdi;
- ddi_translations_dp = hsw_ddi_translations_dp;
- ddi_translations_edp = hsw_ddi_translations_dp;
- n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp);
- } else {
- WARN(1, "ddi translation table missing\n");
- ddi_translations_edp = bdw_ddi_translations_dp;
- ddi_translations_fdi = bdw_ddi_translations_fdi;
- ddi_translations_dp = bdw_ddi_translations_dp;
- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
- n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
- }
-
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- /* If we're boosting the current, set bit 31 of trans1 */
- if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
- iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
-
- if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
- port != PORT_A && port != PORT_E &&
- n_edp_entries > 9))
- n_edp_entries = 9;
- }
-
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
- ddi_translations = ddi_translations_edp;
- size = n_edp_entries;
+ ddi_translations = intel_ddi_get_buf_trans_edp(dev_priv,
+ &n_entries);
break;
case INTEL_OUTPUT_DP:
- ddi_translations = ddi_translations_dp;
- size = n_dp_entries;
+ ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv,
+ &n_entries);
break;
case INTEL_OUTPUT_ANALOG:
- ddi_translations = ddi_translations_fdi;
- size = n_dp_entries;
+ ddi_translations = intel_ddi_get_buf_trans_fdi(dev_priv,
+ &n_entries);
break;
default:
- BUG();
+ MISSING_CASE(encoder->type);
+ return;
}
- for (i = 0; i < size; i++) {
+ if (IS_GEN9_BC(dev_priv)) {
+ /* If we're boosting the current, set bit 31 of trans1 */
+ if (dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
+
+ if (WARN_ON(encoder->type == INTEL_OUTPUT_EDP &&
+ port != PORT_A && port != PORT_E &&
+ n_entries > 9))
+ n_entries = 9;
+ }
+
+ for (i = 0; i < n_entries; i++) {
I915_WRITE(DDI_BUF_TRANS_LO(port, i),
ddi_translations[i].trans1 | iboost_bit);
I915_WRITE(DDI_BUF_TRANS_HI(port, i),
@@ -572,7 +605,7 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder)
hdmi_level = intel_ddi_hdmi_level(dev_priv, port);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
ddi_translations_hdmi = skl_get_buf_trans_hdmi(dev_priv, &n_hdmi_entries);
/* If we're boosting the current, set bit 31 of trans1 */
@@ -641,15 +674,15 @@ static uint32_t hsw_pll_to_ddi_pll_sel(struct intel_shared_dpll *pll)
* DDI A (which is used for eDP)
*/
-void hsw_fdi_link_train(struct drm_crtc *crtc)
+void hsw_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
u32 temp, i, rx_ctl_val, ddi_pll_sel;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
WARN_ON(encoder->type != INTEL_OUTPUT_ANALOG);
intel_prepare_dp_ddi_buffers(encoder);
}
@@ -668,7 +701,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
- FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
POSTING_READ(FDI_RX_CTL(PIPE_A));
udelay(220);
@@ -678,7 +711,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
- ddi_pll_sel = hsw_pll_to_ddi_pll_sel(intel_crtc->config->shared_dpll);
+ ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
@@ -698,7 +731,7 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
- ((intel_crtc->config->fdi_lanes - 1) << 1) |
+ ((crtc_state->fdi_lanes - 1) << 1) |
DDI_BUF_TRANS_SELECT(i / 2));
POSTING_READ(DDI_BUF_CTL(PORT_E));
@@ -785,27 +818,26 @@ void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
}
static struct intel_encoder *
-intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
+intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder, *ret = NULL;
+ struct drm_device *dev = crtc->base.dev;
+ struct intel_encoder *encoder, *ret = NULL;
int num_encoders = 0;
- for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
- ret = intel_encoder;
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
+ ret = encoder;
num_encoders++;
}
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
-struct intel_encoder *
+static struct intel_encoder *
intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
@@ -818,7 +850,7 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
state = crtc_state->base.state;
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -1089,7 +1121,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
if (INTEL_GEN(dev_priv) <= 8)
hsw_ddi_clock_get(encoder, pipe_config);
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ else if (IS_GEN9_BC(dev_priv))
skl_ddi_clock_get(encoder, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_clock_get(encoder, pipe_config);
@@ -1098,12 +1130,12 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
static bool
hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder)
+ struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
pll = intel_get_shared_dpll(intel_crtc, crtc_state,
- intel_encoder);
+ encoder);
if (!pll)
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -1114,11 +1146,11 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
static bool
skl_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder)
+ struct intel_encoder *encoder)
{
struct intel_shared_dpll *pll;
- pll = intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
+ pll = intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(intel_crtc->pipe));
@@ -1131,9 +1163,9 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
static bool
bxt_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state,
- struct intel_encoder *intel_encoder)
+ struct intel_encoder *encoder)
{
- return !!intel_get_shared_dpll(intel_crtc, crtc_state, intel_encoder);
+ return !!intel_get_shared_dpll(intel_crtc, crtc_state, encoder);
}
/*
@@ -1147,34 +1179,34 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- struct intel_encoder *intel_encoder =
+ struct intel_encoder *encoder =
intel_ddi_get_crtc_new_encoder(crtc_state);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
return skl_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder);
+ encoder);
else if (IS_GEN9_LP(dev_priv))
return bxt_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder);
+ encoder);
else
return hsw_ddi_pll_select(intel_crtc, crtc_state,
- intel_encoder);
+ encoder);
}
-void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- int type = intel_encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ int type = encoder->type;
uint32_t temp;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
WARN_ON(transcoder_is_dsi(cpu_transcoder));
temp = TRANS_MSA_SYNC_CLK;
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
@@ -1194,12 +1226,12 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
}
}
-void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
uint32_t temp;
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (state == true)
@@ -1209,23 +1241,22 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = intel_crtc->pipe;
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- int type = intel_encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ int type = encoder->type;
uint32_t temp;
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
- switch (intel_crtc->config->pipe_bpp) {
+ switch (crtc_state->pipe_bpp) {
case 18:
temp |= TRANS_DDI_BPC_6;
break;
@@ -1242,9 +1273,9 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
BUG();
}
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
- if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
+ if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
if (cpu_transcoder == TRANSCODER_EDP) {
@@ -1255,8 +1286,8 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
* using motion blur mitigation (which we don't
* support). */
if (IS_HASWELL(dev_priv) &&
- (intel_crtc->config->pch_pfit.enabled ||
- intel_crtc->config->pch_pfit.force_thru))
+ (crtc_state->pch_pfit.enabled ||
+ crtc_state->pch_pfit.force_thru))
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
@@ -1274,23 +1305,23 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
}
if (type == INTEL_OUTPUT_HDMI) {
- if (intel_crtc->config->has_hdmi_sink)
+ if (crtc_state->has_hdmi_sink)
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= TRANS_DDI_MODE_SELECT_DVI;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
- temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
+ temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (type == INTEL_OUTPUT_DP ||
type == INTEL_OUTPUT_EDP) {
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
- temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else if (type == INTEL_OUTPUT_DP_MST) {
temp |= TRANS_DDI_MODE_SELECT_DP_MST;
- temp |= DDI_PORT_WIDTH(intel_crtc->config->lane_count);
+ temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
} else {
WARN(1, "Invalid encoder type %d for pipe %c\n",
- intel_encoder->type, pipe_name(pipe));
+ encoder->type, pipe_name(pipe));
}
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
@@ -1311,20 +1342,19 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *intel_encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ enum port port = intel_ddi_get_encoder_port(encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
- enum intel_display_power_domain power_domain;
uint32_t tmp;
bool ret;
- power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+ if (!encoder->get_hw_state(encoder, &pipe)) {
ret = false;
goto out;
}
@@ -1363,7 +1393,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
}
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@@ -1374,13 +1404,12 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
enum port port = intel_ddi_get_encoder_port(encoder);
- enum intel_display_power_domain power_domain;
u32 tmp;
int i;
bool ret;
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
ret = false;
@@ -1437,29 +1466,39 @@ out:
"(PHY_CTL %08x)\n", port_name(port), tmp);
}
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
-void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
+static u64 intel_ddi_get_power_domains(struct intel_encoder *encoder)
{
- struct drm_crtc *crtc = &intel_crtc->base;
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
- enum port port = intel_ddi_get_encoder_port(intel_encoder);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+ enum pipe pipe;
+
+ if (intel_ddi_get_hw_state(encoder, &pipe))
+ return BIT_ULL(dig_port->ddi_io_power_domain);
+
+ return 0;
+}
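+
+/*
+ * A note on intent, assuming the hardware state readout path consumes
+ * this hook: when the encoder is found enabled, reporting the DDI IO
+ * well here lets the power domain code take a matching reference for
+ * the BIOS-enabled port.
+ */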
+
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc);
+ enum port port = intel_ddi_get_encoder_port(encoder);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
}
-void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
@@ -1582,50 +1621,38 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
ddi_translations[level].deemphasis);
}
-static uint32_t translate_signal_level(int signal_levels)
+u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
{
- uint32_t level;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int n_entries;
- switch (signal_levels) {
- default:
- DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: 0x%x\n",
- signal_levels);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 0;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 1;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 2;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_3:
- level = 3;
- break;
+ if (encoder->type == INTEL_OUTPUT_EDP)
+ intel_ddi_get_buf_trans_edp(dev_priv, &n_entries);
+ else
+ intel_ddi_get_buf_trans_dp(dev_priv, &n_entries);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 4;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 5;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
- level = 6;
- break;
+ if (WARN_ON(n_entries < 1))
+ n_entries = 1;
+ if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 7;
- break;
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
- level = 8;
- break;
+ return index_to_dp_signal_levels[n_entries - 1] &
+ DP_TRAIN_VOLTAGE_SWING_MASK;
+}
- case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
- level = 9;
- break;
+static uint32_t translate_signal_level(int signal_levels)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
+ if (index_to_dp_signal_levels[i] == signal_levels)
+ return i;
}
- return level;
+ WARN(1, "Unsupported voltage swing/pre-emphasis level: 0x%x\n",
+ signal_levels);
+
+ return 0;
}
uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
@@ -1641,7 +1668,7 @@ uint32_t ddi_signal_levels(struct intel_dp *intel_dp)
level = translate_signal_level(signal_levels);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port, encoder->type);
@@ -1658,7 +1685,7 @@ void intel_ddi_clk_select(struct intel_encoder *encoder,
if (WARN_ON(!pll))
return;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
uint32_t val;
/* DDI -> PLL mapping */
@@ -1684,6 +1711,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = intel_ddi_get_encoder_port(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+
+ WARN_ON(link_mst && (port == PORT_A || port == PORT_E));
intel_dp_set_link_params(intel_dp, link_rate, lane_count,
link_mst);
@@ -1691,6 +1721,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
intel_edp_panel_on(intel_dp);
intel_ddi_clk_select(encoder, pll);
+
+ intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
intel_prepare_dp_ddi_buffers(encoder);
intel_ddi_init_dp_buf_reg(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
@@ -1710,11 +1743,15 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct drm_encoder *drm_encoder = &encoder->base;
enum port port = intel_ddi_get_encoder_port(encoder);
int level = intel_ddi_hdmi_level(dev_priv, port);
+ struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
intel_ddi_clk_select(encoder, pll);
+
+ intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
intel_prepare_hdmi_ddi_buffers(encoder);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
skl_ddi_set_iboost(encoder, level);
else if (IS_GEN9_LP(dev_priv))
bxt_ddi_vswing_sequence(dev_priv, level, port,
@@ -1725,27 +1762,25 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
crtc_state, conn_state);
}
-static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder,
+static void intel_ddi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_encoder *encoder = &intel_encoder->base;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
- int type = intel_encoder->type;
+ int type = encoder->type;
if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
- intel_ddi_pre_enable_dp(intel_encoder,
- crtc->config->port_clock,
- crtc->config->lane_count,
- crtc->config->shared_dpll,
- intel_crtc_has_type(crtc->config,
+ intel_ddi_pre_enable_dp(encoder,
+ pipe_config->port_clock,
+ pipe_config->lane_count,
+ pipe_config->shared_dpll,
+ intel_crtc_has_type(pipe_config,
INTEL_OUTPUT_DP_MST));
}
if (type == INTEL_OUTPUT_HDMI) {
- intel_ddi_pre_enable_hdmi(intel_encoder,
+ intel_ddi_pre_enable_hdmi(encoder,
pipe_config->has_hdmi_sink,
pipe_config, conn_state,
- crtc->config->shared_dpll);
+ pipe_config->shared_dpll);
}
}
@@ -1756,6 +1791,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
int type = intel_encoder->type;
uint32_t val;
bool wait = false;
@@ -1784,7 +1820,10 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
intel_edp_panel_off(intel_dp);
}
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (dig_port)
+ intel_display_power_put(dev_priv, dig_port->ddi_io_power_domain);
+
+ if (IS_GEN9_BC(dev_priv))
I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_GEN(dev_priv) < 9)
@@ -1797,11 +1836,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder,
}
}
-void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
+void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
uint32_t val;
/*
@@ -1814,7 +1853,7 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
val &= ~FDI_RX_ENABLE;
I915_WRITE(FDI_RX_CTL(PIPE_A), val);
- intel_ddi_post_disable(intel_encoder, old_crtc_state, old_conn_state);
+ intel_ddi_post_disable(encoder, old_crtc_state, old_conn_state);
val = I915_READ(FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
@@ -1835,8 +1874,6 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
struct drm_connector_state *conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
@@ -1863,10 +1900,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder,
intel_edp_drrs_enable(intel_dp, pipe_config);
}
- if (intel_crtc->config->has_audio) {
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ if (pipe_config->has_audio)
intel_audio_codec_enable(intel_encoder, pipe_config, conn_state);
- }
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder,
@@ -1874,16 +1909,10 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder,
struct drm_connector_state *old_conn_state)
{
struct drm_encoder *encoder = &intel_encoder->base;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int type = intel_encoder->type;
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- if (intel_crtc->config->has_audio) {
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(intel_encoder);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- }
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1898,8 +1927,7 @@ static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
- uint8_t mask = intel_crtc->config->lane_lat_optim_mask;
+ uint8_t mask = pipe_config->lane_lat_optim_mask;
bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
}
@@ -2126,45 +2154,6 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
return connector;
}
-struct intel_shared_dpll *
-intel_ddi_get_link_dpll(struct intel_dp *intel_dp, int clock)
-{
- struct intel_connector *connector = intel_dp->attached_connector;
- struct intel_encoder *encoder = connector->encoder;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_shared_dpll *pll = NULL;
- struct intel_shared_dpll_state tmp_pll_state;
- enum intel_dpll_id dpll_id;
-
- if (IS_GEN9_LP(dev_priv)) {
- dpll_id = (enum intel_dpll_id)dig_port->port;
- /*
- * Select the required PLL. This works for platforms where
- * there is no shared DPLL.
- */
- pll = &dev_priv->shared_dplls[dpll_id];
- if (WARN_ON(pll->active_mask)) {
-
- DRM_ERROR("Shared DPLL in use. active_mask:%x\n",
- pll->active_mask);
- return NULL;
- }
- tmp_pll_state = pll->state;
- if (!bxt_ddi_dp_set_dpll_hw_state(clock,
- &pll->state.hw_state)) {
- DRM_ERROR("Could not setup DPLL\n");
- pll->state = tmp_pll_state;
- return NULL;
- }
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- pll = skl_find_link_pll(dev_priv, clock);
- } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- pll = hsw_ddi_dp_get_dpll(encoder, clock);
- }
- return pll;
-}
-
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
struct intel_digital_port *intel_dig_port;
@@ -2241,12 +2230,38 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
intel_encoder->get_config = intel_ddi_get_config;
intel_encoder->suspend = intel_dp_encoder_suspend;
+ intel_encoder->get_power_domains = intel_ddi_get_power_domains;
intel_dig_port->port = port;
intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL |
DDI_A_4_LANES);
+ switch (port) {
+ case PORT_A:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_A_IO;
+ break;
+ case PORT_B:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_B_IO;
+ break;
+ case PORT_C:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_C_IO;
+ break;
+ case PORT_D:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_D_IO;
+ break;
+ case PORT_E:
+ intel_dig_port->ddi_io_power_domain =
+ POWER_DOMAIN_PORT_DDI_E_IO;
+ break;
+ default:
+ MISSING_CASE(port);
+ }
+
/*
* Bspec says that DDI_A_4_LANES is the only supported configuration
* for Broxton. Yet some BIOS fail to set this bit on port A if eDP
@@ -2265,6 +2280,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->max_lanes = max_lanes;
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = 0;
@@ -2274,14 +2290,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
goto err;
intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
- /*
- * On BXT A0/A1, sw needs to activate DDIA HPD logic and
- * interrupts to check the external panel connection.
- */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) && port == PORT_B)
- dev_priv->hotplug.irq_port[PORT_A] = intel_dig_port;
- else
- dev_priv->hotplug.irq_port[port] = intel_dig_port;
+ dev_priv->hotplug.irq_port[port] = intel_dig_port;
}
/* In theory we don't need the encoder->type check, but leave it just in
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index fcf81815daff..7d01dfe7faac 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -56,6 +56,8 @@ static const char * const platform_names[] = {
const char *intel_platform_name(enum intel_platform platform)
{
+ BUILD_BUG_ON(ARRAY_SIZE(platform_names) != INTEL_MAX_PLATFORMS);
+
if (WARN_ON_ONCE(platform >= ARRAY_SIZE(platform_names) ||
platform_names[platform] == NULL))
return "<unknown>";
@@ -195,8 +197,10 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
IS_GEN9_LP(dev_priv) && sseu_subslice_total(sseu) > 1;
sseu->has_eu_pg = sseu->eu_per_subslice > 2;
- if (IS_BROXTON(dev_priv)) {
+ if (IS_GEN9_LP(dev_priv)) {
#define IS_SS_DISABLED(ss) (!(sseu->subslice_mask & BIT(ss)))
+ info->has_pooled_eu = hweight8(sseu->subslice_mask) == 3;
+
/*
* There is a HW issue in 2x6 fused down parts that requires
* Pooled EU to be enabled as a WA. The pool configuration
@@ -204,9 +208,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
* doesn't affect if the device has all 3 subslices enabled.
*/
/* WaEnablePooledEuFor2x6:bxt */
- info->has_pooled_eu = ((hweight8(sseu->subslice_mask) == 3) ||
- (hweight8(sseu->subslice_mask) == 2 &&
- INTEL_REVID(dev_priv) < BXT_REVID_C0));
+ info->has_pooled_eu |= (hweight8(sseu->subslice_mask) == 2 &&
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST));
sseu->min_eu_in_pool = 0;
if (info->has_pooled_eu) {
@@ -234,7 +237,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
* The subslice disable field is global, i.e. it applies
* to each of the enabled slices.
*/
- sseu->subslice_mask = BIT(ss_max) - 1;
+ sseu->subslice_mask = GENMASK(ss_max - 1, 0);
sseu->subslice_mask &= ~((fuse2 & GEN8_F2_SS_DIS_MASK) >>
GEN8_F2_SS_DIS_SHIFT);
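GENMASK(ss_max - 1, 0) computes the same value as BIT(ss_max) - 1 but states the intent, a contiguous mask of the ss_max low bits, directly. A runnable userspace check of the equivalence, with the kernel's <linux/bits.h> macros open-coded:

#include <assert.h>
#include <stdio.h>

#define BIT(n)		(1UL << (n))
/* Userspace stand-in for the kernel's GENMASK(h, l). */
#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	unsigned int ss_max = 3;	/* e.g. three subslices per slice */

	assert(GENMASK(ss_max - 1, 0) == BIT(ss_max) - 1);	/* 0x7 */
	printf("subslice_mask = 0x%lx\n", GENMASK(ss_max - 1, 0));
	return 0;
}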
@@ -410,10 +413,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
info->has_snoop = !info->has_llc;
- /* Snooping is broken on BXT A stepping. */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- info->has_snoop = false;
-
DRM_DEBUG_DRIVER("slice mask: %04x\n", info->sseu.slice_mask);
DRM_DEBUG_DRIVER("slice total: %u\n", hweight8(info->sseu.slice_mask));
DRM_DEBUG_DRIVER("subslice total: %u\n",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index a2fece5e9fb3..010e5ddb198a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -37,6 +37,7 @@
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
@@ -96,10 +97,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj);
+static int intel_framebuffer_init(struct intel_framebuffer *ifb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
@@ -122,9 +122,6 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
-static int ilk_max_pixel_rate(struct drm_atomic_state *state);
-static int glk_calc_cdclk(int max_pixclk);
-static int bxt_calc_cdclk(int max_pixclk);
struct intel_limit {
struct {
@@ -138,7 +135,7 @@ struct intel_limit {
};
/* returns HPLL frequency in kHz */
-static int valleyview_get_vco(struct drm_i915_private *dev_priv)
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
{
int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
@@ -170,73 +167,16 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
-static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
- const char *name, u32 reg)
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg)
{
if (dev_priv->hpll_freq == 0)
- dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
+ dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
return vlv_get_cck_clock(dev_priv, name, reg,
dev_priv->hpll_freq);
}
-static int
-intel_pch_rawclk(struct drm_i915_private *dev_priv)
-{
- return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
-}
-
-static int
-intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
-{
- /* RAWCLK_FREQ_VLV register updated from power well code */
- return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
- CCK_DISPLAY_REF_CLOCK_CONTROL);
-}
-
-static int
-intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
-{
- uint32_t clkcfg;
-
- /* hrawclock is 1/4 the FSB frequency */
- clkcfg = I915_READ(CLKCFG);
- switch (clkcfg & CLKCFG_FSB_MASK) {
- case CLKCFG_FSB_400:
- return 100000;
- case CLKCFG_FSB_533:
- return 133333;
- case CLKCFG_FSB_667:
- return 166667;
- case CLKCFG_FSB_800:
- return 200000;
- case CLKCFG_FSB_1067:
- return 266667;
- case CLKCFG_FSB_1333:
- return 333333;
- /* these two are just a guess; one of them might be right */
- case CLKCFG_FSB_1600:
- case CLKCFG_FSB_1600_ALT:
- return 400000;
- default:
- return 133333;
- }
-}
-
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
-{
- if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
- else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
- else
- return; /* no rawclk on other platforms, or no need to know it */
-
- DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
-}
-
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
@@ -2050,10 +1990,13 @@ static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
return IS_GEN2(dev_priv) ? 2048 : 4096;
}
-static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, unsigned int cpp)
+static unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane)
{
- switch (fb_modifier) {
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[plane];
+
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
return cpp;
case I915_FORMAT_MOD_X_TILED:
@@ -2082,41 +2025,38 @@ static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_pr
}
break;
default:
- MISSING_CASE(fb_modifier);
+ MISSING_CASE(fb->modifier);
return cpp;
}
}
-unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, unsigned int cpp)
+static unsigned int
+intel_tile_height(const struct drm_framebuffer *fb, int plane)
{
- if (fb_modifier == DRM_FORMAT_MOD_NONE)
+ if (fb->modifier == DRM_FORMAT_MOD_NONE)
return 1;
else
- return intel_tile_size(dev_priv) /
- intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+ return intel_tile_size(to_i915(fb->dev)) /
+ intel_tile_width_bytes(fb, plane);
}
/* Return the tile dimensions in pixel units */
-static void intel_tile_dims(const struct drm_i915_private *dev_priv,
+static void intel_tile_dims(const struct drm_framebuffer *fb, int plane,
unsigned int *tile_width,
- unsigned int *tile_height,
- uint64_t fb_modifier,
- unsigned int cpp)
+ unsigned int *tile_height)
{
- unsigned int tile_width_bytes =
- intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
+ unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane);
+ unsigned int cpp = fb->format->cpp[plane];
*tile_width = tile_width_bytes / cpp;
- *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
+ *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
}
unsigned int
-intel_fb_align_height(struct drm_device *dev, unsigned int height,
- uint32_t pixel_format, uint64_t fb_modifier)
+intel_fb_align_height(const struct drm_framebuffer *fb,
+ int plane, unsigned int height)
{
- unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
- unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
+ unsigned int tile_height = intel_tile_height(fb, plane);
return ALIGN(height, tile_height);
}
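With the helpers now keyed off the framebuffer, the tile geometry falls out of the modifier and cpp alone. Worked numbers, assuming the gen4+ X-tile geometry (4096-byte tiles with 512-byte rows); the values below are illustrative, not read back from hardware:

/* X-tiled, 32bpp: */
unsigned int tile_size = 4096;			/* intel_tile_size()	     */
unsigned int tile_w_b  = 512;			/* intel_tile_width_bytes()  */
unsigned int cpp       = 4;
unsigned int tile_h    = tile_size / tile_w_b;	/* = 8 rows		     */
unsigned int tile_w_px = tile_w_b / cpp;	/* = 128 pixels		     */

/* intel_fb_align_height() then rounds the fb height up to a whole tile:
 *   height 1081 -> ALIGN(1081, 8) = 1088
 */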
@@ -2157,21 +2097,27 @@ static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_pr
return 0;
}
-static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier)
+static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int plane)
{
- switch (fb_modifier) {
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ /* AUX_DIST needs only 4K alignment */
+ if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
+ return 4096;
+
+ switch (fb->modifier) {
case DRM_FORMAT_MOD_NONE:
return intel_linear_alignment(dev_priv);
case I915_FORMAT_MOD_X_TILED:
- if (INTEL_INFO(dev_priv)->gen >= 9)
+ if (INTEL_GEN(dev_priv) >= 9)
return 256 * 1024;
return 0;
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
return 1 * 1024 * 1024;
default:
- MISSING_CASE(fb_modifier);
+ MISSING_CASE(fb->modifier);
return 0;
}
}
@@ -2188,7 +2134,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- alignment = intel_surf_alignment(dev_priv, fb->modifier);
+ alignment = intel_surf_alignment(fb, 0);
intel_fill_fb_ggtt_view(&view, fb, rotation);
@@ -2349,8 +2295,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
unsigned int pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier, cpp);
+ intel_tile_dims(fb, plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
@@ -2405,8 +2350,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
unsigned int tile_rows, tiles, pitch_tiles;
tile_size = intel_tile_size(dev_priv);
- intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb_modifier, cpp);
+ intel_tile_dims(fb, plane, &tile_width, &tile_height);
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
@@ -2446,13 +2390,7 @@ u32 intel_compute_tile_offset(int *x, int *y,
const struct drm_framebuffer *fb = state->base.fb;
unsigned int rotation = state->base.rotation;
int pitch = intel_fb_pitch(fb, plane, rotation);
- u32 alignment;
-
- /* AUX_DIST needs only 4K alignment */
- if (fb->format->format == DRM_FORMAT_NV12 && plane == 1)
- alignment = 4096;
- else
- alignment = intel_surf_alignment(dev_priv, fb->modifier);
+ u32 alignment = intel_surf_alignment(fb, plane);
return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch,
rotation, alignment);
@@ -2516,8 +2454,8 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
*/
if (i915_gem_object_is_tiled(intel_fb->obj) &&
(x + width) * cpp > fb->pitches[i]) {
- DRM_DEBUG("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return -EINVAL;
}
@@ -2529,7 +2467,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
intel_fb->normal[i].y = y;
offset = _intel_compute_tile_offset(dev_priv, &x, &y,
- fb, 0, fb->pitches[i],
+ fb, i, fb->pitches[i],
DRM_ROTATE_0, tile_size);
offset /= tile_size;
@@ -2538,8 +2476,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
unsigned int pitch_tiles;
struct drm_rect r;
- intel_tile_dims(dev_priv, &tile_width, &tile_height,
- fb->modifier, cpp);
+ intel_tile_dims(fb, i, &tile_width, &tile_height);
rot_info->plane[i].offset = offset;
rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
@@ -2600,9 +2537,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
max_size = max(max_size, offset + size);
}
- if (max_size * tile_size > to_intel_framebuffer(fb)->obj->base.size) {
- DRM_DEBUG("fb too big for bo (need %u bytes, have %zu bytes)\n",
- max_size * tile_size, to_intel_framebuffer(fb)->obj->base.size);
+ if (max_size * tile_size > intel_fb->obj->base.size) {
+ DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n",
+ max_size * tile_size, intel_fb->obj->base.size);
return -EINVAL;
}
@@ -2682,15 +2619,13 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
return false;
mutex_lock(&dev->struct_mutex);
-
obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
base_aligned,
base_aligned,
size_aligned);
- if (!obj) {
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
+ if (!obj)
return false;
- }
if (plane_config->tiling == I915_TILING_X)
obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X;
@@ -2702,20 +2637,17 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
- &mode_cmd, obj)) {
+ if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
DRM_DEBUG_KMS("intel fb init failed\n");
goto out_unref_obj;
}
- mutex_unlock(&dev->struct_mutex);
DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
return true;
out_unref_obj:
i915_gem_object_put(obj);
- mutex_unlock(&dev->struct_mutex);
return false;
}
@@ -2734,6 +2666,29 @@ update_state_fb(struct drm_plane *plane)
}
static void
+intel_set_plane_visible(struct intel_crtc_state *crtc_state,
+ struct intel_plane_state *plane_state,
+ bool visible)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+
+ plane_state->base.visible = visible;
+
+ /* FIXME pre-g4x don't work like this */
+ if (visible) {
+ crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base));
+ crtc_state->active_planes |= BIT(plane->id);
+ } else {
+ crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base));
+ crtc_state->active_planes &= ~BIT(plane->id);
+ }
+
+ DRM_DEBUG_KMS("%s active planes 0x%x\n",
+ crtc_state->base.crtc->name,
+ crtc_state->active_planes);
+}
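intel_set_plane_visible keeps two differently indexed bitmasks in sync, which is easy to miss at a glance:

/* Two bit index spaces are updated together:
 *  - drm_plane_index(&plane->base): the DRM core's global plane index,
 *    tracked in crtc_state->base.plane_mask for the atomic helpers;
 *  - plane->id (enum plane_id): i915's per-pipe index (PLANE_PRIMARY,
 *    PLANE_SPRITE0, ...), tracked in crtc_state->active_planes.
 */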
+
+static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_initial_plane_config *plane_config)
{
@@ -2790,9 +2745,11 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* simplest solution is to just disable the primary plane now and
* pretend the BIOS never had it enabled.
*/
- plane_state->visible = false;
- crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+ intel_set_plane_visible(to_intel_crtc_state(crtc_state),
+ to_intel_plane_state(plane_state),
+ false);
intel_pre_disable_primary_noatomic(&intel_crtc->base);
+ trace_intel_disable_plane(primary, intel_crtc);
intel_plane->disable_plane(primary, &intel_crtc->base);
return;
@@ -2831,7 +2788,11 @@ valid_fb:
drm_framebuffer_reference(fb);
primary->fb = primary->state->fb = fb;
primary->crtc = primary->state->crtc = &intel_crtc->base;
- intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
+
+ intel_set_plane_visible(to_intel_crtc_state(crtc_state),
+ to_intel_plane_state(plane_state),
+ true);
+
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&obj->frontbuffer_bits);
}
@@ -2880,7 +2841,6 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane,
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
- const struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
const struct drm_framebuffer *fb = plane_state->base.fb;
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
@@ -2899,8 +2859,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_compute_tile_offset(&x, &y, plane_state, 0);
-
- alignment = intel_surf_alignment(dev_priv, fb->modifier);
+ alignment = intel_surf_alignment(fb, 0);
/*
* AUX surface offset is specified as the distance from the
@@ -3017,6 +2976,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
+ unsigned long irqflags;
dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -3025,20 +2985,6 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
if (INTEL_GEN(dev_priv) < 4) {
if (intel_crtc->pipe == PIPE_B)
dspcntr |= DISPPLANE_SEL_PIPE_B;
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- I915_WRITE(DSPSIZE(plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
- I915_WRITE(DSPPOS(plane), 0);
- } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
- I915_WRITE(PRIMSIZE(plane),
- ((crtc_state->pipe_src_h - 1) << 16) |
- (crtc_state->pipe_src_w - 1));
- I915_WRITE(PRIMPOS(plane), 0);
- I915_WRITE(PRIMCNSTALPHA(plane), 0);
}
switch (fb->format->format) {
@@ -3101,21 +3047,41 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
- I915_WRITE(reg, dspcntr);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (INTEL_GEN(dev_priv) < 4) {
+ /* pipesrc and dspsize control the size that is scaled from,
+ * which should always be the user's requested size.
+ */
+ I915_WRITE_FW(DSPSIZE(plane),
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
+ I915_WRITE_FW(DSPPOS(plane), 0);
+ } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) {
+ I915_WRITE_FW(PRIMSIZE(plane),
+ ((crtc_state->pipe_src_h - 1) << 16) |
+ (crtc_state->pipe_src_w - 1));
+ I915_WRITE_FW(PRIMPOS(plane), 0);
+ I915_WRITE_FW(PRIMCNSTALPHA(plane), 0);
+ }
- I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE_FW(reg, dspcntr);
+
+ I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE(DSPSURF(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
+ I915_WRITE_FW(DSPSURF(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
} else {
- I915_WRITE(DSPADDR(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(DSPADDR(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
}
- POSTING_READ(reg);
+ POSTING_READ_FW(reg);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
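The conversion from I915_WRITE to I915_WRITE_FW is what motivates the new uncore.lock/irqflags dance: the plain accessors take the uncore spinlock and handle forcewake per register, while the _FW ("forcewake handled by caller") variants skip that, so the function brackets the whole burst of plane writes itself and pays the locking cost once. The general shape, as a sketch with hypothetical reg/val names:

unsigned long irqflags;

spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

/* Raw accessors: no per-register locking or forcewake handling; the
 * caller must hold uncore.lock for the whole burst. */
I915_WRITE_FW(reg_a, val_a);
I915_WRITE_FW(reg_b, val_b);
POSTING_READ_FW(reg_a);		/* flush before dropping the lock */

spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);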
static void i9xx_disable_primary_plane(struct drm_plane *primary,
@@ -3125,13 +3091,18 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE(DSPCNTR(plane), 0);
+ I915_WRITE_FW(DSPCNTR(plane), 0);
if (INTEL_INFO(dev_priv)->gen >= 4)
- I915_WRITE(DSPSURF(plane), 0);
+ I915_WRITE_FW(DSPSURF(plane), 0);
else
- I915_WRITE(DSPADDR(plane), 0);
- POSTING_READ(DSPCNTR(plane));
+ I915_WRITE_FW(DSPADDR(plane), 0);
+ POSTING_READ_FW(DSPCNTR(plane));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void ironlake_update_primary_plane(struct drm_plane *primary,
@@ -3149,6 +3120,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
unsigned int rotation = plane_state->base.rotation;
int x = plane_state->base.src.x1 >> 16;
int y = plane_state->base.src.y1 >> 16;
+ unsigned long irqflags;
dspcntr = DISPPLANE_GAMMA_ENABLE;
dspcntr |= DISPLAY_PLANE_ENABLE;
@@ -3205,31 +3177,32 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
intel_crtc->adjusted_x = x;
intel_crtc->adjusted_y = y;
- I915_WRITE(reg, dspcntr);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
- I915_WRITE(DSPSURF(plane),
- intel_plane_ggtt_offset(plane_state) +
- intel_crtc->dspaddr_offset);
+ I915_WRITE_FW(reg, dspcntr);
+
+ I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]);
+ I915_WRITE_FW(DSPSURF(plane),
+ intel_plane_ggtt_offset(plane_state) +
+ intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
+ I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x);
} else {
- I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
- I915_WRITE(DSPLINOFF(plane), linear_offset);
+ I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x);
+ I915_WRITE_FW(DSPLINOFF(plane), linear_offset);
}
- POSTING_READ(reg);
+ POSTING_READ_FW(reg);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
-u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, uint32_t pixel_format)
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane)
{
- if (fb_modifier == DRM_FORMAT_MOD_NONE) {
+ if (fb->modifier == DRM_FORMAT_MOD_NONE)
return 64;
- } else {
- int cpp = drm_format_plane_cpp(pixel_format, 0);
-
- return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
- }
+ else
+ return intel_tile_width_bytes(fb, plane);
}
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
@@ -3262,21 +3235,21 @@ static void skl_detach_scalers(struct intel_crtc *intel_crtc)
u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
- const struct drm_i915_private *dev_priv = to_i915(fb->dev);
- u32 stride = intel_fb_pitch(fb, plane, rotation);
+ u32 stride;
+
+ if (plane >= fb->format->num_planes)
+ return 0;
+
+ stride = intel_fb_pitch(fb, plane, rotation);
/*
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
- if (drm_rotation_90_or_270(rotation)) {
- int cpp = fb->format->cpp[plane];
-
- stride /= intel_tile_height(dev_priv, fb->modifier, cpp);
- } else {
- stride /= intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->format->format);
- }
+ if (drm_rotation_90_or_270(rotation))
+ stride /= intel_tile_height(fb, plane);
+ else
+ stride /= intel_fb_stride_alignment(fb, plane);
return stride;
}
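Worked numbers for the two stride units described in the comment, as a runnable userspace sketch (the pitch value is hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int pitch = 16384;	/* e.g. 4096 px * 4 bpp */

	/* Linear: PLANE_STRIDE is expressed in 64-byte chunks. */
	printf("linear stride: %u\n", pitch / 64);	/* 256 */

	/* X-tiled (512-byte tile rows): PLANE_STRIDE is in tiles. */
	printf("tiled stride:  %u\n", pitch / 512);	/* 32 */

	/* For 90/270 rotation the divisor is the tile height instead. */
	return 0;
}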
@@ -3385,14 +3358,19 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
int dst_y = plane_state->base.dst.y1;
int dst_w = drm_rect_width(&plane_state->base.dst);
int dst_h = drm_rect_height(&plane_state->base.dst);
+ unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE |
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE;
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |=
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE |
+ PLANE_CTL_PLANE_GAMMA_DISABLE;
+ }
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
- plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
plane_ctl |= skl_plane_ctl_rotation(rotation);
/* Sizes are 0 based */
@@ -3406,10 +3384,19 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
intel_crtc->adjusted_x = src_x;
intel_crtc->adjusted_y = src_y;
- I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
- I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ PLANE_COLOR_PIPE_GAMMA_ENABLE |
+ PLANE_COLOR_PIPE_CSC_ENABLE |
+ PLANE_COLOR_PLANE_GAMMA_DISABLE);
+ }
+
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x);
+ I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
if (scaler_id >= 0) {
uint32_t ps_ctrl = 0;
@@ -3417,19 +3404,21 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
WARN_ON(!dst_w || !dst_h);
ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) |
crtc_state->scaler_state.scalers[scaler_id].mode;
- I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
- I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
- I915_WRITE(PLANE_POS(pipe, plane_id), 0);
+ I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
+ I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x);
}
- I915_WRITE(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+
+ POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
- POSTING_READ(PLANE_SURF(pipe, plane_id));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void skylake_disable_primary_plane(struct drm_plane *primary,
@@ -3439,10 +3428,15 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
struct drm_i915_private *dev_priv = to_i915(dev);
enum plane_id plane_id = to_intel_plane(primary)->id;
enum pipe pipe = to_intel_plane(primary)->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
+ I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
- I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ(PLANE_SURF(pipe, plane_id));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
/* Assume fb object is pinned & idle & fenced and just update base pointers */
@@ -3473,10 +3467,14 @@ static void intel_update_primary_planes(struct drm_device *dev)
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- if (plane_state->base.visible)
+ if (plane_state->base.visible) {
+ trace_intel_update_plane(&plane->base,
+ to_intel_crtc(crtc));
+
plane->update_plane(&plane->base,
to_intel_crtc_state(crtc->state),
plane_state);
+ }
}
}
@@ -3495,7 +3493,12 @@ __intel_display_resume(struct drm_device *dev,
if (!state)
return 0;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ /*
+ * We've duplicated the state; pointers into the old state are now invalid.
+ *
+ * Don't attempt to use the old state until we commit the duplicated state.
+ */
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore
* current state. With fast modeset this may not result
@@ -3505,7 +3508,8 @@ __intel_display_resume(struct drm_device *dev,
}
/* ignore any reset values/BIOS leftovers in the WM registers */
- to_intel_atomic_state(state)->skip_intermediate_wm = true;
+ if (!HAS_GMCH_DISPLAY(to_i915(dev)))
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
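The switch to for_each_new_crtc_in_state (and to the old/new connector iterators in the hunks below) makes the staleness explicit: once the state has been duplicated, each loop has to say which side of the swap it wants. A sketch of the split, assuming the iterator names of this kernel generation (disable_encoder is a hypothetical helper):

/* Pre-commit fixups: walk the state about to be applied. */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
	new_crtc_state->mode_changed = true;

/* Teardown paths: walk the state being replaced. */
for_each_old_connector_in_state(state, conn, old_conn_state, i)
	disable_encoder(old_conn_state->best_encoder);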
@@ -3635,7 +3639,7 @@ static bool abort_flip_on_reset(struct intel_crtc *crtc)
{
struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error;
- if (i915_reset_in_progress(error))
+ if (i915_reset_backoff(error))
return true;
if (crtc->reset_count != i915_reset_count(error))
@@ -3670,10 +3674,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
crtc->base.mode = crtc->base.state->mode;
- DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
- old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
-
/*
* Update pipe size and adjust fitter if needed: the reason for this is
* that in compute_mode_changes we check the native mode (not the pfit
@@ -3701,12 +3701,11 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
}
}
-static void intel_fdi_normal_train(struct drm_crtc *crtc)
+static void intel_fdi_normal_train(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
i915_reg_t reg;
u32 temp;
@@ -3744,12 +3743,12 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
}
/* The FDI link training functions for ILK/Ibexpeak. */
-static void ironlake_fdi_link_train(struct drm_crtc *crtc)
+static void ironlake_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, tries;
@@ -3770,7 +3769,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
@@ -3845,12 +3844,12 @@ static const int snb_b_fdi_train_param[] = {
};
/* The FDI link training functions for SNB/Cougarpoint. */
-static void gen6_fdi_link_train(struct drm_crtc *crtc)
+static void gen6_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, retry;
@@ -3869,7 +3868,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -3978,12 +3977,12 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
}
/* Manual link training for Ivy Bridge A0 parts */
-static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
+static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
i915_reg_t reg;
u32 temp, i, j;
@@ -4021,7 +4020,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
+ temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
@@ -4308,10 +4307,10 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
}
/* Program iCLKIP clock to the desired frequency */
-static void lpt_program_iclkip(struct drm_crtc *crtc)
+static void lpt_program_iclkip(struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ int clock = crtc->config->base.adjusted_mode.crtc_clock;
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
@@ -4492,12 +4491,12 @@ static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
-intel_trans_dp_port_sel(struct drm_crtc *crtc)
+intel_trans_dp_port_sel(struct intel_crtc *crtc)
{
- struct drm_device *dev = crtc->dev;
+ struct drm_device *dev = crtc->base.dev;
struct intel_encoder *encoder;
- for_each_encoder_on_crtc(dev, crtc, encoder) {
+ for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
if (encoder->type == INTEL_OUTPUT_DP ||
encoder->type == INTEL_OUTPUT_EDP)
return enc_to_dig_port(&encoder->base)->port;
@@ -4514,18 +4513,18 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
* - DP transcoding bits
* - transcoder
*/
-static void ironlake_pch_enable(struct drm_crtc *crtc)
+static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 temp;
assert_pch_transcoder_disabled(dev_priv, pipe);
if (IS_IVYBRIDGE(dev_priv))
- ivybridge_update_fdi_bc_bifurcation(intel_crtc);
+ ivybridge_update_fdi_bc_bifurcation(crtc);
/* Write the TU size bits before fdi link training, so that error
* detection works. */
@@ -4533,7 +4532,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
- dev_priv->display.fdi_link_train(crtc);
+ dev_priv->display.fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
* multiplier into the DPLL. */
@@ -4543,7 +4542,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
temp = I915_READ(PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
- if (intel_crtc->config->shared_dpll ==
+ if (crtc_state->shared_dpll ==
intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
temp |= sel;
else
@@ -4558,19 +4557,19 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
* Note that enable_shared_dpll tries to do the right thing, but
* get_shared_dpll unconditionally resets the pll - we need that to have
* the right LVDS enable sequence. */
- intel_enable_shared_dpll(intel_crtc);
+ intel_enable_shared_dpll(crtc);
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
- ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
+ ironlake_pch_transcoder_set_timings(crtc, pipe);
intel_fdi_normal_train(crtc);
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev_priv) &&
- intel_crtc_has_dp_encoder(intel_crtc->config)) {
+ intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
- &intel_crtc->config->base.adjusted_mode;
+ &crtc_state->base.adjusted_mode;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
@@ -4605,19 +4604,18 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
-static void lpt_pch_enable(struct drm_crtc *crtc)
+static void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
lpt_program_iclkip(crtc);
/* Set transcoder timing. */
- ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
+ ironlake_pch_transcoder_set_timings(crtc, PIPE_A);
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
@@ -4797,23 +4795,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
struct intel_crtc_scaler_state *scaler_state =
&crtc->config->scaler_state;
- DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
-
if (crtc->config->pch_pfit.enabled) {
int id;
- if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
- DRM_ERROR("Requesting pfit without getting a scaler first\n");
+ if (WARN_ON(crtc->config->scaler_state.scaler_id < 0))
return;
- }
id = scaler_state->scaler_id;
I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
-
- DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
}
}
@@ -5026,8 +5018,6 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
- crtc->wm.cxsr_allowed = true;
-
if (pipe_config->update_wm_post && pipe_config->base.active)
intel_update_watermarks(crtc);
@@ -5046,13 +5036,12 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
}
}
-static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
+static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
+ struct intel_crtc_state *pipe_config)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *pipe_config =
- to_intel_crtc_state(crtc->base.state);
struct drm_atomic_state *old_state = old_crtc_state->base.state;
struct drm_plane *primary = crtc->base.primary;
struct drm_plane_state *old_pri_state =
@@ -5074,22 +5063,18 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
intel_pre_disable_primary(&crtc->base);
}
- if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev_priv)) {
- crtc->wm.cxsr_allowed = false;
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- if (old_crtc_state->base.active &&
- intel_set_memory_cxsr(dev_priv, false))
- intel_wait_for_vblank(dev_priv, crtc->pipe);
- }
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, first
+ * disable the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active &&
+ pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
+ intel_wait_for_vblank(dev_priv, crtc->pipe);
/*
* IVB workaround: must disable low power watermarks for at least
@@ -5153,12 +5138,11 @@ static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct drm_atomic_state *old_state)
{
- struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
- struct drm_connector_state *conn_state = conn->state;
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
@@ -5174,12 +5158,11 @@ static void intel_encoders_pre_enable(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct drm_atomic_state *old_state)
{
- struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
- struct drm_connector_state *conn_state = conn->state;
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
@@ -5195,12 +5178,11 @@ static void intel_encoders_enable(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct drm_atomic_state *old_state)
{
- struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *conn_state;
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
- struct drm_connector_state *conn_state = conn->state;
+ for_each_new_connector_in_state(old_state, conn, conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(conn_state->best_encoder);
@@ -5220,7 +5202,7 @@ static void intel_encoders_disable(struct drm_crtc *crtc,
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
@@ -5240,7 +5222,7 @@ static void intel_encoders_post_disable(struct drm_crtc *crtc,
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
@@ -5260,7 +5242,7 @@ static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
struct drm_connector *conn;
int i;
- for_each_connector_in_state(old_state, conn, old_conn_state, i) {
+ for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
struct intel_encoder *encoder =
to_intel_encoder(old_conn_state->best_encoder);
@@ -5344,7 +5326,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
- ironlake_pch_enable(crtc);
+ ironlake_pch_enable(pipe_config);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5426,10 +5408,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_encoders_pre_enable(crtc, pipe_config, old_state);
if (intel_crtc->config->has_pch_encoder)
- dev_priv->display.fdi_link_train(crtc);
+ dev_priv->display.fdi_link_train(intel_crtc, pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_pipe_clock(intel_crtc);
+ intel_ddi_enable_pipe_clock(pipe_config);
if (INTEL_GEN(dev_priv) >= 9)
skylake_pfit_enable(intel_crtc);
@@ -5442,9 +5424,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
*/
intel_color_load_luts(&pipe_config->base);
- intel_ddi_set_pipe_settings(crtc);
+ intel_ddi_set_pipe_settings(pipe_config);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_enable_transcoder_func(crtc);
+ intel_ddi_enable_transcoder_func(pipe_config);
if (dev_priv->display.initial_watermarks != NULL)
dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
@@ -5454,10 +5436,10 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
intel_enable_pipe(intel_crtc);
if (intel_crtc->config->has_pch_encoder)
- lpt_pch_enable(crtc);
+ lpt_pch_enable(pipe_config);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(crtc, true);
+ intel_ddi_set_vc_payload_alloc(pipe_config, true);
assert_vblank_disabled(crtc);
drm_crtc_vblank_on(crtc);
@@ -5579,7 +5561,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_disable_pipe(intel_crtc);
if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST))
- intel_ddi_set_vc_payload_alloc(crtc, false);
+ intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
if (!transcoder_is_dsi(cpu_transcoder))
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
@@ -5590,7 +5572,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
ironlake_pfit_disable(intel_crtc, false);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_pipe_clock(intel_crtc);
+ intel_ddi_disable_pipe_clock(intel_crtc->config);
intel_encoders_post_disable(crtc, old_crtc_state, old_state);
@@ -5623,7 +5605,7 @@ static void i9xx_pfit_enable(struct intel_crtc *crtc)
I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
-static enum intel_display_power_domain port_to_power_domain(enum port port)
+enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
switch (port) {
case PORT_A:
@@ -5642,91 +5624,15 @@ static enum intel_display_power_domain port_to_power_domain(enum port port)
}
}
-static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
-{
- switch (port) {
- case PORT_A:
- return POWER_DOMAIN_AUX_A;
- case PORT_B:
- return POWER_DOMAIN_AUX_B;
- case PORT_C:
- return POWER_DOMAIN_AUX_C;
- case PORT_D:
- return POWER_DOMAIN_AUX_D;
- case PORT_E:
- /* FIXME: Check VBT for actual wiring of PORT E */
- return POWER_DOMAIN_AUX_D;
- default:
- MISSING_CASE(port);
- return POWER_DOMAIN_AUX_A;
- }
-}
-
-enum intel_display_power_domain
-intel_display_port_power_domain(struct intel_encoder *intel_encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_digital_port *intel_dig_port;
-
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_UNKNOWN:
- /* Only DDI platforms should ever use this output type */
- WARN_ON_ONCE(!HAS_DDI(dev_priv));
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_HDMI:
- case INTEL_OUTPUT_EDP:
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- return port_to_power_domain(intel_dig_port->port);
- case INTEL_OUTPUT_DP_MST:
- intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
- return port_to_power_domain(intel_dig_port->port);
- case INTEL_OUTPUT_ANALOG:
- return POWER_DOMAIN_PORT_CRT;
- case INTEL_OUTPUT_DSI:
- return POWER_DOMAIN_PORT_DSI;
- default:
- return POWER_DOMAIN_PORT_OTHER;
- }
-}
-
-enum intel_display_power_domain
-intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- struct intel_digital_port *intel_dig_port;
-
- switch (intel_encoder->type) {
- case INTEL_OUTPUT_UNKNOWN:
- case INTEL_OUTPUT_HDMI:
- /*
- * Only DDI platforms should ever use these output types.
- * We can get here after the HDMI detect code has already set
- * the type of the shared encoder. Since we can't be sure
- * what's the status of the given connectors, play safe and
- * run the DP detection too.
- */
- WARN_ON_ONCE(!HAS_DDI(dev_priv));
- case INTEL_OUTPUT_DP:
- case INTEL_OUTPUT_EDP:
- intel_dig_port = enc_to_dig_port(&intel_encoder->base);
- return port_to_aux_power_domain(intel_dig_port->port);
- case INTEL_OUTPUT_DP_MST:
- intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
- return port_to_aux_power_domain(intel_dig_port->port);
- default:
- MISSING_CASE(intel_encoder->type);
- return POWER_DOMAIN_AUX_A;
- }
-}
-
-static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static u64 get_crtc_power_domains(struct drm_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
- unsigned long mask;
+ u64 mask;
enum transcoder transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->base.active)
@@ -5736,28 +5642,31 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
- mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
+ mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- mask |= BIT(intel_display_port_power_domain(intel_encoder));
+ mask |= BIT_ULL(intel_encoder->power_domain);
}
+ if (HAS_DDI(dev_priv) && crtc_state->has_audio)
+ mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
+
if (crtc_state->shared_dpll)
- mask |= BIT(POWER_DOMAIN_PLLS);
+ mask |= BIT_ULL(POWER_DOMAIN_PLLS);
return mask;
}
-static unsigned long
+static u64
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum intel_display_power_domain domain;
- unsigned long domains, new_domains, old_domains;
+ u64 domains, new_domains, old_domains;
old_domains = intel_crtc->enabled_power_domains;
intel_crtc->enabled_power_domains = new_domains =
@@ -5772,7 +5681,7 @@ modeset_get_crtc_power_domains(struct drm_crtc *crtc,
}
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
- unsigned long domains)
+ u64 domains)
{
enum intel_display_power_domain domain;
@@ -5780,922 +5689,11 @@ static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
intel_display_power_put(dev_priv, domain);
}
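The unsigned long to u64 widening (and the BIT to BIT_ULL conversion alongside it) prepares for the power-domain enum growing past 32 entries: on a 32-bit kernel BIT(n) is an unsigned long shift, undefined for n >= 32, while BIT_ULL(n) always yields a 64-bit constant. Sketch:

u64 domains = 0;

/* On a 32-bit build (unsigned long == 32 bits):
 *   BIT(35)     -> undefined behaviour, shift past the type width;
 *   BIT_ULL(35) -> 0x0000000800000000ULL as intended.
 */
domains |= BIT_ULL(POWER_DOMAIN_PLLS);

for_each_power_domain(domain, domains)
	intel_display_power_get(dev_priv, domain);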
-static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
-{
- int max_cdclk_freq = dev_priv->max_cdclk_freq;
-
- if (IS_GEMINILAKE(dev_priv))
- return 2 * max_cdclk_freq;
- else if (INTEL_INFO(dev_priv)->gen >= 9 ||
- IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- return max_cdclk_freq;
- else if (IS_CHERRYVIEW(dev_priv))
- return max_cdclk_freq*95/100;
- else if (INTEL_INFO(dev_priv)->gen < 4)
- return 2*max_cdclk_freq*90/100;
- else
- return max_cdclk_freq*90/100;
-}
-
-static int skl_calc_cdclk(int max_pixclk, int vco);
-
-static void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
-{
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
- int max_cdclk, vco;
-
- vco = dev_priv->skl_preferred_vco_freq;
- WARN_ON(vco != 8100000 && vco != 8640000);
-
- /*
- * Use the lower (vco 8640) cdclk values as a
- * first guess. skl_calc_cdclk() will correct it
- * if the preferred vco is 8100 instead.
- */
- if (limit == SKL_DFSM_CDCLK_LIMIT_675)
- max_cdclk = 617143;
- else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
- max_cdclk = 540000;
- else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
- max_cdclk = 432000;
- else
- max_cdclk = 308571;
-
- dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
- } else if (IS_GEMINILAKE(dev_priv)) {
- dev_priv->max_cdclk_freq = 316800;
- } else if (IS_BROXTON(dev_priv)) {
- dev_priv->max_cdclk_freq = 624000;
- } else if (IS_BROADWELL(dev_priv)) {
- /*
- * FIXME with extra cooling we can allow
- * 540 MHz for ULX and 675 Mhz for ULT.
- * How can we know if extra cooling is
- * available? PCI ID, VTB, something else?
- */
- if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULX(dev_priv))
- dev_priv->max_cdclk_freq = 450000;
- else if (IS_BDW_ULT(dev_priv))
- dev_priv->max_cdclk_freq = 540000;
- else
- dev_priv->max_cdclk_freq = 675000;
- } else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 320000;
- } else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->max_cdclk_freq = 400000;
- } else {
- /* otherwise assume cdclk is fixed */
- dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
- }
-
- dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
-
- DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
-
- DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
-}
-
-static void intel_update_cdclk(struct drm_i915_private *dev_priv)
-{
- dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev_priv);
-
- if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
- dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
- dev_priv->cdclk_pll.ref);
- else
- DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
- dev_priv->cdclk_freq);
-
- /*
- * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
- * Programmng [sic] note: bit[9:2] should be programmed to the number
- * of cdclk that generates 4MHz reference clock freq which is used to
- * generate GMBus clock. This will vary with the cdclk freq.
- */
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
-}
-
-/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
-static int skl_cdclk_decimal(int cdclk)
-{
- return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
-}
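The helper being moved here encodes the CDCLK_CTL decimal field, which holds the CD frequency minus 1 MHz in 10.1 fixed point, i.e. field = 2 * (MHz - 1); for a cdclk in kHz that is exactly DIV_ROUND_CLOSEST(cdclk - 1000, 500). Worked numbers:

/* cdclk = 337500 kHz (337.5 MHz):
 *   (337500 - 1000) / 500 = 673 = 2 * (337.5 - 1)
 * cdclk = 450000 kHz:
 *   (450000 - 1000) / 500 = 898 = 2 * (450 - 1)
 */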
-
-static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
-
- if (cdclk == dev_priv->cdclk_pll.ref)
- return 0;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- case 144000:
- case 288000:
- case 384000:
- case 576000:
- ratio = 60;
- break;
- case 624000:
- ratio = 65;
- break;
- }
-
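-	/* e.g. ratio 60 * 19200 kHz ref = 1152000 kHz VCO; ratio 65 gives 1248000 kHz */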
- return dev_priv->cdclk_pll.ref * ratio;
-}
-
-static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
-{
- int ratio;
-
- if (cdclk == dev_priv->cdclk_pll.ref)
- return 0;
-
- switch (cdclk) {
- default:
- MISSING_CASE(cdclk);
- case 79200:
- case 158400:
- case 316800:
- ratio = 33;
- break;
- }
-
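-	/* 19200 kHz ref * 33 = 633600 kHz VCO; /2, /4, /8 gives 316800, 158400, 79200 */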
- return dev_priv->cdclk_pll.ref * ratio;
-}
-
-static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(BXT_DE_PLL_ENABLE, 0);
-
- /* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
- BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
- 1))
- DRM_ERROR("timeout waiting for DE PLL unlock\n");
-
- dev_priv->cdclk_pll.vco = 0;
-}
-
-static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
-{
- int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
- u32 val;
-
- val = I915_READ(BXT_DE_PLL_CTL);
- val &= ~BXT_DE_PLL_RATIO_MASK;
- val |= BXT_DE_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_CTL, val);
-
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
-
- /* Timeout 200us */
- if (intel_wait_for_register(dev_priv,
- BXT_DE_PLL_ENABLE,
- BXT_DE_PLL_LOCK,
- BXT_DE_PLL_LOCK,
- 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
-
- dev_priv->cdclk_pll.vco = vco;
-}
-
-static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
-{
- u32 val, divider;
- int vco, ret;
-
- if (IS_GEMINILAKE(dev_priv))
- vco = glk_de_pll_vco(dev_priv, cdclk);
- else
- vco = bxt_de_pll_vco(dev_priv, cdclk);
-
- DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
-
- /* cdclk = vco / 2 / div{1,1.5,2,4} */
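-	/* e.g. vco = 1152000 kHz, cdclk = 288000 kHz: ratio 4, so CD2X divider 2 */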
- switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
- case 8:
- divider = BXT_CDCLK_CD2X_DIV_SEL_4;
- break;
- case 4:
- divider = BXT_CDCLK_CD2X_DIV_SEL_2;
- break;
- case 3:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
- break;
- case 2:
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- default:
- WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
- WARN_ON(vco != 0);
-
- divider = BXT_CDCLK_CD2X_DIV_SEL_1;
- break;
- }
-
- /* Inform power controller of upcoming frequency change */
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- if (ret) {
- DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- if (dev_priv->cdclk_pll.vco != 0 &&
- dev_priv->cdclk_pll.vco != vco)
- bxt_de_pll_disable(dev_priv);
-
- if (dev_priv->cdclk_pll.vco != vco)
- bxt_de_pll_enable(dev_priv, vco);
-
- val = divider | skl_cdclk_decimal(cdclk);
- /*
- * FIXME if only the cd2x divider needs changing, it could be done
- * without shutting off the pipe (if only one pipe is active).
- */
- val |= BXT_CDCLK_CD2X_PIPE_NONE;
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (cdclk >= 500000)
- val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- DIV_ROUND_UP(cdclk, 25000));
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
- return;
- }
-
- intel_update_cdclk(dev_priv);
-}
-
-static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- u32 cdctl, expected;
-
- intel_update_cdclk(dev_priv);
-
- if (dev_priv->cdclk_pll.vco == 0 ||
- dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
- * Some BIOS versions leave an incorrect decimal frequency value and
-	 * set reserved MBZ bits in CDCLK_CTL, at least when exiting from S4,
- * so sanitize this register.
- */
- cdctl = I915_READ(CDCLK_CTL);
- /*
- * Let's ignore the pipe field, since BIOS could have configured the
- * dividers both synching to an active pipe, or asynchronously
- * (PIPE_NONE).
- */
- cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
-
- expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk_freq);
- /*
- * Disable SSA Precharge when CD clock frequency < 500 MHz,
- * enable otherwise.
- */
- if (dev_priv->cdclk_freq >= 500000)
- expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
-
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk_freq = 0;
-
- /* force full PLL disable + enable */
- dev_priv->cdclk_pll.vco = -1;
-}
-
-void bxt_init_cdclk(struct drm_i915_private *dev_priv)
-{
- int cdclk;
-
- bxt_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
- return;
-
- /*
- * FIXME:
- * - The initial CDCLK needs to be read from VBT.
- * Need to make this change after VBT has changes for BXT.
- */
- if (IS_GEMINILAKE(dev_priv))
- cdclk = glk_calc_cdclk(0);
- else
- cdclk = bxt_calc_cdclk(0);
-
- bxt_set_cdclk(dev_priv, cdclk);
-}
-
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
-}
-
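-/*
- * The bins are the VCO divided down (rounded to kHz): 8640000/{28,20,16,14}
- * gives 308571/432000/540000/617143, and 8100000/{24,18,15,12} gives
- * 337500/450000/540000/675000. Pick the smallest bin covering max_pixclk.
- */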
-static int skl_calc_cdclk(int max_pixclk, int vco)
-{
- if (vco == 8640000) {
- if (max_pixclk > 540000)
- return 617143;
- else if (max_pixclk > 432000)
- return 540000;
- else if (max_pixclk > 308571)
- return 432000;
- else
- return 308571;
- } else {
- if (max_pixclk > 540000)
- return 675000;
- else if (max_pixclk > 450000)
- return 540000;
- else if (max_pixclk > 337500)
- return 450000;
- else
- return 337500;
- }
-}
-
-static void
-skl_dpll0_update(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- dev_priv->cdclk_pll.ref = 24000;
- dev_priv->cdclk_pll.vco = 0;
-
- val = I915_READ(LCPLL1_CTL);
- if ((val & LCPLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
- return;
-
- val = I915_READ(DPLL_CTRL1);
-
- if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
- DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
- return;
-
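-	/*
-	 * The DPLL0 link rate implies the VCO: the 810/1350/1620/2700 rates
-	 * divide evenly out of an 8100 MHz VCO, 1080/2160 out of 8640 MHz.
-	 */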
- switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
- dev_priv->cdclk_pll.vco = 8100000;
- break;
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
- case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
- dev_priv->cdclk_pll.vco = 8640000;
- break;
- default:
- MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
- break;
- }
-}
-
-void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
-{
- bool changed = dev_priv->skl_preferred_vco_freq != vco;
-
- dev_priv->skl_preferred_vco_freq = vco;
-
- if (changed)
- intel_update_max_cdclk(dev_priv);
-}
-
-static void
-skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
-{
- int min_cdclk = skl_calc_cdclk(0, vco);
- u32 val;
-
- WARN_ON(vco != 8100000 && vco != 8640000);
-
- /* select the minimum CDCLK before enabling DPLL 0 */
- val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
- I915_WRITE(CDCLK_CTL, val);
- POSTING_READ(CDCLK_CTL);
-
- /*
- * We always enable DPLL0 with the lowest link rate possible, but still
- * taking into account the VCO required to operate the eDP panel at the
- * desired frequency. The usual DP link rates operate with a VCO of
- * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
- * The modeset code is responsible for the selection of the exact link
- * rate later on, with the constraint of choosing a frequency that
- * works with vco.
- */
- val = I915_READ(DPLL_CTRL1);
-
- val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
- val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
- if (vco == 8640000)
- val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
- SKL_DPLL0);
- else
- val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
- SKL_DPLL0);
-
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
-
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
-
- if (intel_wait_for_register(dev_priv,
- LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
- 5))
- DRM_ERROR("DPLL0 not locked\n");
-
- dev_priv->cdclk_pll.vco = vco;
-
- /* We'll want to keep using the current vco from now on. */
- skl_set_preferred_cdclk_vco(dev_priv, vco);
-}
-
-static void
-skl_dpll0_disable(struct drm_i915_private *dev_priv)
-{
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
- if (intel_wait_for_register(dev_priv,
- LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
- 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
-
- dev_priv->cdclk_pll.vco = 0;
-}
-
-static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
-{
- u32 freq_select, pcu_ack;
- int ret;
-
- WARN_ON((cdclk == 24000) != (vco == 0));
-
- DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
- mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
- return;
- }
-
- /* set CDCLK_CTL */
- switch (cdclk) {
- case 450000:
- case 432000:
- freq_select = CDCLK_FREQ_450_432;
- pcu_ack = 1;
- break;
- case 540000:
- freq_select = CDCLK_FREQ_540;
- pcu_ack = 2;
- break;
- case 308571:
- case 337500:
- default:
- freq_select = CDCLK_FREQ_337_308;
- pcu_ack = 0;
- break;
- case 617143:
- case 675000:
- freq_select = CDCLK_FREQ_675_617;
- pcu_ack = 3;
- break;
- }
-
- if (dev_priv->cdclk_pll.vco != 0 &&
- dev_priv->cdclk_pll.vco != vco)
- skl_dpll0_disable(dev_priv);
-
- if (dev_priv->cdclk_pll.vco != vco)
- skl_dpll0_enable(dev_priv, vco);
-
- I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
- POSTING_READ(CDCLK_CTL);
-
- /* inform PCU of the change */
- mutex_lock(&dev_priv->rps.hw_lock);
- sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- intel_update_cdclk(dev_priv);
-}
-
-static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
-
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
-{
- skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
-}
-
-void skl_init_cdclk(struct drm_i915_private *dev_priv)
-{
- int cdclk, vco;
-
- skl_sanitize_cdclk(dev_priv);
-
- if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
- /*
- * Use the current vco as our initial
- * guess as to what the preferred vco is.
- */
- if (dev_priv->skl_preferred_vco_freq == 0)
- skl_set_preferred_cdclk_vco(dev_priv,
- dev_priv->cdclk_pll.vco);
- return;
- }
-
- vco = dev_priv->skl_preferred_vco_freq;
- if (vco == 0)
- vco = 8100000;
- cdclk = skl_calc_cdclk(0, vco);
-
- skl_set_cdclk(dev_priv, cdclk, vco);
-}
-
-static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
-{
- uint32_t cdctl, expected;
-
- /*
-	 * Check if the pre-os initialized the display.
-	 * The pre-os sets the SWF18 scratchpad register; OS drivers can read
-	 * it to check whether the display was initialized.
- */
- if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
- goto sanitize;
-
- intel_update_cdclk(dev_priv);
-	/* Is the PLL enabled and locked? */
- if (dev_priv->cdclk_pll.vco == 0 ||
- dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
- goto sanitize;
-
- /* DPLL okay; verify the cdclock
- *
-	 * In some instances the frequency selection is correct but the
-	 * decimal part is programmed wrong by the BIOS when the pre-os does
-	 * not enable the display. Verify that as well.
- */
- cdctl = I915_READ(CDCLK_CTL);
- expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
- skl_cdclk_decimal(dev_priv->cdclk_freq);
- if (cdctl == expected)
- /* All well; nothing to sanitize */
- return;
-
-sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
-
- /* force cdclk programming */
- dev_priv->cdclk_freq = 0;
- /* force full PLL disable + enable */
- dev_priv->cdclk_pll.vco = -1;
-}
-
-/* Adjust CDclk dividers to allow high res or save power if possible */
-static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, cmd;
-
- WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
- != dev_priv->cdclk_freq);
-
- if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
- cmd = 2;
- else if (cdclk == 266667)
- cmd = 1;
- else
- cmd = 0;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
- val &= ~DSPFREQGUAR_MASK;
- val |= (cmd << DSPFREQGUAR_SHIFT);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
- DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
- 50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
- }
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- mutex_lock(&dev_priv->sb_lock);
-
- if (cdclk == 400000) {
- u32 divider;
-
- divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-
- /* adjust cdclk divider */
- val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
- val &= ~CCK_FREQUENCY_VALUES;
- val |= divider;
- vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
-
- if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
- CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
- 50))
- DRM_ERROR("timed out waiting for CDclk change\n");
- }
-
- /* adjust self-refresh exit latency value */
- val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
- val &= ~0x7f;
-
- /*
- * For high bandwidth configs, we set a higher latency in the bunit
- * so that the core display fetch happens in time to avoid underruns.
- */
- if (cdclk == 400000)
- val |= 4500 / 250; /* 4.5 usec */
- else
- val |= 3000 / 250; /* 3.0 usec */
- vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
-
- mutex_unlock(&dev_priv->sb_lock);
-
- intel_update_cdclk(dev_priv);
-}
-
-static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 val, cmd;
-
- WARN_ON(dev_priv->display.get_display_clock_speed(dev_priv)
- != dev_priv->cdclk_freq);
-
- switch (cdclk) {
- case 333333:
- case 320000:
- case 266667:
- case 200000:
- break;
- default:
- MISSING_CASE(cdclk);
- return;
- }
-
- /*
- * Specs are full of misinformation, but testing on actual
- * hardware has shown that we just need to write the desired
- * CCK divider into the Punit register.
- */
- cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
- val &= ~DSPFREQGUAR_MASK_CHV;
- val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
- vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
- if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
- DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
- 50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
- }
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- intel_update_cdclk(dev_priv);
-}
-
-static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
- int max_pixclk)
-{
- int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
- int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
-
- /*
- * Really only a few cases to deal with, as only 4 CDclks are supported:
- * 200MHz
- * 267MHz
- * 320/333MHz (depends on HPLL freq)
- * 400MHz (VLV only)
- * So we check to see whether we're above 90% (VLV) or 95% (CHV)
- * of the lower bin and adjust if needed.
- *
- * We seem to get an unstable or solid color picture at 200MHz.
- * Not sure what's wrong. For now use 200MHz only when all pipes
- * are off.
- */
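-	/* e.g. max_pixclk = 250000 on VLV: above 266667*90/100 = 240000, so pick freq_320 */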
- if (!IS_CHERRYVIEW(dev_priv) &&
- max_pixclk > freq_320*limit/100)
- return 400000;
- else if (max_pixclk > 266667*limit/100)
- return freq_320;
- else if (max_pixclk > 0)
- return 266667;
- else
- return 200000;
-}
-
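-/*
- * GLK can drive the pipe at up to twice cdclk (intel_compute_max_dotclk()
- * returns 2 * max_cdclk_freq), hence the 2x factors below. The bins are the
- * 633600 kHz VCO divided by 8, 4 and 2.
- */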
-static int glk_calc_cdclk(int max_pixclk)
-{
- if (max_pixclk > 2 * 158400)
- return 316800;
- else if (max_pixclk > 2 * 79200)
- return 158400;
- else
- return 79200;
-}
-
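-/*
- * BXT bins: 1152000 kHz VCO / 8, 4, 3, 2 = 144000, 288000, 384000 and
- * 576000 kHz; 624000 kHz comes from the 1248000 kHz VCO / 2.
- */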
-static int bxt_calc_cdclk(int max_pixclk)
-{
- if (max_pixclk > 576000)
- return 624000;
- else if (max_pixclk > 384000)
- return 576000;
- else if (max_pixclk > 288000)
- return 384000;
- else if (max_pixclk > 144000)
- return 288000;
- else
- return 144000;
-}
-
-/* Compute the max pixel clock for new configuration. */
-static int intel_mode_max_pixclk(struct drm_device *dev,
- struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
- unsigned max_pixclk = 0, i;
- enum pipe pipe;
-
- memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
- sizeof(intel_state->min_pixclk));
-
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- int pixclk = 0;
-
- if (crtc_state->enable)
- pixclk = crtc_state->adjusted_mode.crtc_clock;
-
- intel_state->min_pixclk[i] = pixclk;
- }
-
- for_each_pipe(dev_priv, pipe)
- max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
-
- return max_pixclk;
-}
-
-static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
-{
- struct drm_device *dev = state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- int max_pixclk = intel_mode_max_pixclk(dev, state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
-
- intel_state->cdclk = intel_state->dev_cdclk =
- valleyview_calc_cdclk(dev_priv, max_pixclk);
-
- if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
-
- return 0;
-}
-
-static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- int max_pixclk = ilk_max_pixel_rate(state);
- struct intel_atomic_state *intel_state =
- to_intel_atomic_state(state);
- int cdclk;
-
- if (IS_GEMINILAKE(dev_priv))
- cdclk = glk_calc_cdclk(max_pixclk);
- else
- cdclk = bxt_calc_cdclk(max_pixclk);
-
- intel_state->cdclk = intel_state->dev_cdclk = cdclk;
-
- if (!intel_state->active_crtcs) {
- if (IS_GEMINILAKE(dev_priv))
- cdclk = glk_calc_cdclk(0);
- else
- cdclk = bxt_calc_cdclk(0);
-
- intel_state->dev_cdclk = cdclk;
- }
-
- return 0;
-}
-
-static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
-{
- unsigned int credits, default_credits;
-
- if (IS_CHERRYVIEW(dev_priv))
- default_credits = PFI_CREDIT(12);
- else
- default_credits = PFI_CREDIT(8);
-
- if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
- /* CHV suggested value is 31 or 63 */
- if (IS_CHERRYVIEW(dev_priv))
- credits = PFI_CREDIT_63;
- else
- credits = PFI_CREDIT(15);
- } else {
- credits = default_credits;
- }
-
- /*
- * WA - write default credits before re-programming
- * FIXME: should we also set the resend bit here?
- */
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- default_credits);
-
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- credits | PFI_CREDIT_RESEND);
-
- /*
- * FIXME is this guaranteed to clear
- * immediately or should we poll for it?
- */
- WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
-}
-
-static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
-{
- struct drm_device *dev = old_state->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
- unsigned req_cdclk = old_intel_state->dev_cdclk;
-
- /*
- * FIXME: We can end up here with all power domains off, yet
- * with a CDCLK frequency other than the minimum. To account
- * for this take the PIPE-A power domain, which covers the HW
- * blocks needed for the following programming. This can be
- * removed once it's guaranteed that we get here either with
- * the minimum CDCLK set, or the required power domains
- * enabled.
- */
- intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
-
- if (IS_CHERRYVIEW(dev_priv))
- cherryview_set_cdclk(dev, req_cdclk);
- else
- valleyview_set_cdclk(dev, req_cdclk);
-
- vlv_program_pfi_credits(dev_priv);
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
-}
-
static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
struct drm_atomic_state *old_state)
{
+ struct intel_atomic_state *old_intel_state =
+ to_intel_atomic_state(old_state);
struct drm_crtc *crtc = pipe_config->base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -6740,7 +5738,8 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
intel_color_load_luts(&pipe_config->base);
- intel_update_watermarks(intel_crtc);
+ dev_priv->display.initial_watermarks(old_intel_state,
+ pipe_config);
intel_enable_pipe(intel_crtc);
assert_vblank_disabled(crtc);
@@ -6857,6 +5856,9 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
if (!IS_GEN2(dev_priv))
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ if (!dev_priv->display.initial_watermarks)
+ intel_update_watermarks(intel_crtc);
}
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
@@ -6865,7 +5867,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
enum intel_display_power_domain domain;
- unsigned long domains;
+ u64 domains;
struct drm_atomic_state *state;
struct intel_crtc_state *crtc_state;
int ret;
@@ -7173,7 +6175,7 @@ static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
*
* Should measure whether using a lower cdclk w/o IPS
*/
- return ilk_pipe_pixel_rate(pipe_config) <=
+ return pipe_config->pixel_rate <=
dev_priv->max_cdclk_freq * 95 / 100;
}
@@ -7197,6 +6199,54 @@ static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}
+static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
+{
+ uint32_t pixel_rate;
+
+ pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
+
+ /*
+ * We only use IF-ID interlacing. If we ever use
+ * PF-ID we'll need to adjust the pixel_rate here.
+ */
+
+ if (pipe_config->pch_pfit.enabled) {
+ uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
+ uint32_t pfit_size = pipe_config->pch_pfit.size;
+
+ pipe_w = pipe_config->pipe_src_w;
+ pipe_h = pipe_config->pipe_src_h;
+
+ pfit_w = (pfit_size >> 16) & 0xFFFF;
+ pfit_h = pfit_size & 0xFFFF;
+ if (pipe_w < pfit_w)
+ pipe_w = pfit_w;
+ if (pipe_h < pfit_h)
+ pipe_h = pfit_h;
+
+ if (WARN_ON(!pfit_w || !pfit_h))
+ return pixel_rate;
+
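+		/* e.g. a 4096-wide source squeezed into a 3840-wide pfit window scales the rate by 4096/3840 */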
+ pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
+ pfit_w * pfit_h);
+ }
+
+ return pixel_rate;
+}
+
+static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+ if (HAS_GMCH_DISPLAY(dev_priv))
+ /* FIXME calculate proper pipe pixel rate for GMCH pfit */
+ crtc_state->pixel_rate =
+ crtc_state->base.adjusted_mode.crtc_clock;
+ else
+ crtc_state->pixel_rate =
+ ilk_pipe_pixel_rate(crtc_state);
+}
+
static int intel_crtc_compute_config(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config)
{
@@ -7243,6 +6293,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
return -EINVAL;
+ intel_crtc_compute_pixel_rate(pipe_config);
+
if (HAS_IPS(dev_priv))
hsw_compute_ips_config(crtc, pipe_config);
@@ -7252,428 +6304,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return 0;
}
-static int skylake_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- u32 cdctl;
-
- skl_dpll0_update(dev_priv);
-
- if (dev_priv->cdclk_pll.vco == 0)
- return dev_priv->cdclk_pll.ref;
-
- cdctl = I915_READ(CDCLK_CTL);
-
- if (dev_priv->cdclk_pll.vco == 8640000) {
- switch (cdctl & CDCLK_FREQ_SEL_MASK) {
- case CDCLK_FREQ_450_432:
- return 432000;
- case CDCLK_FREQ_337_308:
- return 308571;
- case CDCLK_FREQ_540:
- return 540000;
- case CDCLK_FREQ_675_617:
- return 617143;
- default:
- MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
- }
- } else {
- switch (cdctl & CDCLK_FREQ_SEL_MASK) {
- case CDCLK_FREQ_450_432:
- return 450000;
- case CDCLK_FREQ_337_308:
- return 337500;
- case CDCLK_FREQ_540:
- return 540000;
- case CDCLK_FREQ_675_617:
- return 675000;
- default:
- MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
- }
- }
-
- return dev_priv->cdclk_pll.ref;
-}
-
-static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- dev_priv->cdclk_pll.ref = 19200;
- dev_priv->cdclk_pll.vco = 0;
-
- val = I915_READ(BXT_DE_PLL_ENABLE);
- if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
- return;
-
- if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
- return;
-
- val = I915_READ(BXT_DE_PLL_CTL);
- dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
- dev_priv->cdclk_pll.ref;
-}
-
-static int broxton_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- u32 divider;
- int div, vco;
-
- bxt_de_pll_update(dev_priv);
-
- vco = dev_priv->cdclk_pll.vco;
- if (vco == 0)
- return dev_priv->cdclk_pll.ref;
-
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
-
- switch (divider) {
- case BXT_CDCLK_CD2X_DIV_SEL_1:
- div = 2;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n");
- div = 3;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_2:
- div = 4;
- break;
- case BXT_CDCLK_CD2X_DIV_SEL_4:
- div = 8;
- break;
- default:
- MISSING_CASE(divider);
- return dev_priv->cdclk_pll.ref;
- }
-
- return DIV_ROUND_CLOSEST(vco, div);
-}
-
-static int broadwell_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
- if (lcpll & LCPLL_CD_SOURCE_FCLK)
- return 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_450)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_54O_BDW)
- return 540000;
- else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- return 337500;
- else
- return 675000;
-}
-
-static int haswell_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- uint32_t lcpll = I915_READ(LCPLL_CTL);
- uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
-
- if (lcpll & LCPLL_CD_SOURCE_FCLK)
- return 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- return 450000;
- else if (freq == LCPLL_CLK_FREQ_450)
- return 450000;
- else if (IS_HSW_ULT(dev_priv))
- return 337500;
- else
- return 540000;
-}
-
-static int valleyview_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return vlv_get_cck_clock_hpll(dev_priv, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL);
-}
-
-static int ilk_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 450000;
-}
-
-static int i945_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 400000;
-}
-
-static int i915_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 333333;
-}
-
-static int i9xx_misc_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 200000;
-}
-
-static int pnv_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u16 gcfgc = 0;
-
- pci_read_config_word(pdev, GCFGC, &gcfgc);
-
- switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
- case GC_DISPLAY_CLOCK_267_MHZ_PNV:
- return 266667;
- case GC_DISPLAY_CLOCK_333_MHZ_PNV:
- return 333333;
- case GC_DISPLAY_CLOCK_444_MHZ_PNV:
- return 444444;
- case GC_DISPLAY_CLOCK_200_MHZ_PNV:
- return 200000;
- default:
- DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
- case GC_DISPLAY_CLOCK_133_MHZ_PNV:
- return 133333;
- case GC_DISPLAY_CLOCK_167_MHZ_PNV:
- return 166667;
- }
-}
-
-static int i915gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u16 gcfgc = 0;
-
- pci_read_config_word(pdev, GCFGC, &gcfgc);
-
- if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
- return 133333;
- else {
- switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
- case GC_DISPLAY_CLOCK_333_MHZ:
- return 333333;
- default:
- case GC_DISPLAY_CLOCK_190_200_MHZ:
- return 190000;
- }
- }
-}
-
-static int i865_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 266667;
-}
-
-static int i85x_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- u16 hpllcc = 0;
-
- /*
- * 852GM/852GMV only supports 133 MHz and the HPLLCC
- * encoding is different :(
- * FIXME is this the right way to detect 852GM/852GMV?
- */
- if (pdev->revision == 0x1)
- return 133333;
-
- pci_bus_read_config_word(pdev->bus,
- PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
-
- /* Assume that the hardware is in the high speed state. This
- * should be the default.
- */
- switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
- case GC_CLOCK_133_200:
- case GC_CLOCK_133_200_2:
- case GC_CLOCK_100_200:
- return 200000;
- case GC_CLOCK_166_250:
- return 250000;
- case GC_CLOCK_100_133:
- return 133333;
- case GC_CLOCK_133_266:
- case GC_CLOCK_133_266_2:
- case GC_CLOCK_166_266:
- return 266667;
- }
-
- /* Shouldn't happen */
- return 0;
-}
-
-static int i830_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- return 133333;
-}
-
-static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
-{
- static const unsigned int blb_vco[8] = {
- [0] = 3200000,
- [1] = 4000000,
- [2] = 5333333,
- [3] = 4800000,
- [4] = 6400000,
- };
- static const unsigned int pnv_vco[8] = {
- [0] = 3200000,
- [1] = 4000000,
- [2] = 5333333,
- [3] = 4800000,
- [4] = 2666667,
- };
- static const unsigned int cl_vco[8] = {
- [0] = 3200000,
- [1] = 4000000,
- [2] = 5333333,
- [3] = 6400000,
- [4] = 3333333,
- [5] = 3566667,
- [6] = 4266667,
- };
- static const unsigned int elk_vco[8] = {
- [0] = 3200000,
- [1] = 4000000,
- [2] = 5333333,
- [3] = 4800000,
- };
- static const unsigned int ctg_vco[8] = {
- [0] = 3200000,
- [1] = 4000000,
- [2] = 5333333,
- [3] = 6400000,
- [4] = 2666667,
- [5] = 4266667,
- };
- const unsigned int *vco_table;
- unsigned int vco;
- uint8_t tmp = 0;
-
- /* FIXME other chipsets? */
- if (IS_GM45(dev_priv))
- vco_table = ctg_vco;
- else if (IS_G4X(dev_priv))
- vco_table = elk_vco;
- else if (IS_I965GM(dev_priv))
- vco_table = cl_vco;
- else if (IS_PINEVIEW(dev_priv))
- vco_table = pnv_vco;
- else if (IS_G33(dev_priv))
- vco_table = blb_vco;
- else
- return 0;
-
- tmp = I915_READ(IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
-
- vco = vco_table[tmp & 0x7];
- if (vco == 0)
- DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
- else
- DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
-
- return vco;
-}
-
-static int gm45_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
- uint16_t tmp = 0;
-
- pci_read_config_word(pdev, GCFGC, &tmp);
-
- cdclk_sel = (tmp >> 12) & 0x1;
-
- switch (vco) {
- case 2666667:
- case 4000000:
- case 5333333:
- return cdclk_sel ? 333333 : 222222;
- case 3200000:
- return cdclk_sel ? 320000 : 228571;
- default:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
- return 222222;
- }
-}
-
-static int i965gm_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 16, 10, 8 };
- static const uint8_t div_4000[] = { 20, 12, 10 };
- static const uint8_t div_5333[] = { 24, 16, 14 };
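-	/* e.g. vco = 4000000 kHz with cdclk_sel 0 gives 4000000/20 = 200000 kHz */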
- const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
- uint16_t tmp = 0;
-
- pci_read_config_word(pdev, GCFGC, &tmp);
-
- cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
-
- if (cdclk_sel >= ARRAY_SIZE(div_3200))
- goto fail;
-
- switch (vco) {
- case 3200000:
- div_table = div_3200;
- break;
- case 4000000:
- div_table = div_4000;
- break;
- case 5333333:
- div_table = div_5333;
- break;
- default:
- goto fail;
- }
-
- return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
-
-fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
- return 200000;
-}
-
-static int g33_get_display_clock_speed(struct drm_i915_private *dev_priv)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
- static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
- static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
- static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
- const uint8_t *div_table;
- unsigned int cdclk_sel, vco = intel_hpll_vco(dev_priv);
- uint16_t tmp = 0;
-
- pci_read_config_word(pdev, GCFGC, &tmp);
-
- cdclk_sel = (tmp >> 4) & 0x7;
-
- if (cdclk_sel >= ARRAY_SIZE(div_3200))
- goto fail;
-
- switch (vco) {
- case 3200000:
- div_table = div_3200;
- break;
- case 4000000:
- div_table = div_4000;
- break;
- case 4800000:
- div_table = div_4800;
- break;
- case 5333333:
- div_table = div_5333;
- break;
- default:
- goto fail;
- }
-
- return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
-
-fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
- return 190476;
-}
-
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
@@ -8768,9 +7398,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
val = I915_READ(DSPSTRIDE(pipe));
fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_fb_align_height(dev, fb->height,
- fb->format->format,
- fb->modifier);
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -9805,13 +8433,10 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
fb->width = ((val >> 0) & 0x1fff) + 1;
val = I915_READ(PLANE_STRIDE(pipe, 0));
- stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier,
- fb->format->format);
+ stride_mult = intel_fb_stride_alignment(fb, 0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
- aligned_height = intel_fb_align_height(dev, fb->height,
- fb->format->format,
- fb->modifier);
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -9907,9 +8532,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
val = I915_READ(DSPSTRIDE(pipe));
fb->pitches[0] = val & 0xffffffc0;
- aligned_height = intel_fb_align_height(dev, fb->height,
- fb->format->format,
- fb->modifier);
+ aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
@@ -10235,245 +8858,6 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
}
}
-static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
-{
- struct drm_device *dev = old_state->dev;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
- unsigned int req_cdclk = old_intel_state->dev_cdclk;
-
- bxt_set_cdclk(to_i915(dev), req_cdclk);
-}
-
-static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
- int pixel_rate)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
-
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
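-	/* e.g. a 427000 kHz pixel rate needs cdclk >= DIV_ROUND_UP(427000 * 100, 95) = 449474 kHz */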
- if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
- pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
-
- /* BSpec says "Do not use DisplayPort with CDCLK less than
- * 432 MHz, audio enabled, port width x4, and link rate
- * HBR2 (5.4 GHz), or else there may be audio corruption or
- * screen corruption."
- */
- if (intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->has_audio &&
- crtc_state->port_clock >= 540000 &&
- crtc_state->lane_count == 4)
- pixel_rate = max(432000, pixel_rate);
-
- return pixel_rate;
-}
-
-/* compute the max rate for new configuration */
-static int ilk_max_pixel_rate(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct drm_crtc *crtc;
- struct drm_crtc_state *cstate;
- struct intel_crtc_state *crtc_state;
- unsigned max_pixel_rate = 0, i;
- enum pipe pipe;
-
- memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
- sizeof(intel_state->min_pixclk));
-
- for_each_crtc_in_state(state, crtc, cstate, i) {
- int pixel_rate;
-
- crtc_state = to_intel_crtc_state(cstate);
- if (!crtc_state->base.enable) {
- intel_state->min_pixclk[i] = 0;
- continue;
- }
-
- pixel_rate = ilk_pipe_pixel_rate(crtc_state);
-
- if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
- pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
- pixel_rate);
-
- intel_state->min_pixclk[i] = pixel_rate;
- }
-
- for_each_pipe(dev_priv, pipe)
- max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
-
- return max_pixel_rate;
-}
-
-static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- uint32_t val, data;
- int ret;
-
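-	/* Sequence: park the CD clock on FCLK, retune the LCPLL frequency
-	 * select, switch back to LCPLL, then notify pcode of the new freq. */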
- if (WARN((I915_READ(LCPLL_CTL) &
- (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
- LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
- LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
- LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
- "trying to change cdclk frequency with cdclk not enabled\n"))
- return;
-
- mutex_lock(&dev_priv->rps.hw_lock);
- ret = sandybridge_pcode_write(dev_priv,
- BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
- mutex_unlock(&dev_priv->rps.hw_lock);
- if (ret) {
- DRM_ERROR("failed to inform pcode about cdclk change\n");
- return;
- }
-
- val = I915_READ(LCPLL_CTL);
- val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us(I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
-
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_CLK_FREQ_MASK;
-
- switch (cdclk) {
- case 450000:
- val |= LCPLL_CLK_FREQ_450;
- data = 0;
- break;
- case 540000:
- val |= LCPLL_CLK_FREQ_54O_BDW;
- data = 1;
- break;
- case 337500:
- val |= LCPLL_CLK_FREQ_337_5_BDW;
- data = 2;
- break;
- case 675000:
- val |= LCPLL_CLK_FREQ_675_BDW;
- data = 3;
- break;
- default:
- WARN(1, "invalid cdclk frequency\n");
- return;
- }
-
- I915_WRITE(LCPLL_CTL, val);
-
- val = I915_READ(LCPLL_CTL);
- val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
-
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
-
- mutex_lock(&dev_priv->rps.hw_lock);
- sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
- mutex_unlock(&dev_priv->rps.hw_lock);
-
- I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
-
- intel_update_cdclk(dev_priv);
-
- WARN(cdclk != dev_priv->cdclk_freq,
- "cdclk requested %d kHz but got %d kHz\n",
- cdclk, dev_priv->cdclk_freq);
-}
-
-static int broadwell_calc_cdclk(int max_pixclk)
-{
- if (max_pixclk > 540000)
- return 675000;
- else if (max_pixclk > 450000)
- return 540000;
- else if (max_pixclk > 337500)
- return 450000;
- else
- return 337500;
-}
-
-static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- int max_pixclk = ilk_max_pixel_rate(state);
- int cdclk;
-
- /*
- * FIXME should also account for plane ratio
- * once 64bpp pixel formats are supported.
- */
- cdclk = broadwell_calc_cdclk(max_pixclk);
-
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- return -EINVAL;
- }
-
- intel_state->cdclk = intel_state->dev_cdclk = cdclk;
- if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = broadwell_calc_cdclk(0);
-
- return 0;
-}
-
-static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
-{
- struct drm_device *dev = old_state->dev;
- struct intel_atomic_state *old_intel_state =
- to_intel_atomic_state(old_state);
- unsigned req_cdclk = old_intel_state->dev_cdclk;
-
- broadwell_set_cdclk(dev, req_cdclk);
-}
-
-static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- const int max_pixclk = ilk_max_pixel_rate(state);
- int vco = intel_state->cdclk_pll_vco;
- int cdclk;
-
- /*
- * FIXME should also account for plane ratio
- * once 64bpp pixel formats are supported.
- */
- cdclk = skl_calc_cdclk(max_pixclk, vco);
-
- /*
-	 * FIXME move the cdclk calculation to
-	 * compute_config() so we can fail gracefully.
- */
- if (cdclk > dev_priv->max_cdclk_freq) {
- DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
- cdclk, dev_priv->max_cdclk_freq);
- cdclk = dev_priv->max_cdclk_freq;
- }
-
- intel_state->cdclk = intel_state->dev_cdclk = cdclk;
- if (!intel_state->active_crtcs)
- intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
-
- return 0;
-}
-
-static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
-{
- struct drm_i915_private *dev_priv = to_i915(old_state->dev);
- struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
- unsigned int req_cdclk = intel_state->dev_cdclk;
- unsigned int req_vco = intel_state->cdclk_pll_vco;
-
- skl_set_cdclk(dev_priv, req_cdclk, req_vco);
-}
-
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state)
{
@@ -10565,7 +8949,7 @@ static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- unsigned long *power_domain_mask)
+ u64 *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10607,7 +8991,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- *power_domain_mask |= BIT(power_domain);
+ *power_domain_mask |= BIT_ULL(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
@@ -10616,7 +9000,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config,
- unsigned long *power_domain_mask)
+ u64 *power_domain_mask)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -10634,7 +9018,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
- *power_domain_mask |= BIT(power_domain);
+ *power_domain_mask |= BIT_ULL(power_domain);
/*
* The PLL needs to be enabled with a valid divider
@@ -10674,7 +9058,7 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
skylake_get_ddi_pll(dev_priv, port, pipe_config);
else if (IS_GEN9_LP(dev_priv))
bxt_get_ddi_pll(dev_priv, port, pipe_config);
@@ -10709,13 +9093,13 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum intel_display_power_domain power_domain;
- unsigned long power_domain_mask;
+ u64 power_domain_mask;
bool active;
power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- power_domain_mask = BIT(power_domain);
+ power_domain_mask = BIT_ULL(power_domain);
pipe_config->shared_dpll = NULL;
@@ -10749,7 +9133,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
- power_domain_mask |= BIT(power_domain);
+ power_domain_mask |= BIT_ULL(power_domain);
if (INTEL_GEN(dev_priv) >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -10816,24 +9200,24 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
/* On these chipsets we can only modify the base/size/stride
* whilst the cursor is disabled.
*/
- I915_WRITE(CURCNTR(PIPE_A), 0);
- POSTING_READ(CURCNTR(PIPE_A));
+ I915_WRITE_FW(CURCNTR(PIPE_A), 0);
+ POSTING_READ_FW(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = 0;
}
if (intel_crtc->cursor_base != base) {
- I915_WRITE(CURBASE(PIPE_A), base);
+ I915_WRITE_FW(CURBASE(PIPE_A), base);
intel_crtc->cursor_base = base;
}
if (intel_crtc->cursor_size != size) {
- I915_WRITE(CURSIZE, size);
+ I915_WRITE_FW(CURSIZE, size);
intel_crtc->cursor_size = size;
}
if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(CURCNTR(PIPE_A), cntl);
- POSTING_READ(CURCNTR(PIPE_A));
+ I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+ POSTING_READ_FW(CURCNTR(PIPE_A));
intel_crtc->cursor_cntl = cntl;
}
}
@@ -10873,14 +9257,14 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
}
if (intel_crtc->cursor_cntl != cntl) {
- I915_WRITE(CURCNTR(pipe), cntl);
- POSTING_READ(CURCNTR(pipe));
+ I915_WRITE_FW(CURCNTR(pipe), cntl);
+ POSTING_READ_FW(CURCNTR(pipe));
intel_crtc->cursor_cntl = cntl;
}
/* and commit changes on next vblank */
- I915_WRITE(CURBASE(pipe), base);
- POSTING_READ(CURBASE(pipe));
+ I915_WRITE_FW(CURBASE(pipe), base);
+ POSTING_READ_FW(CURBASE(pipe));
intel_crtc->cursor_base = base;
}
@@ -10894,6 +9278,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 base = intel_crtc->cursor_addr;
+ unsigned long irqflags;
u32 pos = 0;
if (plane_state) {
@@ -10920,12 +9305,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
}
- I915_WRITE(CURPOS(pipe), pos);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ I915_WRITE_FW(CURPOS(pipe), pos);
if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
i845_update_cursor(crtc, base, plane_state);
else
i9xx_update_cursor(crtc, base, plane_state);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static bool cursor_size_ok(struct drm_i915_private *dev_priv,
@@ -10972,9 +9361,8 @@ static struct drm_display_mode load_detect_mode = {
};
struct drm_framebuffer *
-__intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct intel_framebuffer *intel_fb;
int ret;
@@ -10983,7 +9371,7 @@ __intel_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
+ ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
if (ret)
goto err;
@@ -10994,23 +9382,6 @@ err:
return ERR_PTR(ret);
}
-static struct drm_framebuffer *
-intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
-{
- struct drm_framebuffer *fb;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ERR_PTR(ret);
- fb = __intel_framebuffer_create(dev, mode_cmd, obj);
- mutex_unlock(&dev->struct_mutex);
-
- return fb;
-}
-
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
@@ -11045,7 +9416,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
bpp);
mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
- fb = intel_framebuffer_create(dev, &mode_cmd, obj);
+ fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb))
i915_gem_object_put(obj);
@@ -11731,14 +10102,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
+ u32 flip_mask, *cs;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
@@ -11747,13 +10116,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, 0); /* aux display base address, unused */
+ *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = 0; /* aux display base address, unused */
return 0;
}
@@ -11765,26 +10133,23 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- u32 flip_mask;
- int ret;
+ u32 flip_mask, *cs;
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+ *cs++ = MI_NOOP;
+ *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = MI_NOOP;
return 0;
}
@@ -11796,25 +10161,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
+ u32 pf, pipesrc, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* i965+ uses the linear or tiled offsets from the
* Display Registers (which do not change across a page-flip)
* so we need only reprogram the base address.
*/
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
- intel_fb_modifier_to_tiling(fb->modifier));
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0];
+ *cs++ = intel_crtc->flip_work->gtt_offset |
+ intel_fb_modifier_to_tiling(fb->modifier);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -11822,7 +10184,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ *cs++ = pf | pipesrc;
return 0;
}
@@ -11834,21 +10196,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_i915_gem_request *req,
uint32_t flags)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t pf, pipesrc;
- int ret;
+ u32 pf, pipesrc, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_DISPLAY_FLIP |
- MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+ *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+ *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+ *cs++ = intel_crtc->flip_work->gtt_offset;
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -11858,7 +10216,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
*/
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
- intel_ring_emit(ring, pf | pipesrc);
+ *cs++ = pf | pipesrc;
return 0;
}
@@ -11871,9 +10229,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
uint32_t flags)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_ring *ring = req->ring;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- uint32_t plane_bit = 0;
+ u32 *cs, plane_bit = 0;
int len, ret;
switch (intel_crtc->plane) {
@@ -11917,9 +10274,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ret)
return ret;
- ret = intel_ring_begin(req, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Unmask the flip-done completion message. Note that the bspec says that
* we should do this for both the BCS and RCS, and that we must not unmask
@@ -11931,31 +10288,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
* to zero does lead to lockups within MI_DISPLAY_FLIP.
*/
if (req->engine->id == RCS) {
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
- DERRMR_PIPEB_PRI_FLIP_DONE |
- DERRMR_PIPEC_PRI_FLIP_DONE));
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(DERRMR);
+ *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+ DERRMR_PIPEB_PRI_FLIP_DONE |
+ DERRMR_PIPEC_PRI_FLIP_DONE);
if (IS_GEN8(dev_priv))
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT);
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+ MI_SRM_LRM_GLOBAL_GTT;
else
- intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
- MI_SRM_LRM_GLOBAL_GTT);
- intel_ring_emit_reg(ring, DERRMR);
- intel_ring_emit(ring,
- i915_ggtt_offset(req->engine->scratch) + 256);
+ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+ *cs++ = i915_mmio_reg_offset(DERRMR);
+ *cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
if (IS_GEN8(dev_priv)) {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
}
- intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
- intel_ring_emit(ring, fb->pitches[0] |
- intel_fb_modifier_to_tiling(fb->modifier));
- intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
- intel_ring_emit(ring, (MI_NOOP));
+ *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+ *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+ *cs++ = intel_crtc->flip_work->gtt_offset;
+ *cs++ = MI_NOOP;
return 0;
}
@@ -12147,6 +10501,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
spin_unlock(&dev->event_lock);
}
+__maybe_unused
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
@@ -12240,7 +10595,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup;
intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error);
- if (i915_reset_in_progress_or_wedged(&dev_priv->gpu_error)) {
+ if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) {
ret = -EIO;
goto unlock;
}
@@ -12313,7 +10668,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_mark_page_flip_active(intel_crtc, work);
work->flip_queued_req = i915_gem_request_get(request);
- i915_add_request_no_flush(request);
+ i915_add_request(request);
}
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
@@ -12329,7 +10684,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_request:
- i915_add_request_no_flush(request);
+ i915_add_request(request);
cleanup_unpin:
to_intel_plane_state(primary->state)->vma = work->old_vma;
intel_unpin_fb_vma(vma);
@@ -12441,11 +10796,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
struct drm_crtc *crtc = crtc_state->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_plane *plane = plane_state->plane;
+ struct intel_plane *plane = to_intel_plane(plane_state->plane);
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane_state *old_plane_state =
- to_intel_plane_state(plane->state);
+ to_intel_plane_state(plane->base.state);
bool mode_changed = needs_modeset(crtc_state);
bool was_crtc_enabled = crtc->state->active;
bool is_crtc_enabled = crtc_state->active;
@@ -12453,7 +10808,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_framebuffer *fb = plane_state->fb;
int ret;
- if (INTEL_GEN(dev_priv) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
+ if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
ret = skl_update_scaler_plane(
to_intel_crtc_state(crtc_state),
to_intel_plane_state(plane_state));
@@ -12477,8 +10832,10 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* per-plane wm computation to the .check_plane() hook, and
* only combine the results from all planes in the current place?
*/
- if (!is_crtc_enabled)
+ if (!is_crtc_enabled) {
plane_state->visible = visible = false;
+ to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
+ }
if (!was_visible && !visible)
return 0;
@@ -12490,41 +10847,39 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
turn_on = visible && (!was_visible || mode_changed);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
- intel_crtc->base.base.id,
- intel_crtc->base.name,
- plane->base.id, plane->name,
+ intel_crtc->base.base.id, intel_crtc->base.name,
+ plane->base.base.id, plane->base.name,
fb ? fb->base.id : -1);
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- plane->base.id, plane->name,
+ plane->base.base.id, plane->base.name,
was_visible, visible,
turn_off, turn_on, mode_changed);
if (turn_on) {
- pipe_config->update_wm_pre = true;
+ if (INTEL_GEN(dev_priv) < 5)
+ pipe_config->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
} else if (turn_off) {
- pipe_config->update_wm_post = true;
+ if (INTEL_GEN(dev_priv) < 5)
+ pipe_config->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
- if (plane->type != DRM_PLANE_TYPE_CURSOR)
+ if (plane->id != PLANE_CURSOR)
pipe_config->disable_cxsr = true;
- } else if (intel_wm_need_update(plane, plane_state)) {
- /* FIXME bollocks */
- pipe_config->update_wm_pre = true;
- pipe_config->update_wm_post = true;
+ } else if (intel_wm_need_update(&plane->base, plane_state)) {
+ if (INTEL_GEN(dev_priv) < 5) {
+ /* FIXME bollocks */
+ pipe_config->update_wm_pre = true;
+ pipe_config->update_wm_post = true;
+ }
}
- /* Pre-gen9 platforms need two-step watermark updates */
- if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
- INTEL_GEN(dev_priv) < 9 && dev_priv->display.optimize_watermarks)
- to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
-
if (visible || was_visible)
- pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
+ pipe_config->fb_bits |= plane->frontbuffer_bit;
/*
* WaCxSRDisabledForSpriteScaling:ivb
@@ -12532,7 +10887,7 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
* cstate->update_wm was already set above, so this flag will
* take effect when we commit and program watermarks.
*/
- if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev_priv) &&
+ if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) &&
needs_scaling(to_intel_plane_state(plane_state)) &&
!needs_scaling(old_plane_state))
pipe_config->disable_lp_wm = true;
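
For reference, the turn_on computed above and its counterpart reduce to a small truth table (turn_off is defined just before this excerpt; the symmetric turn_off = was_visible && (!visible || mode_changed) is assumed here):

	/* was_visible  visible  mode_changed | turn_off  turn_on
	 *      0          0          x       |    -         -     early return
	 *      0          1          x       |    0         1
	 *      1          0          x       |    1         0
	 *      1          1          0       |    0         0     plain update
	 *      1          1          1       |    1         1     off+on cycle
	 */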
@@ -12557,7 +10912,7 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
struct drm_connector_state *connector_state;
int i;
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -12642,7 +10997,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
ret = skl_update_scaler_crtc(pipe_config);
if (!ret)
- ret = intel_atomic_setup_scalers(dev, intel_crtc,
+ ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
pipe_config);
}
@@ -12659,8 +11014,10 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
- for_each_intel_connector(dev, connector) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->base.state->crtc)
drm_connector_unreference(&connector->base);
@@ -12676,6 +11033,7 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
connector->base.state->crtc = NULL;
}
}
+ drm_connector_list_iter_end(&conn_iter);
}
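
The begin/iter/end triplet replaces bare walks of the connector list because connectors can now appear and disappear at runtime (DP MST, for instance); the iterator takes the references needed to keep each entry alive across the loop body. The general shape, in the core DRM spelling:

	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* connector is guaranteed to stay valid in here */
	}
	drm_connector_list_iter_end(&conn_iter);

Skipping the _end() call leaks a reference, which is why every conversion in this patch adds it on each path out of the loop.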
static void
@@ -12728,7 +11086,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
state = pipe_config->base.state;
/* Clamp display bpp to EDID value */
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -12800,9 +11158,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
DRM_DEBUG_KMS("adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d\n",
+ DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
pipe_config->port_clock,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h);
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+ pipe_config->pixel_rate);
if (INTEL_GEN(dev_priv) >= 9)
DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
@@ -12920,10 +11279,12 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
- struct drm_crtc_state tmp_state;
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
struct intel_crtc_scaler_state scaler_state;
struct intel_dpll_hw_state dpll_hw_state;
struct intel_shared_dpll *shared_dpll;
+ struct intel_crtc_wm_state wm_state;
bool force_thru;
/* FIXME: before the switch to atomic started, a new pipe_config was
@@ -12931,19 +11292,24 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
* fixed, so that the crtc_state can be safely duplicated. For now,
* only fields that are known to not cause problems are preserved. */
- tmp_state = crtc_state->base;
scaler_state = crtc_state->scaler_state;
shared_dpll = crtc_state->shared_dpll;
dpll_hw_state = crtc_state->dpll_hw_state;
force_thru = crtc_state->pch_pfit.force_thru;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ wm_state = crtc_state->wm;
- memset(crtc_state, 0, sizeof *crtc_state);
+ /* Keep base drm_crtc_state intact, only clear our extended struct */
+ BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
+ memset(&crtc_state->base + 1, 0,
+ sizeof(*crtc_state) - sizeof(crtc_state->base));
- crtc_state->base = tmp_state;
crtc_state->scaler_state = scaler_state;
crtc_state->shared_dpll = shared_dpll;
crtc_state->dpll_hw_state = dpll_hw_state;
crtc_state->pch_pfit.force_thru = force_thru;
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ crtc_state->wm = wm_state;
}
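
The memset above is the standard idiom for wiping the driver-private tail of a subclassed state structure while leaving the embedded core state intact; it is only correct while base stays the first member, which the BUILD_BUG_ON asserts at compile time. A self-contained sketch with hypothetical names:

	struct foo_state {
		struct drm_crtc_state base;	/* must remain first */
		int private_a;
		bool private_b;
	};

	static void foo_state_clear(struct foo_state *s)
	{
		BUILD_BUG_ON(offsetof(struct foo_state, base) != 0);
		memset(&s->base + 1, 0, sizeof(*s) - sizeof(s->base));
	}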
static int
@@ -12993,7 +11359,7 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
&pipe_config->pipe_src_w,
&pipe_config->pipe_src_h);
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc)
continue;
@@ -13024,7 +11390,7 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for_each_connector_in_state(state, connector, connector_state, i) {
+ for_each_new_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc)
continue;
@@ -13060,8 +11426,11 @@ encoder_retry:
}
/* Dithering seems to not pass through bits correctly when it should, so
- * only enable it on 6bpc panels. */
- pipe_config->dither = pipe_config->pipe_bpp == 6*3;
+ * only enable it on 6bpc panels and when it's not a compliance
+ * test requesting 6bpc video pattern.
+ */
+ pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
+ !pipe_config->dither_force_disable;
DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
@@ -13073,16 +11442,16 @@ static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *new_crtc_state;
int i;
/* Double check state. */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
/* Update hwmode for vblank functions */
- if (crtc->state->active)
- crtc->hwmode = crtc->state->adjusted_mode;
+ if (new_crtc_state->active)
+ crtc->hwmode = new_crtc_state->adjusted_mode;
else
crtc->hwmode.crtc_clock = 0;
@@ -13375,6 +11744,7 @@ intel_pipe_config_compare(struct drm_i915_private *dev_priv,
}
PIPE_CONF_CHECK_I(scaler_state.scaler_id);
+ PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
}
/* BDW+ don't expose a synchronous way to read the state */
@@ -13554,47 +11924,55 @@ verify_connector_state(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_connector *connector;
- struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
int i;
- for_each_connector_in_state(state, connector, old_conn_state, i) {
+ for_each_new_connector_in_state(state, connector, new_conn_state, i) {
struct drm_encoder *encoder = connector->encoder;
- struct drm_connector_state *state = connector->state;
- if (state->crtc != crtc)
+ if (new_conn_state->crtc != crtc)
continue;
intel_connector_verify_state(to_intel_connector(connector));
- I915_STATE_WARN(state->best_encoder != encoder,
+ I915_STATE_WARN(new_conn_state->best_encoder != encoder,
"connector's atomic encoder doesn't match legacy encoder\n");
}
}
static void
-verify_encoder_state(struct drm_device *dev)
+verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
{
struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state, *new_conn_state;
+ int i;
for_each_intel_encoder(dev, encoder) {
- bool enabled = false;
+ bool enabled = false, found = false;
enum pipe pipe;
DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
encoder->base.base.id,
encoder->base.name);
- for_each_intel_connector(dev, connector) {
- if (connector->base.state->best_encoder != &encoder->base)
+ for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+ new_conn_state, i) {
+ if (old_conn_state->best_encoder == &encoder->base)
+ found = true;
+
+ if (new_conn_state->best_encoder != &encoder->base)
continue;
- enabled = true;
+ found = enabled = true;
- I915_STATE_WARN(connector->base.state->crtc !=
+ I915_STATE_WARN(new_conn_state->crtc !=
encoder->base.crtc,
"connector's crtc doesn't match encoder crtc\n");
}
+ if (!found)
+ continue;
+
I915_STATE_WARN(!!encoder->base.crtc != enabled,
"encoder's enabled state mismatch "
"(expected %i, found %i)\n",
@@ -13666,6 +12044,8 @@ verify_crtc_state(struct drm_crtc *crtc,
}
}
+ intel_crtc_compute_pixel_rate(pipe_config);
+
if (!new_crtc_state->active)
return;
@@ -13793,7 +12173,7 @@ static void
intel_modeset_verify_disabled(struct drm_device *dev,
struct drm_atomic_state *state)
{
- verify_encoder_state(dev);
+ verify_encoder_state(dev, state);
verify_connector_state(dev, state, NULL);
verify_disabled_dpll_state(dev);
}
@@ -13841,21 +12221,21 @@ static void intel_modeset_clear_plls(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
if (!dev_priv->display.crtc_compute_clock)
return;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_shared_dpll *old_dpll =
- to_intel_crtc_state(crtc->state)->shared_dpll;
+ to_intel_crtc_state(old_crtc_state)->shared_dpll;
- if (!needs_modeset(crtc_state))
+ if (!needs_modeset(new_crtc_state))
continue;
- to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
+ to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL;
if (!old_dpll)
continue;
@@ -13881,7 +12261,7 @@ static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
int i;
/* look at all crtc's that are going to be enabled during modeset */
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
intel_crtc = to_intel_crtc(crtc);
if (!crtc_state->active || !needs_modeset(crtc_state))
@@ -13983,7 +12363,7 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(state->dev);
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int ret = 0, i;
if (!check_digital_port_conflicts(state)) {
@@ -13993,14 +12373,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
intel_state->modeset = true;
intel_state->active_crtcs = dev_priv->active_crtcs;
+ intel_state->cdclk.logical = dev_priv->cdclk.logical;
+ intel_state->cdclk.actual = dev_priv->cdclk.actual;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
- if (crtc_state->active)
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (new_crtc_state->active)
intel_state->active_crtcs |= 1 << i;
else
intel_state->active_crtcs &= ~(1 << i);
- if (crtc_state->active != crtc->state->active)
+ if (old_crtc_state->active != new_crtc_state->active)
intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
}
@@ -14012,38 +12394,35 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
* adjusted_mode bits in the crtc directly.
*/
if (dev_priv->display.modeset_calc_cdclk) {
- if (!intel_state->cdclk_pll_vco)
- intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
- if (!intel_state->cdclk_pll_vco)
- intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
-
ret = dev_priv->display.modeset_calc_cdclk(state);
if (ret < 0)
return ret;
/*
- * Writes to dev_priv->atomic_cdclk_freq must protected by
+ * Writes to dev_priv->cdclk.logical must be protected by
* holding all the crtc locks, even if we don't end up
* touching the hardware
*/
- if (intel_state->cdclk != dev_priv->atomic_cdclk_freq) {
+ if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical,
+ &intel_state->cdclk.logical)) {
ret = intel_lock_all_pipes(state);
if (ret < 0)
return ret;
}
/* All pipes must be switched off while we change the cdclk. */
- if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
- intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco) {
+ if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual,
+ &intel_state->cdclk.actual)) {
ret = intel_modeset_all_pipes(state);
if (ret < 0)
return ret;
}
- DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
- intel_state->cdclk, intel_state->dev_cdclk);
+ DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ intel_state->cdclk.logical.cdclk,
+ intel_state->cdclk.actual.cdclk);
} else {
- to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
+ to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical;
}
intel_modeset_clear_plls(state);
@@ -14082,7 +12461,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
- struct drm_crtc_state *crtc_state;
+ struct drm_crtc_state *old_crtc_state, *crtc_state;
int ret, i;
bool any_ms = false;
@@ -14090,12 +12469,12 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) {
struct intel_crtc_state *pipe_config =
to_intel_crtc_state(crtc_state);
/* Catch I915_MODE_FLAG_INHERITED */
- if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
+ if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags)
crtc_state->mode_changed = true;
if (!needs_modeset(crtc_state))
@@ -14122,10 +12501,10 @@ static int intel_atomic_check(struct drm_device *dev,
if (i915.fastboot &&
intel_pipe_config_compare(dev_priv,
- to_intel_crtc_state(crtc->state),
+ to_intel_crtc_state(old_crtc_state),
pipe_config, true)) {
crtc_state->mode_changed = false;
- to_intel_crtc_state(crtc_state)->update_pipe = true;
+ pipe_config->update_pipe = true;
}
if (needs_modeset(crtc_state))
@@ -14146,7 +12525,7 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
return ret;
} else {
- intel_state->cdclk = dev_priv->atomic_cdclk_freq;
+ intel_state->cdclk.logical = dev_priv->cdclk.logical;
}
ret = drm_atomic_helper_check_planes(dev, state);
@@ -14165,7 +12544,7 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
struct drm_crtc *crtc;
int i, ret;
- for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
if (state->legacy_cursor_update)
continue;
@@ -14253,12 +12632,7 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
if (crtc_state->update_wm_post)
return true;
- /*
- * cxsr is re-enabled after vblank.
- * This is already handled by crtc_state->update_wm_post,
- * but added for clarity.
- */
- if (crtc_state->disable_cxsr)
+ if (crtc_state->wm.need_postvbl_update)
return true;
return false;
@@ -14267,19 +12641,21 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
static void intel_update_crtc(struct drm_crtc *crtc,
struct drm_atomic_state *state,
struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state,
unsigned int *crtc_vblank_mask)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
- bool modeset = needs_modeset(crtc->state);
+ struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
+ bool modeset = needs_modeset(new_crtc_state);
if (modeset) {
update_scanline_offset(intel_crtc);
dev_priv->display.crtc_enable(pipe_config, state);
} else {
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
+ pipe_config);
}
if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
@@ -14298,15 +12674,15 @@ static void intel_update_crtcs(struct drm_atomic_state *state,
unsigned int *crtc_vblank_mask)
{
struct drm_crtc *crtc;
- struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i;
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- if (!crtc->state->active)
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ if (!new_crtc_state->active)
continue;
intel_update_crtc(crtc, state, old_crtc_state,
- crtc_vblank_mask);
+ new_crtc_state, crtc_vblank_mask);
}
}
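
The conversions from for_each_crtc_in_state() to the oldnew variants run through this whole patch: the old macro handed out a single state and left callers peeking at crtc->state, whose meaning flips once drm_atomic_helper_swap_state() runs, while the new macros expose both states explicitly. A minimal sketch in the new style, mirroring the active-pipe bookkeeping seen earlier (helper name hypothetical):

	/* Count pipes whose active bit toggles in this commit, without ever
	 * dereferencing crtc->state.
	 */
	static unsigned int count_active_toggles(struct drm_atomic_state *state)
	{
		struct drm_crtc_state *old_crtc_state, *new_crtc_state;
		struct drm_crtc *crtc;
		unsigned int n = 0;
		int i;

		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
					      new_crtc_state, i)
			if (old_crtc_state->active != new_crtc_state->active)
				n++;

		return n;
	}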
@@ -14317,7 +12693,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
- struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_crtc_state *cstate;
unsigned int updated = 0;
bool progress;
@@ -14326,9 +12702,9 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {};
- for_each_crtc_in_state(state, crtc, old_crtc_state, i)
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
/* ignore allocations for crtc's that have been turned off. */
- if (crtc->state->active)
+ if (new_crtc_state->active)
entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
/*
@@ -14340,7 +12716,7 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
do {
progress = false;
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool vbl_wait = false;
unsigned int cmask = drm_crtc_mask(crtc);
@@ -14365,12 +12741,12 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
*/
if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
&to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
- !crtc->state->active_changed &&
+ !new_crtc_state->active_changed &&
intel_state->wm_results.dirty_pipes != updated)
vbl_wait = true;
intel_update_crtc(crtc, state, old_crtc_state,
- crtc_vblank_mask);
+ new_crtc_state, crtc_vblank_mask);
if (vbl_wait)
intel_wait_for_vblank(dev_priv, pipe);
@@ -14380,16 +12756,34 @@ static void skl_update_crtcs(struct drm_atomic_state *state,
} while (progress);
}
+static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
+{
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
+static void intel_atomic_helper_free_state_worker(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+
+ intel_atomic_helper_free_state(dev_priv);
+}
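
The worker above only drains the list; the enqueue side is not visible in this hunk, but given the llist plumbing it would presumably look like the sketch below (function name assumed):

	static void intel_atomic_defer_free(struct drm_i915_private *dev_priv,
					    struct intel_atomic_state *state)
	{
		/* Lockless push, usable from any context; the worker then
		 * performs the drm_atomic_state_put() in process context.
		 */
		llist_add(&state->freed, &dev_priv->atomic_helper.free_list);
		schedule_work(&dev_priv->atomic_helper.free_work);
	}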
+
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc_state *old_crtc_state;
+ struct drm_crtc_state *old_crtc_state, *new_crtc_state;
struct drm_crtc *crtc;
struct intel_crtc_state *intel_cstate;
bool hw_check = intel_state->modeset;
- unsigned long put_domains[I915_MAX_PIPES] = {};
+ u64 put_domains[I915_MAX_PIPES] = {};
unsigned crtc_vblank_mask = 0;
int i;
@@ -14398,22 +12792,23 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset)
intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- if (needs_modeset(crtc->state) ||
- to_intel_crtc_state(crtc->state)->update_pipe) {
+ if (needs_modeset(new_crtc_state) ||
+ to_intel_crtc_state(new_crtc_state)->update_pipe) {
hw_check = true;
put_domains[to_intel_crtc(crtc)->pipe] =
modeset_get_crtc_power_domains(crtc,
- to_intel_crtc_state(crtc->state));
+ to_intel_crtc_state(new_crtc_state));
}
- if (!needs_modeset(crtc->state))
+ if (!needs_modeset(new_crtc_state))
continue;
- intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
+ intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
+ to_intel_crtc_state(new_crtc_state));
if (old_crtc_state->active) {
intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
@@ -14433,12 +12828,12 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
/*
* Make sure we don't call initial_watermarks
* for ILK-style watermark updates.
+ *
+ * No clue what this is supposed to achieve.
*/
- if (dev_priv->display.atomic_update_watermarks)
+ if (INTEL_GEN(dev_priv) >= 9)
dev_priv->display.initial_watermarks(intel_state,
to_intel_crtc_state(crtc->state));
- else
- intel_update_watermarks(intel_crtc);
}
}
}
@@ -14450,10 +12845,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
if (intel_state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
- if (dev_priv->display.modeset_commit_cdclk &&
- (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
- intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
- dev_priv->display.modeset_commit_cdclk(state);
+ intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -14466,16 +12858,16 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Complete the events for pipes that have now been disabled */
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- bool modeset = needs_modeset(crtc->state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ bool modeset = needs_modeset(new_crtc_state);
/* Complete events for now-disabled pipes here. */
- if (modeset && !crtc->state->active && crtc->state->event) {
+ if (modeset && !new_crtc_state->active && new_crtc_state->event) {
spin_lock_irq(&dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
spin_unlock_irq(&dev->event_lock);
- crtc->state->event = NULL;
+ new_crtc_state->event = NULL;
}
}
@@ -14501,21 +12893,21 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
*
* TODO: Move this (and other cleanup) to an async worker eventually.
*/
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
- intel_cstate = to_intel_crtc_state(crtc->state);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ intel_cstate = to_intel_crtc_state(new_crtc_state);
if (dev_priv->display.optimize_watermarks)
dev_priv->display.optimize_watermarks(intel_state,
intel_cstate);
}
- for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
+ for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
if (put_domains[i])
modeset_put_power_domains(dev_priv, put_domains[i]);
- intel_modeset_verify_crtc(crtc, state, old_crtc_state, crtc->state);
+ intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
if (intel_state->modeset && intel_can_enable_sagv(state))
@@ -14546,6 +12938,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
* can happen also when the device is completely off.
*/
intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
+
+ intel_atomic_helper_free_state(dev_priv);
}
static void intel_atomic_commit_work(struct work_struct *work)
@@ -14585,13 +12979,13 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
- struct drm_plane_state *old_plane_state;
+ struct drm_plane_state *old_plane_state, *new_plane_state;
struct drm_plane *plane;
int i;
- for_each_plane_in_state(state, plane, old_plane_state, i)
+ for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
- intel_fb_obj(plane->state->fb),
+ intel_fb_obj(new_plane_state->fb),
to_intel_plane(plane)->frontbuffer_bit);
}
@@ -14615,6 +13009,17 @@ static int intel_atomic_commit(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
int ret = 0;
+ /*
+ * The intel_legacy_cursor_update() fast path takes care
+ * of avoiding the vblank waits for simple cursor
+ * movement and flips. For cursor on/off and size changes,
+ * we want to perform the vblank waits so that watermark
+ * updates happen during the correct frames. Gen9+ have
+ * double buffered watermarks and so shouldn't need this.
+ */
+ if (INTEL_GEN(dev_priv) < 9)
+ state->legacy_cursor_update = false;
+
ret = drm_atomic_helper_setup_commit(state, nonblock);
if (ret)
return ret;
@@ -14639,7 +13044,8 @@ static int intel_atomic_commit(struct drm_device *dev,
memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
sizeof(intel_state->min_pixclk));
dev_priv->active_crtcs = intel_state->active_crtcs;
- dev_priv->atomic_cdclk_freq = intel_state->cdclk;
+ dev_priv->cdclk.logical = intel_state->cdclk.logical;
+ dev_priv->cdclk.actual = intel_state->cdclk.actual;
}
drm_atomic_state_get(state);
@@ -14739,7 +13145,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.set_property = drm_atomic_helper_crtc_set_property,
.destroy = intel_crtc_destroy,
- .page_flip = intel_crtc_page_flip,
+ .page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
.set_crc_source = intel_crtc_set_crc_source,
@@ -14771,6 +13177,29 @@ intel_prepare_plane_fb(struct drm_plane *plane,
struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
int ret;
+ if (obj) {
+ if (plane->type == DRM_PLANE_TYPE_CURSOR &&
+ INTEL_INFO(dev_priv)->cursor_needs_physical) {
+ const int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
+
+ ret = i915_gem_object_attach_phys(obj, align);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to attach phys object\n");
+ return ret;
+ }
+ } else {
+ struct i915_vma *vma;
+
+ vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
+ if (IS_ERR(vma)) {
+ DRM_DEBUG_KMS("failed to pin object\n");
+ return PTR_ERR(vma);
+ }
+
+ to_intel_plane_state(new_state)->vma = vma;
+ }
+ }
+
if (!obj && !old_obj)
return 0;
@@ -14823,26 +13252,6 @@ intel_prepare_plane_fb(struct drm_plane *plane,
i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY);
}
- if (plane->type == DRM_PLANE_TYPE_CURSOR &&
- INTEL_INFO(dev_priv)->cursor_needs_physical) {
- int align = IS_I830(dev_priv) ? 16 * 1024 : 256;
- ret = i915_gem_object_attach_phys(obj, align);
- if (ret) {
- DRM_DEBUG_KMS("failed to attach phys object\n");
- return ret;
- }
- } else {
- struct i915_vma *vma;
-
- vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
- if (IS_ERR(vma)) {
- DRM_DEBUG_KMS("failed to pin object\n");
- return PTR_ERR(vma);
- }
-
- to_intel_plane_state(new_state)->vma = vma;
- }
-
return 0;
}
@@ -14870,16 +13279,22 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv;
int max_scale;
- int crtc_clock, cdclk;
+ int crtc_clock, max_dotclk;
if (!intel_crtc || !crtc_state->base.enable)
return DRM_PLANE_HELPER_NO_SCALING;
+ dev_priv = to_i915(intel_crtc->base.dev);
+
crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
- cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
+ max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
- if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
+ if (IS_GEMINILAKE(dev_priv))
+ max_dotclk *= 2;
+
+ if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
return DRM_PLANE_HELPER_NO_SCALING;
/*
@@ -14888,7 +13303,8 @@ skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state
* or
* cdclk/crtc_clock
*/
- max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
+ max_scale = min((1 << 16) * 3 - 1,
+ (1 << 8) * ((max_dotclk << 8) / crtc_clock));
return max_scale;
}
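
Both operands of the min() are 16.16 fixed point: (1 << 16) * 3 - 1 sits just below 3.0, and (1 << 8) * ((max_dotclk << 8) / crtc_clock) approximates max_dotclk / crtc_clock in the same format. With assumed clocks of max_dotclk = 450000 kHz and crtc_clock = 297000 kHz, the ratio term is 256 * ((450000 * 256) / 297000) = 256 * 387 = 99072, roughly 1.51x, so the pipe clock is the binding limit; at crtc_clock = 148500 kHz the ratio term rises to 198400 and the just-under-3x cap (196607) wins instead.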
@@ -14947,17 +13363,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc,
to_intel_atomic_state(old_crtc_state->state);
bool modeset = needs_modeset(crtc->state);
+ if (!modeset &&
+ (intel_cstate->base.color_mgmt_changed ||
+ intel_cstate->update_pipe)) {
+ intel_color_set_csc(crtc->state);
+ intel_color_load_luts(crtc->state);
+ }
+
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(intel_crtc);
if (modeset)
goto out;
- if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
- intel_color_set_csc(crtc->state);
- intel_color_load_luts(crtc->state);
- }
-
if (intel_cstate->update_pipe)
intel_update_pipe_config(intel_crtc, old_intel_cstate);
else if (INTEL_GEN(dev_priv) >= 9)
@@ -15038,8 +13456,7 @@ intel_legacy_cursor_update(struct drm_plane *plane,
old_plane_state->src_h != src_h ||
old_plane_state->crtc_w != crtc_w ||
old_plane_state->crtc_h != crtc_h ||
- !old_plane_state->visible ||
- old_plane_state->fb->modifier != fb->modifier)
+ !old_plane_state->fb != !fb)
goto slow;
new_plane_state = intel_plane_duplicate_state(plane);
@@ -15062,10 +13479,6 @@ intel_legacy_cursor_update(struct drm_plane *plane,
if (ret)
goto out_free;
- /* Visibility changed, must take slowpath. */
- if (!new_plane_state->visible)
- goto slow_free;
-
ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
if (ret)
goto out_free;
@@ -15105,9 +13518,15 @@ intel_legacy_cursor_update(struct drm_plane *plane,
new_plane_state->fb = old_fb;
to_intel_plane_state(new_plane_state)->vma = old_vma;
- intel_plane->update_plane(plane,
- to_intel_crtc_state(crtc->state),
- to_intel_plane_state(plane->state));
+ if (plane->state->visible) {
+ trace_intel_update_plane(plane, to_intel_crtc(crtc));
+ intel_plane->update_plane(plane,
+ to_intel_crtc_state(crtc->state),
+ to_intel_plane_state(plane->state));
+ } else {
+ trace_intel_disable_plane(plane, to_intel_crtc(crtc));
+ intel_plane->disable_plane(plane, crtc);
+ }
intel_cleanup_plane_fb(plane, new_plane_state);
@@ -15117,8 +13536,6 @@ out_free:
intel_plane_destroy_state(plane, new_plane_state);
return ret;
-slow_free:
- intel_plane_destroy_state(plane, new_plane_state);
slow:
return drm_atomic_helper_update_plane(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
@@ -15492,8 +13909,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_crtc->cursor_cntl = ~0;
intel_crtc->cursor_size = ~0;
- intel_crtc->wm.cxsr_allowed = true;
-
/* initialize shared scalers */
intel_crtc_init_scalers(intel_crtc, crtc_state);
@@ -15523,15 +13938,14 @@ fail:
enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
- struct drm_encoder *encoder = connector->base.encoder;
struct drm_device *dev = connector->base.dev;
WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
- if (!encoder || WARN_ON(!encoder->crtc))
+ if (!connector->base.state->crtc)
return INVALID_PIPE;
- return to_intel_crtc(encoder->crtc)->pipe;
+ return to_intel_crtc(connector->base.state->crtc)->pipe;
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -15681,7 +14095,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
- if (found || IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (found || IS_GEN9_BC(dev_priv))
intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C and D detection is indicated by the SFUSE_STRAP
@@ -15697,7 +14111,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/*
* On SKL we don't have a way to detect DDI-E so we rely on VBT.
*/
- if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ if (IS_GEN9_BC(dev_priv) &&
(dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
@@ -15830,14 +14244,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
- struct drm_device *dev = fb->dev;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
+
+ i915_gem_object_lock(intel_fb->obj);
WARN_ON(!intel_fb->obj->framebuffer_references--);
+ i915_gem_object_unlock(intel_fb->obj);
+
i915_gem_object_put(intel_fb->obj);
- mutex_unlock(&dev->struct_mutex);
+
kfree(intel_fb);
}
@@ -15862,15 +14278,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
struct drm_clip_rect *clips,
unsigned num_clips)
{
- struct drm_device *dev = fb->dev;
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- mutex_lock(&dev->struct_mutex);
- if (obj->pin_display && obj->cache_dirty)
- i915_gem_clflush_object(obj, true);
- intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
- mutex_unlock(&dev->struct_mutex);
+ i915_gem_object_flush_if_display(obj);
+ intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
return 0;
}
@@ -15885,7 +14296,7 @@ static
u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
uint64_t fb_modifier, uint32_t pixel_format)
{
- u32 gen = INTEL_INFO(dev_priv)->gen;
+ u32 gen = INTEL_GEN(dev_priv);
if (gen >= 9) {
int cpp = drm_format_plane_cpp(pixel_format, 0);
@@ -15894,8 +14305,7 @@ u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
* pixels and 32K bytes."
*/
return min(8192 * cpp, 32768);
- } else if (gen >= 5 && !IS_VALLEYVIEW(dev_priv) &&
- !IS_CHERRYVIEW(dev_priv)) {
+ } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) {
return 32*1024;
} else if (gen >= 4) {
if (fb_modifier == I915_FORMAT_MOD_X_TILED)
@@ -15913,18 +14323,21 @@ u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv,
}
}
-static int intel_framebuffer_init(struct drm_device *dev,
- struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj)
+static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned int tiling = i915_gem_object_get_tiling(obj);
- int ret;
- u32 pitch_limit, stride_alignment;
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct drm_format_name_buf format_name;
+ u32 pitch_limit, stride_alignment;
+ unsigned int tiling, stride;
+ int ret = -EINVAL;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ i915_gem_object_lock(obj);
+ obj->framebuffer_references++;
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
/*
@@ -15933,15 +14346,15 @@ static int intel_framebuffer_init(struct drm_device *dev,
*/
if (tiling != I915_TILING_NONE &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
- return -EINVAL;
+ DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+ goto err;
}
} else {
if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
} else if (tiling == I915_TILING_Y) {
- DRM_DEBUG("No Y tiling for legacy addfb\n");
- return -EINVAL;
+ DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+ goto err;
}
}
@@ -15950,17 +14363,17 @@ static int intel_framebuffer_init(struct drm_device *dev,
case I915_FORMAT_MOD_Y_TILED:
case I915_FORMAT_MOD_Yf_TILED:
if (INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG("Unsupported tiling 0x%llx!\n",
- mode_cmd->modifier[0]);
- return -EINVAL;
+ DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n",
+ mode_cmd->modifier[0]);
+ goto err;
}
case DRM_FORMAT_MOD_NONE:
case I915_FORMAT_MOD_X_TILED:
break;
default:
- DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
- mode_cmd->modifier[0]);
- return -EINVAL;
+ DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n",
+ mode_cmd->modifier[0]);
+ goto err;
}
/*
@@ -15969,39 +14382,28 @@ static int intel_framebuffer_init(struct drm_device *dev,
*/
if (INTEL_INFO(dev_priv)->gen < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG("tiling_mode must match fb modifier exactly on gen2/3\n");
- return -EINVAL;
- }
-
- stride_alignment = intel_fb_stride_alignment(dev_priv,
- mode_cmd->modifier[0],
- mode_cmd->pixel_format);
- if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
- DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
- mode_cmd->pitches[0], stride_alignment);
- return -EINVAL;
+ DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+ goto err;
}
pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0],
mode_cmd->pixel_format);
if (mode_cmd->pitches[0] > pitch_limit) {
- DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
- "tiled" : "linear",
- mode_cmd->pitches[0], pitch_limit);
- return -EINVAL;
+ DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], pitch_limit);
+ goto err;
}
/*
* If there's a fence, enforce that
* the fb pitch and fence stride match.
*/
- if (tiling != I915_TILING_NONE &&
- mode_cmd->pitches[0] != i915_gem_object_get_stride(obj)) {
- DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0],
- i915_gem_object_get_stride(obj));
- return -EINVAL;
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ goto err;
}
/* Reject formats not supported by any plane early. */
@@ -16013,33 +14415,33 @@ static int intel_framebuffer_init(struct drm_device *dev,
break;
case DRM_FORMAT_XRGB1555:
if (INTEL_GEN(dev_priv) > 3) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
break;
case DRM_FORMAT_ABGR8888:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
INTEL_GEN(dev_priv) < 9) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
if (INTEL_GEN(dev_priv) < 4) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
break;
case DRM_FORMAT_ABGR2101010:
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
break;
case DRM_FORMAT_YUYV:
@@ -16047,37 +14449,52 @@ static int intel_framebuffer_init(struct drm_device *dev,
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_GEN(dev_priv) < 5) {
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
break;
default:
- DRM_DEBUG("unsupported pixel format: %s\n",
- drm_get_format_name(mode_cmd->pixel_format, &format_name));
- return -EINVAL;
+ DRM_DEBUG_KMS("unsupported pixel format: %s\n",
+ drm_get_format_name(mode_cmd->pixel_format, &format_name));
+ goto err;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
- return -EINVAL;
+ goto err;
+
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm,
+ &intel_fb->base, mode_cmd);
+
+ stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0);
+ if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
+ DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n",
+ mode_cmd->pitches[0], stride_alignment);
+ goto err;
+ }
- drm_helper_mode_fill_fb_struct(dev, &intel_fb->base, mode_cmd);
intel_fb->obj = obj;
ret = intel_fill_fb_info(dev_priv, &intel_fb->base);
if (ret)
- return ret;
+ goto err;
- ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
+ ret = drm_framebuffer_init(obj->base.dev,
+ &intel_fb->base,
+ &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
- return ret;
+ goto err;
}
- intel_fb->obj->framebuffer_references++;
-
return 0;
+
+err:
+ i915_gem_object_lock(obj);
+ obj->framebuffer_references--;
+ i915_gem_object_unlock(obj);
+ return ret;
}
static struct drm_framebuffer *
@@ -16093,7 +14510,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!obj)
return ERR_PTR(-ENOENT);
- fb = intel_framebuffer_create(dev, &mode_cmd, obj);
+ fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb))
i915_gem_object_put(obj);
@@ -16127,6 +14544,8 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
*/
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
+ intel_init_cdclk_hooks(dev_priv);
+
if (INTEL_INFO(dev_priv)->gen >= 9) {
dev_priv->display.get_pipe_config = haswell_get_pipe_config;
dev_priv->display.get_initial_plane_config =
@@ -16195,62 +14614,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.crtc_disable = i9xx_crtc_disable;
}
- /* Returns the core display clock speed */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- dev_priv->display.get_display_clock_speed =
- skylake_get_display_clock_speed;
- else if (IS_GEN9_LP(dev_priv))
- dev_priv->display.get_display_clock_speed =
- broxton_get_display_clock_speed;
- else if (IS_BROADWELL(dev_priv))
- dev_priv->display.get_display_clock_speed =
- broadwell_get_display_clock_speed;
- else if (IS_HASWELL(dev_priv))
- dev_priv->display.get_display_clock_speed =
- haswell_get_display_clock_speed;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.get_display_clock_speed =
- valleyview_get_display_clock_speed;
- else if (IS_GEN5(dev_priv))
- dev_priv->display.get_display_clock_speed =
- ilk_get_display_clock_speed;
- else if (IS_I945G(dev_priv) || IS_I965G(dev_priv) ||
- IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i945_get_display_clock_speed;
- else if (IS_GM45(dev_priv))
- dev_priv->display.get_display_clock_speed =
- gm45_get_display_clock_speed;
- else if (IS_I965GM(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i965gm_get_display_clock_speed;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.get_display_clock_speed =
- pnv_get_display_clock_speed;
- else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
- dev_priv->display.get_display_clock_speed =
- g33_get_display_clock_speed;
- else if (IS_I915G(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i915_get_display_clock_speed;
- else if (IS_I945GM(dev_priv) || IS_I845G(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i9xx_misc_get_display_clock_speed;
- else if (IS_I915GM(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i915gm_get_display_clock_speed;
- else if (IS_I865G(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i865_get_display_clock_speed;
- else if (IS_I85X(dev_priv))
- dev_priv->display.get_display_clock_speed =
- i85x_get_display_clock_speed;
- else { /* 830 */
- WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
- dev_priv->display.get_display_clock_speed =
- i830_get_display_clock_speed;
- }
-
if (IS_GEN5(dev_priv)) {
dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
} else if (IS_GEN6(dev_priv)) {
@@ -16262,28 +14625,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
}
- if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- broadwell_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- broadwell_modeset_calc_cdclk;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- valleyview_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- valleyview_modeset_calc_cdclk;
- } else if (IS_GEN9_LP(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- bxt_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- bxt_modeset_calc_cdclk;
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
- dev_priv->display.modeset_commit_cdclk =
- skl_modeset_commit_cdclk;
- dev_priv->display.modeset_calc_cdclk =
- skl_modeset_calc_cdclk;
- }
-
if (dev_priv->info.gen >= 9)
dev_priv->display.update_crtcs = skl_update_crtcs;
else
@@ -16510,8 +14851,7 @@ void intel_modeset_init_hw(struct drm_device *dev)
struct drm_i915_private *dev_priv = to_i915(dev);
intel_update_cdclk(dev_priv);
-
- dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
+ dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
intel_init_clock_gating(dev_priv);
}
@@ -16566,7 +14906,8 @@ retry:
* intermediate watermarks (since we don't trust the current
* watermarks).
*/
- intel_state->skip_intermediate_wm = true;
+ if (!HAS_GMCH_DISPLAY(dev_priv))
+ intel_state->skip_intermediate_wm = true;
ret = intel_atomic_check(dev, state);
if (ret) {
@@ -16586,7 +14927,7 @@ retry:
}
/* Write calculated watermark values back */
- for_each_crtc_in_state(state, crtc, cstate, i) {
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
cs->wm.need_postvbl_update = true;
@@ -16600,18 +14941,6 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
-static void intel_atomic_helper_free_state(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), atomic_helper.free_work);
- struct intel_atomic_state *state, *next;
- struct llist_node *freed;
-
- freed = llist_del_all(&dev_priv->atomic_helper.free_list);
- llist_for_each_entry_safe(state, next, freed, freed)
- drm_atomic_state_put(&state->base);
-}
-
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16632,7 +14961,7 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
INIT_WORK(&dev_priv->atomic_helper.free_work,
- intel_atomic_helper_free_state);
+ intel_atomic_helper_free_state_worker);
intel_init_quirks(dev);
@@ -16697,12 +15026,11 @@ int intel_modeset_init(struct drm_device *dev)
}
}
- intel_update_czclk(dev_priv);
- intel_update_cdclk(dev_priv);
- dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
-
intel_shared_dpll_init(dev);
+ intel_update_czclk(dev_priv);
+ intel_modeset_init_hw(dev);
+
if (dev_priv->max_cdclk_freq == 0)
intel_update_max_cdclk(dev_priv);
@@ -16742,7 +15070,8 @@ int intel_modeset_init(struct drm_device *dev)
* Note that we need to do this after reconstructing the BIOS fb's
* since the watermark calculation done here will use pstate->fb.
*/
- sanitize_watermarks(dev);
+ if (!HAS_GMCH_DISPLAY(dev_priv))
+ sanitize_watermarks(dev);
return 0;
}
@@ -16750,6 +15079,7 @@ int intel_modeset_init(struct drm_device *dev)
static void intel_enable_pipe_a(struct drm_device *dev)
{
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct drm_connector *crt = NULL;
struct intel_load_detect_pipe load_detect_temp;
struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
@@ -16757,12 +15087,14 @@ static void intel_enable_pipe_a(struct drm_device *dev)
/* We can't just switch on pipe A, we need to set things up with a
* proper mode and output configuration. As a gross hack, enable pipe A
* by enabling the load detect pipe once. */
- for_each_intel_connector(dev, connector) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
crt = &connector->base;
break;
}
}
+ drm_connector_list_iter_end(&conn_iter);
if (!crt)
return;
@@ -16844,6 +15176,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
continue;
+ trace_intel_disable_plane(&plane->base, crtc);
plane->disable_plane(&plane->base, &crtc->base);
}
}
@@ -16990,15 +15323,14 @@ static bool primary_get_hw_state(struct intel_plane *plane)
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
- struct drm_plane *primary = crtc->base.primary;
- struct intel_plane_state *plane_state =
- to_intel_plane_state(primary->state);
+ struct intel_plane *primary = to_intel_plane(crtc->base.primary);
+ bool visible;
- plane_state->base.visible = crtc->active &&
- primary_get_hw_state(to_intel_plane(primary));
+ visible = crtc->active && primary_get_hw_state(primary);
- if (plane_state->base.visible)
- crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
+ intel_set_plane_visible(to_intel_crtc_state(crtc->base.state),
+ to_intel_plane_state(primary->base.state),
+ visible);
}
static void intel_modeset_readout_hw_state(struct drm_device *dev)
@@ -17008,6 +15340,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int i;
dev_priv->active_crtcs = 0;
@@ -17078,7 +15411,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe_name(pipe));
}
- for_each_intel_connector(dev, connector) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
if (connector->get_hw_state(connector)) {
connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -17106,6 +15440,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.base.id, connector->base.name,
enableddisabled(connector->base.encoder));
}
+ drm_connector_list_iter_end(&conn_iter);
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -17131,10 +15466,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
*/
crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
- if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
- pixclk = ilk_pipe_pixel_rate(crtc_state);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- pixclk = crtc_state->base.adjusted_mode.crtc_clock;
+ intel_crtc_compute_pixel_rate(crtc_state);
+
+ if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ pixclk = crtc_state->pixel_rate;
else
WARN_ON(dev_priv->display.modeset_calc_cdclk);
@@ -17152,6 +15488,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
}
}
+static void
+get_encoder_power_domains(struct drm_i915_private *dev_priv)
+{
+ struct intel_encoder *encoder;
+
+ for_each_intel_encoder(&dev_priv->drm, encoder) {
+ u64 get_domains;
+ enum intel_display_power_domain domain;
+
+ if (!encoder->get_power_domains)
+ continue;
+
+ get_domains = encoder->get_power_domains(encoder);
+ for_each_power_domain(domain, get_domains)
+ intel_display_power_get(dev_priv, domain);
+ }
+}
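
The new hook lets an encoder declare extra power domains it holds based on its current hardware state, so the readout path can take matching references. A sketch of what an implementation presumably looks like (the encoder type and domain here are illustrative, not taken from this diff):

	static u64 foo_encoder_get_power_domains(struct intel_encoder *encoder)
	{
		u64 domains = 0;

		if (encoder->type == INTEL_OUTPUT_EDP)
			domains |= BIT_ULL(POWER_DOMAIN_AUX_A);

		return domains;
	}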
+
/* Scan out the current hw modeset state,
* and sanitize it to the current state
*/
@@ -17167,6 +15521,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
intel_modeset_readout_hw_state(dev);
/* HW state is read out, now we need to sanitize this mess. */
+ get_encoder_power_domains(dev_priv);
+
for_each_intel_encoder(dev, encoder) {
intel_sanitize_encoder(encoder);
}
@@ -17193,15 +15549,17 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
pll->on = false;
}
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_wm_get_hw_state(dev);
- else if (IS_GEN9(dev_priv))
+ vlv_wm_sanitize(dev_priv);
+ } else if (IS_GEN9(dev_priv)) {
skl_wm_get_hw_state(dev);
- else if (HAS_PCH_SPLIT(dev_priv))
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev);
+ }
for_each_intel_crtc(dev, crtc) {
- unsigned long put_domains;
+ u64 put_domains;
put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
if (WARN_ON(put_domains))
@@ -17209,6 +15567,8 @@ intel_modeset_setup_hw_state(struct drm_device *dev)
}
intel_display_set_init_power(dev_priv, false);
+ intel_power_domains_verify_state(dev_priv);
+
intel_fbc_init_pipe_state(dev_priv);
}
@@ -17259,8 +15619,6 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_init_gt_powersave(dev_priv);
- intel_modeset_init_hw(dev);
-
intel_setup_overlay(dev_priv);
}
@@ -17492,9 +15850,9 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
- struct drm_i915_private *dev_priv,
struct intel_display_error_state *error)
{
+ struct drm_i915_private *dev_priv = m->i915;
int i;
if (!error)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d1670b8afbf5..fd96a6cf7326 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,8 +28,10 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
+#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
@@ -226,7 +228,7 @@ intel_dp_source_rates(struct intel_dp *intel_dp, const int **source_rates)
if (IS_GEN9_LP(dev_priv)) {
*source_rates = bxt_rates;
size = ARRAY_SIZE(bxt_rates);
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ } else if (IS_GEN9_BC(dev_priv)) {
*source_rates = skl_rates;
size = ARRAY_SIZE(skl_rates);
} else {
@@ -394,14 +396,12 @@ static void pps_lock(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
/*
* See vlv_power_sequencer_reset() why we need
* a power domain reference here.
*/
- power_domain = intel_display_port_aux_power_domain(encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
mutex_lock(&dev_priv->pps_mutex);
}
@@ -412,12 +412,10 @@ static void pps_unlock(struct intel_dp *intel_dp)
struct intel_encoder *encoder = &intel_dig_port->base;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
mutex_unlock(&dev_priv->pps_mutex);
- power_domain = intel_display_port_aux_power_domain(encoder);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
static void
@@ -916,7 +914,7 @@ static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (intel_dig_port->port == PORT_A)
- return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
else
return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
@@ -1593,6 +1591,13 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
if (bpc > 0)
bpp = min(bpp, 3*bpc);
+ /* For DP Compliance we override the computed bpp for the pipe */
+ if (intel_dp->compliance.test_data.bpc != 0) {
+ pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
+ pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
+ DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
+ pipe_config->pipe_bpp);
+ }
return bpp;
}
@@ -1613,6 +1618,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Conveniently, the link BW constants become indices with a shift... */
int min_clock = 0;
int max_clock;
+ int link_rate_index;
int bpp, mode_rate;
int link_avail, link_clock;
int common_rates[DP_MAX_SUPPORTED_RATES] = {};
@@ -1654,6 +1660,15 @@ intel_dp_compute_config(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
return false;
+ /* Use values requested by Compliance Test Request */
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ link_rate_index = intel_dp_link_rate_index(intel_dp,
+ common_rates,
+ intel_dp->compliance.test_link_rate);
+ if (link_rate_index >= 0)
+ min_clock = max_clock = link_rate_index;
+ min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
+ }
DRM_DEBUG_KMS("DP link computation with max lane count %i "
"max bw %d pixel clock %iKHz\n",
max_lane_count, common_rates[max_clock],
@@ -1753,8 +1768,7 @@ found:
* DPLL0 VCO may need to be adjusted to get the correct
* clock for eDP. This will affect cdclk as well.
*/
- if (is_edp(intel_dp) &&
- (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
+ if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
int vco;
switch (pipe_config->port_clock / 2) {
@@ -1767,7 +1781,7 @@ found:
break;
}
- to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
+ to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
}
if (!HAS_DDI(dev_priv))
@@ -1987,9 +2001,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->want_panel_vdd;
@@ -2005,8 +2017,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
port_name(intel_dig_port->port));
@@ -2064,8 +2075,6 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_digital_port *intel_dig_port =
dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
- enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -2095,8 +2104,7 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
static void edp_panel_vdd_work(struct work_struct *__work)
@@ -2209,11 +2217,8 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
static void edp_panel_off(struct intel_dp *intel_dp)
{
- struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
u32 pp;
i915_reg_t pp_ctrl_reg;
@@ -2245,8 +2250,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}
void intel_edp_panel_off(struct intel_dp *intel_dp)
@@ -2492,12 +2496,11 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
enum port port = dp_to_dig_port(intel_dp)->port;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
ret = false;
@@ -2533,7 +2536,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@@ -3080,9 +3083,8 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
if (IS_GEN9_LP(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_GEN(dev_priv) >= 9) {
- if (dev_priv->vbt.edp.low_vswing && port == PORT_A)
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
- return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ return intel_ddi_dp_voltage_max(encoder);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (IS_GEN7(dev_priv) && port == PORT_A)
@@ -3922,19 +3924,112 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_ACK;
- return test_result;
+ int status = 0;
+ int min_lane_count = 1;
+ int common_rates[DP_MAX_SUPPORTED_RATES] = {};
+ int link_rate_index, test_link_rate;
+ uint8_t test_lane_count, test_link_bw;
+	/* (DP CTS 1.2) 4.3.1.11 */
+	/* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
+ &test_lane_count);
+
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Lane count read failed\n");
+ return DP_TEST_NAK;
+ }
+ test_lane_count &= DP_MAX_LANE_COUNT_MASK;
+ /* Validate the requested lane count */
+ if (test_lane_count < min_lane_count ||
+ test_lane_count > intel_dp->max_sink_lane_count)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
+ &test_link_bw);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Link Rate read failed\n");
+ return DP_TEST_NAK;
+ }
+ /* Validate the requested link rate */
+ test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+ link_rate_index = intel_dp_link_rate_index(intel_dp,
+ common_rates,
+ test_link_rate);
+ if (link_rate_index < 0)
+ return DP_TEST_NAK;
+
+ intel_dp->compliance.test_lane_count = test_lane_count;
+ intel_dp->compliance.test_link_rate = test_link_rate;
+
+ return DP_TEST_ACK;
}
static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
- return test_result;
+ uint8_t test_pattern;
+	uint8_t test_misc;
+ __be16 h_width, v_height;
+ int status = 0;
+
+ /* Read the TEST_PATTERN (DP CTS 3.1.5) */
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
+ &test_pattern, 1);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("Test pattern read failed\n");
+ return DP_TEST_NAK;
+ }
+ if (test_pattern != DP_COLOR_RAMP)
+ return DP_TEST_NAK;
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
+ &h_width, 2);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("H Width read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
+ &v_height, 2);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("V Height read failed\n");
+ return DP_TEST_NAK;
+ }
+
+ status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
+ &test_misc, 1);
+ if (status <= 0) {
+ DRM_DEBUG_KMS("TEST MISC read failed\n");
+ return DP_TEST_NAK;
+ }
+ if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
+ return DP_TEST_NAK;
+ if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
+ return DP_TEST_NAK;
+ switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
+ case DP_TEST_BIT_DEPTH_6:
+ intel_dp->compliance.test_data.bpc = 6;
+ break;
+ case DP_TEST_BIT_DEPTH_8:
+ intel_dp->compliance.test_data.bpc = 8;
+ break;
+ default:
+ return DP_TEST_NAK;
+ }
+
+ intel_dp->compliance.test_data.video_pattern = test_pattern;
+ intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
+ intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance.test_active = 1;
+
+ return DP_TEST_ACK;
}
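The two-byte reads above land in __be16 variables because the DPCD stores TEST_H_WIDTH and TEST_V_HEIGHT most-significant byte first; this is also why the file now includes <asm/byteorder.h>. A self-contained sketch of the conversion that be16_to_cpu() performs on such a buffer:

static u16 dp_test_dimension(const u8 raw[2])
{
	/* MSB-first byte pair -> native-endian value, as be16_to_cpu()
	 * does for a __be16 read straight out of the DPCD. */
	return ((u16)raw[0] << 8) | raw[1];
}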
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
- uint8_t test_result = DP_TEST_NAK;
+ uint8_t test_result = DP_TEST_ACK;
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct drm_connector *connector = &intel_connector->base;
@@ -3969,7 +4064,7 @@ static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Failed to write EDID checksum\n");
test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
- intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_STANDARD;
+ intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
}
/* Set test active flag here so userspace doesn't interrupt things */
@@ -3987,45 +4082,42 @@ static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
uint8_t response = DP_TEST_NAK;
- uint8_t rxdata = 0;
- int status = 0;
+ uint8_t request = 0;
+ int status;
- status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
+ status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
if (status <= 0) {
DRM_DEBUG_KMS("Could not read test request from sink\n");
goto update_status;
}
- switch (rxdata) {
+ switch (request) {
case DP_TEST_LINK_TRAINING:
DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
- intel_dp->compliance.test_type = DP_TEST_LINK_TRAINING;
response = intel_dp_autotest_link_training(intel_dp);
break;
case DP_TEST_LINK_VIDEO_PATTERN:
DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
- intel_dp->compliance.test_type = DP_TEST_LINK_VIDEO_PATTERN;
response = intel_dp_autotest_video_pattern(intel_dp);
break;
case DP_TEST_LINK_EDID_READ:
DRM_DEBUG_KMS("EDID test requested\n");
- intel_dp->compliance.test_type = DP_TEST_LINK_EDID_READ;
response = intel_dp_autotest_edid(intel_dp);
break;
case DP_TEST_LINK_PHY_TEST_PATTERN:
DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
- intel_dp->compliance.test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
response = intel_dp_autotest_phy_pattern(intel_dp);
break;
default:
- DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
+ DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
break;
}
+ if (response & DP_TEST_ACK)
+ intel_dp->compliance.test_type = request;
+
update_status:
- status = drm_dp_dpcd_write(&intel_dp->aux,
- DP_TEST_RESPONSE,
- &response, 1);
+ status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
if (status <= 0)
DRM_DEBUG_KMS("Could not write test response to sink\n");
}
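The rewritten dispatcher relies on the single-byte DPCD helpers: drm_dp_dpcd_readb() and drm_dp_dpcd_writeb() return the number of bytes transferred (1) on success or a negative errno, hence the "status <= 0" checks. A hypothetical wrapper, not part of the patch, that folds the check into a predicate:

static bool dpcd_readb_ok(struct drm_dp_aux *aux, unsigned int offset, u8 *val)
{
	/* Exactly one byte transferred means success. */
	return drm_dp_dpcd_readb(aux, offset, val) == 1;
}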
@@ -4137,9 +4229,8 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
if (!intel_dp->lane_count)
return;
- /* if link training is requested we should perform it always */
- if ((intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) ||
- (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))) {
+ /* Retrain if Channel EQ or CR not ok */
+ if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
intel_encoder->base.name);
@@ -4164,6 +4255,7 @@ static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector = 0;
u8 old_sink_count = intel_dp->sink_count;
bool ret;
@@ -4197,7 +4289,7 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
+ intel_dp_handle_test_request(intel_dp);
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
@@ -4205,6 +4297,11 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
intel_dp_check_link_status(intel_dp);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
+ DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+ /* Send a Hotplug Uevent to userspace to start modeset */
+ drm_kms_helper_hotplug_event(intel_encoder->base.dev);
+ }
return true;
}
@@ -4213,9 +4310,13 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
+ struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
uint8_t *dpcd = intel_dp->dpcd;
uint8_t type;
+ if (lspcon->active)
+ lspcon_resume(lspcon);
+
if (!intel_dp_get_dpcd(intel_dp))
return connector_status_disconnected;
@@ -4474,11 +4575,9 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
- enum intel_display_power_domain power_domain;
u8 sink_irq_vector = 0;
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_get(to_i915(dev), power_domain);
+ intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp))
@@ -4511,11 +4610,15 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
yesno(intel_dp_source_supports_hbr2(intel_dp)),
yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
- /* Set the max lane count for sink */
- intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+ if (intel_dp->reset_link_params) {
+ /* Set the max lane count for sink */
+ intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
+
+ /* Set the max link BW for sink */
+ intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
- /* Set the max link BW for sink */
- intel_dp->max_sink_link_bw = intel_dp_max_link_bw(intel_dp);
+ intel_dp->reset_link_params = false;
+ }
intel_dp_print_rates(intel_dp);
@@ -4575,7 +4678,7 @@ out:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
- intel_display_power_put(to_i915(dev), power_domain);
+ intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
return status;
}
@@ -4603,7 +4706,6 @@ intel_dp_force(struct drm_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
- enum intel_display_power_domain power_domain;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -4612,12 +4714,11 @@ intel_dp_force(struct drm_connector *connector)
if (connector->status != connector_status_connected)
return;
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
intel_dp_set_edid(intel_dp);
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DP;
@@ -4852,7 +4953,6 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4866,8 +4966,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* indefinitely.
*/
DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
- power_domain = intel_display_port_aux_power_domain(&intel_dig_port->base);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
edp_panel_vdd_schedule_off(intel_dp);
}
@@ -4897,6 +4996,8 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
if (lspcon->active)
lspcon_resume(lspcon);
+ intel_dp->reset_link_params = true;
+
pps_lock(intel_dp);
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -4939,10 +5040,8 @@ enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum intel_display_power_domain power_domain;
enum irqreturn ret = IRQ_NONE;
if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
@@ -4966,12 +5065,12 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
long_hpd ? "long" : "short");
if (long_hpd) {
+ intel_dp->reset_link_params = true;
intel_dp->detect_done = false;
return IRQ_NONE;
}
- power_domain = intel_display_port_aux_power_domain(intel_encoder);
- intel_display_power_get(dev_priv, power_domain);
+ intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
if (intel_dp->is_mst) {
if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
@@ -4999,7 +5098,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
ret = IRQ_HANDLED;
put_power:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
return ret;
}
@@ -5790,6 +5889,41 @@ out_vdd_off:
return false;
}
+/* Set up the hotplug pin and aux power domain. */
+static void
+intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
+{
+ struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+
+ switch (intel_dig_port->port) {
+ case PORT_A:
+ encoder->hpd_pin = HPD_PORT_A;
+ intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
+ break;
+ case PORT_B:
+ encoder->hpd_pin = HPD_PORT_B;
+ intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
+ break;
+ case PORT_C:
+ encoder->hpd_pin = HPD_PORT_C;
+ intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
+ break;
+ case PORT_D:
+ encoder->hpd_pin = HPD_PORT_D;
+ intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
+ break;
+ case PORT_E:
+ encoder->hpd_pin = HPD_PORT_E;
+
+ /* FIXME: Check VBT for actual wiring of PORT E */
+ intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
+ break;
+ default:
+ MISSING_CASE(intel_dig_port->port);
+ }
+}
+
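The switch above could equally be written as a lookup table; a sketch of that alternative follows (illustrative only — the patch keeps the switch so the PORT_E FIXME and the MISSING_CASE() diagnostic stay explicit):

static const struct {
	enum hpd_pin hpd_pin;
	enum intel_display_power_domain aux_domain;
} port_info[] = {
	[PORT_A] = { HPD_PORT_A, POWER_DOMAIN_AUX_A },
	[PORT_B] = { HPD_PORT_B, POWER_DOMAIN_AUX_B },
	[PORT_C] = { HPD_PORT_C, POWER_DOMAIN_AUX_C },
	[PORT_D] = { HPD_PORT_D, POWER_DOMAIN_AUX_D },
	[PORT_E] = { HPD_PORT_E, POWER_DOMAIN_AUX_D }, /* FIXME: check VBT wiring */
};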
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
@@ -5807,6 +5941,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dig_port->max_lanes, port_name(port)))
return false;
+ intel_dp->reset_link_params = true;
intel_dp->pps_pipe = INVALID_PIPE;
intel_dp->active_pipe = INVALID_PIPE;
@@ -5863,6 +5998,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
+ intel_dp_init_connector_port_info(intel_dig_port);
+
intel_dp_aux_init(intel_dp);
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
@@ -5875,29 +6012,6 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
- /* Set up the hotplug pin. */
- switch (port) {
- case PORT_A:
- intel_encoder->hpd_pin = HPD_PORT_A;
- break;
- case PORT_B:
- intel_encoder->hpd_pin = HPD_PORT_B;
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- intel_encoder->hpd_pin = HPD_PORT_A;
- break;
- case PORT_C:
- intel_encoder->hpd_pin = HPD_PORT_C;
- break;
- case PORT_D:
- intel_encoder->hpd_pin = HPD_PORT_D;
- break;
- case PORT_E:
- intel_encoder->hpd_pin = HPD_PORT_E;
- break;
- default:
- BUG();
- }
-
/* init MST on ports that can support it */
if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
(port == PORT_B || port == PORT_C || port == PORT_D))
@@ -5982,6 +6096,7 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_dig_port->max_lanes = 4;
intel_encoder->type = INTEL_OUTPUT_DP;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
intel_encoder->crtc_mask = 1 << 2;
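intel_port_to_power_domain(), whose prototype replaces the two per-encoder lookups later in the intel_drv.h hunk, resolves the port's power domain once at encoder init. A hedged sketch of the mapping it is assumed to perform (the real implementation lives elsewhere in i915 and may differ per platform):

static enum intel_display_power_domain
example_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A: return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B: return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C: return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D: return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E: return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:     return POWER_DOMAIN_PORT_OTHER;
	}
}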
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index f51574f7f160..c1f62eb07c07 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -47,6 +47,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->has_pch_encoder = false;
bpp = 24;
+ if (intel_dp->compliance.test_data.bpc) {
+ bpp = intel_dp->compliance.test_data.bpc * 3;
+ DRM_DEBUG_KMS("Setting pipe bpp to %d\n",
+ bpp);
+ }
/*
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
@@ -55,7 +60,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
pipe_config->lane_count = lane_count;
- pipe_config->pipe_bpp = 24;
+ pipe_config->pipe_bpp = bpp;
pipe_config->port_clock = intel_dp_max_link_rate(intel_dp);
state = pipe_config->base.state;
@@ -87,7 +92,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -98,10 +102,8 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder,
if (ret) {
DRM_ERROR("failed to update payload %d\n", ret);
}
- if (old_crtc_state->has_audio) {
+ if (old_crtc_state->has_audio)
intel_audio_codec_disable(encoder);
- intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
- }
}
static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
@@ -156,23 +158,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
- if (intel_dp->active_mst_links == 0) {
- intel_ddi_clk_select(&intel_dig_port->base,
- pipe_config->shared_dpll);
-
- intel_prepare_dp_ddi_buffers(&intel_dig_port->base);
- intel_dp_set_link_params(intel_dp,
- pipe_config->port_clock,
- pipe_config->lane_count,
- true);
-
- intel_ddi_init_dp_buf_reg(&intel_dig_port->base);
-
- intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
-
- intel_dp_start_link_train(intel_dp);
- intel_dp_stop_link_train(intel_dp);
- }
+ if (intel_dp->active_mst_links == 0)
+ intel_dig_port->base.pre_enable(&intel_dig_port->base,
+ pipe_config, NULL);
ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr,
connector->port,
@@ -214,10 +202,8 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- if (pipe_config->has_audio) {
- intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+ if (pipe_config->has_audio)
intel_audio_codec_enable(encoder, pipe_config, conn_state);
- }
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -548,6 +534,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
intel_encoder->type = INTEL_OUTPUT_DP_MST;
+ intel_encoder->power_domain = intel_dig_port->base.power_domain;
intel_encoder->port = intel_dig_port->port;
intel_encoder->crtc_mask = 0x7;
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index e59e43a9f3a6..b4de632f1158 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -42,44 +42,6 @@
* commit phase.
*/
-struct intel_shared_dpll *
-skl_find_link_pll(struct drm_i915_private *dev_priv, int clock)
-{
- struct intel_shared_dpll *pll = NULL;
- struct intel_dpll_hw_state dpll_hw_state;
- enum intel_dpll_id i;
- bool found = false;
-
- if (!skl_ddi_dp_set_dpll_hw_state(clock, &dpll_hw_state))
- return pll;
-
- for (i = DPLL_ID_SKL_DPLL1; i <= DPLL_ID_SKL_DPLL3; i++) {
- pll = &dev_priv->shared_dplls[i];
-
- /* Only want to check enabled timings first */
- if (pll->state.crtc_mask == 0)
- continue;
-
- if (memcmp(&dpll_hw_state, &pll->state.hw_state,
- sizeof(pll->state.hw_state)) == 0) {
- found = true;
- break;
- }
- }
-
- /* Ok no matching timings, maybe there's a free one? */
- for (i = DPLL_ID_SKL_DPLL1;
- ((found == false) && (i <= DPLL_ID_SKL_DPLL3)); i++) {
- pll = &dev_priv->shared_dplls[i];
- if (pll->state.crtc_mask == 0) {
- pll->state.hw_state = dpll_hw_state;
- break;
- }
- }
-
- return pll;
-}
-
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -811,8 +773,8 @@ static struct intel_shared_dpll *hsw_ddi_hdmi_get_dpll(int clock,
return pll;
}
-struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
- int clock)
+static struct intel_shared_dpll *
+hsw_ddi_dp_get_dpll(struct intel_encoder *encoder, int clock)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_shared_dpll *pll;
@@ -1360,8 +1322,9 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc *crtc,
}
-bool skl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+static bool
+skl_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state)
{
uint32_t ctrl1;
@@ -1816,8 +1779,9 @@ static bool bxt_ddi_set_dpll_hw_state(int clock,
return true;
}
-bool bxt_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state)
+static bool
+bxt_ddi_dp_set_dpll_hw_state(int clock,
+ struct intel_dpll_hw_state *dpll_hw_state)
{
struct bxt_clk_div clk_div = {0};
@@ -2016,7 +1980,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
const struct dpll_info *dpll_info;
int i;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
dpll_mgr = &skl_pll_mgr;
else if (IS_GEN9_LP(dev_priv))
dpll_mgr = &bxt_pll_mgr;
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.h b/drivers/gpu/drm/i915/intel_dpll_mgr.h
index af1497eb4f9c..f8d13a947c13 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.h
@@ -282,20 +282,4 @@ void intel_shared_dpll_init(struct drm_device *dev);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
struct intel_dpll_hw_state *hw_state);
-/* BXT dpll related functions */
-bool bxt_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state);
-
-
-/* SKL dpll related functions */
-bool skl_ddi_dp_set_dpll_hw_state(int clock,
- struct intel_dpll_hw_state *dpll_hw_state);
-struct intel_shared_dpll *skl_find_link_pll(struct drm_i915_private *dev_priv,
- int clock);
-
-
-/* HSW dpll related functions */
-struct intel_shared_dpll *hsw_ddi_dp_get_dpll(struct intel_encoder *encoder,
- int clock);
-
#endif /* _INTEL_DPLL_MGR_H_ */
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 064582963ff6..51228fe4283b 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -242,6 +242,9 @@ struct intel_encoder {
* be set correctly before calling this function. */
void (*get_config)(struct intel_encoder *,
struct intel_crtc_state *pipe_config);
+ /* Returns a mask of power domains that need to be referenced as part
+ * of the hardware state readout code. */
+ u64 (*get_power_domains)(struct intel_encoder *encoder);
/*
* Called during system suspend after all pending requests for the
* encoder are flushed (for example for DP AUX transactions) and
@@ -250,6 +253,7 @@ struct intel_encoder {
void (*suspend)(struct intel_encoder *);
int crtc_mask;
enum hpd_pin hpd_pin;
+ enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
const struct drm_connector *audio_connector;
};
@@ -334,13 +338,20 @@ struct dpll {
struct intel_atomic_state {
struct drm_atomic_state base;
- unsigned int cdclk;
-
- /*
- * Calculated device cdclk, can be different from cdclk
- * only when all crtc's are DPMS off.
- */
- unsigned int dev_cdclk;
+ struct {
+ /*
+ * Logical state of cdclk (used for all scaling, watermark,
+ * etc. calculations and checks). This is computed as if all
+ * enabled crtcs were active.
+ */
+ struct intel_cdclk_state logical;
+
+ /*
+ * Actual state of cdclk, can be different from the logical
+ * state only when all crtc's are DPMS off.
+ */
+ struct intel_cdclk_state actual;
+ } cdclk;
bool dpll_set, modeset;
@@ -357,9 +368,6 @@ struct intel_atomic_state {
unsigned int active_crtcs;
unsigned int min_pixclk[I915_MAX_PIPES];
- /* SKL/KBL Only */
- unsigned int cdclk_pll_vco;
-
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
/*
@@ -485,6 +493,24 @@ struct skl_pipe_wm {
uint32_t linetime;
};
+enum vlv_wm_level {
+ VLV_WM_LEVEL_PM2,
+ VLV_WM_LEVEL_PM5,
+ VLV_WM_LEVEL_DDR_DVFS,
+ NUM_VLV_WM_LEVELS,
+};
+
+struct vlv_wm_state {
+ struct vlv_pipe_wm wm[NUM_VLV_WM_LEVELS];
+ struct vlv_sr_wm sr[NUM_VLV_WM_LEVELS];
+ uint8_t num_levels;
+ bool cxsr;
+};
+
+struct vlv_fifo_state {
+ u16 plane[I915_MAX_PLANES];
+};
+
struct intel_crtc_wm_state {
union {
struct {
@@ -509,6 +535,17 @@ struct intel_crtc_wm_state {
struct skl_pipe_wm optimal;
struct skl_ddb_entry ddb;
} skl;
+
+ struct {
+ /* "raw" watermarks (not inverted) */
+ struct vlv_pipe_wm raw[NUM_VLV_WM_LEVELS];
+ /* intermediate watermarks (inverted) */
+ struct vlv_wm_state intermediate;
+ /* optimal watermarks (inverted) */
+ struct vlv_wm_state optimal;
+ /* display FIFO split */
+ struct vlv_fifo_state fifo_state;
+ } vlv;
};
/*
@@ -539,12 +576,19 @@ struct intel_crtc_state {
bool disable_cxsr;
bool update_wm_pre, update_wm_post; /* watermarks are updated */
bool fb_changed; /* fb on any of the planes is changed */
+ bool fifo_changed; /* FIFO split is changed */
/* Pipe source size (ie. panel fitter input size)
* All planes will be positioned inside this space,
* and get clipped at the edges. */
int pipe_src_w, pipe_src_h;
+ /*
+ * Pipe pixel rate, adjusted for
+ * panel fitter/pipe scaler downscaling.
+ */
+ unsigned int pixel_rate;
+
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@@ -581,6 +625,14 @@ struct intel_crtc_state {
*/
bool dither;
+ /*
+	 * Dithering gets enabled for 18bpp, which causes CRC mismatch errors
+	 * in compliance video pattern tests.
+	 * Disable dithering only when a compliance test requests 18bpp.
+ */
+ bool dither_force_disable;
+
/* Controls for the clock computation, to override various stages. */
bool clock_set;
@@ -674,15 +726,9 @@ struct intel_crtc_state {
/* Gamma mode programmed on the pipe */
uint32_t gamma_mode;
-};
-struct vlv_wm_state {
- struct vlv_pipe_wm wm[3];
- struct vlv_sr_wm sr[3];
- uint8_t num_active_planes;
- uint8_t num_levels;
- uint8_t level;
- bool cxsr;
+ /* bitmask of visible planes (enum plane_id) */
+ u8 active_planes;
};
struct intel_crtc {
@@ -698,7 +744,7 @@ struct intel_crtc {
bool active;
bool lowfreq_avail;
u8 plane_ids_mask;
- unsigned long enabled_power_domains;
+ unsigned long long enabled_power_domains;
struct intel_overlay *overlay;
struct intel_flip_work *flip_work;
@@ -730,10 +776,8 @@ struct intel_crtc {
/* watermarks currently being used */
union {
struct intel_pipe_wm ilk;
+ struct vlv_wm_state vlv;
} active;
-
- /* allow CxSR on this pipe */
- bool cxsr_allowed;
} wm;
int scanline_offset;
@@ -747,27 +791,6 @@ struct intel_crtc {
/* scalers available on this crtc */
int num_scalers;
-
- struct vlv_wm_state wm_state;
-};
-
-struct intel_plane_wm_parameters {
- uint32_t horiz_pixels;
- uint32_t vert_pixels;
- /*
- * For packed pixel formats:
- * bytes_per_pixel - holds bytes per pixel
- * For planar pixel formats:
- * bytes_per_pixel - holds bytes per pixel for uv-plane
- * y_bytes_per_pixel - holds bytes per pixel for y-plane
- */
- uint8_t bytes_per_pixel;
- uint8_t y_bytes_per_pixel;
- bool enabled;
- bool scaled;
- u64 tiling;
- unsigned int rotation;
- uint16_t fifo_size;
};
struct intel_plane {
@@ -779,13 +802,6 @@ struct intel_plane {
int max_downscale;
uint32_t frontbuffer_bit;
- /* Since we need to change the watermarks before/after
- * enabling/disabling the planes, we need to store the parameters here
- * as the other pieces of the struct may not reflect the values we want
- * for the watermark calculations. Currently only Haswell uses this.
- */
- struct intel_plane_wm_parameters wm;
-
/*
* NOTE: Do not place new plane state fields here (e.g., when adding
* new plane properties). New runtime state should now be placed in
@@ -891,12 +907,17 @@ struct intel_dp_desc {
struct intel_dp_compliance_data {
unsigned long edid;
+ uint8_t video_pattern;
+ uint16_t hdisplay, vdisplay;
+ uint8_t bpc;
};
struct intel_dp_compliance {
unsigned long test_type;
struct intel_dp_compliance_data test_data;
bool test_active;
+ int test_link_rate;
+ u8 test_lane_count;
};
struct intel_dp {
@@ -911,6 +932,7 @@ struct intel_dp {
bool has_audio;
bool detect_done;
bool channel_eq_status;
+ bool reset_link_params;
enum hdmi_force_audio force_audio;
bool limited_color_range;
bool color_range_auto;
@@ -928,6 +950,7 @@ struct intel_dp {
/* sink or branch descriptor */
struct intel_dp_desc desc;
struct drm_dp_aux aux;
+ enum intel_display_power_domain aux_power_domain;
uint8_t train_set[4];
int panel_power_up_delay;
int panel_power_down_delay;
@@ -990,7 +1013,6 @@ struct intel_dp {
struct intel_lspcon {
bool active;
enum drm_lspcon_mode mode;
- bool desc_valid;
};
struct intel_digital_port {
@@ -1003,6 +1025,7 @@ struct intel_digital_port {
enum irqreturn (*hpd_pulse)(struct intel_digital_port *, bool);
bool release_cl2_override;
uint8_t max_lanes;
+ enum intel_display_power_domain ddi_io_power_domain;
};
struct intel_dp_mst_encoder {
@@ -1097,7 +1120,19 @@ intel_attached_encoder(struct drm_connector *connector)
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
- return container_of(encoder, struct intel_digital_port, base.base);
+ struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+
+ switch (intel_encoder->type) {
+ case INTEL_OUTPUT_UNKNOWN:
+ WARN_ON(!HAS_DDI(to_i915(encoder->dev)));
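+		/* fall through */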
+ case INTEL_OUTPUT_DP:
+ case INTEL_OUTPUT_EDP:
+ case INTEL_OUTPUT_HDMI:
+ return container_of(encoder, struct intel_digital_port,
+ base.base);
+ default:
+ return NULL;
+ }
}
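Because enc_to_dig_port() can now return NULL for non-digital encoder types, callers that may be handed an arbitrary encoder need a guard. An assumed caller pattern, not from this patch:

static void example_handle_encoder(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);

	if (!dig_port)
		return;	/* analog or other non-digital output */

	/* ... operate on dig_port ... */
}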
static inline struct intel_dp_mst_encoder *
@@ -1153,7 +1188,13 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
-u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
+
+static inline u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915,
+ u32 mask)
+{
+ return mask & ~i915->rps.pm_intrmsk_mbz;
+}
+
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
@@ -1185,40 +1226,36 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *intel_encoder,
struct intel_crtc_state *old_crtc_state,
struct drm_connector_state *old_conn_state);
void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder);
-void hsw_fdi_link_train(struct drm_crtc *crtc);
+void hsw_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
-void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc);
+void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
-void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
-void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
+void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
bool intel_ddi_pll_select(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state);
-void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
+void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state);
void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc);
void intel_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-struct intel_encoder *
-intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state);
void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder);
void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
-void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
+void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
+ bool state);
uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
-struct intel_shared_dpll *intel_ddi_get_link_dpll(struct intel_dp *intel_dp,
- int clock);
-unsigned int intel_fb_align_height(struct drm_device *dev,
- unsigned int height,
- uint32_t pixel_format,
- uint64_t fb_format_modifier);
-u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, uint32_t pixel_format);
+u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder);
+
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int plane, unsigned int height);
/* intel_audio.c */
void intel_init_audio_hooks(struct drm_i915_private *dev_priv);
@@ -1231,12 +1268,28 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
void intel_audio_init(struct drm_i915_private *dev_priv);
void intel_audio_deinit(struct drm_i915_private *dev_priv);
+/* intel_cdclk.c */
+void skl_init_cdclk(struct drm_i915_private *dev_priv);
+void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
+void bxt_init_cdclk(struct drm_i915_private *dev_priv);
+void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
+void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
+void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_cdclk(struct drm_i915_private *dev_priv);
+void intel_update_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_state_compare(const struct intel_cdclk_state *a,
+ const struct intel_cdclk_state *b);
+void intel_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_state *cdclk_state);
+
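With cdclk handling split into intel_cdclk.c, a commit path can compare the state carried in intel_atomic_state against the current hardware state before reprogramming. An illustrative helper, under the assumption that intel_cdclk_state_compare() returns true when the two states match:

static void example_commit_cdclk(struct drm_i915_private *dev_priv,
				 const struct intel_atomic_state *state)
{
	/* dev_priv->cdclk.hw mirrors what the hardware is running now. */
	if (intel_cdclk_state_compare(&dev_priv->cdclk.hw,
				      &state->cdclk.actual))
		return;	/* nothing to do */

	intel_set_cdclk(dev_priv, &state->cdclk.actual);
}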
/* intel_display.c */
enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc);
-void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
void intel_update_rawclk(struct drm_i915_private *dev_priv);
+int vlv_get_hpll_vco(struct drm_i915_private *dev_priv);
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
+int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
+ const char *name, u32 reg);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
extern const struct drm_plane_funcs intel_plane_funcs;
@@ -1311,9 +1364,8 @@ struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
-__intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd2 *mode_cmd,
- struct drm_i915_gem_object *obj);
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
@@ -1332,9 +1384,6 @@ int intel_plane_atomic_set_property(struct drm_plane *plane,
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state);
-unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
- uint64_t fb_modifier, unsigned int cpp);
-
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -1366,14 +1415,10 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv);
void intel_finish_reset(struct drm_i915_private *dev_priv);
void hsw_enable_pc8(struct drm_i915_private *dev_priv);
void hsw_disable_pc8(struct drm_i915_private *dev_priv);
-void bxt_init_cdclk(struct drm_i915_private *dev_priv);
-void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
void bxt_enable_dc9(struct drm_i915_private *dev_priv);
void bxt_disable_dc9(struct drm_i915_private *dev_priv);
void gen9_enable_dc5(struct drm_i915_private *dev_priv);
-void skl_init_cdclk(struct drm_i915_private *dev_priv);
-void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
unsigned int skl_cdclk_get_vco(unsigned int freq);
void skl_enable_dc6(struct drm_i915_private *dev_priv);
void skl_disable_dc6(struct drm_i915_private *dev_priv);
@@ -1388,10 +1433,7 @@ int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
bool intel_crtc_active(struct intel_crtc *crtc);
void hsw_enable_ips(struct intel_crtc *crtc);
void hsw_disable_ips(struct intel_crtc *crtc);
-enum intel_display_power_domain
-intel_display_port_power_domain(struct intel_encoder *intel_encoder);
-enum intel_display_power_domain
-intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder);
+enum intel_display_power_domain intel_port_to_power_domain(enum port port);
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
struct intel_crtc_state *pipe_config);
@@ -1664,6 +1706,7 @@ int intel_power_domains_init(struct drm_i915_private *);
void intel_power_domains_fini(struct drm_i915_private *);
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume);
void intel_power_domains_suspend(struct drm_i915_private *dev_priv);
+void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume);
void bxt_display_core_uninit(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
@@ -1692,10 +1735,8 @@ static inline void
assert_rpm_wakelock_held(struct drm_i915_private *dev_priv)
{
assert_rpm_device_not_suspended(dev_priv);
- /* FIXME: Needs to be converted back to WARN_ONCE, but currently causes
- * too much noise. */
- if (!atomic_read(&dev_priv->pm.wakeref_count))
- DRM_DEBUG_DRIVER("RPM wakelock ref not held during HW access");
+ WARN_ONCE(!atomic_read(&dev_priv->pm.wakeref_count),
+ "RPM wakelock ref not held during HW access");
}
/**
@@ -1783,6 +1824,7 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
struct skl_ddb_allocation *ddb /* out */);
void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc,
struct skl_pipe_wm *out);
+void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_atomic_state *state);
int intel_enable_sagv(struct drm_i915_private *dev_priv);
int intel_disable_sagv(struct drm_i915_private *dev_priv);
@@ -1791,7 +1833,6 @@ bool skl_wm_level_equals(const struct skl_wm_level *l1,
bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry **entries,
const struct skl_ddb_entry *ddb,
int ignore);
-uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
bool ilk_disable_lp_wm(struct drm_device *dev);
int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
static inline int intel_enable_rc6(void)
@@ -1865,9 +1906,9 @@ intel_atomic_get_existing_plane_state(struct drm_atomic_state *state,
return to_intel_plane_state(plane_state);
}
-int intel_atomic_setup_scalers(struct drm_device *dev,
- struct intel_crtc *intel_crtc,
- struct intel_crtc_state *crtc_state);
+int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct intel_crtc_state *crtc_state);
/* intel_atomic_plane.c */
struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 16732e7bc08e..3ffe8b1f1d48 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -28,7 +28,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
-#include <drm/drm_panel.h>
#include <drm/drm_mipi_dsi.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
@@ -36,16 +35,6 @@
#include "intel_drv.h"
#include "intel_dsi.h"
-static const struct {
- u16 panel_id;
- struct drm_panel * (*init)(struct intel_dsi *intel_dsi, u16 panel_id);
-} intel_dsi_drivers[] = {
- {
- .panel_id = MIPI_DSI_GENERIC_PANEL_ID,
- .init = vbt_panel_init,
- },
-};
-
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
u16 burst_mode_ratio)
@@ -80,7 +69,7 @@ enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
}
}
-static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
{
struct drm_encoder *encoder = &intel_dsi->base.base;
struct drm_device *dev = encoder->dev;
@@ -357,41 +346,132 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
return true;
}
-static void bxt_dsi_device_ready(struct intel_encoder *encoder)
+static void glk_dsi_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 val;
+ u32 tmp, val;
- DRM_DEBUG_KMS("\n");
+	/*
+	 * Set the MIPI mode. If MIPI mode is off, writes to the LP_Wake bit
+	 * do not take effect, so power on the MIPI IO first and only then
+	 * write the IO reset and LP wake bits.
+	 */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(MIPI_CTRL(port));
+ I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
+ }
+
+ /* Put the IO into reset */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp &= ~GLK_MIPIIO_RESET_RELEASED;
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+
+ /* Program LP Wake */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp |= GLK_LP_WAKE;
+ I915_WRITE(MIPI_CTRL(port), tmp);
+ }
- /* Exit Low power state in 4 steps*/
+ /* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port), GLK_MIPIIO_PORT_POWERED,
+ GLK_MIPIIO_PORT_POWERED, 20))
+ DRM_ERROR("MIPIO port is powergated\n");
+ }
- /* 1. Enable MIPI PHY transparent latch */
- val = I915_READ(BXT_MIPI_PORT_CTRL(port));
- I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
- usleep_range(2000, 2500);
+ /* Wait for MIPI PHY status bit to set */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port), GLK_PHY_STATUS_PORT_READY,
+ GLK_PHY_STATUS_PORT_READY, 20))
+ DRM_ERROR("PHY is not ON\n");
+ }
- /* 2. Enter ULPS */
+ /* Get IO out of reset */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp | GLK_MIPIIO_RESET_RELEASED);
+
+	/* Get IO out of low power state */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+ val = I915_READ(MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= DEVICE_READY;
+ I915_WRITE(MIPI_DEVICE_READY(port), val);
+ usleep_range(10, 15);
+ }
+
+ /* Enter ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- /* at least 2us - relaxed for hrtimer subsystem optimization */
- usleep_range(10, 50);
- /* 3. Exit ULPS */
+ /* Wait for ULPS Not active */
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port), GLK_ULPS_NOT_ACTIVE,
+ GLK_ULPS_NOT_ACTIVE, 20))
+ DRM_ERROR("ULPS is still active\n");
+
+ /* Exit ULPS */
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
I915_WRITE(MIPI_DEVICE_READY(port), val);
- usleep_range(1000, 1500);
- /* Clear ULPS and set device ready */
+ /* Enter Normal Mode */
+ val = I915_READ(MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
+ I915_WRITE(MIPI_DEVICE_READY(port), val);
+
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= ~GLK_LP_WAKE;
+ I915_WRITE(MIPI_CTRL(port), tmp);
+ }
+
+ /* Wait for Stop state */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port), GLK_DATA_LANE_STOP_STATE,
+ GLK_DATA_LANE_STOP_STATE, 20))
+			DRM_ERROR("Data lane not in STOP state\n");
+ }
+
+ /* Wait for AFE LATCH */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ BXT_MIPI_PORT_CTRL(port), AFE_LATCHOUT,
+ AFE_LATCHOUT, 20))
+ DRM_ERROR("D-PHY not entering LP-11 state\n");
+ }
+}
+
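The ULPS transitions in glk_dsi_device_ready() repeat the same read-modify-write on MIPI_DEVICE_READY; a hypothetical factoring of that pattern (not part of the patch) makes the sequence easier to follow:

static void glk_dsi_set_ulps(struct drm_i915_private *dev_priv,
			     enum port port, u32 state)
{
	/* state: ULPS_STATE_ENTER, ULPS_STATE_EXIT or
	 * ULPS_STATE_NORMAL_OPERATION. */
	u32 val = I915_READ(MIPI_DEVICE_READY(port));

	val &= ~ULPS_STATE_MASK;
	val |= state | DEVICE_READY;
	I915_WRITE(MIPI_DEVICE_READY(port), val);
}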
+static void bxt_dsi_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 val;
+
+ DRM_DEBUG_KMS("\n");
+
+ /* Enable MIPI PHY transparent latch */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+ usleep_range(2000, 2500);
+ }
+
+ /* Clear ULPS and set device ready */
+ for_each_dsi_port(port, intel_dsi->ports) {
val = I915_READ(MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
+ I915_WRITE(MIPI_DEVICE_READY(port), val);
+ usleep_range(2000, 2500);
val |= DEVICE_READY;
I915_WRITE(MIPI_DEVICE_READY(port), val);
}
@@ -442,8 +522,121 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_device_ready(encoder);
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_BROXTON(dev_priv))
bxt_dsi_device_ready(encoder);
+ else if (IS_GEMINILAKE(dev_priv))
+ glk_dsi_device_ready(encoder);
+}
+
+static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 val;
+
+ /* Enter ULPS */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ val = I915_READ(MIPI_DEVICE_READY(port));
+ val &= ~ULPS_STATE_MASK;
+ val |= (ULPS_STATE_ENTER | DEVICE_READY);
+ I915_WRITE(MIPI_DEVICE_READY(port), val);
+ }
+
+ /* Wait for MIPI PHY status bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 0, 20))
+ DRM_ERROR("PHY is not turning OFF\n");
+ }
+
+ /* Wait for Pwr ACK bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port),
+ GLK_MIPIIO_PORT_POWERED, 0, 20))
+ DRM_ERROR("MIPI IO Port is not powergated\n");
+ }
+}
+
+static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 tmp;
+
+ /* Put the IO into reset */
+ tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp &= ~GLK_MIPIIO_RESET_RELEASED;
+ I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+
+ /* Wait for MIPI PHY status bit to unset */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ if (intel_wait_for_register(dev_priv,
+ MIPI_CTRL(port),
+ GLK_PHY_STATUS_PORT_READY, 0, 20))
+ DRM_ERROR("PHY is not turning OFF\n");
+ }
+
+ /* Clear MIPI mode */
+ for_each_dsi_port(port, intel_dsi->ports) {
+ tmp = I915_READ(MIPI_CTRL(port));
+ tmp &= ~GLK_MIPIIO_ENABLE;
+ I915_WRITE(MIPI_CTRL(port), tmp);
+ }
+}
+
+static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ glk_dsi_enter_low_power_mode(encoder);
+ glk_dsi_disable_mipi_io(encoder);
+}
+
+static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+
+ DRM_DEBUG_KMS("\n");
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
+ i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
+ u32 val;
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_EXIT);
+ usleep_range(2000, 2500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
+ ULPS_STATE_ENTER);
+ usleep_range(2000, 2500);
+
+ /*
+		 * On VLV/CHV, wait until the clock lanes are in the LP-00
+		 * state for MIPI Port A only. MIPI Port C has no similar
+		 * bit to check.
+ */
+ if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
+ intel_wait_for_register(dev_priv,
+ port_ctrl, AFE_LATCHOUT, 0,
+ 30))
+ DRM_ERROR("DSI LP not going Low\n");
+
+ /* Disable MIPI PHY transparent latch */
+ val = I915_READ(port_ctrl);
+ I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+ usleep_range(1000, 1500);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ usleep_range(2000, 2500);
+ }
}
static void intel_dsi_port_enable(struct intel_encoder *encoder)
@@ -456,12 +649,21 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) {
u32 temp;
-
- temp = I915_READ(VLV_CHICKEN_3);
- temp &= ~PIXEL_OVERLAP_CNT_MASK |
+ if (IS_GEN9_LP(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ temp = I915_READ(MIPI_CTRL(port));
+ temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
+ intel_dsi->pixel_overlap <<
+ BXT_PIXEL_OVERLAP_CNT_SHIFT;
+ I915_WRITE(MIPI_CTRL(port), temp);
+ }
+ } else {
+ temp = I915_READ(VLV_CHICKEN_3);
+ temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(VLV_CHICKEN_3, temp);
+ I915_WRITE(VLV_CHICKEN_3, temp);
+ }
}
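
The GLK branch above clears the overlap field before OR-ing in the new value. Folding both steps into a single expression, as the VLV/CHV branch historically does, binds as reg &= (~MASK | (value << SHIFT)) because | outranks &=, so the write can only clear bits and a zeroed field never takes the new value. A small demonstration, with a hypothetical 3-bit field at bit 13:

    #include <assert.h>
    #include <stdint.h>

    #define OVERLAP_SHIFT 13
    #define OVERLAP_MASK  (7u << OVERLAP_SHIFT)   /* assumed 3-bit field */

    static uint32_t set_overlap(uint32_t reg, uint32_t overlap)
    {
        reg &= ~OVERLAP_MASK;                 /* clear the field first */
        reg |= overlap << OVERLAP_SHIFT;      /* then install the new value */
        return reg;
    }

    int main(void)
    {
        /* A field that currently reads 0 must still accept a new value. */
        assert(set_overlap(0, 2) == 2u << OVERLAP_SHIFT);

        /* The single-expression form binds as reg &= (~MASK | (v << SHIFT)),
         * which can only clear bits, never set them: */
        uint32_t reg = 0;
        reg &= ~OVERLAP_MASK | (2u << OVERLAP_SHIFT);
        assert(reg == 0);                     /* new value was lost */
        return 0;
    }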
for_each_dsi_port(port, intel_dsi->ports) {
@@ -509,37 +711,57 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
}
}
-static void intel_dsi_enable(struct intel_encoder *encoder)
-{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
-
- DRM_DEBUG_KMS("\n");
-
- if (is_cmd_mode(intel_dsi)) {
- for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
- } else {
- msleep(20); /* XXX */
- for_each_dsi_port(port, intel_dsi->ports)
- dpi_send_cmd(intel_dsi, TURN_ON, false, port);
- msleep(100);
-
- drm_panel_enable(intel_dsi->panel);
+static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
+ struct intel_crtc_state *pipe_config);
+static void intel_dsi_unprepare(struct intel_encoder *encoder);
- for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
+static void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
- intel_dsi_port_enable(encoder);
- }
+ /* For v3 VBTs in vid-mode the delays are part of the VBT sequences */
+ if (is_vid_mode(intel_dsi) && dev_priv->vbt.dsi.seq_version >= 3)
+ return;
- intel_panel_enable_backlight(intel_dsi->attached_connector);
+ msleep(msec);
}
-static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
- struct intel_crtc_state *pipe_config);
+/*
+ * Panel enable/disable sequences from the VBT spec.
+ *
+ * Note the spec has AssertReset / DeassertReset swapped from their
+ * usual naming. We use the normal names to avoid confusion (so below
+ * they are swapped compared to the spec).
+ *
+ * Steps starting with MIPI refer to VBT sequences. Note that for v2
+ * VBTs several steps which have a dedicated sequence in v3 are expected
+ * to be handled directly by the driver, for example by driving gpios.
+ *
+ * v2 video mode seq v3 video mode seq command mode seq
+ * - power on - MIPIPanelPowerOn - power on
+ * - wait t1+t2 - wait t1+t2
+ * - MIPIDeassertResetPin - MIPIDeassertResetPin - MIPIDeassertResetPin
+ * - io lines to lp-11 - io lines to lp-11 - io lines to lp-11
+ * - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds - MIPISendInitialDcsCmds
+ * - MIPITearOn
+ * - MIPIDisplayOn
+ * - turn on DPI - turn on DPI - set pipe to dsr mode
+ * - MIPIDisplayOn - MIPIDisplayOn
+ * - wait t5 - wait t5
+ * - backlight on - MIPIBacklightOn - backlight on
+ * ... ... ... issue mem cmds ...
+ * - backlight off - MIPIBacklightOff - backlight off
+ * - wait t6 - wait t6
+ * - MIPIDisplayOff
+ * - turn off DPI - turn off DPI - disable pipe dsr mode
+ * - MIPITearOff
+ * - MIPIDisplayOff - MIPIDisplayOff
+ * - io lines to lp-00 - io lines to lp-00 - io lines to lp-00
+ * - MIPIAssertResetPin - MIPIAssertResetPin - MIPIAssertResetPin
+ * - wait t3 - wait t3
+ * - power off - MIPIPanelPowerOff - power off
+ * - wait t4 - wait t4
+ */
static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -548,6 +770,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
+ u32 val;
DRM_DEBUG_KMS("\n");
@@ -558,13 +781,16 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_disable_dsi_pll(encoder);
intel_enable_dsi_pll(encoder, pipe_config);
- intel_dsi_prepare(encoder, pipe_config);
-
- /* Panel Enable over CRC PMIC */
- if (intel_dsi->gpio_panel)
- gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ if (IS_BROXTON(dev_priv)) {
+ /* Add MIPI IO reset programming for modeset */
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
+ val | MIPIO_RST_CTRL);
- msleep(intel_dsi->panel_on_delay);
+ /* Power up DSI regulator */
+ I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ }
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
@@ -575,42 +801,88 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
I915_WRITE(DSPCLK_GATE_D, val);
}
- /* put device in ready state */
- intel_dsi_device_ready(encoder);
+ intel_dsi_prepare(encoder, pipe_config);
- drm_panel_prepare(intel_dsi->panel);
+ /* Power on, try both CRC pmic gpio and VBT */
+ if (intel_dsi->gpio_panel)
+ gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
+
+ /* Deassert reset */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
- for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
+ /* Put device in ready state (LP-11) */
+ intel_dsi_device_ready(encoder);
+
+ /* Send initialization commands in LP mode */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
	/* Enable port in pre-enable phase itself because as per hw team
	 * recommendation, port should be enabled before plane & pipe */
- intel_dsi_enable(encoder);
+ if (is_cmd_mode(intel_dsi)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+ } else {
+ msleep(20); /* XXX */
+ for_each_dsi_port(port, intel_dsi->ports)
+ dpi_send_cmd(intel_dsi, TURN_ON, false, port);
+ intel_dsi_msleep(intel_dsi, 100);
+
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
+
+ intel_dsi_port_enable(encoder);
+ }
+
+ intel_panel_enable_backlight(intel_dsi->attached_connector);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
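
The enable path above interleaves VBT sequences with fixed register programming, and the ordering is load-bearing: panel power before reset deassertion, reset deassertion before LP-11, init/OTP commands before DPI turn-on, backlight last. A data-driven sketch of that ordering; the sequence IDs are hypothetical stand-ins for enum mipi_seq:

    #include <stdio.h>

    /* Hypothetical stand-ins for the VBT sequence IDs used above. */
    enum seq_id { SEQ_POWER_ON, SEQ_DEASSERT_RESET, SEQ_INIT_OTP,
                  SEQ_DISPLAY_ON, SEQ_BACKLIGHT_ON, SEQ_COUNT };

    static const char *const seq_name[SEQ_COUNT] = {
        "power on (then wait t1+t2)",
        "deassert reset",
        "init/OTP commands in LP-11",
        "display on (after DPI TURN_ON)",
        "backlight on",
    };

    int main(void)
    {
        /* Executing the table in index order reproduces the enable path. */
        for (int i = 0; i < SEQ_COUNT; i++)
            printf("%d: %s\n", i, seq_name[i]);
        return 0;
    }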
+/*
+ * DSI port enable has to be done before pipe and plane enable, so we do it in
+ * the pre_enable hook.
+ */
static void intel_dsi_enable_nop(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
DRM_DEBUG_KMS("\n");
-
- /* for DSI port enable has to be done before pipe
- * and plane enable, so port enable is done in
- * pre_enable phase itself unlike other encoders
- */
}
-static void intel_dsi_pre_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *old_crtc_state,
- struct drm_connector_state *old_conn_state)
+/*
+ * DSI port disable has to be done after pipe and plane disable, so we do it in
+ * the post_disable hook.
+ */
+static void intel_dsi_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *old_crtc_state,
+ struct drm_connector_state *old_conn_state)
{
+ struct drm_device *dev = encoder->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
DRM_DEBUG_KMS("\n");
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
intel_panel_disable_backlight(intel_dsi->attached_connector);
+ /*
+ * Disable Device ready before the port shutdown in order
+ * to avoid split screen
+ */
+ if (IS_BROXTON(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ I915_WRITE(MIPI_DEVICE_READY(port), 0);
+ }
+
+ /*
+ * According to the spec we should send SHUTDOWN before
+ * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
+ * has shown that the v3 sequence works for v2 VBTs too
+ */
if (is_vid_mode(intel_dsi)) {
/* Send Shutdown command to the panel in LP mode */
for_each_dsi_port(port, intel_dsi->ports)
@@ -619,13 +891,25 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder,
}
}
-static void intel_dsi_disable(struct intel_encoder *encoder)
+static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
+ IS_BROXTON(dev_priv))
+ vlv_dsi_clear_device_ready(encoder);
+ else if (IS_GEMINILAKE(dev_priv))
+ glk_dsi_clear_device_ready(encoder);
+}
+
+static void intel_dsi_post_disable(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
enum port port;
- u32 temp;
+ u32 val;
DRM_DEBUG_KMS("\n");
@@ -634,85 +918,32 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
wait_for_dsi_fifo_empty(intel_dsi, port);
intel_dsi_port_disable(encoder);
- msleep(2);
+ usleep_range(2000, 5000);
}
- for_each_dsi_port(port, intel_dsi->ports) {
- /* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
-
- intel_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_dsi_unprepare(encoder);
- temp = I915_READ(MIPI_DSI_FUNC_PRG(port));
- temp &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), temp);
-
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
- }
- /* if disable packets are sent before sending shutdown packet then in
- * some next enable sequence send turn on packet error is observed */
- drm_panel_disable(intel_dsi->panel);
-
- for_each_dsi_port(port, intel_dsi->ports)
- wait_for_dsi_fifo_empty(intel_dsi, port);
-}
-
-static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum port port;
-
- DRM_DEBUG_KMS("\n");
- for_each_dsi_port(port, intel_dsi->ports) {
- /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
- u32 val;
-
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_EXIT);
- usleep_range(2000, 2500);
-
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
- usleep_range(2000, 2500);
+	/*
+	 * If the disable packets are sent before the shutdown packet, a
+	 * "send turn on packet" error is observed during a subsequent
+	 * enable sequence.
+	 */
+ if (is_cmd_mode(intel_dsi))
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_OFF);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_OFF);
- /* Wait till Clock lanes are in LP-00 state for MIPI Port A
- * only. MIPI Port C has no similar bit for checking
- */
- if (intel_wait_for_register(dev_priv,
- port_ctrl, AFE_LATCHOUT, 0,
- 30))
- DRM_ERROR("DSI LP not going Low\n");
+ /* Transition to LP-00 */
+ intel_dsi_clear_device_ready(encoder);
- /* Disable MIPI PHY transparent latch */
- val = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
- usleep_range(1000, 1500);
+ if (IS_BROXTON(dev_priv)) {
+ /* Power down DSI regulator to save power */
+ I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
- usleep_range(2000, 2500);
+ /* Add MIPI IO reset programming for modeset */
+ val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
+ val & ~MIPIO_RST_CTRL);
}
-}
-
-static void intel_dsi_post_disable(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
-
- DRM_DEBUG_KMS("\n");
-
- intel_dsi_disable(encoder);
-
- intel_dsi_clear_device_ready(encoder);
intel_disable_dsi_pll(encoder);
@@ -724,11 +955,12 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
I915_WRITE(DSPCLK_GATE_D, val);
}
- drm_panel_unprepare(intel_dsi->panel);
+ /* Assert reset */
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_ASSERT_RESET);
- msleep(intel_dsi->panel_off_delay);
-
- /* Panel Disable over CRC PMIC */
+ /* Power off, try both CRC pmic gpio and VBT */
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_off_delay);
+ intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_OFF);
if (intel_dsi->gpio_panel)
gpiod_set_value_cansleep(intel_dsi->gpio_panel, 0);
@@ -736,7 +968,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
* FIXME As we do with eDP, just make a note of the time here
* and perform the wait before the next panel power on.
*/
- msleep(intel_dsi->panel_pwr_cycle_delay);
+ intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
}
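
The FIXME above points at the eDP approach: rather than unconditionally sleeping for the full power-cycle delay, record when the panel was powered off and, before the next power-on, sleep only for whatever remains. A user-space sketch of that bookkeeping, assuming a monotonic clock (all names hypothetical):

    #include <stdint.h>
    #include <time.h>

    static struct timespec panel_power_off_time;   /* recorded at power off */

    static int64_t ms_since(const struct timespec *t)
    {
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return (int64_t)(now.tv_sec - t->tv_sec) * 1000 +
               (now.tv_nsec - t->tv_nsec) / 1000000;
    }

    /* Call at power off instead of sleeping. */
    static void note_panel_power_off(void)
    {
        clock_gettime(CLOCK_MONOTONIC, &panel_power_off_time);
    }

    /* Call before the next power on: sleep only for the remainder. */
    static void wait_panel_power_cycle(unsigned int cycle_delay_ms)
    {
        int64_t elapsed = ms_since(&panel_power_off_time);

        if (elapsed >= 0 && elapsed < cycle_delay_ms) {
            struct timespec rem = {
                .tv_sec  = (cycle_delay_ms - elapsed) / 1000,
                .tv_nsec = ((cycle_delay_ms - elapsed) % 1000) * 1000000L,
            };
            nanosleep(&rem, NULL);
        }
    }

    int main(void)
    {
        note_panel_power_off();
        wait_panel_power_cycle(500);   /* sleeps ~500 ms right after off */
        return 0;
    }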
static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
@@ -744,14 +976,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- enum intel_display_power_domain power_domain;
enum port port;
bool active = false;
DRM_DEBUG_KMS("\n");
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
/*
@@ -807,7 +1038,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
}
out_put_power:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return active;
}
@@ -1279,6 +1510,14 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
*/
I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+ if (IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
+ intel_dsi->lp_byte_clk);
+ /* Shadow of DPHY reg */
+ I915_WRITE(MIPI_CLK_LANE_TIMING(port),
+ intel_dsi->dphy_reg);
+ }
+
/* the bw essential for transmitting 16 long packets containing
* 252 bytes meant for dcs write memory command is programmed in
* this register in terms of byte clocks. based on dsi transfer
@@ -1302,6 +1541,30 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
}
+static void intel_dsi_unprepare(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ enum port port;
+ u32 val;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports) {
+ /* Panel commands can be sent when clock is in LP11 */
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+
+ intel_dsi_reset_clocks(encoder, port);
+ I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+
+ val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val &= ~VID_MODE_FORMAT_MASK;
+ I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+
+ I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ }
+ }
+}
+
static int intel_dsi_get_modes(struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -1381,12 +1644,6 @@ static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- if (intel_dsi->panel) {
- drm_panel_detach(intel_dsi->panel);
- /* XXX: Logically this call belongs in the panel driver. */
- drm_panel_remove(intel_dsi->panel);
- }
-
/* dispose of the gpios */
if (intel_dsi->gpio_panel)
gpiod_put(intel_dsi->gpio_panel);
@@ -1438,7 +1695,6 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
struct drm_connector *connector;
struct drm_display_mode *scan, *fixed_mode = NULL;
enum port port;
- unsigned int i;
DRM_DEBUG_KMS("\n");
@@ -1477,7 +1733,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
intel_encoder->enable = intel_dsi_enable_nop;
- intel_encoder->disable = intel_dsi_pre_disable;
+ intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
@@ -1485,6 +1741,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
intel_connector->get_hw_state = intel_connector_get_hw_state;
intel_encoder->port = port;
+
/*
* On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
* port C. BXT isn't limited like this.
@@ -1544,14 +1801,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
intel_dsi->dsi_hosts[port] = host;
}
- for (i = 0; i < ARRAY_SIZE(intel_dsi_drivers); i++) {
- intel_dsi->panel = intel_dsi_drivers[i].init(intel_dsi,
- intel_dsi_drivers[i].panel_id);
- if (intel_dsi->panel)
- break;
- }
-
- if (!intel_dsi->panel) {
+ if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
DRM_DEBUG_KMS("no device found\n");
goto err;
}
@@ -1560,7 +1810,8 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
* In case of BYT with CRC PMIC, we need to use GPIO for
* Panel control.
*/
- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC)) {
intel_dsi->gpio_panel =
gpiod_get(dev->dev, "panel", GPIOD_OUT_HIGH);
@@ -1571,6 +1822,7 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
}
intel_encoder->type = INTEL_OUTPUT_DSI;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_DSI;
intel_encoder->cloneable = 0;
drm_connector_init(dev, connector, &intel_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
@@ -1583,10 +1835,8 @@ void intel_dsi_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
- drm_panel_attach(intel_dsi->panel, connector);
-
mutex_lock(&dev->mode_config.mutex);
- drm_panel_get_modes(intel_dsi->panel);
+ intel_dsi_vbt_get_modes(intel_dsi);
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 5967ea6d6045..7afeb9580f41 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -39,7 +39,6 @@ struct intel_dsi_host;
struct intel_dsi {
struct intel_encoder base;
- struct drm_panel *panel;
struct intel_dsi_host *dsi_hosts[I915_MAX_PORTS];
/* GPIO Desc for CRC based Panel control */
@@ -130,6 +129,11 @@ static inline struct intel_dsi *enc_to_intel_dsi(struct drm_encoder *encoder)
return container_of(encoder, struct intel_dsi, base.base);
}
+/* intel_dsi.c */
+void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port);
+enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+
+/* intel_dsi_pll.c */
bool intel_dsi_pll_is_enabled(struct drm_i915_private *dev_priv);
int intel_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config);
@@ -141,7 +145,10 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
void intel_dsi_reset_clocks(struct intel_encoder *encoder,
enum port port);
-struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id);
-enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt);
+/* intel_dsi_vbt.c */
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
+int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi);
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id);
#endif /* _INTEL_DSI_H */
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 61440e5c2563..2ff2ee7f3b78 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -206,17 +206,24 @@ static bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
return false;
/*
- * Both dividers must be programmed with valid values even if only one
- * of the PLL is used, see BSpec/Broxton Clocks. Check this here for
+	 * Dividers must be programmed with valid values. As per BSpec, for
+	 * GEMINILAKE only the PORT A divider values are checked, while for
+	 * BXT both divider values are validated. Check this here for
* paranoia, since BIOS is known to misconfigure PLLs in this way at
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
val = I915_READ(BXT_DSI_PLL_CTL);
- if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- DRM_DEBUG_DRIVER("PLL is enabled with invalid divider settings (%08x)\n",
- val);
- enabled = false;
+ if (IS_GEMINILAKE(dev_priv)) {
+ if (!(val & BXT_DSIA_16X_MASK)) {
+ DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ enabled = false;
+ }
+ } else {
+ if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
+ DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ enabled = false;
+ }
}
return enabled;
@@ -372,6 +379,53 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
ESCAPE_CLOCK_DIVIDER_SHIFT);
}
+static void glk_dsi_program_esc_clock(struct drm_device *dev,
+ const struct intel_crtc_state *config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dsi_rate = 0;
+ u32 pll_ratio = 0;
+ u32 ddr_clk = 0;
+ u32 div1_value = 0;
+ u32 div2_value = 0;
+ u32 txesc1_div = 0;
+ u32 txesc2_div = 0;
+
+ pll_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
+
+ dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2;
+
+ ddr_clk = dsi_rate / 2;
+
+ /* Variable divider value */
+ div1_value = DIV_ROUND_CLOSEST(ddr_clk, 20000);
+
+ /* Calculate TXESC1 divider */
+ if (div1_value <= 10)
+ txesc1_div = div1_value;
+ else if ((div1_value > 10) && (div1_value <= 20))
+ txesc1_div = DIV_ROUND_UP(div1_value, 2);
+ else if ((div1_value > 20) && (div1_value <= 30))
+ txesc1_div = DIV_ROUND_UP(div1_value, 4);
+ else if ((div1_value > 30) && (div1_value <= 40))
+ txesc1_div = DIV_ROUND_UP(div1_value, 6);
+ else if ((div1_value > 40) && (div1_value <= 50))
+ txesc1_div = DIV_ROUND_UP(div1_value, 8);
+ else
+ txesc1_div = 10;
+
+ /* Calculate TXESC2 divider */
+ div2_value = DIV_ROUND_UP(div1_value, txesc1_div);
+
+ if (div2_value < 10)
+ txesc2_div = div2_value;
+ else
+ txesc2_div = 10;
+
+ I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+}
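
To make the divider ladder concrete, assume BXT_REF_CLOCK_KHZ is the 19.2 MHz reference clock (the constant is defined elsewhere) and take pll_ratio = 80: dsi_rate = 19200 * 80 / 2 = 768000 kHz, ddr_clk = 384000 kHz, div1_value = round(384000 / 20000) = 19, txesc1_div = ceil(19 / 2) = 10, div2_value = ceil(19 / 10) = 2, so txesc2_div = 2 and the escape clock lands at 384000 / (10 * 2) = 19200 kHz, just under the 20 MHz target that the /20000 step encodes. The same arithmetic as a standalone check:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    int main(void)
    {
        const unsigned int ref_khz = 19200;     /* assumed 19.2 MHz refclk */
        const unsigned int pll_ratio = 80;      /* example ratio */
        unsigned int dsi_rate = ref_khz * pll_ratio / 2;        /* 768000 kHz */
        unsigned int ddr_clk = dsi_rate / 2;                    /* 384000 kHz */
        unsigned int div1 = DIV_ROUND_CLOSEST(ddr_clk, 20000);  /* 19 */
        unsigned int txesc1 = div1 <= 10 ? div1 :
                              div1 <= 20 ? DIV_ROUND_UP(div1, 2) :
                              div1 <= 30 ? DIV_ROUND_UP(div1, 4) :
                              div1 <= 40 ? DIV_ROUND_UP(div1, 6) :
                              div1 <= 50 ? DIV_ROUND_UP(div1, 8) : 10;
        unsigned int div2 = DIV_ROUND_UP(div1, txesc1);
        unsigned int txesc2 = div2 < 10 ? div2 : 10;

        printf("txesc1=%u txesc2=%u esc=%u kHz\n",
               txesc1, txesc2, ddr_clk / (txesc1 * txesc2)); /* 10 2 19200 */
        return 0;
    }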
+
/* Program BXT Mipi clocks and dividers */
static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
const struct intel_crtc_state *config)
@@ -416,11 +470,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
rx_div_lower = rx_div & RX_DIVIDER_BIT_1_2;
rx_div_upper = (rx_div & RX_DIVIDER_BIT_3_4) >> 2;
- /* As per bpsec program the 8/3X clock divider to the below value */
- if (dev_priv->vbt.dsi.config->is_cmd_mode)
- mipi_8by3_divider = 0x2;
- else
- mipi_8by3_divider = 0x3;
+ mipi_8by3_divider = 0x2;
tmp |= BXT_MIPI_8X_BY3_DIVIDER(port, mipi_8by3_divider);
tmp |= BXT_MIPI_TX_ESCLK_DIVIDER(port, tx_div);
@@ -430,11 +480,12 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
}
-static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
+static int gen9lp_compute_dsi_pll(struct intel_encoder *encoder,
struct intel_crtc_state *config)
{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
- u8 dsi_ratio;
+ u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max;
u32 dsi_clk;
dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format,
@@ -446,11 +497,20 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
* round 'up' the result
*/
dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ);
- if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN ||
- dsi_ratio > BXT_DSI_PLL_RATIO_MAX) {
+
+ if (IS_BROXTON(dev_priv)) {
+ dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN;
+ dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX;
+ } else {
+ dsi_ratio_min = GLK_DSI_PLL_RATIO_MIN;
+ dsi_ratio_max = GLK_DSI_PLL_RATIO_MAX;
+ }
+
+ if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
		DRM_ERROR("Can't get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
- }
+	} else {
+		DRM_DEBUG_KMS("DSI PLL calculation is done\n");
+	}
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -462,13 +522,13 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
/* As per recommendation from hardware team,
* Prog PVD ratio =1 if dsi ratio <= 50
*/
- if (dsi_ratio <= 50)
+ if (IS_BROXTON(dev_priv) && dsi_ratio <= 50)
config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1;
return 0;
}
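
For a feel of the numbers: RGB888 (24 bpp) over four lanes at a 100 MHz pixel clock gives a per-lane rate of 100000 * 24 / 4 = 600000 kHz, and with a 19.2 MHz reference the ratio is ceil(2 * 600000 / 19200) = 63, above 50 and hence outside the BXT PVD-ratio special case. A standalone sketch; the dsi_clk_from_pclk() formula is restated here as an assumption:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)       (((n) + (d) - 1) / (d))
    #define DIV_ROUND_CLOSEST(n, d)  (((n) + (d) / 2) / (d))

    int main(void)
    {
        const unsigned int ref_khz = 19200;  /* assumed 19.2 MHz refclk */
        unsigned int pclk = 100000;          /* example pixel clock, kHz */
        unsigned int bpp = 24, lanes = 4;    /* RGB888 over four lanes */

        /* Per-lane DSI rate: pclk * bpp / lanes (the dsi_clk_from_pclk idea) */
        unsigned int dsi_clk = DIV_ROUND_CLOSEST(pclk * bpp, lanes); /* 600000 */

        /* PLL ratio so that ref * ratio / 2 == dsi_clk, rounded up */
        unsigned int ratio = DIV_ROUND_UP(dsi_clk * 2, ref_khz);     /* 63 */

        printf("dsi_clk=%u kHz ratio=%u\n", dsi_clk, ratio);
        return 0;
    }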
-static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
+static void gen9lp_enable_dsi_pll(struct intel_encoder *encoder,
const struct intel_crtc_state *config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -483,8 +543,12 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
POSTING_READ(BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
- for_each_dsi_port(port, intel_dsi->ports)
- bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ if (IS_BROXTON(dev_priv)) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ bxt_dsi_program_clocks(encoder->base.dev, port, config);
+ } else {
+ glk_dsi_program_esc_clock(encoder->base.dev, config);
+ }
/* Enable DSI PLL */
val = I915_READ(BXT_DSI_PLL_ENABLE);
@@ -522,7 +586,7 @@ int intel_compute_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
return vlv_compute_dsi_pll(encoder, config);
else if (IS_GEN9_LP(dev_priv))
- return bxt_compute_dsi_pll(encoder, config);
+ return gen9lp_compute_dsi_pll(encoder, config);
return -ENODEV;
}
@@ -535,7 +599,7 @@ void intel_enable_dsi_pll(struct intel_encoder *encoder,
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_enable_dsi_pll(encoder, config);
else if (IS_GEN9_LP(dev_priv))
- bxt_enable_dsi_pll(encoder, config);
+ gen9lp_enable_dsi_pll(encoder, config);
}
void intel_disable_dsi_pll(struct intel_encoder *encoder)
@@ -548,19 +612,30 @@ void intel_disable_dsi_pll(struct intel_encoder *encoder)
bxt_disable_dsi_pll(encoder);
}
-static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
+static void gen9lp_dsi_reset_clocks(struct intel_encoder *encoder,
+ enum port port)
{
u32 tmp;
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
/* Clear old configurations */
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
- tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
- tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
- tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ if (IS_BROXTON(dev_priv)) {
+ tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
+ tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
+ tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
+ I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ } else {
+ tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
+ tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
+ I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
+
+ tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
+ tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
+ I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
+ }
I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
@@ -569,7 +644,7 @@ void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (IS_GEN9_LP(dev_priv))
- bxt_dsi_reset_clocks(encoder, port);
+ gen9lp_dsi_reset_clocks(encoder, port);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
vlv_dsi_reset_clocks(encoder, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_vbt.c
index 8f683b8b1816..0dce7792643a 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_vbt.c
@@ -28,7 +28,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/i915_drm.h>
-#include <drm/drm_panel.h>
#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <video/mipi_display.h>
@@ -38,16 +37,6 @@
#include "intel_drv.h"
#include "intel_dsi.h"
-struct vbt_panel {
- struct drm_panel panel;
- struct intel_dsi *intel_dsi;
-};
-
-static inline struct vbt_panel *to_vbt_panel(struct drm_panel *panel)
-{
- return container_of(panel, struct vbt_panel, panel);
-}
-
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
@@ -192,6 +181,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
break;
}
+ wait_for_dsi_fifo_empty(intel_dsi, port);
+
out:
data += len;
@@ -424,10 +415,9 @@ static const char *sequence_name(enum mipi_seq seq_id)
return "(unknown)";
}
-static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
+void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
+ enum mipi_seq seq_id)
{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
@@ -491,78 +481,31 @@ static void generic_exec_sequence(struct drm_panel *panel, enum mipi_seq seq_id)
}
}
-static int vbt_panel_prepare(struct drm_panel *panel)
+int intel_dsi_vbt_get_modes(struct intel_dsi *intel_dsi)
{
- generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
- generic_exec_sequence(panel, MIPI_SEQ_POWER_ON);
- generic_exec_sequence(panel, MIPI_SEQ_DEASSERT_RESET);
- generic_exec_sequence(panel, MIPI_SEQ_INIT_OTP);
-
- return 0;
-}
-
-static int vbt_panel_unprepare(struct drm_panel *panel)
-{
- generic_exec_sequence(panel, MIPI_SEQ_ASSERT_RESET);
- generic_exec_sequence(panel, MIPI_SEQ_POWER_OFF);
-
- return 0;
-}
-
-static int vbt_panel_enable(struct drm_panel *panel)
-{
- generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_ON);
- generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_ON);
-
- return 0;
-}
-
-static int vbt_panel_disable(struct drm_panel *panel)
-{
- generic_exec_sequence(panel, MIPI_SEQ_BACKLIGHT_OFF);
- generic_exec_sequence(panel, MIPI_SEQ_DISPLAY_OFF);
-
- return 0;
-}
-
-static int vbt_panel_get_modes(struct drm_panel *panel)
-{
- struct vbt_panel *vbt_panel = to_vbt_panel(panel);
- struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
+ struct intel_connector *connector = intel_dsi->attached_connector;
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_display_mode *mode;
- if (!panel->connector)
- return 0;
-
mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
if (!mode)
return 0;
mode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(panel->connector, mode);
+ drm_mode_probed_add(&connector->base, mode);
return 1;
}
-static const struct drm_panel_funcs vbt_panel_funcs = {
- .disable = vbt_panel_disable,
- .unprepare = vbt_panel_unprepare,
- .prepare = vbt_panel_prepare,
- .enable = vbt_panel_enable,
- .get_modes = vbt_panel_get_modes,
-};
-
-struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
+bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
{
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
- struct vbt_panel *vbt_panel;
u32 bpp;
u32 tlpx_ns, extra_byte_count, bitrate, tlpx_ui;
u32 ui_num, ui_den;
@@ -571,6 +514,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
u32 tclk_prepare_clkzero, ths_prepare_hszero;
u32 lp_to_hs_switch, hs_to_lp_switch;
u32 pclk, computed_ddr;
+ u32 mul;
u16 burst_mode_ratio;
enum port port;
@@ -624,7 +568,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
if (mipi_config->target_burst_mode_freq <
computed_ddr) {
DRM_ERROR("Burst mode freq is less than computed\n");
- return NULL;
+ return false;
}
burst_mode_ratio = DIV_ROUND_UP(
@@ -634,7 +578,7 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
pclk = DIV_ROUND_UP(pclk * burst_mode_ratio, 100);
} else {
DRM_ERROR("Burst mode target is not set\n");
- return NULL;
+ return false;
}
} else
burst_mode_ratio = 100;
@@ -674,11 +618,6 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
break;
}
- /*
- * ui(s) = 1/f [f in hz]
- * ui(ns) = 10^9 / (f*10^6) [f in Mhz] -> 10^3/f(Mhz)
- */
-
/* in Kbps */
ui_num = NS_KHZ_RATIO;
ui_den = bitrate;
@@ -692,21 +631,26 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
*/
intel_dsi->lp_byte_clk = DIV_ROUND_UP(tlpx_ns * ui_den, 8 * ui_num);
- /* count values in UI = (ns value) * (bitrate / (2 * 10^6))
+ /* DDR clock period = 2 * UI
+	 * UI(sec) = 1 / (bitrate * 10^3)  (bitrate is in kHz)
+	 * UI(nsec) = 10^6 / bitrate
+	 * DDR clock period (nsec) = 2 * UI = (2 * 10^6) / bitrate
+ * DDR clock count = ns_value / DDR clock period
*
- * Since txddrclkhs_i is 2xUI, all the count values programmed in
- * DPHY param register are divided by 2
- *
- * prepare count
+	 * For GEMINILAKE, dphy_param_reg is programmed in terms of the HS
+	 * byte clock count; for the other platforms, in terms of the HS
+	 * DDR clock count.
*/
+ mul = IS_GEMINILAKE(dev_priv) ? 8 : 2;
ths_prepare_ns = max(mipi_config->ths_prepare,
mipi_config->tclk_prepare);
- prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
+
+ /* prepare count */
+ prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
/* exit zero count */
exit_zero_cnt = DIV_ROUND_UP(
(ths_prepare_hszero - ths_prepare_ns) * ui_den,
- ui_num * 2
+ ui_num * mul
);
/*
@@ -720,12 +664,12 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
/* clk zero count */
clk_zero_cnt = DIV_ROUND_UP(
- (tclk_prepare_clkzero - ths_prepare_ns)
- * ui_den, 2 * ui_num);
+ (tclk_prepare_clkzero - ths_prepare_ns)
+ * ui_den, ui_num * mul);
/* trail count */
tclk_trail_ns = max(mipi_config->tclk_trail, mipi_config->ths_trail);
- trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, 2 * ui_num);
+ trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX ||
exit_zero_cnt > EXIT_ZERO_CNT_MAX ||
@@ -801,6 +745,19 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
8);
intel_dsi->clk_hs_to_lp_count += extra_byte_count;
+ DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
+ DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
+ DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
+ DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ DRM_DEBUG_KMS("Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
+ DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
@@ -832,20 +789,10 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->panel_off_delay = pps->panel_off_delay / 10;
intel_dsi->panel_pwr_cycle_delay = pps->panel_power_cycle_delay / 10;
- /* This is cheating a bit with the cleanup. */
- vbt_panel = devm_kzalloc(dev->dev, sizeof(*vbt_panel), GFP_KERNEL);
- if (!vbt_panel)
- return NULL;
-
- vbt_panel->intel_dsi = intel_dsi;
- drm_panel_init(&vbt_panel->panel);
- vbt_panel->panel.funcs = &vbt_panel_funcs;
- drm_panel_add(&vbt_panel->panel);
-
/* a regular driver would get the device in probe */
for_each_dsi_port(port, intel_dsi->ports) {
mipi_dsi_attach(intel_dsi->dsi_hosts[port]->device);
}
- return &vbt_panel->panel;
+ return true;
}
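
The mul factor introduced above changes the unit of the D-PHY counts: 2 UI (one DDR clock) on VLV/BXT versus 8 UI (one HS byte clock) on GLK. At 600 Mbps per lane one UI is 10^6 / 600000 ≈ 1.67 ns, so a 65 ns THS-PREPARE becomes ceil(65 * 600000 / (10^6 * 2)) = 20 DDR clocks but only ceil(65 * 600000 / (10^6 * 8)) = 5 byte clocks. A standalone check of that arithmetic, with NS_KHZ_RATIO restated as an assumption:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define NS_KHZ_RATIO 1000000UL   /* assumed ns * kHz scaling factor */

    int main(void)
    {
        unsigned long bitrate = 600000;     /* example: 600 Mbps/lane, kHz */
        unsigned long ths_prepare_ns = 65;  /* example THS-PREPARE, in ns */
        unsigned long ui_num = NS_KHZ_RATIO, ui_den = bitrate;

        /* BXT/VLV count in DDR clocks (2 UI); GLK in byte clocks (8 UI) */
        unsigned long ddr  = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 2);
        unsigned long byte = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * 8);

        printf("prepare_cnt: ddrclk=%lu byteclk=%lu\n", ddr, byte); /* 20, 5 */
        return 0;
    }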
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 50da89dcb92b..6025839ed3b7 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -515,6 +515,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
"DVO %c", port_name(port));
intel_encoder->type = INTEL_OUTPUT_DVO;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 371acf109e34..4200faa520c7 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -28,8 +28,8 @@
static const struct engine_info {
const char *name;
- unsigned exec_id;
- enum intel_engine_hw_id hw_id;
+ unsigned int exec_id;
+ unsigned int hw_id;
u32 mmio_base;
unsigned irq_shift;
int (*init_legacy)(struct intel_engine_cs *engine);
@@ -105,26 +105,27 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
+ ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier);
+
dev_priv->engine[id] = engine;
return 0;
}
/**
- * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * intel_engines_init_early() - allocate the Engine Command Streamers
* @dev_priv: i915 device private
*
* Return: non-zero if the initialization failed.
*/
-int intel_engines_init(struct drm_i915_private *dev_priv)
+int intel_engines_init_early(struct drm_i915_private *dev_priv)
{
struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
unsigned int ring_mask = INTEL_INFO(dev_priv)->ring_mask;
unsigned int mask = 0;
- int (*init)(struct intel_engine_cs *engine);
struct intel_engine_cs *engine;
enum intel_engine_id id;
unsigned int i;
- int ret;
+ int err;
WARN_ON(ring_mask == 0);
WARN_ON(ring_mask &
@@ -134,20 +135,8 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
if (!HAS_ENGINE(dev_priv, i))
continue;
- if (i915.enable_execlists)
- init = intel_engines[i].init_execlists;
- else
- init = intel_engines[i].init_legacy;
-
- if (!init)
- continue;
-
- ret = intel_engine_setup(dev_priv, i);
- if (ret)
- goto cleanup;
-
- ret = init(dev_priv->engine[i]);
- if (ret)
+ err = intel_engine_setup(dev_priv, i);
+ if (err)
goto cleanup;
mask |= ENGINE_MASK(i);
@@ -166,14 +155,68 @@ int intel_engines_init(struct drm_i915_private *dev_priv)
return 0;
cleanup:
+ for_each_engine(engine, dev_priv, id)
+ kfree(engine);
+ return err;
+}
+
+/**
+ * intel_engines_init() - allocate, populate and init the Engine Command Streamers
+ * @dev_priv: i915 device private
+ *
+ * Return: non-zero if the initialization failed.
+ */
+int intel_engines_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_device_info *device_info = mkwrite_device_info(dev_priv);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id, err_id;
+ unsigned int mask = 0;
+ int err = 0;
+
for_each_engine(engine, dev_priv, id) {
+ int (*init)(struct intel_engine_cs *engine);
+
if (i915.enable_execlists)
- intel_logical_ring_cleanup(engine);
+ init = intel_engines[id].init_execlists;
else
- intel_engine_cleanup(engine);
+ init = intel_engines[id].init_legacy;
+ if (!init) {
+ kfree(engine);
+ dev_priv->engine[id] = NULL;
+ continue;
+ }
+
+ err = init(engine);
+ if (err) {
+ err_id = id;
+ goto cleanup;
+ }
+
+ GEM_BUG_ON(!engine->submit_request);
+ mask |= ENGINE_MASK(id);
}
- return ret;
+ /*
+	 * Catch failures to update the intel_engines table when new engines
+	 * are added to the driver: warn, and disable the forgotten engines
+	 * by shrinking the ring mask to the engines that did initialize.
+ */
+ if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask))
+ device_info->ring_mask = mask;
+
+ device_info->num_rings = hweight32(mask);
+
+ return 0;
+
+cleanup:
+ for_each_engine(engine, dev_priv, id) {
+ if (id >= err_id)
+ kfree(engine);
+ else
+ dev_priv->gt.cleanup_engine(engine);
+ }
+ return err;
}
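
The split above yields a two-phase bring-up: intel_engines_init_early() only allocates, intel_engines_init() runs the mode-specific init, and a mid-loop failure leaves a mixed population in which engines at or past the failing id were never initialized (plain kfree) while earlier ones need their full teardown. A toy model of that unwind rule, all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    #define NUM_ENGINES 4

    struct engine { int id; };
    static struct engine *engines[NUM_ENGINES];

    static int init_engine(struct engine *e)
    {
        return e->id == 2 ? -1 : 0;  /* simulate failure on the third engine */
    }

    static void cleanup_engine(struct engine *e)
    {
        printf("full teardown of engine %d\n", e->id);  /* was initialized */
        free(e);
    }

    int main(void)
    {
        int id, err_id = NUM_ENGINES;

        for (id = 0; id < NUM_ENGINES; id++) {     /* "early": allocate only */
            engines[id] = calloc(1, sizeof(*engines[id]));
            engines[id]->id = id;
        }

        for (id = 0; id < NUM_ENGINES; id++) {     /* init phase */
            if (init_engine(engines[id])) {
                err_id = id;
                break;
            }
        }

        if (err_id < NUM_ENGINES) {
            for (id = 0; id < NUM_ENGINES; id++) {
                if (id >= err_id)
                    free(engines[id]);             /* never initialized */
                else
                    cleanup_engine(engines[id]);   /* fully initialized */
            }
        }
        return 0;
    }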
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
@@ -208,12 +251,9 @@ void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno)
}
intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
- if (engine->irq_seqno_barrier)
- engine->irq_seqno_barrier(engine);
+ clear_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted);
GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request));
- engine->timeline->last_submitted_seqno = seqno;
-
engine->hangcheck.seqno = seqno;
/* After manually advancing the seqno, fake the interrupt in case
@@ -304,6 +344,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
{
int ret;
+ engine->set_default_submission(engine);
+
/* We may need to do things with the shrinker which
* require us to immediately switch back to the default
* context. This can cause a problem as pinning the
@@ -482,3 +524,610 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine,
break;
}
}
+
+static int wa_add(struct drm_i915_private *dev_priv,
+ i915_reg_t addr,
+ const u32 mask, const u32 val)
+{
+ const u32 idx = dev_priv->workarounds.count;
+
+ if (WARN_ON(idx >= I915_MAX_WA_REGS))
+ return -ENOSPC;
+
+ dev_priv->workarounds.reg[idx].addr = addr;
+ dev_priv->workarounds.reg[idx].value = val;
+ dev_priv->workarounds.reg[idx].mask = mask;
+
+ dev_priv->workarounds.count++;
+
+ return 0;
+}
+
+#define WA_REG(addr, mask, val) do { \
+ const int r = wa_add(dev_priv, (addr), (mask), (val)); \
+ if (r) \
+ return r; \
+ } while (0)
+
+#define WA_SET_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
+
+#define WA_CLR_BIT_MASKED(addr, mask) \
+ WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
+
+#define WA_SET_FIELD_MASKED(addr, mask, value) \
+ WA_REG(addr, mask, _MASKED_FIELD(mask, value))
+
+#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
+#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
+
+#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
+
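
The _MASKED_* helpers behind these macros rely on i915's masked registers: the top 16 bits of a 32-bit write select which of the bottom 16 bits take effect, so single bits can be set or cleared without a read-modify-write cycle. A self-contained model of that behaviour; the encodings are restated here as assumptions:

    #include <assert.h>
    #include <stdint.h>

    /* Masked-register write encodings (assumed _MASKED_BIT_* semantics). */
    #define MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))
    #define MASKED_BIT_DISABLE(a)  ((a) << 16)

    /* Hardware-side model: unselected bits keep their old value. */
    static uint16_t masked_write(uint16_t cur, uint32_t wr)
    {
        uint16_t mask = wr >> 16, val = wr & 0xffff;

        return (cur & ~mask) | (val & mask);
    }

    int main(void)
    {
        uint16_t reg = 0x0001;

        reg = masked_write(reg, MASKED_BIT_ENABLE(0x0100));  /* set bit 8 */
        assert(reg == 0x0101);                 /* bit 0 untouched */
        reg = masked_write(reg, MASKED_BIT_DISABLE(0x0001)); /* clear bit 0 */
        assert(reg == 0x0100);
        return 0;
    }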
+static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
+ i915_reg_t reg)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ struct i915_workarounds *wa = &dev_priv->workarounds;
+ const uint32_t index = wa->hw_whitelist_count[engine->id];
+
+ if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
+ return -EINVAL;
+
+ WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
+ i915_mmio_reg_offset(reg));
+ wa->hw_whitelist_count[engine->id]++;
+
+ return 0;
+}
+
+static int gen8_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
+
+ /* WaDisableAsyncFlipPerfMode:bdw,chv */
+ WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
+
+ /* WaDisablePartialInstShootdown:bdw,chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Use Force Non-Coherent whenever executing a 3D context. This is a
+	 * workaround for a possible hang in the unlikely event a TLB
+ * invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:bdw,chv */
+ /* WaHdcDisableFetchWhenMasked:bdw,chv */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_DONOT_FETCH_MEM_WHEN_MASKED |
+ HDC_FORCE_NON_COHERENT);
+
+ /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
+ * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
+ * polygons in the same 8x4 pixel/sample area to be processed without
+ * stalling waiting for the earlier ones to write to Hierarchical Z
+ * buffer."
+ *
+ * This optimization is off by default for BDW and CHV; turn it on.
+ */
+ WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* Wa4x4STCOptimizationDisable:bdw,chv */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN6_WIZ_HASHING_MASK,
+ GEN6_WIZ_HASHING_16x4);
+
+ return 0;
+}
+
+static int bdw_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen8_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableDopClockGating:bdw
+ *
+ * Also see the related UCGTCL1 write in broadwell_init_clock_gating()
+ * to disable EUTC clock gating.
+ */
+ WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
+ DOP_CLOCK_GATING_DISABLE);
+
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ /* WaForceContextSaveRestoreNonCoherent:bdw */
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
+ (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
+
+ return 0;
+}
+
+static int chv_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen8_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaDisableThreadStallDopClockGating:chv */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
+
+ /* Improve HiZ throughput on CHV. */
+ WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
+
+ return 0;
+}
+
+static int gen9_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk */
+ I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+ /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk */
+ I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
+ GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
+
+ /* WaDisableKillLogic:bxt,skl,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ ECOCHK_DIS_TLB);
+
+ /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk */
+ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ FLOW_CONTROL_ENABLE |
+ PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
+
+ /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
+
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_DG_MIRROR_FIX_ENABLE);
+
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN9_RHWO_OPTIMIZATION_DISABLE);
+ /*
+ * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
+ * but we do that in per ctx batchbuffer as there is an issue
+ * with this register not getting restored on ctx restore
+ */
+ }
+
+ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
+ GEN9_ENABLE_GPGPU_PREEMPTION);
+
+ /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk */
+ /* WaDisablePartialResolveInVc:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
+ GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
+
+ /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk */
+ WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
+ GEN9_CCS_TLB_PREFETCH_ENABLE);
+
+ /* WaDisableMaskBasedCammingInRCC:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
+ PIXEL_MASK_CAMMING_DISABLE);
+
+ /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+ HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
+
+ /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+ * both tied to WaForceContextSaveRestoreNonCoherent
+ * in some hsds for skl. We keep the tie for all gen9. The
+ * documentation is a bit hazy and so we want to get common behaviour,
+ * even though there is no clear evidence we would need both on kbl/bxt.
+	 * This area has been a source of system hangs so we play it safe
+ * and mimic the skl regardless of what bspec says.
+ *
+ * Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+
+ /* WaForceEnableNonCoherent:skl,bxt,kbl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+
+ /* WaDisableHDCInvalidation:skl,bxt,kbl */
+ I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+ BDW_DISABLE_HDC_INVALIDATION);
+
+ /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+ if (IS_SKYLAKE(dev_priv) ||
+ IS_KABYLAKE(dev_priv) ||
+ IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
+ GEN8_SAMPLER_POWER_BYPASS_DIS);
+
+ /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk */
+ WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
+
+ /* WaOCLCoherentLineFlush:skl,bxt,kbl */
+ I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
+ GEN8_LQSC_FLUSH_COHERENT_LINES));
+
+ /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk */
+ ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+ if (ret)
+ return ret;
+
+ /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
+ if (ret)
+ return ret;
+
+ /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk */
+ ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ u8 vals[3] = { 0, 0, 0 };
+ unsigned int i;
+
+ for (i = 0; i < 3; i++) {
+ u8 ss;
+
+ /*
+ * Only consider slices where one, and only one, subslice has 7
+ * EUs
+ */
+ if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
+ continue;
+
+ /*
+ * subslice_7eu[i] != 0 (because of the check above) and
+ * ss_max == 4 (maximum number of subslices possible per slice)
+ *
+ * -> 0 <= ss <= 3;
+ */
+ ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
+ vals[i] = 3 - ss;
+ }
+
+ if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
+ return 0;
+
+ /* Tune IZ hashing. See intel_device_info_runtime_init() */
+ WA_SET_FIELD_MASKED(GEN7_GT_MODE,
+ GEN9_IZ_HASHING_MASK(2) |
+ GEN9_IZ_HASHING_MASK(1) |
+ GEN9_IZ_HASHING_MASK(0),
+ GEN9_IZ_HASHING(2, vals[2]) |
+ GEN9_IZ_HASHING(1, vals[1]) |
+ GEN9_IZ_HASHING(0, vals[0]));
+
+ return 0;
+}
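
The fixup above only fires for slices where exactly one subslice has the full seven EUs: is_power_of_2() on the subslice_7eu bitmask enforces "one and only one", ffs() - 1 converts the surviving bit to a subslice index, and 3 - ss maps that to the hashing value. A standalone check of the bit manipulation:

    #include <assert.h>
    #include <strings.h>   /* ffs() */

    static int is_power_of_2(unsigned int x)
    {
        return x && !(x & (x - 1));
    }

    int main(void)
    {
        /* Example: only subslice 1 of this slice has 7 EUs -> bit 1 set. */
        unsigned int subslice_7eu = 0x2;

        assert(is_power_of_2(subslice_7eu));   /* exactly one subslice */

        int ss = ffs(subslice_7eu) - 1;        /* -> 1 */
        int val = 3 - ss;                      /* hashing value -> 2 */

        assert(val == 2);
        return 0;
    }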
+
+static int skl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /*
+ * Actual WA is to disable percontext preemption granularity control
+ * until D0 which is the default case so this is equivalent to
+ * !WaDisablePerCtxtPreemptionGranularityControl:skl
+ */
+ I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
+ _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
+
+ /* WaEnableGapsTsvCreditFix:skl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableGafsUnitClkGating:skl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaInPlaceDecompressionHang:skl */
+ if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaDisableLSQCROPERFforOCL:skl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return skl_tune_iz_hashing(engine);
+}
+
+static int bxt_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaStoreMultiplePTEenable:bxt */
+ /* This is a requirement according to Hardware specification */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
+ I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
+
+ /* WaSetClckGatingDisableMedia:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
+ ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
+ }
+
+ /* WaDisableThreadStallDopClockGating:bxt */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaDisablePooledEuLoadBalancingFix:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
+ WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
+ GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
+ }
+
+ /* WaDisableSbeCacheDispatchPortSharing:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
+ /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
+ /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
+ /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
+ /* WaDisableLSQCROPERFforOCL:bxt */
+ if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
+ ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
+ if (ret)
+ return ret;
+
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+ }
+
+ /* WaProgramL3SqcReg1DefaultForPerf:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
+ I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
+ L3_HIGH_PRIO_CREDITS(2));
+
+ /* WaToEnableHwFixForPushConstHWBug:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaInPlaceDecompressionHang:bxt */
+ if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaEnableGapsTsvCreditFix:kbl */
+ I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+ GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+ /* WaDisableDynamicCreditSharing:kbl */
+ if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+ WA_SET_BIT(GAMT_CHKN_BIT_REG,
+ GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+ /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FENCE_DEST_SLM_DISABLE);
+
+ /* WaToEnableHwFixForPushConstHWBug:kbl */
+ if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ /* WaDisableGafsUnitClkGating:kbl */
+ WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:kbl */
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+ /* WaInPlaceDecompressionHang:kbl */
+ WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
+ GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
+
+ /* WaDisableLSQCROPERFforOCL:kbl */
+ ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int glk_init_workarounds(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int ret;
+
+ ret = gen9_init_workarounds(engine);
+ if (ret)
+ return ret;
+
+ /* WaToEnableHwFixForPushConstHWBug:glk */
+ WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+ GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+ return 0;
+}
+
+int init_workarounds_ring(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+ int err;
+
+ WARN_ON(engine->id != RCS);
+
+ dev_priv->workarounds.count = 0;
+ dev_priv->workarounds.hw_whitelist_count[engine->id] = 0;
+
+ if (IS_BROADWELL(dev_priv))
+ err = bdw_init_workarounds(engine);
+ else if (IS_CHERRYVIEW(dev_priv))
+ err = chv_init_workarounds(engine);
+ else if (IS_SKYLAKE(dev_priv))
+ err = skl_init_workarounds(engine);
+ else if (IS_BROXTON(dev_priv))
+ err = bxt_init_workarounds(engine);
+ else if (IS_KABYLAKE(dev_priv))
+ err = kbl_init_workarounds(engine);
+ else if (IS_GEMINILAKE(dev_priv))
+ err = glk_init_workarounds(engine);
+ else
+ err = 0;
+ if (err)
+ return err;
+
+ DRM_DEBUG_DRIVER("%s: Number of context specific w/a: %d\n",
+ engine->name, dev_priv->workarounds.count);
+ return 0;
+}
+
+int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
+{
+ struct i915_workarounds *w = &req->i915->workarounds;
+ u32 *cs;
+ int ret, i;
+
+ if (w->count == 0)
+ return 0;
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ cs = intel_ring_begin(req, (w->count * 2 + 2));
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = MI_LOAD_REGISTER_IMM(w->count);
+ for (i = 0; i < w->count; i++) {
+ *cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+ *cs++ = w->reg[i].value;
+ }
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(req, cs);
+
+ ret = req->engine->emit_flush(req, EMIT_BARRIER);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * intel_engine_is_idle() - Report if the engine has finished processing all work
+ * @engine: the intel_engine_cs
+ *
+ * Return true if there are no requests pending, nothing left to be submitted
+ * to hardware, and the engine is idle.
+ */
+bool intel_engine_is_idle(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *dev_priv = engine->i915;
+
+ /* Any inflight/incomplete requests? */
+ if (!i915_seqno_passed(intel_engine_get_seqno(engine),
+ intel_engine_last_submit(engine)))
+ return false;
+
+ /* Interrupt/tasklet pending? */
+ if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted))
+ return false;
+
+ /* Both ports drained, no more ELSP submission? */
+ if (engine->execlist_port[0].request)
+ return false;
+
+ /* Ring stopped? */
+ if (INTEL_GEN(dev_priv) > 2 && !(I915_READ_MODE(engine) & MODE_IDLE))
+ return false;
+
+ return true;
+}
+
+bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, dev_priv, id) {
+ if (!intel_engine_is_idle(engine))
+ return false;
+ }
+
+ return true;
+}
+
+void intel_engines_reset_default_submission(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, i915, id)
+ engine->set_default_submission(engine);
+}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/mock_engine.c"
+#endif
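
The new intel_engine_cs.c code above splits workaround handling in two: the per-platform *_init_workarounds() functions record (register, value) pairs into dev_priv->workarounds, and intel_ring_workarounds_emit() later replays the whole table into the ring as a single MI_LOAD_REGISTER_IMM packet sized at w->count * 2 + 2 dwords. A minimal user-space sketch of that replay follows; the LRI opcode encoding and register offsets are placeholders for illustration, not the driver's exact definitions.

#include <stdint.h>
#include <stdio.h>

struct wa_reg { uint32_t addr; uint32_t value; };

static size_t emit_lri(uint32_t *cs, const struct wa_reg *w, unsigned int count)
{
	size_t n = 0;

	cs[n++] = 0x22000000 | (2 * count - 1);	/* hypothetical LRI opcode|length */
	for (unsigned int i = 0; i < count; i++) {
		cs[n++] = w[i].addr;	/* register offset */
		cs[n++] = w[i].value;	/* value to load */
	}
	cs[n++] = 0;	/* MI_NOOP pad, keeping the total at count * 2 + 2 */
	return n;
}

int main(void)
{
	const struct wa_reg w[] = { { 0xb118, 0x1 }, { 0x7300, 0x8000 } };
	uint32_t cs[16];
	size_t n = emit_lri(cs, w, 2);

	for (size_t i = 0; i < n; i++)
		printf("dw%zu: 0x%08x\n", i, cs[i]);
	return 0;
}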
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 89fe5c8464df..ded2add18b26 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -537,8 +537,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* reserved range size, so it always assumes the maximum (8mb) is used.
* If we enable FBC using a CFB on that memory range we'll get FIFO
* underruns, even if that range is not reserved by the BIOS. */
- if (IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_BROADWELL(dev_priv) || IS_GEN9_BC(dev_priv))
end = ggtt->stolen_size - 8 * 1024 * 1024;
else
end = U64_MAX;
@@ -628,7 +627,8 @@ err_fb:
kfree(compressed_llb);
i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
err_llb:
- pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
+ if (drm_mm_initialized(&dev_priv->mm.stolen))
+ pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
@@ -743,8 +743,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- cache->crtc.hsw_bdw_pixel_rate =
- ilk_pipe_pixel_rate(crtc_state);
+ cache->crtc.hsw_bdw_pixel_rate = crtc_state->pixel_rate;
cache->plane.rotation = plane_state->base.rotation;
cache->plane.src_w = drm_rect_width(&plane_state->base.src) >> 16;
@@ -819,7 +818,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
/* WaFbcExceedCdClockThreshold:hsw,bdw */
if ((IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) &&
- cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk_freq * 95 / 100) {
+ cache->crtc.hsw_bdw_pixel_rate >= dev_priv->cdclk.hw.cdclk * 95 / 100) {
fbc->no_fbc_reason = "pixel rate is too big";
return false;
}
@@ -1062,7 +1061,7 @@ void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
* plane. We could go for fancier schemes such as checking the plane
* size, but this would just affect the few platforms that don't tie FBC
* to pipe or plane A. */
- for_each_plane_in_state(state, plane, plane_state, i) {
+ for_each_new_plane_in_state(state, plane, plane_state, i) {
struct intel_plane_state *intel_plane_state =
to_intel_plane_state(plane_state);
struct intel_crtc_state *intel_crtc_state;
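
Two of the intel_fbc.c hunks above tighten when FBC may run: the stolen-memory search now skips the top 8 MiB on Broadwell and all GEN9_BC parts, and the WaFbcExceedCdClockThreshold check compares the cached pixel rate against 95% of the current cdclk from the new cdclk state. The threshold test reduces to a one-line predicate; a sketch, assuming both rates are in the same units (kHz):

#include <stdbool.h>
#include <stdio.h>

/* WaFbcExceedCdClockThreshold: FBC must not be active once the pipe
 * pixel rate reaches 95% of the core display clock. */
static bool fbc_pixel_rate_ok(unsigned int pixel_rate, unsigned int cdclk)
{
	return pixel_rate < cdclk * 95 / 100;
}

int main(void)
{
	printf("%d\n", fbc_pixel_rate_ok(450000, 450000));	/* 0: too fast */
	printf("%d\n", fbc_pixel_rate_ok(400000, 450000));	/* 1: ok */
	return 0;
}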
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 281c5c48a84d..332254a8eebe 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -45,6 +45,14 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev)
+{
+ struct drm_i915_gem_object *obj = ifbdev->fb->obj;
+ unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU;
+
+ intel_fb_obj_invalidate(obj, origin);
+}
+
static int intel_fbdev_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
@@ -53,12 +61,8 @@ static int intel_fbdev_set_par(struct fb_info *info)
int ret;
ret = drm_fb_helper_set_par(info);
-
- if (ret == 0) {
- mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
- }
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
return ret;
}
@@ -71,12 +75,8 @@ static int intel_fbdev_blank(int blank, struct fb_info *info)
int ret;
ret = drm_fb_helper_blank(blank, info);
-
- if (ret == 0) {
- mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
- }
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
return ret;
}
@@ -87,15 +87,11 @@ static int intel_fbdev_pan_display(struct fb_var_screeninfo *var,
struct drm_fb_helper *fb_helper = info->par;
struct intel_fbdev *ifbdev =
container_of(fb_helper, struct intel_fbdev, helper);
-
int ret;
- ret = drm_fb_helper_pan_display(var, info);
- if (ret == 0) {
- mutex_lock(&fb_helper->dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&fb_helper->dev->struct_mutex);
- }
+ ret = drm_fb_helper_pan_display(var, info);
+ if (ret == 0)
+ intel_fbdev_invalidate(ifbdev);
return ret;
}
@@ -121,7 +117,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj = NULL;
+ struct drm_i915_gem_object *obj;
int size, ret;
/* we don't do packed 24bpp */
@@ -136,14 +132,13 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
- mutex_lock(&dev->struct_mutex);
-
size = mode_cmd.pitches[0] * mode_cmd.height;
size = PAGE_ALIGN(size);
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
+ obj = NULL;
if (size * 2 < ggtt->stolen_usable_size)
obj = i915_gem_object_create_stolen(dev_priv, size);
if (obj == NULL)
@@ -151,24 +146,22 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
if (IS_ERR(obj)) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = PTR_ERR(obj);
- goto out;
+ goto err;
}
- fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
+ fb = intel_framebuffer_create(obj, &mode_cmd);
if (IS_ERR(fb)) {
- i915_gem_object_put(obj);
ret = PTR_ERR(fb);
- goto out;
+ goto err_obj;
}
- mutex_unlock(&dev->struct_mutex);
-
ifbdev->fb = to_intel_framebuffer(fb);
return 0;
-out:
- mutex_unlock(&dev->struct_mutex);
+err_obj:
+ i915_gem_object_put(obj);
+err:
return ret;
}
@@ -355,23 +348,23 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, mask;
+ unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
int i, j;
bool *save_enabled;
bool fallback = true;
int num_connectors_enabled = 0;
int num_connectors_detected = 0;
- int pass = 0;
save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
if (!save_enabled)
return false;
memcpy(save_enabled, enabled, count);
- mask = BIT(count) - 1;
+ mask = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
+ conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -385,7 +378,7 @@ retry:
if (conn_configured & BIT(i))
continue;
- if (pass == 0 && !connector->has_tile)
+ if (conn_seq == 0 && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -496,10 +489,8 @@ retry:
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask) {
- pass++;
+ if ((conn_configured & mask) != mask && conn_configured != conn_seq)
goto retry;
- }
/*
* If the BIOS didn't enable everything it could, fall back to have the
@@ -628,9 +619,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
}
cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay;
- cur_size = intel_fb_align_height(dev, cur_size,
- fb->base.format->format,
- fb->base.modifier);
+ cur_size = intel_fb_align_height(&fb->base, 0, cur_size);
cur_size *= fb->base.pitches[0];
DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n",
pipe_name(intel_crtc->pipe),
@@ -838,11 +827,6 @@ void intel_fbdev_restore_mode(struct drm_device *dev)
if (!ifbdev->fb)
return;
- if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper)) {
- DRM_DEBUG("failed to restore crtc mode\n");
- } else {
- mutex_lock(&dev->struct_mutex);
- intel_fb_obj_invalidate(ifbdev->fb->obj, ORIGIN_GTT);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0)
+ intel_fbdev_invalidate(ifbdev);
}
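
Buried in the intel_fb_initial_config() hunk above is a termination fix: the old loop retried on a bare pass counter, while the new one snapshots conn_configured into conn_seq at the top of each pass and retries only while a pass still made progress. The same fixed-point pattern as a runnable sketch; the dependency rule in try_configure() is invented purely to force multiple passes:

#include <stdbool.h>
#include <stdio.h>

#define COUNT 4

/* Invented rule: each item can only be configured once its successor is,
 * except the last, which is always configurable. */
static bool try_configure(int i, unsigned long configured)
{
	return i == COUNT - 1 || (configured & (1UL << (i + 1)));
}

int main(void)
{
	unsigned long mask = (1UL << COUNT) - 1;	/* GENMASK(COUNT - 1, 0) */
	unsigned long conn_configured = 0, conn_seq;
	int passes = 0;

retry:
	conn_seq = conn_configured;	/* snapshot before this pass */
	passes++;
	for (int i = 0; i < COUNT; i++) {
		if (conn_configured & (1UL << i))
			continue;	/* already done */
		if (try_configure(i, conn_configured))
			conn_configured |= 1UL << i;
	}
	/* retry only while incomplete AND the last pass made progress */
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;

	printf("configured %#lx in %d passes\n", conn_configured, passes);
	return 0;
}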
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index e660d8b4bbc3..966e255ca053 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -54,7 +54,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
struct intel_crtc *crtc;
enum pipe pipe;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -72,7 +72,7 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
for_each_pipe(dev_priv, pipe) {
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
@@ -90,7 +90,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
i915_reg_t reg = PIPESTAT(crtc->pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
@@ -98,6 +98,7 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
POSTING_READ(reg);
+ trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
}
@@ -109,7 +110,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
i915_reg_t reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0xffff0000;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if (enable) {
I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
@@ -139,7 +140,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
uint32_t err_int = I915_READ(GEN7_ERR_INT);
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
@@ -147,6 +148,7 @@ static void ivybridge_check_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
POSTING_READ(GEN7_ERR_INT);
+ trace_intel_cpu_fifo_underrun(dev_priv, pipe);
DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
}
@@ -204,7 +206,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
enum transcoder pch_transcoder = (enum transcoder) crtc->pipe;
uint32_t serr_int = I915_READ(SERR_INT);
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
@@ -212,6 +214,7 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
POSTING_READ(SERR_INT);
+ trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
DRM_ERROR("pch fifo underrun on pch transcoder %s\n",
transcoder_name(pch_transcoder));
}
@@ -248,7 +251,7 @@ static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
bool old;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
old = !crtc->cpu_fifo_underrun_disabled;
crtc->cpu_fifo_underrun_disabled = !enable;
@@ -368,9 +371,11 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
crtc->cpu_fifo_underrun_disabled)
return;
- if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
+ if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
+ trace_intel_cpu_fifo_underrun(dev_priv, pipe);
DRM_ERROR("CPU pipe %c FIFO underrun\n",
pipe_name(pipe));
+ }
intel_fbc_handle_fifo_underrun_irq(dev_priv);
}
@@ -388,9 +393,11 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
enum transcoder pch_transcoder)
{
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
- false))
+ false)) {
+ trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
DRM_ERROR("PCH transcoder %s FIFO underrun\n",
transcoder_name(pch_transcoder));
+ }
}
/**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 966de4c7c7a2..fcfc217e754e 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -114,13 +114,12 @@ static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
}
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- if (retire) {
+ if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 7bab41218cf7..63cd9a753a72 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -38,7 +38,6 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
@@ -69,15 +68,12 @@ static inline bool intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
+ * completed and frontbuffer caching can be started again.
*/
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
@@ -86,7 +82,7 @@ static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
+ __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
}
#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 2f1cf9aea04e..2f270d02894c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -26,7 +26,6 @@
* Dave Gordon <david.s.gordon@intel.com>
* Alex Dai <yu.dai@intel.com>
*/
-#include <linux/firmware.h>
#include "i915_drv.h"
#include "intel_uc.h"
@@ -91,70 +90,6 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
}
};
-static void guc_interrupts_release(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
-
- /* tell all command streamers NOT to forward interrupts or vblank to GuC */
- irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
- irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
- /* route all GT interrupts to the host */
- I915_WRITE(GUC_BCS_RCS_IER, 0);
- I915_WRITE(GUC_VCS2_VCS1_IER, 0);
- I915_WRITE(GUC_WD_VECS_IER, 0);
-}
-
-static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
-{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int irqs;
- u32 tmp;
-
- /* tell all command streamers to forward interrupts (but not vblank) to GuC */
- irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
- for_each_engine(engine, dev_priv, id)
- I915_WRITE(RING_MODE_GEN7(engine), irqs);
-
- /* route USER_INTERRUPT to Host, all others are sent to GuC. */
- irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
- /* These three registers have the same bit definitions */
- I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
- I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
- I915_WRITE(GUC_WD_VECS_IER, ~irqs);
-
- /*
- * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
- * (unmasked) PM interrupts to the GuC. All other bits of this
- * register *disable* generation of a specific interrupt.
- *
- * 'pm_intr_keep' indicates bits that are NOT to be set when
- * writing to the PM interrupt mask register, i.e. interrupts
- * that must not be disabled.
- *
- * If the GuC is handling these interrupts, then we must not let
- * the PM code disable ANY interrupt that the GuC is expecting.
- * So for each ENABLED (0) bit in this register, we must SET the
- * bit in pm_intr_keep so that it's left enabled for the GuC.
- *
- * OTOH the REDIRECT_TO_GUC bit is initially SET in pm_intr_keep
- * (so interrupts go to the DISPLAY unit at first); but here we
- * need to CLEAR that bit, which will result in the register bit
- * being left SET!
- */
- tmp = I915_READ(GEN6_PMINTRMSK);
- if (tmp & GEN8_PMINTR_REDIRECT_TO_GUC) {
- dev_priv->rps.pm_intr_keep |= ~tmp;
- dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_GUC;
- }
-}
-
static u32 get_gttype(struct drm_i915_private *dev_priv)
{
/* XXX: GT type based on PCI device ID? field seems unused by fw */
@@ -409,380 +344,91 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
return ret;
}
-static int guc_hw_reset(struct drm_i915_private *dev_priv)
-{
- int ret;
- u32 guc_status;
-
- ret = intel_guc_reset(dev_priv);
- if (ret) {
- DRM_ERROR("GuC reset failed, ret = %d\n", ret);
- return ret;
- }
-
- guc_status = I915_READ(GUC_STATUS);
- WARN(!(guc_status & GS_MIA_IN_RESET),
- "GuC status: 0x%x, MIA core expected to be in reset\n", guc_status);
-
- return ret;
-}
-
/**
- * intel_guc_setup() - finish preparing the GuC for activity
- * @dev_priv: i915 device private
+ * intel_guc_init_hw() - finish preparing the GuC for activity
+ * @guc: intel_guc structure
*
- * Called from gem_init_hw() during driver loading and also after a GPU reset.
+ * Called during driver loading and also after a GPU reset.
*
 * The main action required here is to load the GuC uCode into the device.
* The firmware image should have already been fetched into memory by the
- * earlier call to intel_guc_init(), so here we need only check that worked,
- * and then transfer the image to the h/w.
+ * earlier call to intel_guc_init(), so here we need only check that
+ * worked, and then transfer the image to the h/w.
*
* Return: non-zero code on error
*/
-int intel_guc_setup(struct drm_i915_private *dev_priv)
+int intel_guc_init_hw(struct intel_guc *guc)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- const char *fw_path = guc_fw->path;
- int retries, ret, err;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
+ const char *fw_path = guc->fw.path;
+ int ret;
DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
fw_path,
- intel_uc_fw_status_repr(guc_fw->fetch_status),
- intel_uc_fw_status_repr(guc_fw->load_status));
-
- /* Loading forbidden, or no firmware to load? */
- if (!i915.enable_guc_loading) {
- err = 0;
- goto fail;
- } else if (fw_path == NULL) {
- /* Device is known to have no uCode (e.g. no GuC) */
- err = -ENXIO;
- goto fail;
- } else if (*fw_path == '\0') {
- /* Device has a GuC but we don't know what f/w to load? */
- WARN(1, "No GuC firmware known for this platform!\n");
- err = -ENODEV;
- goto fail;
- }
+ intel_uc_fw_status_repr(guc->fw.fetch_status),
+ intel_uc_fw_status_repr(guc->fw.load_status));
- /* Fetch failed, or already fetched but failed to load? */
- if (guc_fw->fetch_status != INTEL_UC_FIRMWARE_SUCCESS) {
- err = -EIO;
- goto fail;
- } else if (guc_fw->load_status == INTEL_UC_FIRMWARE_FAIL) {
- err = -ENOEXEC;
- goto fail;
- }
+ if (guc->fw.fetch_status != INTEL_UC_FIRMWARE_SUCCESS)
+ return -EIO;
- guc_interrupts_release(dev_priv);
- gen9_reset_guc_interrupts(dev_priv);
-
- /* We need to notify the guc whenever we change the GGTT */
- i915_ggtt_enable_guc(dev_priv);
-
- guc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+ guc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_uc_fw_status_repr(guc_fw->fetch_status),
- intel_uc_fw_status_repr(guc_fw->load_status));
+ intel_uc_fw_status_repr(guc->fw.fetch_status),
+ intel_uc_fw_status_repr(guc->fw.load_status));
- err = i915_guc_submission_init(dev_priv);
- if (err)
- goto fail;
+ ret = guc_ucode_xfer(dev_priv);
- /*
- * WaEnableuKernelHeaderValidFix:skl,bxt
- * For BXT, this is only upto B0 but below WA is required for later
- * steppings also so this is extended as well.
- */
- /* WaEnableGuCBootHashCheckNotSet:skl,bxt */
- for (retries = 3; ; ) {
- /*
- * Always reset the GuC just before (re)loading, so
- * that the state and timing are fairly predictable
- */
- err = guc_hw_reset(dev_priv);
- if (err)
- goto fail;
-
- intel_huc_load(dev_priv);
- err = guc_ucode_xfer(dev_priv);
- if (!err)
- break;
-
- if (--retries == 0)
- goto fail;
-
- DRM_INFO("GuC fw load failed: %d; will reset and "
- "retry %d more time(s)\n", err, retries);
- }
-
- guc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+ if (ret)
+ return -EAGAIN;
- DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
- intel_uc_fw_status_repr(guc_fw->fetch_status),
- intel_uc_fw_status_repr(guc_fw->load_status));
+ guc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
- intel_guc_auth_huc(dev_priv);
-
- if (i915.enable_guc_submission) {
- if (i915.guc_log_level >= 0)
- gen9_enable_guc_interrupts(dev_priv);
-
- err = i915_guc_submission_enable(dev_priv);
- if (err)
- goto fail;
- guc_interrupts_capture(dev_priv);
- }
+ DRM_INFO("GuC %s (firmware %s [version %u.%u])\n",
+ i915.enable_guc_submission ? "submission enabled" : "loaded",
+ guc->fw.path,
+ guc->fw.major_ver_found, guc->fw.minor_ver_found);
return 0;
-
-fail:
- if (guc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
- guc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
-
- guc_interrupts_release(dev_priv);
- i915_guc_submission_disable(dev_priv);
- i915_guc_submission_fini(dev_priv);
- i915_ggtt_disable_guc(dev_priv);
-
- /*
- * We've failed to load the firmware :(
- *
- * Decide whether to disable GuC submission and fall back to
- * execlist mode, and whether to hide the error by returning
- * zero or to return -EIO, which the caller will treat as a
- * nonfatal error (i.e. it doesn't prevent driver load, but
- * marks the GPU as wedged until reset).
- */
- if (i915.enable_guc_loading > 1) {
- ret = -EIO;
- } else if (i915.enable_guc_submission > 1) {
- ret = -EIO;
- } else {
- ret = 0;
- }
-
- if (err == 0 && !HAS_GUC_UCODE(dev_priv))
- ; /* Don't mention the GuC! */
- else if (err == 0)
- DRM_INFO("GuC firmware load skipped\n");
- else if (ret != -EIO)
- DRM_NOTE("GuC firmware load failed: %d\n", err);
- else
- DRM_WARN("GuC firmware load failed: %d\n", err);
-
- if (i915.enable_guc_submission) {
- if (fw_path == NULL)
- DRM_INFO("GuC submission without firmware not supported\n");
- if (ret == 0)
- DRM_NOTE("Falling back from GuC submission to execlist mode\n");
- else
- DRM_ERROR("GuC init failed: %d\n", ret);
- }
- i915.enable_guc_submission = 0;
-
- return ret;
-}
-
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw)
-{
- struct pci_dev *pdev = dev_priv->drm.pdev;
- struct drm_i915_gem_object *obj;
- const struct firmware *fw = NULL;
- struct uc_css_header *css;
- size_t size;
- int err;
-
- DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
- intel_uc_fw_status_repr(uc_fw->fetch_status));
-
- err = request_firmware(&fw, uc_fw->path, &pdev->dev);
- if (err)
- goto fail;
- if (!fw)
- goto fail;
-
- DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
- uc_fw->path, fw);
-
- /* Check the size of the blob before examining buffer contents */
- if (fw->size < sizeof(struct uc_css_header)) {
- DRM_NOTE("Firmware header is missing\n");
- goto fail;
- }
-
- css = (struct uc_css_header *)fw->data;
-
- /* Firmware bits always start from header */
- uc_fw->header_offset = 0;
- uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
- css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
-
- if (uc_fw->header_size != sizeof(struct uc_css_header)) {
- DRM_NOTE("CSS header definition mismatch\n");
- goto fail;
- }
-
- /* then, uCode */
- uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
- uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
-
- /* now RSA */
- if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
- DRM_NOTE("RSA key size is bad\n");
- goto fail;
- }
- uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
- uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
-
- /* At least, it should have header, uCode and RSA. Size of all three. */
- size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
- if (fw->size < size) {
- DRM_NOTE("Missing firmware components\n");
- goto fail;
- }
-
- /*
- * The GuC firmware image has the version number embedded at a well-known
- * offset within the firmware blob; note that major / minor version are
- * TWO bytes each (i.e. u16), although all pointers and offsets are defined
- * in terms of bytes (u8).
- */
- switch (uc_fw->fw) {
- case INTEL_UC_FW_TYPE_GUC:
- /* Header and uCode will be loaded to WOPCM. Size of the two. */
- size = uc_fw->header_size + uc_fw->ucode_size;
-
- /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
- if (size > intel_guc_wopcm_size(dev_priv)) {
- DRM_ERROR("Firmware is too large to fit in WOPCM\n");
- goto fail;
- }
- uc_fw->major_ver_found = css->guc.sw_version >> 16;
- uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
- break;
-
- case INTEL_UC_FW_TYPE_HUC:
- uc_fw->major_ver_found = css->huc.sw_version >> 16;
- uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
- break;
-
- default:
- DRM_ERROR("Unknown firmware type %d\n", uc_fw->fw);
- err = -ENOEXEC;
- goto fail;
- }
-
- if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
- uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
- DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
- err = -ENOEXEC;
- goto fail;
- }
-
- DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
- uc_fw->major_ver_found, uc_fw->minor_ver_found,
- uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
- mutex_unlock(&dev_priv->drm.struct_mutex);
- if (IS_ERR_OR_NULL(obj)) {
- err = obj ? PTR_ERR(obj) : -ENOMEM;
- goto fail;
- }
-
- uc_fw->obj = obj;
- uc_fw->size = fw->size;
-
- DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
- uc_fw->obj);
-
- release_firmware(fw);
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
- return;
-
-fail:
- DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
- uc_fw->path, err);
- DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
- err, fw, uc_fw->obj);
-
- mutex_lock(&dev_priv->drm.struct_mutex);
- obj = uc_fw->obj;
- if (obj)
- i915_gem_object_put(obj);
- uc_fw->obj = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
-
- release_firmware(fw); /* OK even if fw is NULL */
- uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
}
/**
- * intel_guc_init() - define parameters and fetch firmware
- * @dev_priv: i915 device private
- *
- * Called early during driver load, but after GEM is initialised.
+ * intel_guc_select_fw() - selects GuC firmware for loading
+ * @guc: intel_guc struct
*
- * The firmware will be transferred to the GuC's memory later,
- * when intel_guc_setup() is called.
+ * Return: zero when the firmware is known, non-zero otherwise
*/
-void intel_guc_init(struct drm_i915_private *dev_priv)
+int intel_guc_select_fw(struct intel_guc *guc)
{
- struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
- const char *fw_path;
+ struct drm_i915_private *dev_priv = guc_to_i915(guc);
- if (!HAS_GUC(dev_priv)) {
- i915.enable_guc_loading = 0;
- i915.enable_guc_submission = 0;
- } else {
- /* A negative value means "use platform default" */
- if (i915.enable_guc_loading < 0)
- i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
- if (i915.enable_guc_submission < 0)
- i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
- }
+ guc->fw.path = NULL;
+ guc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
+ guc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
+ guc->fw.type = INTEL_UC_FW_TYPE_GUC;
- if (!HAS_GUC_UCODE(dev_priv)) {
- fw_path = NULL;
+ if (i915.guc_firmware_path) {
+ guc->fw.path = i915.guc_firmware_path;
+ guc->fw.major_ver_wanted = 0;
+ guc->fw.minor_ver_wanted = 0;
} else if (IS_SKYLAKE(dev_priv)) {
- fw_path = I915_SKL_GUC_UCODE;
- guc_fw->major_ver_wanted = SKL_FW_MAJOR;
- guc_fw->minor_ver_wanted = SKL_FW_MINOR;
+ guc->fw.path = I915_SKL_GUC_UCODE;
+ guc->fw.major_ver_wanted = SKL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = SKL_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- fw_path = I915_BXT_GUC_UCODE;
- guc_fw->major_ver_wanted = BXT_FW_MAJOR;
- guc_fw->minor_ver_wanted = BXT_FW_MINOR;
+ guc->fw.path = I915_BXT_GUC_UCODE;
+ guc->fw.major_ver_wanted = BXT_FW_MAJOR;
+ guc->fw.minor_ver_wanted = BXT_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
- fw_path = I915_KBL_GUC_UCODE;
- guc_fw->major_ver_wanted = KBL_FW_MAJOR;
- guc_fw->minor_ver_wanted = KBL_FW_MINOR;
+ guc->fw.path = I915_KBL_GUC_UCODE;
+ guc->fw.major_ver_wanted = KBL_FW_MAJOR;
+ guc->fw.minor_ver_wanted = KBL_FW_MINOR;
} else {
- fw_path = ""; /* unknown device */
+ DRM_ERROR("No GuC firmware known for platform with GuC!\n");
+ return -ENOENT;
}
- guc_fw->path = fw_path;
- guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
- guc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
-
- /* Early (and silent) return if GuC loading is disabled */
- if (!i915.enable_guc_loading)
- return;
- if (fw_path == NULL)
- return;
- if (*fw_path == '\0')
- return;
-
- guc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
- DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
- intel_uc_fw_fetch(dev_priv, guc_fw);
- /* status must now be FAIL or SUCCESS */
+ return 0;
}
/**
@@ -792,16 +438,16 @@ void intel_guc_init(struct drm_i915_private *dev_priv)
void intel_guc_fini(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *guc_fw = &dev_priv->guc.fw;
+ struct drm_i915_gem_object *obj;
mutex_lock(&dev_priv->drm.struct_mutex);
- guc_interrupts_release(dev_priv);
i915_guc_submission_disable(dev_priv);
i915_guc_submission_fini(dev_priv);
-
- if (guc_fw->obj)
- i915_gem_object_put(guc_fw->obj);
- guc_fw->obj = NULL;
mutex_unlock(&dev_priv->drm.struct_mutex);
+ obj = fetch_and_zero(&guc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
+
guc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
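
The intel_guc_select_fw() rewrite above also adds an escape hatch: a user-supplied i915.guc_firmware_path takes precedence over the per-platform table and, by zeroing major/minor_ver_wanted, disables the version check entirely. The selection policy distilled into a sketch; the paths and version numbers here are placeholders, not the real firmware blobs:

#include <stdio.h>

enum platform { PLAT_SKL, PLAT_BXT, PLAT_KBL, PLAT_OTHER };

struct fw_desc {
	const char *path;
	unsigned int major_wanted, minor_wanted;	/* 0.0 == don't check */
};

/* User override first, then the per-platform table, else "no fw known". */
static int select_guc_fw(const char *user_path, enum platform p,
			 struct fw_desc *fw)
{
	static const struct fw_desc table[] = {
		[PLAT_SKL] = { "i915/guc_skl.bin", 6, 1 },
		[PLAT_BXT] = { "i915/guc_bxt.bin", 8, 7 },
		[PLAT_KBL] = { "i915/guc_kbl.bin", 9, 14 },
	};

	if (user_path) {
		*fw = (struct fw_desc){ user_path, 0, 0 };
		return 0;
	}
	if (p >= PLAT_OTHER)
		return -2;	/* stands in for -ENOENT */
	*fw = table[p];
	return 0;
}

int main(void)
{
	struct fw_desc fw;

	if (!select_guc_fw(NULL, PLAT_SKL, &fw))
		printf("%s (want %u.%u)\n", fw.path, fw.major_wanted, fw.minor_wanted);
	if (!select_guc_fw("custom/guc.bin", PLAT_OTHER, &fw))
		printf("%s (version check disabled)\n", fw.path);
	return 0;
}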
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index f05971f5586f..dce742243ba6 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -480,3 +480,7 @@ void intel_hangcheck_init(struct drm_i915_private *i915)
INIT_DELAYED_WORK(&i915->gpu_error.hangcheck_work,
i915_hangcheck_elapsed);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_hangcheck.c"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ebae2bd83918..3eec74ca5116 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -902,12 +902,11 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
- enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
ret = false;
@@ -927,7 +926,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@@ -1298,16 +1297,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc_state->base.crtc->dev;
+ struct drm_i915_private *dev_priv =
+ to_i915(crtc_state->base.crtc->dev);
+ struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector_state *connector_state;
+ struct drm_connector *connector;
+ int i;
- if (HAS_GMCH_DISPLAY(to_i915(dev)))
+ if (HAS_GMCH_DISPLAY(dev_priv))
return false;
/*
* HDMI 12bpc affects the clocks, so it's only possible
* when not cloning with other encoder types.
*/
- return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
+ if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI)
+ return false;
+
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ const struct drm_display_info *info = &connector->display_info;
+
+ if (connector_state->crtc != crtc_state->base.crtc)
+ continue;
+
+ if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0)
+ return false;
+ }
+
+ return true;
}
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
@@ -1869,14 +1886,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
switch (port) {
case PORT_B:
- /*
- * On BXT A0/A1, sw needs to activate DDIA HPD logic and
- * interrupts to check the external panel connection.
- */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- intel_encoder->hpd_pin = HPD_PORT_A;
- else
- intel_encoder->hpd_pin = HPD_PORT_B;
+ intel_encoder->hpd_pin = HPD_PORT_B;
break;
case PORT_C:
intel_encoder->hpd_pin = HPD_PORT_C;
@@ -1988,6 +1998,7 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv,
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
+ intel_encoder->power_domain = intel_port_to_power_domain(port);
intel_encoder->port = port;
if (IS_CHERRYVIEW(dev_priv)) {
if (port == PORT_D)
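
The hdmi_12bpc_possible() rework above is the substantive change in this file: beyond forbidding cloning with non-HDMI encoders, it now walks every connector state attached to the CRTC and refuses 12bpc unless each sink's EDID advertises 36-bit deep color. A self-contained sketch of that predicate, with a flat connector array standing in for the atomic state and a placeholder bit for DRM_EDID_HDMI_DC_36:

#include <stdbool.h>
#include <stdio.h>

#define EDID_HDMI_DC_36 (1u << 0)	/* placeholder bit for DRM_EDID_HDMI_DC_36 */

struct connector { int crtc_id; unsigned int edid_hdmi_dc_modes; };

/* 12bpc is possible only if every connector driven by this CRTC
 * advertises 36-bit deep color support in its EDID. */
static bool hdmi_12bpc_possible(const struct connector *c, int n, int crtc_id)
{
	for (int i = 0; i < n; i++) {
		if (c[i].crtc_id != crtc_id)
			continue;
		if (!(c[i].edid_hdmi_dc_modes & EDID_HDMI_DC_36))
			return false;
	}
	return true;
}

int main(void)
{
	const struct connector c[] = {
		{ 1, EDID_HDMI_DC_36 },	/* deep-color capable sink on crtc 1 */
		{ 2, 0 },		/* 8bpc-only sink on crtc 2 */
	};

	printf("crtc 1: %d, crtc 2: %d\n",
	       hdmi_12bpc_possible(c, 2, 1), hdmi_12bpc_possible(c, 2, 2));
	return 0;
}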
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index b62e3f8ad415..7d210097eefa 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -100,7 +100,6 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
}
#define HPD_STORM_DETECT_PERIOD 1000
-#define HPD_STORM_THRESHOLD 5
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
/**
@@ -112,9 +111,13 @@ bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port)
* storms. Only the pin specific stats and state are changed, the caller is
* responsible for further action.
*
- * @HPD_STORM_THRESHOLD irqs are allowed within @HPD_STORM_DETECT_PERIOD ms,
- * otherwise it's considered an irq storm, and the irq state is set to
- * @HPD_MARK_DISABLED.
+ * The number of irqs that are allowed within @HPD_STORM_DETECT_PERIOD is
+ * stored in @dev_priv->hotplug.hpd_storm_threshold which defaults to
+ * @HPD_STORM_DEFAULT_THRESHOLD. If this threshold is exceeded, it's
+ * considered an irq storm and the irq state is set to @HPD_MARK_DISABLED.
+ *
+ * The HPD threshold can be controlled through i915_hpd_storm_ctl in debugfs,
+ * and should only be adjusted for automated hotplug testing.
*
* Return true if an irq storm was detected on @pin.
*/
@@ -123,13 +126,15 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
{
unsigned long start = dev_priv->hotplug.stats[pin].last_jiffies;
unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD);
+ const int threshold = dev_priv->hotplug.hpd_storm_threshold;
bool storm = false;
if (!time_in_range(jiffies, start, end)) {
dev_priv->hotplug.stats[pin].last_jiffies = jiffies;
dev_priv->hotplug.stats[pin].count = 0;
DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", pin);
- } else if (dev_priv->hotplug.stats[pin].count > HPD_STORM_THRESHOLD) {
+ } else if (dev_priv->hotplug.stats[pin].count > threshold &&
+ threshold) {
dev_priv->hotplug.stats[pin].state = HPD_MARK_DISABLED;
DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
@@ -145,16 +150,17 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
enum hpd_pin pin;
bool hpd_disabled = false;
- assert_spin_locked(&dev_priv->irq_lock);
+ lockdep_assert_held(&dev_priv->irq_lock);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->polled != DRM_CONNECTOR_POLL_HPD)
continue;
@@ -177,6 +183,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
| DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
+ drm_connector_list_iter_end(&conn_iter);
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
@@ -192,7 +199,6 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_mode_config *mode_config = &dev->mode_config;
int i;
intel_runtime_pm_get(dev_priv);
@@ -200,13 +206,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
spin_lock_irq(&dev_priv->irq_lock);
for_each_hpd_pin(i) {
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (dev_priv->hotplug.stats[i].state != HPD_DISABLED)
continue;
dev_priv->hotplug.stats[i].state = HPD_ENABLED;
- list_for_each_entry(connector, &mode_config->connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector = to_intel_connector(connector);
if (intel_connector->encoder->hpd_pin == i) {
@@ -218,8 +226,9 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
connector->polled = DRM_CONNECTOR_POLL_HPD;
}
}
+ drm_connector_list_iter_end(&conn_iter);
}
- if (dev_priv->display.hpd_irq_setup)
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -303,14 +312,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_i915_private *dev_priv =
container_of(work, struct drm_i915_private, hotplug.hotplug_work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_connector *intel_connector;
struct intel_encoder *intel_encoder;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
bool changed = false;
u32 hpd_event_bits;
- mutex_lock(&mode_config->mutex);
+ mutex_lock(&dev->mode_config.mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -323,7 +332,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
intel_connector = to_intel_connector(connector);
if (!intel_connector->encoder)
continue;
@@ -337,7 +347,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
changed = true;
}
}
- mutex_unlock(&mode_config->mutex);
+ drm_connector_list_iter_end(&conn_iter);
+ mutex_unlock(&dev->mode_config.mutex);
if (changed)
drm_kms_helper_hotplug_event(dev);
@@ -425,7 +436,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
}
}
- if (storm_detected)
+ if (storm_detected && dev_priv->display_irqs_enabled)
dev_priv->display.hpd_irq_setup(dev_priv);
spin_unlock(&dev_priv->irq_lock);
@@ -471,10 +482,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
* Interrupt setup is already guaranteed to be single-threaded, this is
* just to make the assert_spin_locked checks happy.
*/
- spin_lock_irq(&dev_priv->irq_lock);
- if (dev_priv->display.hpd_irq_setup)
- dev_priv->display.hpd_irq_setup(dev_priv);
- spin_unlock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) {
+ spin_lock_irq(&dev_priv->irq_lock);
+ if (dev_priv->display_irqs_enabled)
+ dev_priv->display.hpd_irq_setup(dev_priv);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ }
}
static void i915_hpd_poll_init_work(struct work_struct *work)
@@ -483,15 +496,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
bool enabled;
mutex_lock(&dev->mode_config.mutex);
enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
- list_for_each_entry(connector, &mode_config->connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
struct intel_connector *intel_connector =
to_intel_connector(connector);
connector->polled = intel_connector->polled;
@@ -509,6 +523,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
DRM_CONNECTOR_POLL_HPD;
}
}
+ drm_connector_list_iter_end(&conn_iter);
if (enabled)
drm_kms_helper_poll_enable(dev);
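
Two themes run through the intel_hotplug.c hunks: every connector walk moves to the reference-counted drm_connector_list_iter, and the HPD storm threshold becomes a per-device, debugfs-tunable value where zero disables detection entirely (hence the "count > threshold && threshold" guard). The detector itself is just a windowed counter; a user-space rendition, with an integer tick standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

struct hpd_stats { unsigned long window_start; int count; };

/* Mirrors intel_hpd_irq_storm_detect(): restart the window (and the count)
 * when an IRQ arrives outside it; otherwise flag a storm once the count
 * exceeds the threshold. A threshold of 0 disables detection. */
static bool storm_detect(struct hpd_stats *s, unsigned long now,
			 unsigned long period, int threshold)
{
	bool storm = false;

	if (now - s->window_start > period) {
		s->window_start = now;
		s->count = 0;
	} else if (s->count > threshold && threshold) {
		storm = true;
	}
	s->count++;
	return storm;
}

int main(void)
{
	struct hpd_stats s = { 0, 0 };

	for (unsigned long t = 0; t < 10; t++)	/* ten IRQs in one window */
		if (storm_detect(&s, t, 1000, 5))
			printf("storm detected at t=%lu\n", t);
	return 0;
}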
diff --git a/drivers/gpu/drm/i915/intel_huc.c b/drivers/gpu/drm/i915/intel_huc.c
index c144609425f6..7af900bcdc05 100644
--- a/drivers/gpu/drm/i915/intel_huc.c
+++ b/drivers/gpu/drm/i915/intel_huc.c
@@ -141,58 +141,43 @@ static int huc_ucode_xfer(struct drm_i915_private *dev_priv)
}
/**
- * intel_huc_init() - initiate HuC firmware loading request
- * @dev_priv: the drm_i915_private device
- *
- * Called early during driver load, but after GEM is initialised. The loading
- * will continue only when driver explicitly specify firmware name and version.
- * All other cases are considered as INTEL_UC_FIRMWARE_NONE either because HW
- * is not capable or driver yet support it. And there will be no error message
- * for INTEL_UC_FIRMWARE_NONE cases.
- *
- * The DMA-copying to HW is done later when intel_huc_load() is called.
+ * intel_huc_select_fw() - selects HuC firmware for loading
+ * @huc: intel_huc struct
*/
-void intel_huc_init(struct drm_i915_private *dev_priv)
+void intel_huc_select_fw(struct intel_huc *huc)
{
- struct intel_huc *huc = &dev_priv->huc;
- struct intel_uc_fw *huc_fw = &huc->fw;
- const char *fw_path = NULL;
-
- huc_fw->path = NULL;
- huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
- huc_fw->load_status = INTEL_UC_FIRMWARE_NONE;
- huc_fw->fw = INTEL_UC_FW_TYPE_HUC;
-
- if (!HAS_HUC_UCODE(dev_priv))
- return;
-
- if (IS_SKYLAKE(dev_priv)) {
- fw_path = I915_SKL_HUC_UCODE;
- huc_fw->major_ver_wanted = SKL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = SKL_HUC_FW_MINOR;
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
+
+ huc->fw.path = NULL;
+ huc->fw.fetch_status = INTEL_UC_FIRMWARE_NONE;
+ huc->fw.load_status = INTEL_UC_FIRMWARE_NONE;
+ huc->fw.type = INTEL_UC_FW_TYPE_HUC;
+
+ if (i915.huc_firmware_path) {
+ huc->fw.path = i915.huc_firmware_path;
+ huc->fw.major_ver_wanted = 0;
+ huc->fw.minor_ver_wanted = 0;
+ } else if (IS_SKYLAKE(dev_priv)) {
+ huc->fw.path = I915_SKL_HUC_UCODE;
+ huc->fw.major_ver_wanted = SKL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = SKL_HUC_FW_MINOR;
} else if (IS_BROXTON(dev_priv)) {
- fw_path = I915_BXT_HUC_UCODE;
- huc_fw->major_ver_wanted = BXT_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = BXT_HUC_FW_MINOR;
+ huc->fw.path = I915_BXT_HUC_UCODE;
+ huc->fw.major_ver_wanted = BXT_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = BXT_HUC_FW_MINOR;
} else if (IS_KABYLAKE(dev_priv)) {
- fw_path = I915_KBL_HUC_UCODE;
- huc_fw->major_ver_wanted = KBL_HUC_FW_MAJOR;
- huc_fw->minor_ver_wanted = KBL_HUC_FW_MINOR;
+ huc->fw.path = I915_KBL_HUC_UCODE;
+ huc->fw.major_ver_wanted = KBL_HUC_FW_MAJOR;
+ huc->fw.minor_ver_wanted = KBL_HUC_FW_MINOR;
+ } else {
+ DRM_ERROR("No HuC firmware known for platform with HuC!\n");
+ return;
}
-
- huc_fw->path = fw_path;
- huc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
-
- DRM_DEBUG_DRIVER("HuC firmware pending, path %s\n", fw_path);
-
- WARN(huc_fw->path == NULL, "HuC present but no fw path\n");
-
- intel_uc_fw_fetch(dev_priv, huc_fw);
}
/**
- * intel_huc_load() - load HuC uCode to device
- * @dev_priv: the drm_i915_private device
+ * intel_huc_init_hw() - load HuC uCode to device
+ * @huc: intel_huc structure
*
* Called from guc_setup() during driver loading and also after a GPU reset.
 * Note that HuC loading must be done before GuC loading.
@@ -203,26 +188,26 @@ void intel_huc_init(struct drm_i915_private *dev_priv)
*
* Return: non-zero code on error
*/
-int intel_huc_load(struct drm_i915_private *dev_priv)
+int intel_huc_init_hw(struct intel_huc *huc)
{
- struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct drm_i915_private *dev_priv = huc_to_i915(huc);
int err;
- if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_NONE)
+ if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_NONE)
return 0;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc_fw->path,
- intel_uc_fw_status_repr(huc_fw->fetch_status),
- intel_uc_fw_status_repr(huc_fw->load_status));
+ huc->fw.path,
+ intel_uc_fw_status_repr(huc->fw.fetch_status),
+ intel_uc_fw_status_repr(huc->fw.load_status));
- if (huc_fw->fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
- huc_fw->load_status == INTEL_UC_FIRMWARE_FAIL)
+ if (huc->fw.fetch_status == INTEL_UC_FIRMWARE_SUCCESS &&
+ huc->fw.load_status == INTEL_UC_FIRMWARE_FAIL)
return -ENOEXEC;
- huc_fw->load_status = INTEL_UC_FIRMWARE_PENDING;
+ huc->fw.load_status = INTEL_UC_FIRMWARE_PENDING;
- switch (huc_fw->fetch_status) {
+ switch (huc->fw.fetch_status) {
case INTEL_UC_FIRMWARE_FAIL:
/* something went wrong :( */
err = -EIO;
@@ -233,9 +218,9 @@ int intel_huc_load(struct drm_i915_private *dev_priv)
default:
/* "can't happen" */
WARN_ONCE(1, "HuC fw %s invalid fetch_status %s [%d]\n",
- huc_fw->path,
- intel_uc_fw_status_repr(huc_fw->fetch_status),
- huc_fw->fetch_status);
+ huc->fw.path,
+ intel_uc_fw_status_repr(huc->fw.fetch_status),
+ huc->fw.fetch_status);
err = -ENXIO;
goto fail;
@@ -247,18 +232,18 @@ int intel_huc_load(struct drm_i915_private *dev_priv)
if (err)
goto fail;
- huc_fw->load_status = INTEL_UC_FIRMWARE_SUCCESS;
+ huc->fw.load_status = INTEL_UC_FIRMWARE_SUCCESS;
DRM_DEBUG_DRIVER("%s fw status: fetch %s, load %s\n",
- huc_fw->path,
- intel_uc_fw_status_repr(huc_fw->fetch_status),
- intel_uc_fw_status_repr(huc_fw->load_status));
+ huc->fw.path,
+ intel_uc_fw_status_repr(huc->fw.fetch_status),
+ intel_uc_fw_status_repr(huc->fw.load_status));
return 0;
fail:
- if (huc_fw->load_status == INTEL_UC_FIRMWARE_PENDING)
- huc_fw->load_status = INTEL_UC_FIRMWARE_FAIL;
+ if (huc->fw.load_status == INTEL_UC_FIRMWARE_PENDING)
+ huc->fw.load_status = INTEL_UC_FIRMWARE_FAIL;
DRM_ERROR("Failed to complete HuC uCode load with ret %d\n", err);
@@ -274,12 +259,11 @@ fail:
void intel_huc_fini(struct drm_i915_private *dev_priv)
{
struct intel_uc_fw *huc_fw = &dev_priv->huc.fw;
+ struct drm_i915_gem_object *obj;
- mutex_lock(&dev_priv->drm.struct_mutex);
- if (huc_fw->obj)
- i915_gem_object_put(huc_fw->obj);
- huc_fw->obj = NULL;
- mutex_unlock(&dev_priv->drm.struct_mutex);
+ obj = fetch_and_zero(&huc_fw->obj);
+ if (obj)
+ i915_gem_object_put(obj);
huc_fw->fetch_status = INTEL_UC_FIRMWARE_NONE;
}
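
Both fini paths in this series (intel_guc_fini() earlier and intel_huc_fini() here) replace a lock-protected test/put/clear sequence with fetch_and_zero(): take the pointer, clear the slot, then release the object outside the lock. A user-space rendition of the idiom, written as a GNU C statement expression like the kernel macro; free() stands in for i915_gem_object_put(), and note it is not atomic, so concurrent teardown still needs external serialisation:

#include <stdio.h>
#include <stdlib.h>

/* Not atomic -- like the kernel helper, callers must serialise teardown. */
#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) __v = *(ptr);	\
	*(ptr) = (__typeof__(*(ptr)))0;		\
	__v;					\
})

struct fw { void *obj; };

static void fw_fini(struct fw *fw)
{
	void *obj = fetch_and_zero(&fw->obj);

	if (obj)
		free(obj);	/* stands in for i915_gem_object_put() */
}

int main(void)
{
	struct fw fw = { .obj = malloc(16) };

	fw_fini(&fw);
	fw_fini(&fw);	/* second call sees NULL and is a harmless no-op */
	puts("ok");
	return 0;
}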
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index bce1ba80f277..b6401e8f1bd6 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -74,7 +74,7 @@ static const struct gmbus_pin *get_gmbus_pin(struct drm_i915_private *dev_priv,
{
if (IS_GEN9_LP(dev_priv))
return &gmbus_pins_bxt[pin];
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ else if (IS_GEN9_BC(dev_priv))
return &gmbus_pins_skl[pin];
else if (IS_BROADWELL(dev_priv))
return &gmbus_pins_bdw[pin];
@@ -89,7 +89,7 @@ bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
if (IS_GEN9_LP(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bxt);
- else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ else if (IS_GEN9_BC(dev_priv))
size = ARRAY_SIZE(gmbus_pins_skl);
else if (IS_BROADWELL(dev_priv))
size = ARRAY_SIZE(gmbus_pins_bdw);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ebf8023d21e6..77168e673e0a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -190,13 +190,7 @@
#define CTX_R_PWR_CLK_STATE 0x42
#define CTX_GPGPU_CSR_BASE_ADDRESS 0x44
-#define GEN8_CTX_VALID (1<<0)
-#define GEN8_CTX_FORCE_PD_RESTORE (1<<1)
-#define GEN8_CTX_FORCE_RESTORE (1<<2)
-#define GEN8_CTX_L3LLC_COHERENT (1<<5)
-#define GEN8_CTX_PRIVILEGE (1<<8)
-
-#define ASSIGN_CTX_REG(reg_state, pos, reg, val) do { \
+#define CTX_REG(reg_state, pos, reg, val) do { \
(reg_state)[(pos)+0] = i915_mmio_reg_offset(reg); \
(reg_state)[(pos)+1] = (val); \
} while (0)
@@ -212,14 +206,6 @@
reg_state[CTX_PDP0_LDW + 1] = lower_32_bits(px_dma(&ppgtt->pml4)); \
} while (0)
-enum {
- FAULT_AND_HANG = 0,
- FAULT_AND_HALT, /* Debug only */
- FAULT_AND_STREAM,
- FAULT_AND_CONTINUE /* Unsupported */
-};
-#define GEN8_CTX_ID_SHIFT 32
-#define GEN8_CTX_ID_WIDTH 21
#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
@@ -267,30 +253,6 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enabl
return 0;
}
-static void
-logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- engine->disable_lite_restore_wa =
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) &&
- (engine->id == VCS || engine->id == VCS2);
-
- engine->ctx_desc_template = GEN8_CTX_VALID;
- if (IS_GEN8(dev_priv))
- engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
- engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
-
- /* TODO: WaDisableLiteRestore when we start using semaphore
- * signalling between Command Streamers */
- /* ring->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE; */
-
- /* WaEnableForceRestoreInCtxtDescForVCS:skl */
- /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
- if (engine->disable_lite_restore_wa)
- engine->ctx_desc_template |= GEN8_CTX_FORCE_RESTORE;
-}
-
/**
* intel_lr_context_descriptor_update() - calculate & cache the descriptor
* descriptor for a pinned context
@@ -304,7 +266,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
*
* This is what a descriptor looks like, from LSB to MSB::
*
- * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
+ * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template)
* bits 12-31: LRCA, GTT address of (the HWSP of) this context
* bits 32-52: ctx ID, a globally unique tag
* bits 53-54: mbz, reserved for use by hardware
@@ -319,8 +281,7 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
- desc = ctx->desc_template; /* bits 3-4 */
- desc |= engine->ctx_desc_template; /* bits 0-11 */
+ desc = ctx->desc_template; /* bits 0-11 */
desc |= i915_ggtt_offset(ce->state) + LRC_PPHWSP_PN * PAGE_SIZE;
/* bits 12-31 */
desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
@@ -345,7 +306,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq,
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
return;
- atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
+ atomic_notifier_call_chain(&rq->engine->context_status_notifier,
+ status, rq);
}
static void
@@ -364,6 +326,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
u32 *reg_state = ce->lrc_reg_state;
+ GEM_BUG_ON(!IS_ALIGNED(rq->tail, 8));
reg_state[CTX_RING_TAIL+1] = rq->tail;
/* True 32b PPGTT with dynamic page allocation: update PDP
@@ -371,7 +334,7 @@ static u64 execlists_update_context(struct drm_i915_gem_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode.
*/
- if (ppgtt && !USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
+ if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc;
@@ -385,17 +348,20 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
dev_priv->regs + i915_mmio_reg_offset(RING_ELSP(engine));
u64 desc[2];
+ GEM_BUG_ON(port[0].count > 1);
if (!port[0].count)
execlists_context_status_change(port[0].request,
INTEL_CONTEXT_SCHEDULE_IN);
desc[0] = execlists_update_context(port[0].request);
- engine->preempt_wa = port[0].count++; /* bdw only? fixed on skl? */
+ GEM_DEBUG_EXEC(port[0].context_id = upper_32_bits(desc[0]));
+ port[0].count++;
if (port[1].request) {
GEM_BUG_ON(port[1].count);
execlists_context_status_change(port[1].request,
INTEL_CONTEXT_SCHEDULE_IN);
desc[1] = execlists_update_context(port[1].request);
+ GEM_DEBUG_EXEC(port[1].context_id = upper_32_bits(desc[1]));
port[1].count = 1;
} else {
desc[1] = 0;
@@ -437,6 +403,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct rb_node *rb;
bool submit = false;
+ /* After execlist_first is updated, the tasklet will be rescheduled.
+ *
+ * If we are currently running (inside the tasklet) and a third
+ * party queues a request and so updates engine->execlist_first under
+ * the spinlock (which we have elided), it will atomically set the
+ * TASKLET_SCHED flag causing us to be re-executed and pick up
+ * the change in state (the update to TASKLET_SCHED incurs a memory
+ * barrier making this cross-cpu checking safe).
+ */
+ if (!READ_ONCE(engine->execlist_first))
+ return;
+
last = port->request;
if (last)
/* WaIdleLiteRestore:bdw,skl
@@ -514,6 +492,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
cursor->priotree.priority = INT_MAX;
__i915_gem_request_submit(cursor);
+ trace_i915_gem_request_in(cursor, port - engine->execlist_port);
last = cursor;
submit = true;
}
@@ -532,37 +511,11 @@ static bool execlists_elsp_idle(struct intel_engine_cs *engine)
return !engine->execlist_port[0].request;
}
-/**
- * intel_execlists_idle() - Determine if all engine submission ports are idle
- * @dev_priv: i915 device private
- *
- * Return true if there are no requests pending on any of the submission ports
- * of any engines.
- */
-bool intel_execlists_idle(struct drm_i915_private *dev_priv)
+static bool execlists_elsp_ready(const struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- if (!i915.enable_execlists)
- return true;
-
- for_each_engine(engine, dev_priv, id)
- if (!execlists_elsp_idle(engine))
- return false;
-
- return true;
-}
-
-static bool execlists_elsp_ready(struct intel_engine_cs *engine)
-{
- int port;
-
- port = 1; /* wait for a free slot */
- if (engine->disable_lite_restore_wa || engine->preempt_wa)
- port = 0; /* wait for GPU to be idle before continuing */
+ const struct execlist_port *port = engine->execlist_port;
- return !engine->execlist_port[port].request;
+ return port[0].count + port[1].count < 2;
}
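port[0] and port[1] mirror the two ELSP submission slots: idle means slot 0 has nothing in flight, while ready means the pair holds fewer than two submissions in total, so another write is allowed. A toy model of the two predicates (struct shapes assumed for illustration):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct port { const void *request; unsigned int count; };

static bool elsp_idle(const struct port p[2])
{
	return !p[0].request;
}

static bool elsp_ready(const struct port p[2])
{
	return p[0].count + p[1].count < 2;
}

int main(void)
{
	const struct port p[2] = { { "rq0", 1 }, { NULL, 0 } };

	printf("idle=%d ready=%d\n", elsp_idle(p), elsp_ready(p));
	return 0;
}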
/*
@@ -577,7 +530,7 @@ static void intel_lrc_irq_handler(unsigned long data)
intel_uncore_forcewake_get(dev_priv, engine->fw_domains);
- if (!execlists_elsp_idle(engine)) {
+ while (test_and_clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
u32 __iomem *csb_mmio =
dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine));
u32 __iomem *buf =
@@ -587,31 +540,55 @@ static void intel_lrc_irq_handler(unsigned long data)
csb = readl(csb_mmio);
head = GEN8_CSB_READ_PTR(csb);
tail = GEN8_CSB_WRITE_PTR(csb);
+ if (head == tail)
+ break;
+
if (tail < head)
tail += GEN8_CSB_ENTRIES;
- while (head < tail) {
+ do {
unsigned int idx = ++head % GEN8_CSB_ENTRIES;
unsigned int status = readl(buf + 2 * idx);
+ /* We are flying near dragons again.
+ *
+ * We hold a reference to the request in execlist_port[]
+ * but no more than that. We are operating in softirq
+ * context and so cannot hold any mutex or sleep. That
+ * prevents us stopping the requests we are processing
+ * in port[] from being retired simultaneously (the
+ * breadcrumb will be complete before we see the
+ * context-switch). As we only hold the reference to the
+ * request, any pointer chasing underneath the request
+ * is subject to a potential use-after-free. Thus we
+ * store all of the bookkeeping within port[] as
+ * required, and avoid using unguarded pointers beneath
+ * request itself. The same applies to the atomic
+ * status notifier.
+ */
+
if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
continue;
+ /* Check the context/desc id for this event matches */
+ GEM_DEBUG_BUG_ON(readl(buf + 2 * idx + 1) !=
+ port[0].context_id);
+
GEM_BUG_ON(port[0].count == 0);
if (--port[0].count == 0) {
GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
+ GEM_BUG_ON(!i915_gem_request_completed(port[0].request));
execlists_context_status_change(port[0].request,
INTEL_CONTEXT_SCHEDULE_OUT);
+ trace_i915_gem_request_out(port[0].request);
i915_gem_request_put(port[0].request);
port[0] = port[1];
memset(&port[1], 0, sizeof(port[1]));
-
- engine->preempt_wa = false;
}
GEM_BUG_ON(port[0].count == 0 &&
!(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
- }
+ } while (head < tail);
writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK,
GEN8_CSB_WRITE_PTR(csb) << 8),
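The loop above walks the context status buffer as a small ring: unwrap the write pointer past the read pointer, then consume entry by entry modulo the ring size. The same arithmetic in isolation, with the entry count assumed to be six (standing in for GEN8_CSB_ENTRIES):

#include <stdint.h>
#include <stdio.h>

#define CSB_ENTRIES 6 /* stands in for GEN8_CSB_ENTRIES */

static void consume(const uint32_t *buf, unsigned int head,
		    unsigned int tail)
{
	if (head == tail)
		return;              /* nothing new */
	if (tail < head)
		tail += CSB_ENTRIES; /* unwrap so head < tail holds */
	do {
		unsigned int idx = ++head % CSB_ENTRIES;

		printf("event at slot %u: %#x\n", idx, buf[idx]);
	} while (head < tail);
}

int main(void)
{
	const uint32_t buf[CSB_ENTRIES] = { 1, 2, 3, 4, 5, 6 };

	consume(buf, 4, 1); /* wraps: consumes slots 5, 0, 1 */
	return 0;
}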
@@ -658,10 +635,11 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->timeline->lock, flags);
- if (insert_request(&request->priotree, &engine->execlist_queue))
+ if (insert_request(&request->priotree, &engine->execlist_queue)) {
engine->execlist_first = &request->priotree.node;
- if (execlists_elsp_idle(engine))
- tasklet_hi_schedule(&engine->irq_tasklet);
+ if (execlists_elsp_ready(engine))
+ tasklet_hi_schedule(&engine->irq_tasklet);
+ }
spin_unlock_irqrestore(&engine->timeline->lock, flags);
}
@@ -776,6 +754,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
if (ce->pin_count++)
return 0;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state) {
ret = execlists_context_deferred_alloc(ctx, engine);
@@ -784,11 +763,9 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
}
GEM_BUG_ON(!ce->state);
- flags = PIN_GLOBAL;
+ flags = PIN_GLOBAL | PIN_HIGH;
if (ctx->ggtt_offset_bias)
flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
- if (i915_gem_context_is_kernel(ctx))
- flags |= PIN_HIGH;
ret = i915_vma_pin(ce->state, 0, GEN8_LR_CONTEXT_ALIGN, flags);
if (ret)
@@ -847,6 +824,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
{
struct intel_engine_cs *engine = request->engine;
struct intel_context *ce = &request->ctx->engine[engine->id];
+ u32 *cs;
int ret;
GEM_BUG_ON(!ce->pin_count);
@@ -871,9 +849,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
goto err;
}
- ret = intel_ring_begin(request, 0);
- if (ret)
+ cs = intel_ring_begin(request, 0);
+ if (IS_ERR(cs)) {
+ ret = PTR_ERR(cs);
goto err_unreserve;
+ }
if (!ce->initialised) {
ret = engine->init_context(request);
@@ -900,51 +880,6 @@ err:
return ret;
}
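intel_ring_begin() now hands back either the dword pointer or an errno encoded in the pointer, the kernel's ERR_PTR convention: the top page of the address space is reserved for error values, so IS_ERR() is a range check and PTR_ERR() recovers the code. A user-space re-implementation for illustration only (not the kernel's headers):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static uint32_t *ring_begin(int fail)
{
	static uint32_t ring[64];

	return fail ? ERR_PTR(-ENOSPC) : ring;
}

int main(void)
{
	uint32_t *cs = ring_begin(1);

	if (IS_ERR(cs)) {
		printf("ring_begin failed: %ld\n", PTR_ERR(cs));
		return 1;
	}
	return 0;
}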
-static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
-{
- int ret, i;
- struct intel_ring *ring = req->ring;
- struct i915_workarounds *w = &req->i915->workarounds;
-
- if (w->count == 0)
- return 0;
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, w->count * 2 + 2);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
- for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
- }
- intel_ring_emit(ring, MI_NOOP);
-
- intel_ring_advance(ring);
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- return 0;
-}
-
-#define wa_ctx_emit(batch, index, cmd) \
- do { \
- int __index = (index)++; \
- if (WARN_ON(__index >= (PAGE_SIZE / sizeof(uint32_t)))) { \
- return -ENOSPC; \
- } \
- batch[__index] = (cmd); \
- } while (0)
-
-#define wa_ctx_emit_reg(batch, index, reg) \
- wa_ctx_emit((batch), (index), i915_mmio_reg_offset(reg))
-
/*
* In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
* the PIPE_CONTROL instruction. This is required for the flush to happen correctly
@@ -961,56 +896,29 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
* This WA is also required for Gen9, so extracting it as a function avoids
* code duplication.
*/
-static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
- uint32_t *batch,
- uint32_t index)
+static u32 *
+gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
- uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
-
- wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT));
- wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
- wa_ctx_emit(batch, index, 0);
-
- wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
- wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, l3sqc4_flush);
-
- wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
- wa_ctx_emit(batch, index, (PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_DC_FLUSH_ENABLE));
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
-
- wa_ctx_emit(batch, index, (MI_LOAD_REGISTER_MEM_GEN8 |
- MI_SRM_LRM_GLOBAL_GTT));
- wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
- wa_ctx_emit(batch, index, i915_ggtt_offset(engine->scratch) + 256);
- wa_ctx_emit(batch, index, 0);
-
- return index;
-}
+ *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = 0;
-static inline uint32_t wa_ctx_start(struct i915_wa_ctx_bb *wa_ctx,
- uint32_t offset,
- uint32_t start_alignment)
-{
- return wa_ctx->offset = ALIGN(offset, start_alignment);
-}
+ *batch++ = MI_LOAD_REGISTER_IMM(1);
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
-static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
- uint32_t offset,
- uint32_t size_alignment)
-{
- wa_ctx->size = offset - wa_ctx->offset;
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
- WARN(wa_ctx->size % size_alignment,
- "wa_ctx_bb failed sanity checks: size %d is not aligned to %d\n",
- wa_ctx->size, size_alignment);
- return 0;
+ *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
+ *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
+ *batch++ = i915_ggtt_offset(engine->scratch) + 256;
+ *batch++ = 0;
+
+ return batch;
}
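The rewrite above moves the workaround emitters to a bump-pointer style: each helper writes dwords through the batch cursor and returns the advanced cursor, so helpers chain by plain assignment instead of threading an index and an error code through every call. A compact illustration of the convention (opcode and register values below are placeholders):

#include <stdint.h>
#include <stdio.h>

static uint32_t *emit_noops(uint32_t *batch, int n)
{
	while (n--)
		*batch++ = 0; /* MI_NOOP encodes as 0 */
	return batch;
}

static uint32_t *emit_lri(uint32_t *batch, uint32_t reg, uint32_t val)
{
	*batch++ = 0x11000001; /* placeholder MI_LOAD_REGISTER_IMM(1) */
	*batch++ = reg;
	*batch++ = val;
	return batch;
}

int main(void)
{
	uint32_t buf[16], *cs = buf;

	cs = emit_lri(cs, 0xb118, 0x40400000); /* e.g. GEN8_L3SQCREG4 */
	cs = emit_noops(cs, 2);
	printf("emitted %td dwords\n", cs - buf);
	return 0;
}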
/*
@@ -1028,42 +936,28 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
* MI_BATCH_BUFFER_END will be added to the per-ctx batch and both of them
* together make a complete batch buffer.
*/
-static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
- struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *batch,
- uint32_t *offset)
+static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
- uint32_t scratch_addr;
- uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
-
/* WaDisableCtxRestoreArbitration:bdw,chv */
- wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
/* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
- if (IS_BROADWELL(engine->i915)) {
- int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
- if (rc < 0)
- return rc;
- index = rc;
- }
+ if (IS_BROADWELL(engine->i915))
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
/* WaClearSlmSpaceAtContextSwitch:bdw,chv */
/* Actual scratch location is at a 128 byte offset */
- scratch_addr = i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
-
- wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
- wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- wa_ctx_emit(batch, index, scratch_addr);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ i915_ggtt_offset(engine->scratch) +
+ 2 * CACHELINE_BYTES);
/* Pad to end of cacheline */
- while (index % CACHELINE_DWORDS)
- wa_ctx_emit(batch, index, MI_NOOP);
+ while ((unsigned long)batch % CACHELINE_BYTES)
+ *batch++ = MI_NOOP;
/*
* MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
@@ -1071,7 +965,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
* in the register CTX_RCS_INDIRECT_CTX
*/
- return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+ return batch;
}
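Note the padding loop keys on the address of the cursor itself, not on a dword index: it emits no-ops until the pointer reaches a cacheline boundary. The same idea in isolation, assuming 64-byte cachelines:

#include <stdint.h>

#define CACHELINE_BYTES 64

static uint32_t *pad_to_cacheline(uint32_t *batch)
{
	while ((uintptr_t)batch % CACHELINE_BYTES)
		*batch++ = 0; /* MI_NOOP */
	return batch;
}

int main(void)
{
	_Alignas(64) uint32_t buf[32];
	uint32_t *cs = pad_to_cacheline(buf + 3);

	return cs - buf == 16 ? 0 : 1; /* pads dwords 3..15 with no-ops */
}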
/*
@@ -1083,65 +977,40 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
* This batch is terminated with MI_BATCH_BUFFER_END and so we need not add padding
* to align it with cacheline as padding after MI_BATCH_BUFFER_END is redundant.
*/
-static int gen8_init_perctx_bb(struct intel_engine_cs *engine,
- struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *batch,
- uint32_t *offset)
+static u32 *gen8_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
- uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
-
/* WaDisableCtxRestoreArbitration:bdw,chv */
- wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-
- wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
+ *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *batch++ = MI_BATCH_BUFFER_END;
- return wa_ctx_end(wa_ctx, *offset = index, 1);
+ return batch;
}
-static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
- struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *batch,
- uint32_t *offset)
+static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
- int ret;
- struct drm_i915_private *dev_priv = engine->i915;
- uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
+ /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
+ batch = gen8_emit_flush_coherentl3_wa(engine, batch);
- /* WaDisableCtxRestoreArbitration:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
-
- /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
- ret = gen8_emit_flush_coherentl3_wa(engine, batch, index);
- if (ret < 0)
- return ret;
- index = ret;
-
- /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl */
- wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
- wa_ctx_emit_reg(batch, index, COMMON_SLICE_CHICKEN2);
- wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(
- GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE));
- wa_ctx_emit(batch, index, MI_NOOP);
+ /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
+ *batch++ = MI_LOAD_REGISTER_IMM(1);
+ *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
+ *batch++ = _MASKED_BIT_DISABLE(
+ GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+ *batch++ = MI_NOOP;
/* WaClearSlmSpaceAtContextSwitch:kbl */
/* Actual scratch location is at a 128 byte offset */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
- u32 scratch_addr =
- i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
-
- wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
- wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
- PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE));
- wa_ctx_emit(batch, index, scratch_addr);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
+ if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
+ batch = gen8_emit_pipe_control(batch,
+ PIPE_CONTROL_FLUSH_L3 |
+ PIPE_CONTROL_GLOBAL_GTT_IVB |
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE,
+ i915_ggtt_offset(engine->scratch)
+ + 2 * CACHELINE_BYTES);
}
- /* WaMediaPoolStateCmdInWABB:bxt */
+ /* WaMediaPoolStateCmdInWABB:bxt,glk */
if (HAS_POOLED_EU(engine->i915)) {
/*
* EU pool configuration is setup along with golden context
@@ -1156,73 +1025,37 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
* possible configurations; to avoid duplication they are
* not shown here again.
*/
- u32 eu_pool_config = 0x00777000;
- wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
- wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
- wa_ctx_emit(batch, index, eu_pool_config);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
- wa_ctx_emit(batch, index, 0);
+ *batch++ = GEN9_MEDIA_POOL_STATE;
+ *batch++ = GEN9_MEDIA_POOL_ENABLE;
+ *batch++ = 0x00777000;
+ *batch++ = 0;
+ *batch++ = 0;
+ *batch++ = 0;
}
/* Pad to end of cacheline */
- while (index % CACHELINE_DWORDS)
- wa_ctx_emit(batch, index, MI_NOOP);
+ while ((unsigned long)batch % CACHELINE_BYTES)
+ *batch++ = MI_NOOP;
- return wa_ctx_end(wa_ctx, *offset = index, CACHELINE_DWORDS);
+ return batch;
}
-static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
- struct i915_wa_ctx_bb *wa_ctx,
- uint32_t *batch,
- uint32_t *offset)
+static u32 *gen9_init_perctx_bb(struct intel_engine_cs *engine, u32 *batch)
{
- uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
-
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
- if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
- wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
- wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
- wa_ctx_emit(batch, index,
- _MASKED_BIT_ENABLE(DISABLE_PIXEL_MASK_CAMMING));
- wa_ctx_emit(batch, index, MI_NOOP);
- }
+ *batch++ = MI_BATCH_BUFFER_END;
- /* WaClearTdlStateAckDirtyBits:bxt */
- if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
- wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
-
- wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
- wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
-
- wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE1);
- wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
-
- wa_ctx_emit_reg(batch, index, GEN9_STATE_ACK_SLICE2);
- wa_ctx_emit(batch, index, _MASKED_BIT_DISABLE(GEN9_SUBSLICE_TDL_ACK_BITS));
-
- wa_ctx_emit_reg(batch, index, GEN7_ROW_CHICKEN2);
- /* dummy write to CS, mask bits are 0 to ensure the register is not modified */
- wa_ctx_emit(batch, index, 0x0);
- wa_ctx_emit(batch, index, MI_NOOP);
- }
-
- /* WaDisableCtxRestoreArbitration:bxt */
- if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
- wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-
- wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
-
- return wa_ctx_end(wa_ctx, *offset = index, 1);
+ return batch;
}
-static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
+#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
+
+static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
- obj = i915_gem_object_create(engine->i915, PAGE_ALIGN(size));
+ obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1244,82 +1077,79 @@ err:
return err;
}
-static void lrc_destroy_wa_ctx_obj(struct intel_engine_cs *engine)
+static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
{
i915_vma_unpin_and_release(&engine->wa_ctx.vma);
}
+typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
+
static int intel_init_workaround_bb(struct intel_engine_cs *engine)
{
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
- uint32_t *batch;
- uint32_t offset;
+ struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
+ &wa_ctx->per_ctx };
+ wa_bb_func_t wa_bb_fn[2];
struct page *page;
+ void *batch, *batch_ptr;
+ unsigned int i;
int ret;
- WARN_ON(engine->id != RCS);
+ if (WARN_ON(engine->id != RCS || !engine->scratch))
+ return -EINVAL;
- /* update this when WA for higher Gen are added */
- if (INTEL_GEN(engine->i915) > 9) {
- DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
- INTEL_GEN(engine->i915));
+ switch (INTEL_GEN(engine->i915)) {
+ case 9:
+ wa_bb_fn[0] = gen9_init_indirectctx_bb;
+ wa_bb_fn[1] = gen9_init_perctx_bb;
+ break;
+ case 8:
+ wa_bb_fn[0] = gen8_init_indirectctx_bb;
+ wa_bb_fn[1] = gen8_init_perctx_bb;
+ break;
+ default:
+ MISSING_CASE(INTEL_GEN(engine->i915));
return 0;
}
- /* some WA perform writes to scratch page, ensure it is valid */
- if (!engine->scratch) {
- DRM_ERROR("scratch page not allocated for %s\n", engine->name);
- return -EINVAL;
- }
-
- ret = lrc_setup_wa_ctx_obj(engine, PAGE_SIZE);
+ ret = lrc_setup_wa_ctx(engine);
if (ret) {
DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
return ret;
}
page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
- batch = kmap_atomic(page);
- offset = 0;
-
- if (IS_GEN8(engine->i915)) {
- ret = gen8_init_indirectctx_bb(engine,
- &wa_ctx->indirect_ctx,
- batch,
- &offset);
- if (ret)
- goto out;
+ batch = batch_ptr = kmap_atomic(page);
- ret = gen8_init_perctx_bb(engine,
- &wa_ctx->per_ctx,
- batch,
- &offset);
- if (ret)
- goto out;
- } else if (IS_GEN9(engine->i915)) {
- ret = gen9_init_indirectctx_bb(engine,
- &wa_ctx->indirect_ctx,
- batch,
- &offset);
- if (ret)
- goto out;
-
- ret = gen9_init_perctx_bb(engine,
- &wa_ctx->per_ctx,
- batch,
- &offset);
- if (ret)
- goto out;
+ /*
+ * Emit the two workaround batch buffers, recording the offset from the
+ * start of the workaround batch buffer object for each and their
+ * respective sizes.
+ */
+ for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
+ wa_bb[i]->offset = batch_ptr - batch;
+ if (WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, CACHELINE_BYTES))) {
+ ret = -EINVAL;
+ break;
+ }
+ batch_ptr = wa_bb_fn[i](engine, batch_ptr);
+ wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
}
-out:
+ BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
+
kunmap_atomic(batch);
if (ret)
- lrc_destroy_wa_ctx_obj(engine);
+ lrc_destroy_wa_ctx(engine);
return ret;
}
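The loop above is table-driven: a pair of emitters is picked by generation, run back to back over the single workaround page, and each batch's offset and size are recorded in bytes as the cursor advances. The skeleton of that pattern, reduced to two dummy emitters (names assumed):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t *(*wa_bb_fn)(uint32_t *batch);

struct wa_bb { unsigned int offset, size; }; /* both in bytes */

static uint32_t *emit_first(uint32_t *b)  { *b++ = 0x1; return b; }
static uint32_t *emit_second(uint32_t *b) { *b++ = 0x2; *b++ = 0x3; return b; }

int main(void)
{
	uint32_t page[1024]; /* stands in for the pinned WA page */
	struct wa_bb wa[2];
	const wa_bb_fn fn[2] = { emit_first, emit_second };
	uint32_t *cur = page;

	for (unsigned int i = 0; i < 2; i++) {
		wa[i].offset = (cur - page) * sizeof(*page);
		cur = fn[i](cur);
		wa[i].size = (cur - page) * sizeof(*page) - wa[i].offset;
	}
	printf("bb0 %u+%u, bb1 %u+%u\n",
	       wa[0].offset, wa[0].size, wa[1].offset, wa[1].size);
	return 0;
}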
+static u32 port_seqno(struct execlist_port *port)
+{
+ return port->request ? port->request->global_seqno : 0;
+}
+
static int gen8_init_common_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1334,7 +1164,6 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
I915_WRITE(RING_MODE_GEN7(engine),
- _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
I915_WRITE(RING_HWS_PGA(engine->mmio_base),
engine->status_page.ggtt_offset);
@@ -1343,7 +1172,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", engine->name);
/* After a GPU reset, we may have requests to replay */
- if (!execlists_elsp_idle(engine)) {
+ clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+ if (!i915.enable_guc_submission && !execlists_elsp_idle(engine)) {
+ DRM_DEBUG_DRIVER("Restarting %s from requests [0x%x, 0x%x]\n",
+ engine->name,
+ port_seqno(&engine->execlist_port[0]),
+ port_seqno(&engine->execlist_port[1]));
engine->execlist_port[0].count = 0;
engine->execlist_port[1].count = 0;
execlists_submit_ports(engine);
@@ -1388,7 +1222,6 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
static void reset_common_ring(struct intel_engine_cs *engine,
struct drm_i915_gem_request *request)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct execlist_port *port = engine->execlist_port;
struct intel_context *ce;
@@ -1425,11 +1258,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
request->ring->last_retired_head = -1;
intel_ring_update_space(request->ring);
- if (i915.enable_guc_submission)
- return;
-
/* Catch up with any missed context-switch interrupts */
- I915_WRITE(RING_CONTEXT_STATUS_PTR(engine), _MASKED_FIELD(0xffff, 0));
if (request->ctx != port[0].request->ctx) {
i915_gem_request_put(port[0].request);
port[0] = port[1];
@@ -1440,42 +1269,42 @@ static void reset_common_ring(struct intel_engine_cs *engine,
/* Reset WaIdleLiteRestore:bdw,skl as well */
request->tail = request->wa_tail - WA_TAIL_DWORDS * sizeof(u32);
+ GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
}
static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
{
struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
- struct intel_ring *ring = req->ring;
struct intel_engine_cs *engine = req->engine;
- const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
- int i, ret;
+ const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
+ u32 *cs;
+ int i;
- ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
- for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
+ *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
+ for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
- intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
- intel_ring_emit(ring, upper_32_bits(pd_daddr));
- intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
- intel_ring_emit(ring, lower_32_bits(pd_daddr));
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+ *cs++ = upper_32_bits(pd_daddr);
+ *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+ *cs++ = lower_32_bits(pd_daddr);
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
- unsigned int dispatch_flags)
+ const unsigned int flags)
{
- struct intel_ring *ring = req->ring;
- bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+ u32 *cs;
int ret;
/* Don't rely on hw updating PDPs, especially in lite-restore.
@@ -1485,30 +1314,28 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
* not idle). PML4 is allocated during ppgtt init, so this is
* not needed in 48-bit mode. */
if (req->ctx->ppgtt &&
- (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
- if (!USES_FULL_48BIT_PPGTT(req->i915) &&
- !intel_vgpu_active(req->i915)) {
- ret = intel_logical_ring_emit_pdps(req);
- if (ret)
- return ret;
- }
+ (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
+ !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
+ !intel_vgpu_active(req->i915)) {
+ ret = intel_logical_ring_emit_pdps(req);
+ if (ret)
+ return ret;
req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
- (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
+ (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1529,13 +1356,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
{
- struct intel_ring *ring = request->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(request, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW + 1;
@@ -1552,13 +1377,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
cmd |= MI_INVALIDATE_BSD;
}
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring,
- I915_GEM_HWS_SCRATCH_ADDR |
- MI_FLUSH_DW_USE_GTT);
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
+ intel_ring_advance(request, cs);
return 0;
}
@@ -1566,13 +1389,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
u32 mode)
{
- struct intel_ring *ring = request->ring;
struct intel_engine_cs *engine = request->engine;
u32 scratch_addr =
i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
bool vf_flush_wa = false, dc_flush_wa = false;
- u32 flags = 0;
- int ret;
+ u32 *cs, flags = 0;
int len;
flags |= PIPE_CONTROL_CS_STALL;
@@ -1614,62 +1435,25 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
if (dc_flush_wa)
len += 12;
- ret = intel_ring_begin(request, len);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, len);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- if (vf_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- }
+ if (vf_flush_wa)
+ cs = gen8_emit_pipe_control(cs, 0, 0);
- if (dc_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- }
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
+ 0);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
-
- if (dc_flush_wa) {
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- }
+ cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
- intel_ring_advance(ring);
+ if (dc_flush_wa)
+ cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
- return 0;
-}
+ intel_ring_advance(request, cs);
-static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
-{
- /*
- * On BXT A steppings there is a HW coherency issue whereby the
- * MI_STORE_DATA_IMM storing the completed request's seqno
- * occasionally doesn't invalidate the CPU cache. Work around this by
- * clflushing the corresponding cacheline whenever the caller wants
- * the coherency to be guaranteed. Note that this cacheline is known
- * to be clean at this point, since we only write it in
- * bxt_a_set_seqno(), where we also do a clflush after the write. So
- * this clflush in practice becomes an invalidate operation.
- */
- intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
+ return 0;
}
/*
@@ -1677,34 +1461,34 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
* used as a workaround for not being allowed to do lite
* restore with HEAD==TAIL (WaIdleLiteRestore).
*/
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
{
- *out++ = MI_NOOP;
- *out++ = MI_NOOP;
- request->wa_tail = intel_ring_offset(request->ring, out);
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ request->wa_tail = intel_ring_offset(request, cs);
}
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
- u32 *out)
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
{
/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
- *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
- *out++ = 0;
- *out++ = request->global_seqno;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
- request->tail = intel_ring_offset(request->ring, out);
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+ *cs++ = request->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+ request->tail = intel_ring_offset(request, cs);
+ GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
- gen8_emit_wa_tail(request, out);
+ gen8_emit_wa_tail(request, cs);
}
static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
- u32 *out)
+ u32 *cs)
{
/* We're using qword write, seqno should be aligned to 8 bytes. */
BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1713,20 +1497,20 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
* need a prior CS_STALL, which is emitted by the flush
* following the batch.
*/
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE);
- *out++ = intel_hws_seqno_address(request->engine);
- *out++ = 0;
- *out++ = request->global_seqno;
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_hws_seqno_address(request->engine);
+ *cs++ = 0;
+ *cs++ = request->global_seqno;
/* We're thrashing one dword of HWS. */
- *out++ = 0;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
- request->tail = intel_ring_offset(request->ring, out);
+ *cs++ = 0;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+ request->tail = intel_ring_offset(request, cs);
+ GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
- gen8_emit_wa_tail(request, out);
+ gen8_emit_wa_tail(request, cs);
}
static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
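Both breadcrumb emitters lean on compile-time checks: the MI_FLUSH_DW address must keep bit 5 clear, and a qword seqno write needs an 8-byte-aligned slot. _Static_assert expresses the same checks as BUILD_BUG_ON in plain C11 (the index and shift below are invented for the example):

#include <stdint.h>

#define HWS_INDEX      0x30          /* invented stand-in slot */
#define HWS_INDEX_ADDR (HWS_INDEX << 2)

_Static_assert((HWS_INDEX & 1) == 0,
	       "qword seqno write needs an even dword slot");
_Static_assert((HWS_INDEX_ADDR & (1 << 5)) == 0,
	       "MI_FLUSH_DW requires address bit 5 clear");

int main(void)
{
	return 0;
}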
@@ -1735,7 +1519,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
{
int ret;
- ret = intel_logical_ring_workarounds_emit(req);
+ ret = intel_ring_workarounds_emit(req);
if (ret)
return ret;
@@ -1781,21 +1565,16 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
intel_engine_cleanup_common(engine);
- lrc_destroy_wa_ctx_obj(engine);
+ lrc_destroy_wa_ctx(engine);
engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
-void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
- for_each_engine(engine, dev_priv, id) {
- engine->submit_request = execlists_submit_request;
- engine->schedule = execlists_schedule;
- }
+ engine->submit_request = execlists_submit_request;
+ engine->schedule = execlists_schedule;
}
static void
@@ -1813,14 +1592,12 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->emit_flush = gen8_emit_flush;
engine->emit_breadcrumb = gen8_emit_breadcrumb;
engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
- engine->submit_request = execlists_submit_request;
- engine->schedule = execlists_schedule;
+
+ engine->set_default_submission = execlists_set_default_submission;
engine->irq_enable = gen8_logical_ring_enable_irq;
engine->irq_disable = gen8_logical_ring_disable_irq;
engine->emit_bb_start = gen8_emit_bb_start;
- if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
- engine->irq_seqno_barrier = bxt_a_seqno_barrier;
}
static inline void
@@ -1877,7 +1654,6 @@ logical_ring_setup(struct intel_engine_cs *engine)
tasklet_init(&engine->irq_tasklet,
intel_lrc_irq_handler, (unsigned long)engine);
- logical_ring_init_platform_invariants(engine);
logical_ring_default_vfuncs(engine);
logical_ring_default_irqs(engine);
}
@@ -2015,105 +1791,89 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
return indirect_ctx_offset;
}
-static void execlists_init_reg_state(u32 *reg_state,
+static void execlists_init_reg_state(u32 *regs,
struct i915_gem_context *ctx,
struct intel_engine_cs *engine,
struct intel_ring *ring)
{
struct drm_i915_private *dev_priv = engine->i915;
struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
+ u32 base = engine->mmio_base;
+ bool rcs = engine->id == RCS;
+
+ /* A context is actually a big batch buffer with several
+ * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
+ * values we are setting here are only for the first context restore:
+ * on a subsequent save, the GPU will recreate this batchbuffer with new
+ * values (including all the missing MI_LOAD_REGISTER_IMM commands that
+ * we are not initializing here).
+ */
+ regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
+ MI_LRI_FORCE_POSTED;
+
+ CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
+ _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
+ (HAS_RESOURCE_STREAMER(dev_priv) ?
+ CTX_CTRL_RS_CTX_ENABLE : 0)));
+ CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
+ CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
+ CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
+ CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
+ RING_CTL_SIZE(ring->size) | RING_VALID);
+ CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
+ CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
+ CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
+ CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
+ CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
+ CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
+ if (rcs) {
+ CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
+ CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
+ CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
+ RING_INDIRECT_CTX_OFFSET(base), 0);
- /* A context is actually a big batch buffer with several MI_LOAD_REGISTER_IMM
- * commands followed by (reg, value) pairs. The values we are setting here are
- * only for the first context restore: on a subsequent save, the GPU will
- * recreate this batchbuffer with new values (including all the missing
- * MI_LOAD_REGISTER_IMM commands that we are not initializing here). */
- reg_state[CTX_LRI_HEADER_0] =
- MI_LOAD_REGISTER_IMM(engine->id == RCS ? 14 : 11) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CONTEXT_CONTROL,
- RING_CONTEXT_CONTROL(engine),
- _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
- (HAS_RESOURCE_STREAMER(dev_priv) ?
- CTX_CTRL_RS_CTX_ENABLE : 0)));
- ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_TAIL, RING_TAIL(engine->mmio_base),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_START,
- RING_START(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
- RING_CTL(engine->mmio_base),
- RING_CTL_SIZE(ring->size) | RING_VALID);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
- RING_BBADDR_UDW(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
- RING_BBADDR(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_BB_STATE,
- RING_BBSTATE(engine->mmio_base),
- RING_BB_PPGTT);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_U,
- RING_SBBADDR_UDW(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_HEAD_L,
- RING_SBBADDR(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_SECOND_BB_STATE,
- RING_SBBSTATE(engine->mmio_base), 0);
- if (engine->id == RCS) {
- ASSIGN_CTX_REG(reg_state, CTX_BB_PER_CTX_PTR,
- RING_BB_PER_CTX_PTR(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX,
- RING_INDIRECT_CTX(engine->mmio_base), 0);
- ASSIGN_CTX_REG(reg_state, CTX_RCS_INDIRECT_CTX_OFFSET,
- RING_INDIRECT_CTX_OFFSET(engine->mmio_base), 0);
if (engine->wa_ctx.vma) {
struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
- reg_state[CTX_RCS_INDIRECT_CTX+1] =
- (ggtt_offset + wa_ctx->indirect_ctx.offset * sizeof(uint32_t)) |
- (wa_ctx->indirect_ctx.size / CACHELINE_DWORDS);
+ regs[CTX_RCS_INDIRECT_CTX + 1] =
+ (ggtt_offset + wa_ctx->indirect_ctx.offset) |
+ (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
- reg_state[CTX_RCS_INDIRECT_CTX_OFFSET+1] =
+ regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
intel_lr_indirect_ctx_offset(engine) << 6;
- reg_state[CTX_BB_PER_CTX_PTR+1] =
- (ggtt_offset + wa_ctx->per_ctx.offset * sizeof(uint32_t)) |
- 0x01;
+ regs[CTX_BB_PER_CTX_PTR + 1] =
+ (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
}
}
- reg_state[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
- ASSIGN_CTX_REG(reg_state, CTX_CTX_TIMESTAMP,
- RING_CTX_TIMESTAMP(engine->mmio_base), 0);
+
+ regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
+
+ CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
/* PDP values will be assigned later if needed */
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0),
- 0);
- ASSIGN_CTX_REG(reg_state, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0),
- 0);
-
- if (ppgtt && USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
+ CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
+ CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
+ CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
+ CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
+ CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
+ CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
+ CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
+ CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
+
+ if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
/* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored.
*/
- ASSIGN_CTX_PML4(ppgtt, reg_state);
+ ASSIGN_CTX_PML4(ppgtt, regs);
}
- if (engine->id == RCS) {
- reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
- ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
- make_rpcs(dev_priv));
+ if (rcs) {
+ regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
+ CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
+ make_rpcs(dev_priv));
}
}
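CTX_REG() makes the shape of the context image explicit: a flat dword array where slot n takes a register offset and slot n+1 its initial value, headed by an MI_LOAD_REGISTER_IMM, exactly as the comment at the top of the function describes. A toy version of the helper (slot numbers and mmio offsets invented):

#include <stdint.h>
#include <stdio.h>

#define CTX_LRI_HEADER_0 0x01 /* illustrative slot numbers */
#define CTX_RING_HEAD    0x04

#define CTX_REG(regs, pos, reg, val) do {	\
	(regs)[(pos) + 0] = (reg);		\
	(regs)[(pos) + 1] = (val);		\
} while (0)

int main(void)
{
	uint32_t regs[64] = { 0 };

	regs[CTX_LRI_HEADER_0] = 0x11000000 | (2 * 14 - 1); /* LRI(14) */
	CTX_REG(regs, CTX_RING_HEAD, 0x2034 /* ring HEAD mmio */, 0);
	printf("header %#x, head reg %#x = %u\n",
	       regs[CTX_LRI_HEADER_0], regs[CTX_RING_HEAD],
	       regs[CTX_RING_HEAD + 1]);
	return 0;
}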
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 0c852c024227..e8015e7bf4e9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -68,8 +68,6 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int logical_render_ring_init(struct intel_engine_cs *engine);
int logical_xcs_ring_init(struct intel_engine_cs *engine);
-int intel_engines_init(struct drm_i915_private *dev_priv);
-
/* Logical Ring Contexts */
/* One extra page is added before LRC for GuC as shared data */
@@ -89,7 +87,5 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
int enable_execlists);
-void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
-bool intel_execlists_idle(struct drm_i915_private *dev_priv);
#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
index c300647ef604..71cbe9c08932 100644
--- a/drivers/gpu/drm/i915/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/intel_lspcon.c
@@ -162,21 +162,8 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
unsigned long start = jiffies;
- if (!lspcon->desc_valid)
- return;
-
while (1) {
- struct intel_dp_desc desc;
-
- /*
- * The w/a only applies in PCON mode and we don't expect any
- * AUX errors.
- */
- if (!__intel_dp_read_desc(intel_dp, &desc))
- return;
-
- if (intel_digital_port_connected(dev_priv, dig_port) &&
- !memcmp(&intel_dp->desc, &desc, sizeof(desc))) {
+ if (intel_digital_port_connected(dev_priv, dig_port)) {
DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n",
jiffies_to_msecs(jiffies - start));
return;
@@ -253,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port)
return false;
}
- lspcon->desc_valid = intel_dp_read_desc(dp);
+ intel_dp_read_desc(dp);
DRM_DEBUG_KMS("Success: LSPCON init\n");
return true;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 9ca4dc4d2378..8b942ef2b3ec 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -91,12 +91,11 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
- enum intel_display_power_domain power_domain;
u32 tmp;
bool ret;
- power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ encoder->power_domain))
return false;
ret = false;
@@ -114,7 +113,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
ret = true;
out:
- intel_display_power_put(dev_priv, power_domain);
+ intel_display_power_put(dev_priv, encoder->power_domain);
return ret;
}
@@ -1066,6 +1065,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_LVDS;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->cloneable = 0;
if (HAS_PCH_SPLIT(dev_priv))
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index c787fc4e6eb9..92e461c68385 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -178,7 +178,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
{
bool result = false;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
table->size = ARRAY_SIZE(skylake_mocs_table);
table->table = skylake_mocs_table;
result = true;
@@ -191,7 +191,7 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
"Platform that should have a MOCS table does not.\n");
}
- /* WaDisableSkipCaching:skl,bxt,kbl */
+ /* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN9(dev_priv)) {
int i;
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
static int emit_mocs_control_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ring *ring = req->ring;
enum intel_engine_id engine = req->engine->id;
unsigned int index;
- int ret;
+ u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
for (index = 0; index < table->size; index++) {
- intel_ring_emit_reg(ring, mocs_register(engine, index));
- intel_ring_emit(ring, table->table[index].control_value);
+ *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+ *cs++ = table->table[index].control_value;
}
/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
* that value to all the remaining entries.
*/
for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
- intel_ring_emit_reg(ring, mocs_register(engine, index));
- intel_ring_emit(ring, table->table[0].control_value);
+ *cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+ *cs++ = table->table[0].control_value;
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
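The two loops above first program the table's defined entries and then replicate entry 0 into every remaining hardware slot, so no MOCS register is left undefined. The fill pattern on its own (entry count assumed, standing in for GEN9_NUM_MOCS_ENTRIES):

#include <stdint.h>
#include <stdio.h>

#define NUM_HW_ENTRIES 62 /* stands in for GEN9_NUM_MOCS_ENTRIES */

static void fill_mocs(uint32_t *out, const uint32_t *table,
		      unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		out[i] = table[i]; /* defined entries */
	for (; i < NUM_HW_ENTRIES; i++)
		out[i] = table[0]; /* default the rest to entry 0 */
}

int main(void)
{
	const uint32_t table[3] = { 0x11, 0x22, 0x33 };
	uint32_t out[NUM_HW_ENTRIES];

	fill_mocs(out, table, 3);
	printf("entry 10 = %#x\n", out[10]); /* 0x11 */
	return 0;
}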
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
const struct drm_i915_mocs_table *table)
{
- struct intel_ring *ring = req->ring;
unsigned int i;
- int ret;
+ u32 *cs;
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+ *cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
for (i = 0; i < table->size/2; i++) {
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
}
if (table->size & 0x01) {
/* Odd table size - 1 left over */
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 2 * i, 0);
i++;
}
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
* they are reserved by the hardware.
*/
for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
- intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
- intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+ *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+ *cs++ = l3cc_combine(table, 0, 0);
}
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4a862a358c70..441c01466384 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -434,6 +434,7 @@ int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
{
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
struct opregion_asle *asle = dev_priv->opregion.asle;
struct drm_device *dev = &dev_priv->drm;
@@ -458,8 +459,10 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
* only one).
*/
DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
- for_each_intel_connector(dev, connector)
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter)
intel_panel_set_backlight_acpi(connector, bclp, 255);
+ drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -701,6 +704,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
int display_index[16] = {};
@@ -714,7 +718,8 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
- for_each_intel_connector(&dev_priv->drm, connector) {
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
u32 device_id, type;
device_id = acpi_display_type(connector);
@@ -729,6 +734,7 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
set_did(opregion, i, device_id);
i++;
}
+ drm_connector_list_iter_end(&conn_iter);
DRM_DEBUG_KMS("%d outputs detected\n", i);
@@ -745,6 +751,7 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int i = 0;
/*
@@ -757,11 +764,13 @@ static void intel_setup_cadls(struct drm_i915_private *dev_priv)
* Note that internal panels should be at the front of the connector
* list already, ensuring they're not left out.
*/
- for_each_intel_connector(&dev_priv->drm, connector) {
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
if (i >= ARRAY_SIZE(opregion->acpi->cadl))
break;
opregion->acpi->cadl[i++] = connector->acpi_device_id;
}
+ drm_connector_list_iter_end(&conn_iter);
/* If fewer than 8 active devices, the list must be null terminated */
if (i < ARRAY_SIZE(opregion->acpi->cadl))
@@ -1061,16 +1070,5 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
return -ENODEV;
}
- /*
- * FIXME On Dell XPS 13 9350 the OpRegion panel type (0) gives us
- * low vswing for eDP, whereas the VBT panel type (2) gives us normal
- * vswing instead. Low vswing results in some display flickers, so
- * let's simply ignore the OpRegion panel type on SKL for now.
- */
- if (IS_SKYLAKE(dev_priv)) {
- DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
- return -ENODEV;
- }
-
return ret - 1;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..2e0c56ed22bb 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
- int ret;
+ u32 *cs;
WARN_ON(overlay->active);
WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 4);
- if (ret) {
- i915_add_request_no_flush(req);
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs)) {
+ i915_add_request(req);
+ return PTR_ERR(cs);
}
overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
if (IS_I830(dev_priv))
i830_overlay_clock_gating(dev_priv, false);
- ring = req->ring;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
- intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+ *cs++ = overlay->flip_addr | OFC_UPDATE;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return intel_overlay_do_wait_request(overlay, req, NULL);
}
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
{
struct drm_i915_private *dev_priv = overlay->i915;
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
u32 flip_addr = overlay->flip_addr;
- u32 tmp;
- int ret;
+ u32 tmp, *cs;
WARN_ON(!overlay->active);
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 2);
- if (ret) {
- i915_add_request_no_flush(req);
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs)) {
+ i915_add_request(req);
+ return PTR_ERR(cs);
}
- ring = req->ring;
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_advance(ring);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, vma);
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
static int intel_overlay_off(struct intel_overlay *overlay)
{
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
- u32 flip_addr = overlay->flip_addr;
- int ret;
+ u32 *cs, flip_addr = overlay->flip_addr;
WARN_ON(!overlay->active);
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 6);
- if (ret) {
- i915_add_request_no_flush(req);
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs)) {
+ i915_add_request(req);
+ return PTR_ERR(cs);
}
- ring = req->ring;
-
/* wait for overlay to go idle */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
/* turn overlay off */
- intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
- intel_ring_emit(ring, flip_addr);
- intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
intel_overlay_flip_prepare(overlay, NULL);
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
+ u32 *cs;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
/* synchronous slowpath */
struct drm_i915_gem_request *req;
- struct intel_ring *ring;
req = alloc_request(overlay);
if (IS_ERR(req))
return PTR_ERR(req);
- ret = intel_ring_begin(req, 2);
- if (ret) {
- i915_add_request_no_flush(req);
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs)) {
+ i915_add_request(req);
+ return PTR_ERR(cs);
}
- ring = req->ring;
- intel_ring_emit(ring,
- MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
ret = intel_overlay_do_wait_request(overlay, req,
intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 1a6ff26dea20..cb50c527401f 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1315,7 +1315,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_PINEVIEW(dev_priv))
clock = KHz(dev_priv->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk_freq);
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
}
@@ -1333,7 +1333,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
if (IS_G4X(dev_priv))
clock = KHz(dev_priv->rawclk_freq);
else
- clock = KHz(dev_priv->cdclk_freq);
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
}
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 5aa524e32df7..9fd9c70baeed 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -80,7 +80,7 @@ static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
static int pipe_crc_data_count(struct intel_pipe_crc *pipe_crc)
{
- assert_spin_locked(&pipe_crc->lock);
+ lockdep_assert_held(&pipe_crc->lock);
return CIRC_CNT(pipe_crc->head, pipe_crc->tail,
INTEL_PIPE_CRC_ENTRIES_NR);
}
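
The assert_spin_locked() -> lockdep_assert_held() conversions recur throughout this patch. assert_spin_locked() pays for a runtime read of the lock word and only proves that somebody holds the lock; lockdep_assert_held() proves the current context holds it and compiles to nothing when CONFIG_LOCKDEP is off. A userspace analogue of the stronger check; checked_lock and its helpers are invented for illustration:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct checked_lock {
	pthread_mutex_t mu;
	pthread_t owner;
	bool owned;
};

static void checked_lock_acquire(struct checked_lock *l)
{
	pthread_mutex_lock(&l->mu);
	l->owner = pthread_self();
	l->owned = true;
}

static void checked_lock_release(struct checked_lock *l)
{
	l->owned = false;
	pthread_mutex_unlock(&l->mu);
}

/* like lockdep_assert_held(): the *caller* must be the holder */
static void checked_lock_assert_held(struct checked_lock *l)
{
	assert(l->owned && pthread_equal(l->owner, pthread_self()));
}

int main(void)
{
	struct checked_lock l = { .mu = PTHREAD_MUTEX_INITIALIZER };

	checked_lock_acquire(&l);
	checked_lock_assert_held(&l);	/* passes: we hold it */
	checked_lock_release(&l);
	return 0;
}
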
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 249623d45be0..aece0ff88a5d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -65,12 +65,12 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN8_CONFIG0,
I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
- /* WaEnableChickenDCPR:skl,bxt,kbl */
+ /* WaEnableChickenDCPR:skl,bxt,kbl,glk */
I915_WRITE(GEN8_CHICKEN_DCPR_1,
I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
- /* WaFbcWakeMemOn:skl,bxt,kbl */
+ /* WaFbcWakeMemOn:skl,bxt,kbl,glk */
I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS |
DISP_FBC_MEMORY_WAKE);
@@ -99,9 +99,31 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv)
* Wa: Backlight PWM may stop in the asserted state, causing backlight
* to stay fully on.
*/
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
- PWM1_GATING_DIS | PWM2_GATING_DIS);
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ PWM1_GATING_DIS | PWM2_GATING_DIS);
+}
+
+static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
+{
+ gen9_init_clock_gating(dev_priv);
+
+ /*
+ * WaDisablePWMClockGating:glk
+ * Backlight PWM may stop in the asserted state, causing backlight
+ * to stay fully on.
+ */
+ I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
+ PWM1_GATING_DIS | PWM2_GATING_DIS);
+
+ /* WaDDIIOTimeout:glk */
+ if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
+ u32 val = I915_READ(CHICKEN_MISC_2);
+ val &= ~(GLK_CL0_PWR_DOWN |
+ GLK_CL1_PWR_DOWN |
+ GLK_CL2_PWR_DOWN);
+ I915_WRITE(CHICKEN_MISC_2, val);
+ }
}
static void i915_pineview_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -355,6 +377,8 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl
return false;
}
+ trace_intel_memory_cxsr(dev_priv, was_enabled, enable);
+
DRM_DEBUG_KMS("memory self-refresh is %s (was %s)\n",
enableddisabled(enable),
enableddisabled(was_enabled));
@@ -393,15 +417,15 @@ static const int pessimal_latency_ns = 5000;
#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \
((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8))
-static int vlv_get_fifo_size(struct intel_plane *plane)
+static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- int sprite0_start, sprite1_start, size;
-
- if (plane->id == PLANE_CURSOR)
- return 63;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ int sprite0_start, sprite1_start;
- switch (plane->pipe) {
+ switch (pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
@@ -422,26 +446,21 @@ static int vlv_get_fifo_size(struct intel_plane *plane)
sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20);
break;
default:
- return 0;
- }
-
- switch (plane->id) {
- case PLANE_PRIMARY:
- size = sprite0_start;
- break;
- case PLANE_SPRITE0:
- size = sprite1_start - sprite0_start;
- break;
- case PLANE_SPRITE1:
- size = 512 - 1 - sprite1_start;
- break;
- default:
- return 0;
+ MISSING_CASE(pipe);
+ return;
}
- DRM_DEBUG_KMS("%s FIFO size: %d\n", plane->base.name, size);
+ fifo_state->plane[PLANE_PRIMARY] = sprite0_start;
+ fifo_state->plane[PLANE_SPRITE0] = sprite1_start - sprite0_start;
+ fifo_state->plane[PLANE_SPRITE1] = 511 - sprite1_start;
+ fifo_state->plane[PLANE_CURSOR] = 63;
- return size;
+ DRM_DEBUG_KMS("Pipe %c FIFO size: %d/%d/%d/%d\n",
+ pipe_name(pipe),
+ fifo_state->plane[PLANE_PRIMARY],
+ fifo_state->plane[PLANE_SPRITE0],
+ fifo_state->plane[PLANE_SPRITE1],
+ fifo_state->plane[PLANE_CURSOR]);
}
static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, int plane)
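
vlv_get_fifo_size() now decodes the DSPARB split points into the per-pipe vlv_fifo_state instead of returning a per-plane answer: the primary plane owns the FIFO below sprite0_start, sprite0 the span up to sprite1_start, sprite1 the rest of the 511 usable entries, and the cursor a fixed 63-entry FIFO of its own. A sketch of the decode with assumed split-point values:

#include <stdio.h>

int main(void)
{
	const int fifo_size = 511;	/* 512 entries, top entry unusable */
	int sprite0_start = 256;	/* assumed value decoded from DSPARB */
	int sprite1_start = 384;	/* assumed value decoded from DSPARB2 */

	int primary = sprite0_start;
	int sprite0 = sprite1_start - sprite0_start;
	int sprite1 = fifo_size - sprite1_start;
	int cursor  = 63;		/* fixed-size dedicated cursor FIFO */

	printf("FIFO split: %d/%d/%d/%d\n", primary, sprite0, sprite1, cursor);
	return 0;
}
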
@@ -871,6 +890,8 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
enum pipe pipe;
for_each_pipe(dev_priv, pipe) {
+ trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm);
+
I915_WRITE(VLV_DDL(pipe),
(wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) |
(wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) |
@@ -941,12 +962,6 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv,
#undef FW_WM_VLV
-enum vlv_wm_level {
- VLV_WM_LEVEL_PM2,
- VLV_WM_LEVEL_PM5,
- VLV_WM_LEVEL_DDR_DVFS,
-};
-
/* latency must be in 0.1us units. */
static unsigned int vlv_wm_method2(unsigned int pixel_rate,
unsigned int pipe_htotal,
@@ -1017,71 +1032,114 @@ static uint16_t vlv_compute_wm_level(const struct intel_crtc_state *crtc_state,
return min_t(int, wm, USHRT_MAX);
}
-static void vlv_compute_fifo(struct intel_crtc *crtc)
+static bool vlv_need_sprite0_fifo_workaround(unsigned int active_planes)
{
- struct drm_device *dev = crtc->base.dev;
- struct vlv_wm_state *wm_state = &crtc->wm_state;
- struct intel_plane *plane;
- unsigned int total_rate = 0;
- const int fifo_size = 512 - 1;
+ return (active_planes & (BIT(PLANE_SPRITE0) |
+ BIT(PLANE_SPRITE1))) == BIT(PLANE_SPRITE1);
+}
+
+static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ const struct vlv_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
+ struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
+ unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight32(active_planes);
+ const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
+ int sprite0_fifo_extra = 0;
+ unsigned int total_rate;
+ enum plane_id plane_id;
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- struct intel_plane_state *state =
- to_intel_plane_state(plane->base.state);
+ /*
+ * When enabling sprite0 after sprite1 has already been enabled
+ * we tend to get an underrun unless sprite0 already has some
+	 * FIFO space allocated. Hence we always allocate at least one
+ * cacheline for sprite0 whenever sprite1 is enabled.
+ *
+ * All other plane enable sequences appear immune to this problem.
+ */
+ if (vlv_need_sprite0_fifo_workaround(active_planes))
+ sprite0_fifo_extra = 1;
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
- continue;
+ total_rate = raw->plane[PLANE_PRIMARY] +
+ raw->plane[PLANE_SPRITE0] +
+ raw->plane[PLANE_SPRITE1] +
+ sprite0_fifo_extra;
- if (state->base.visible) {
- wm_state->num_active_planes++;
- total_rate += state->base.fb->format->cpp[0];
- }
- }
+ if (total_rate > fifo_size)
+ return -EINVAL;
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- struct intel_plane_state *state =
- to_intel_plane_state(plane->base.state);
- unsigned int rate;
+ if (total_rate == 0)
+ total_rate = 1;
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR) {
- plane->wm.fifo_size = 63;
- continue;
- }
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ unsigned int rate;
- if (!state->base.visible) {
- plane->wm.fifo_size = 0;
+ if ((active_planes & BIT(plane_id)) == 0) {
+ fifo_state->plane[plane_id] = 0;
continue;
}
- rate = state->base.fb->format->cpp[0];
- plane->wm.fifo_size = fifo_size * rate / total_rate;
- fifo_left -= plane->wm.fifo_size;
+ rate = raw->plane[plane_id];
+ fifo_state->plane[plane_id] = fifo_size * rate / total_rate;
+ fifo_left -= fifo_state->plane[plane_id];
}
- fifo_extra = DIV_ROUND_UP(fifo_left, wm_state->num_active_planes ?: 1);
+ fifo_state->plane[PLANE_SPRITE0] += sprite0_fifo_extra;
+ fifo_left -= sprite0_fifo_extra;
+
+ fifo_state->plane[PLANE_CURSOR] = 63;
+
+ fifo_extra = DIV_ROUND_UP(fifo_left, num_active_planes ?: 1);
/* spread the remainder evenly */
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
int plane_extra;
if (fifo_left == 0)
break;
- if (plane->base.type == DRM_PLANE_TYPE_CURSOR)
- continue;
-
- /* give it all to the first plane if none are active */
- if (plane->wm.fifo_size == 0 &&
- wm_state->num_active_planes)
+ if ((active_planes & BIT(plane_id)) == 0)
continue;
plane_extra = min(fifo_extra, fifo_left);
- plane->wm.fifo_size += plane_extra;
+ fifo_state->plane[plane_id] += plane_extra;
fifo_left -= plane_extra;
}
- WARN_ON(fifo_left != 0);
+ WARN_ON(active_planes != 0 && fifo_left != 0);
+
+ /* give it all to the first plane if none are active */
+ if (active_planes == 0) {
+ WARN_ON(fifo_left != fifo_size);
+ fifo_state->plane[PLANE_PRIMARY] = fifo_left;
+ }
+
+ return 0;
+}
+
+static int vlv_num_wm_levels(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->wm.max_level + 1;
+}
+
+/* mark all levels starting from 'level' as invalid */
+static void vlv_invalidate_wms(struct intel_crtc *crtc,
+ struct vlv_wm_state *wm_state, int level)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ for (; level < vlv_num_wm_levels(dev_priv); level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ wm_state->wm[level].plane[plane_id] = USHRT_MAX;
+
+ wm_state->sr[level].cursor = USHRT_MAX;
+ wm_state->sr[level].plane = USHRT_MAX;
+ }
}
static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
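
The new vlv_compute_fifo() divides the 511 usable entries among the active planes in proportion to their PM2-level raw watermarks, spreads the integer-division remainder evenly, and can now fail with -EINVAL when demand exceeds the FIFO. A self-contained model of that split, with made-up rates and the sprite0 workaround and cursor handling left out:

#include <stdio.h>

#define NPLANES 3	/* primary, sprite0, sprite1; cursor is separate */

int main(void)
{
	const int fifo_size = 511;
	int rate[NPLANES] = { 4, 2, 0 };	/* assumed PM2 raw watermarks */
	int alloc[NPLANES] = { 0 };
	int total = 0, active = 0, left = fifo_size;

	for (int i = 0; i < NPLANES; i++) {
		total += rate[i];
		active += rate[i] != 0;
	}
	if (!active)
		return 0;	/* the kernel hands the whole FIFO to primary */
	if (total > fifo_size)
		return 1;	/* kernel: -EINVAL, no valid split exists */

	/* proportional share for every active plane */
	for (int i = 0; i < NPLANES; i++) {
		if (rate[i]) {
			alloc[i] = fifo_size * rate[i] / total;
			left -= alloc[i];
		}
	}

	/* spread the rounding remainder evenly over the active planes */
	int extra = (left + active - 1) / active;
	for (int i = 0; i < NPLANES && left; i++) {
		if (!rate[i])
			continue;

		int give = extra < left ? extra : left;
		alloc[i] += give;
		left -= give;
	}

	printf("split %d/%d/%d, left %d\n", alloc[0], alloc[1], alloc[2], left);
	return 0;
}
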
@@ -1092,144 +1150,230 @@ static u16 vlv_invert_wm_value(u16 wm, u16 fifo_size)
return fifo_size - wm;
}
-static void vlv_invert_wms(struct intel_crtc *crtc)
+/*
+ * Starting from 'level', set all higher
+ * levels to 'value' in the "raw" watermarks.
+ */
+static bool vlv_raw_plane_wm_set(struct intel_crtc_state *crtc_state,
+ int level, enum plane_id plane_id, u16 value)
{
- struct vlv_wm_state *wm_state = &crtc->wm_state;
- int level;
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ int num_levels = vlv_num_wm_levels(dev_priv);
+ bool dirty = false;
- for (level = 0; level < wm_state->num_levels; level++) {
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const int sr_fifo_size =
- INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
- struct intel_plane *plane;
+ for (; level < num_levels; level++) {
+ struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
- wm_state->sr[level].plane =
- vlv_invert_wm_value(wm_state->sr[level].plane,
- sr_fifo_size);
- wm_state->sr[level].cursor =
- vlv_invert_wm_value(wm_state->sr[level].cursor,
- 63);
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- wm_state->wm[level].plane[plane->id] =
- vlv_invert_wm_value(wm_state->wm[level].plane[plane->id],
- plane->wm.fifo_size);
- }
+ dirty |= raw->plane[plane_id] != value;
+ raw->plane[plane_id] = value;
}
+
+ return dirty;
}
-static void vlv_compute_wm(struct intel_crtc *crtc)
+static bool vlv_plane_wm_compute(struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct vlv_wm_state *wm_state = &crtc->wm_state;
- struct intel_plane *plane;
+ struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
+ enum plane_id plane_id = plane->id;
+ int num_levels = vlv_num_wm_levels(to_i915(plane->base.dev));
int level;
+ bool dirty = false;
- memset(wm_state, 0, sizeof(*wm_state));
+ if (!plane_state->base.visible) {
+ dirty |= vlv_raw_plane_wm_set(crtc_state, 0, plane_id, 0);
+ goto out;
+ }
+
+ for (level = 0; level < num_levels; level++) {
+ struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ int wm = vlv_compute_wm_level(crtc_state, plane_state, level);
+ int max_wm = plane_id == PLANE_CURSOR ? 63 : 511;
+
+ if (wm > max_wm)
+ break;
+
+ dirty |= raw->plane[plane_id] != wm;
+ raw->plane[plane_id] = wm;
+ }
+
+ /* mark all higher levels as invalid */
+ dirty |= vlv_raw_plane_wm_set(crtc_state, level, plane_id, USHRT_MAX);
+
+out:
+ if (dirty)
+ DRM_DEBUG_KMS("%s wms: [0]=%d,[1]=%d,[2]=%d\n",
+ plane->base.name,
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM5].plane[plane_id],
+ crtc_state->wm.vlv.raw[VLV_WM_LEVEL_DDR_DVFS].plane[plane_id]);
+
+ return dirty;
+}
- wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- wm_state->num_levels = dev_priv->wm.max_level + 1;
+static bool vlv_plane_wm_is_valid(const struct intel_crtc_state *crtc_state,
+ enum plane_id plane_id, int level)
+{
+ const struct vlv_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
- wm_state->num_active_planes = 0;
+ return raw->plane[plane_id] <= fifo_state->plane[plane_id];
+}
- vlv_compute_fifo(crtc);
+static bool vlv_crtc_wm_is_valid(const struct intel_crtc_state *crtc_state, int level)
+{
+ return vlv_plane_wm_is_valid(crtc_state, PLANE_PRIMARY, level) &&
+ vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE0, level) &&
+ vlv_plane_wm_is_valid(crtc_state, PLANE_SPRITE1, level) &&
+ vlv_plane_wm_is_valid(crtc_state, PLANE_CURSOR, level);
+}
- if (wm_state->num_active_planes != 1)
- wm_state->cxsr = false;
+static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->base.state);
+ struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int num_active_planes = hweight32(crtc_state->active_planes &
+ ~BIT(PLANE_CURSOR));
+ bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base);
+ struct intel_plane_state *plane_state;
+ struct intel_plane *plane;
+ enum plane_id plane_id;
+ int level, ret, i;
+ unsigned int dirty = 0;
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- struct intel_plane_state *state =
+ for_each_intel_plane_in_state(state, plane, plane_state, i) {
+ const struct intel_plane_state *old_plane_state =
to_intel_plane_state(plane->base.state);
- int level;
- if (!state->base.visible)
+ if (plane_state->base.crtc != &crtc->base &&
+ old_plane_state->base.crtc != &crtc->base)
continue;
- /* normal watermarks */
- for (level = 0; level < wm_state->num_levels; level++) {
- int wm = vlv_compute_wm_level(crtc->config, state, level);
- int max_wm = plane->wm.fifo_size;
+ if (vlv_plane_wm_compute(crtc_state, plane_state))
+ dirty |= BIT(plane->id);
+ }
- /* hack */
- if (WARN_ON(level == 0 && wm > max_wm))
- wm = max_wm;
+ /*
+ * DSPARB registers may have been reset due to the
+ * power well being turned off. Make sure we restore
+ * them to a consistent state even if no primary/sprite
+ * planes are initially active.
+ */
+ if (needs_modeset)
+ crtc_state->fifo_changed = true;
- if (wm > max_wm)
- break;
+ if (!dirty)
+ return 0;
- wm_state->wm[level].plane[plane->id] = wm;
- }
+ /* cursor changes don't warrant a FIFO recompute */
+ if (dirty & ~BIT(PLANE_CURSOR)) {
+ const struct intel_crtc_state *old_crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ const struct vlv_fifo_state *old_fifo_state =
+ &old_crtc_state->wm.vlv.fifo_state;
+
+ ret = vlv_compute_fifo(crtc_state);
+ if (ret)
+ return ret;
- wm_state->num_levels = level;
+ if (needs_modeset ||
+ memcmp(old_fifo_state, fifo_state,
+ sizeof(*fifo_state)) != 0)
+ crtc_state->fifo_changed = true;
+ }
- if (!wm_state->cxsr)
- continue;
+ /* initially allow all levels */
+ wm_state->num_levels = vlv_num_wm_levels(dev_priv);
+ /*
+ * Note that enabling cxsr with no primary/sprite planes
+ * enabled can wedge the pipe. Hence we only allow cxsr
+ * with exactly one enabled primary/sprite plane.
+ */
+ wm_state->cxsr = crtc->pipe != PIPE_C && num_active_planes == 1;
- /* maxfifo watermarks */
- if (plane->id == PLANE_CURSOR) {
- for (level = 0; level < wm_state->num_levels; level++)
- wm_state->sr[level].cursor =
- wm_state->wm[level].plane[PLANE_CURSOR];
- } else {
- for (level = 0; level < wm_state->num_levels; level++)
- wm_state->sr[level].plane =
- max(wm_state->sr[level].plane,
- wm_state->wm[level].plane[plane->id]);
+ for (level = 0; level < wm_state->num_levels; level++) {
+ const struct vlv_pipe_wm *raw = &crtc_state->wm.vlv.raw[level];
+ const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1;
+
+ if (!vlv_crtc_wm_is_valid(crtc_state, level))
+ break;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ wm_state->wm[level].plane[plane_id] =
+ vlv_invert_wm_value(raw->plane[plane_id],
+ fifo_state->plane[plane_id]);
}
- }
- /* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < dev_priv->wm.max_level + 1; level++) {
- memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
- memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
+ wm_state->sr[level].plane =
+ vlv_invert_wm_value(max3(raw->plane[PLANE_PRIMARY],
+ raw->plane[PLANE_SPRITE0],
+ raw->plane[PLANE_SPRITE1]),
+ sr_fifo_size);
+
+ wm_state->sr[level].cursor =
+ vlv_invert_wm_value(raw->plane[PLANE_CURSOR],
+ 63);
}
- vlv_invert_wms(crtc);
+ if (level == 0)
+ return -EINVAL;
+
+ /* limit to only levels we can actually handle */
+ wm_state->num_levels = level;
+
+ /* invalidate the higher levels */
+ vlv_invalidate_wms(crtc, wm_state, level);
+
+ return 0;
}
#define VLV_FIFO(plane, value) \
(((value) << DSPARB_ ## plane ## _SHIFT_VLV) & DSPARB_ ## plane ## _MASK_VLV)
-static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
+static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_plane *plane;
- int sprite0_start = 0, sprite1_start = 0, fifo_size = 0;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ int sprite0_start, sprite1_start, fifo_size;
- for_each_intel_plane_on_crtc(dev, crtc, plane) {
- switch (plane->id) {
- case PLANE_PRIMARY:
- sprite0_start = plane->wm.fifo_size;
- break;
- case PLANE_SPRITE0:
- sprite1_start = sprite0_start + plane->wm.fifo_size;
- break;
- case PLANE_SPRITE1:
- fifo_size = sprite1_start + plane->wm.fifo_size;
- break;
- case PLANE_CURSOR:
- WARN_ON(plane->wm.fifo_size != 63);
- break;
- default:
- MISSING_CASE(plane->id);
- break;
- }
- }
+ if (!crtc_state->fifo_changed)
+ return;
- WARN_ON(fifo_size != 512 - 1);
+ sprite0_start = fifo_state->plane[PLANE_PRIMARY];
+ sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
+ fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- DRM_DEBUG_KMS("Pipe %c FIFO split %d / %d / %d\n",
- pipe_name(crtc->pipe), sprite0_start,
- sprite1_start, fifo_size);
+ WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
+ WARN_ON(fifo_size != 511);
- spin_lock(&dev_priv->wm.dsparb_lock);
+ trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
+
+ /*
+ * uncore.lock serves a double purpose here. It allows us to
+ * use the less expensive I915_{READ,WRITE}_FW() functions, and
+ * it protects the DSPARB registers from getting clobbered by
+ * parallel updates from multiple pipes.
+ *
+ * intel_pipe_update_start() has already disabled interrupts
+ * for us, so a plain spin_lock() is sufficient here.
+ */
+ spin_lock(&dev_priv->uncore.lock);
switch (crtc->pipe) {
uint32_t dsparb, dsparb2, dsparb3;
case PIPE_A:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = I915_READ_FW(DSPARB);
+ dsparb2 = I915_READ_FW(DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEA, 0xff) |
VLV_FIFO(SPRITEB, 0xff));
@@ -1241,12 +1385,12 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
dsparb2 |= (VLV_FIFO(SPRITEA_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEB_HI, sprite1_start >> 8));
- I915_WRITE(DSPARB, dsparb);
- I915_WRITE(DSPARB2, dsparb2);
+ I915_WRITE_FW(DSPARB, dsparb);
+ I915_WRITE_FW(DSPARB2, dsparb2);
break;
case PIPE_B:
- dsparb = I915_READ(DSPARB);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb = I915_READ_FW(DSPARB);
+ dsparb2 = I915_READ_FW(DSPARB2);
dsparb &= ~(VLV_FIFO(SPRITEC, 0xff) |
VLV_FIFO(SPRITED, 0xff));
@@ -1258,12 +1402,12 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
dsparb2 |= (VLV_FIFO(SPRITEC_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITED_HI, sprite1_start >> 8));
- I915_WRITE(DSPARB, dsparb);
- I915_WRITE(DSPARB2, dsparb2);
+ I915_WRITE_FW(DSPARB, dsparb);
+ I915_WRITE_FW(DSPARB2, dsparb2);
break;
case PIPE_C:
- dsparb3 = I915_READ(DSPARB3);
- dsparb2 = I915_READ(DSPARB2);
+ dsparb3 = I915_READ_FW(DSPARB3);
+ dsparb2 = I915_READ_FW(DSPARB2);
dsparb3 &= ~(VLV_FIFO(SPRITEE, 0xff) |
VLV_FIFO(SPRITEF, 0xff));
@@ -1275,20 +1419,60 @@ static void vlv_pipe_set_fifo_size(struct intel_crtc *crtc)
dsparb2 |= (VLV_FIFO(SPRITEE_HI, sprite0_start >> 8) |
VLV_FIFO(SPRITEF_HI, sprite1_start >> 8));
- I915_WRITE(DSPARB3, dsparb3);
- I915_WRITE(DSPARB2, dsparb2);
+ I915_WRITE_FW(DSPARB3, dsparb3);
+ I915_WRITE_FW(DSPARB2, dsparb2);
break;
default:
break;
}
- POSTING_READ(DSPARB);
+ POSTING_READ_FW(DSPARB);
- spin_unlock(&dev_priv->wm.dsparb_lock);
+ spin_unlock(&dev_priv->uncore.lock);
}
#undef VLV_FIFO
+static int vlv_compute_intermediate_wm(struct drm_device *dev,
+ struct intel_crtc *crtc,
+ struct intel_crtc_state *crtc_state)
+{
+ struct vlv_wm_state *intermediate = &crtc_state->wm.vlv.intermediate;
+ const struct vlv_wm_state *optimal = &crtc_state->wm.vlv.optimal;
+ const struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ int level;
+
+ intermediate->num_levels = min(optimal->num_levels, active->num_levels);
+ intermediate->cxsr = optimal->cxsr && active->cxsr &&
+ !crtc_state->disable_cxsr;
+
+ for (level = 0; level < intermediate->num_levels; level++) {
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ intermediate->wm[level].plane[plane_id] =
+ min(optimal->wm[level].plane[plane_id],
+ active->wm[level].plane[plane_id]);
+ }
+
+ intermediate->sr[level].plane = min(optimal->sr[level].plane,
+ active->sr[level].plane);
+ intermediate->sr[level].cursor = min(optimal->sr[level].cursor,
+ active->sr[level].cursor);
+ }
+
+ vlv_invalidate_wms(crtc, intermediate, level);
+
+ /*
+ * If our intermediate WM are identical to the final WM, then we can
+ * omit the post-vblank programming; only update if it's different.
+ */
+ if (memcmp(intermediate, optimal, sizeof(*intermediate)) != 0)
+ crtc_state->wm.need_postvbl_update = true;
+
+ return 0;
+}
+
static void vlv_merge_wm(struct drm_i915_private *dev_priv,
struct vlv_wm_values *wm)
{
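
vlv_compute_intermediate_wm() plugs VLV/CHV into the two-stage watermark scheme: between the plane update and the next vblank the hardware must satisfy both the outgoing and the incoming configuration, which for these inverted watermark values means taking the per-entry minimum over the levels both states support. A reduced model with one plane and three levels; the names are illustrative:

#include <stdint.h>

#define NLEVELS 3

struct wm_state {
	int num_levels;
	uint16_t wm[NLEVELS];	/* one plane, for brevity */
};

static void merge_intermediate(const struct wm_state *active,
			       const struct wm_state *optimal,
			       struct wm_state *inter)
{
	inter->num_levels = active->num_levels < optimal->num_levels ?
			    active->num_levels : optimal->num_levels;

	/* the per-entry minimum is safe for both configurations */
	for (int level = 0; level < inter->num_levels; level++)
		inter->wm[level] = active->wm[level] < optimal->wm[level] ?
				   active->wm[level] : optimal->wm[level];

	/* higher levels would be invalidated, as vlv_invalidate_wms() does */
}

int main(void)
{
	struct wm_state active  = { 3, { 500, 400, 300 } };
	struct wm_state optimal = { 2, { 450, 440, 0 } };
	struct wm_state inter = { 0 };

	merge_intermediate(&active, &optimal, &inter);
	/* inter is { 2, { 450, 400, ... } }: valid for old and new state */
	return 0;
}
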
@@ -1299,7 +1483,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
wm->cxsr = true;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- const struct vlv_wm_state *wm_state = &crtc->wm_state;
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
if (!crtc->active)
continue;
@@ -1318,14 +1502,11 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv,
wm->level = VLV_WM_LEVEL_PM2;
for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct vlv_wm_state *wm_state = &crtc->wm_state;
+ const struct vlv_wm_state *wm_state = &crtc->wm.active.vlv;
enum pipe pipe = crtc->pipe;
- if (!crtc->active)
- continue;
-
wm->pipe[pipe] = wm_state->wm[wm->level];
- if (wm->cxsr)
+ if (crtc->active && wm->cxsr)
wm->sr = wm_state->sr[wm->level];
wm->ddl[pipe].plane[PLANE_PRIMARY] = DDL_PRECISION_HIGH | 2;
@@ -1345,22 +1526,15 @@ static bool is_enabling(int old, int new, int threshold)
return old < threshold && new >= threshold;
}
-static void vlv_update_wm(struct intel_crtc *crtc)
+static void vlv_program_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
struct vlv_wm_values *old_wm = &dev_priv->wm.vlv;
struct vlv_wm_values new_wm = {};
- vlv_compute_wm(crtc);
vlv_merge_wm(dev_priv, &new_wm);
- if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0) {
- /* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(crtc);
-
+ if (memcmp(old_wm, &new_wm, sizeof(new_wm)) == 0)
return;
- }
if (is_disabling(old_wm->level, new_wm.level, VLV_WM_LEVEL_DDR_DVFS))
chv_set_memory_dvfs(dev_priv, false);
@@ -1371,17 +1545,8 @@ static void vlv_update_wm(struct intel_crtc *crtc)
if (is_disabling(old_wm->cxsr, new_wm.cxsr, true))
_intel_set_memory_cxsr(dev_priv, false);
- /* FIXME should be part of crtc atomic commit */
- vlv_pipe_set_fifo_size(crtc);
-
vlv_write_wm_values(dev_priv, &new_wm);
- DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, "
- "sprite0=%d, sprite1=%d, SR: plane=%d, cursor=%d level=%d cxsr=%d\n",
- pipe_name(pipe), new_wm.pipe[pipe].plane[PLANE_PRIMARY], new_wm.pipe[pipe].plane[PLANE_CURSOR],
- new_wm.pipe[pipe].plane[PLANE_SPRITE0], new_wm.pipe[pipe].plane[PLANE_SPRITE1],
- new_wm.sr.plane, new_wm.sr.cursor, new_wm.level, new_wm.cxsr);
-
if (is_enabling(old_wm->cxsr, new_wm.cxsr, true))
_intel_set_memory_cxsr(dev_priv, true);
@@ -1394,6 +1559,33 @@ static void vlv_update_wm(struct intel_crtc *crtc)
*old_wm = new_wm;
}
+static void vlv_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ crtc->wm.active.vlv = crtc_state->wm.vlv.intermediate;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
+static void vlv_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
+
+ if (!crtc_state->wm.need_postvbl_update)
+ return;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+ intel_crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ vlv_program_watermarks(dev_priv);
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
#define single_plane_enabled(mask) is_power_of_2(mask)
static void g4x_update_wm(struct intel_crtc *crtc)
@@ -1701,39 +1893,6 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
I915_WRITE(FW_BLC, fwater_lo);
}
-uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
-{
- uint32_t pixel_rate;
-
- pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
-
- /* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
- * adjust the pixel_rate here. */
-
- if (pipe_config->pch_pfit.enabled) {
- uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
- uint32_t pfit_size = pipe_config->pch_pfit.size;
-
- pipe_w = pipe_config->pipe_src_w;
- pipe_h = pipe_config->pipe_src_h;
-
- pfit_w = (pfit_size >> 16) & 0xFFFF;
- pfit_h = pfit_size & 0xFFFF;
- if (pipe_w < pfit_w)
- pipe_w = pfit_w;
- if (pipe_h < pfit_h)
- pipe_h = pfit_h;
-
- if (WARN_ON(!pfit_w || !pfit_h))
- return pixel_rate;
-
- pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
- pfit_w * pfit_h);
- }
-
- return pixel_rate;
-}
-
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t cpp, uint32_t latency)
{
@@ -1807,12 +1966,12 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate,
cpp = pstate->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
+ method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
if (!is_lp)
return method1;
- method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+ method2 = ilk_wm_method2(cstate->pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->base.dst),
cpp, mem_value);
@@ -1836,8 +1995,8 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate,
cpp = pstate->base.fb->format->cpp[0];
- method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value);
- method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+ method1 = ilk_wm_method1(cstate->pixel_rate, cpp, mem_value);
+ method2 = ilk_wm_method2(cstate->pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
drm_rect_width(&pstate->base.dst),
cpp, mem_value);
@@ -1852,20 +2011,24 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate,
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
+ int cpp;
+
/*
- * We treat the cursor plane as always-on for the purposes of watermark
- * calculation. Until we have two-stage watermark programming merged,
- * this is necessary to avoid flickering.
+ * Treat cursor with fb as always visible since cursor updates
+ * can happen faster than the vrefresh rate, and the current
+ * watermark code doesn't handle that correctly. Cursor updates
+ * which set/clear the fb or change the cursor size are going
+ * to get throttled by intel_legacy_cursor_update() to work
+ * around this problem with the watermark code.
*/
- int cpp = 4;
- int width = pstate->base.visible ? pstate->base.crtc_w : 64;
-
- if (!cstate->base.active)
+ if (!cstate->base.active || !pstate->base.fb)
return 0;
- return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
+ cpp = pstate->base.fb->format->cpp[0];
+
+ return ilk_wm_method2(cstate->pixel_rate,
cstate->base.adjusted_mode.crtc_htotal,
- width, cpp, mem_value);
+ pstate->base.crtc_w, cpp, mem_value);
}
/* Only for WM_LP. */
@@ -2095,7 +2258,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
return 0;
if (WARN_ON(adjusted_mode->crtc_clock == 0))
return 0;
- if (WARN_ON(intel_state->cdclk == 0))
+ if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
return 0;
/* The WM are computed with base on how long it takes to fill a single
@@ -2104,7 +2267,7 @@ hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
adjusted_mode->crtc_clock);
ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- intel_state->cdclk);
+ intel_state->cdclk.logical.cdclk);
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
PIPE_WM_LINETIME_TIME(linetime);
@@ -2173,7 +2336,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
}
/*
- * WaWmMemoryReadLatency:skl
+ * WaWmMemoryReadLatency:skl,glk
*
* punit doesn't take into account the read latency so we need
* to add 2us to the various latency levels we retrieve from the
@@ -2498,8 +2661,8 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
* If our intermediate WM are identical to the final WM, then we can
* omit the post-vblank programming; only update if it's different.
*/
- if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
- newstate->wm.need_postvbl_update = false;
+ if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) != 0)
+ newstate->wm.need_postvbl_update = true;
return 0;
}
@@ -2895,8 +3058,7 @@ static bool skl_needs_memory_bw_wa(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- if (IS_SKYLAKE(dev_priv) || IS_BROXTON(dev_priv) ||
- IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
return true;
return false;
@@ -3547,7 +3709,7 @@ static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cst
* Adjusted plane pixel rate is just the pipe's adjusted pixel rate
* with additional adjustments for plane-specific scaling.
*/
- adjusted_pixel_rate = ilk_pipe_pixel_rate(cstate);
+ adjusted_pixel_rate = cstate->pixel_rate;
downscale_amount = skl_plane_downscale_amount(pstate);
pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
@@ -3775,7 +3937,7 @@ skl_compute_linetime_wm(struct intel_crtc_state *cstate)
if (!cstate->base.active)
return 0;
- pixel_rate = ilk_pipe_pixel_rate(cstate);
+ pixel_rate = cstate->pixel_rate;
if (WARN_ON(pixel_rate == 0))
return 0;
@@ -3967,7 +4129,7 @@ pipes_modified(struct drm_atomic_state *state)
struct drm_crtc_state *cstate;
uint32_t i, ret = 0;
- for_each_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_crtc_in_state(state, crtc, cstate, i)
ret |= drm_crtc_mask(crtc);
return ret;
@@ -4110,7 +4272,7 @@ skl_print_wm_changes(const struct drm_atomic_state *state)
const struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
int i;
- for_each_crtc_in_state(state, crtc, cstate, i) {
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
const struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum pipe pipe = intel_crtc->pipe;
@@ -4152,7 +4314,7 @@ skl_compute_wm(struct drm_atomic_state *state)
* since any racing commits that want to update them would need to
* hold _all_ CRTC state mutexes.
*/
- for_each_crtc_in_state(state, crtc, cstate, i)
+ for_each_new_crtc_in_state(state, crtc, cstate, i)
changed = true;
if (!changed)
return 0;
@@ -4174,7 +4336,7 @@ skl_compute_wm(struct drm_atomic_state *state)
* should allow skl_update_pipe_wm() to return failure in cases where
* no suitable watermark values can be found.
*/
- for_each_crtc_in_state(state, crtc, cstate, i) {
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
struct intel_crtc_state *intel_cstate =
to_intel_crtc_state(cstate);
const struct skl_pipe_wm *old_pipe_wm =
@@ -4539,15 +4701,11 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
struct vlv_wm_values *wm = &dev_priv->wm.vlv;
- struct intel_plane *plane;
- enum pipe pipe;
+ struct intel_crtc *crtc;
u32 val;
vlv_read_wm_values(dev_priv, wm);
- for_each_intel_plane(dev, plane)
- plane->wm.fifo_size = vlv_get_fifo_size(plane);
-
wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
wm->level = VLV_WM_LEVEL_PM2;
@@ -4585,18 +4743,107 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
mutex_unlock(&dev_priv->rps.hw_lock);
}
- for_each_pipe(dev_priv, pipe)
+ for_each_intel_crtc(dev, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct vlv_wm_state *active = &crtc->wm.active.vlv;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+ int level;
+
+ vlv_get_fifo_size(crtc_state);
+
+ active->num_levels = wm->level + 1;
+ active->cxsr = wm->cxsr;
+
+ for (level = 0; level < active->num_levels; level++) {
+ struct vlv_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ active->sr[level].plane = wm->sr.plane;
+ active->sr[level].cursor = wm->sr.cursor;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ active->wm[level].plane[plane_id] =
+ wm->pipe[pipe].plane[plane_id];
+
+ raw->plane[plane_id] =
+ vlv_invert_wm_value(active->wm[level].plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+ }
+
+ for_each_plane_id_on_crtc(crtc, plane_id)
+ vlv_raw_plane_wm_set(crtc_state, level,
+ plane_id, USHRT_MAX);
+ vlv_invalidate_wms(crtc, active, level);
+
+ crtc_state->wm.vlv.optimal = *active;
+ crtc_state->wm.vlv.intermediate = *active;
+
DRM_DEBUG_KMS("Initial watermarks: pipe %c, plane=%d, cursor=%d, sprite0=%d, sprite1=%d\n",
pipe_name(pipe),
wm->pipe[pipe].plane[PLANE_PRIMARY],
wm->pipe[pipe].plane[PLANE_CURSOR],
wm->pipe[pipe].plane[PLANE_SPRITE0],
wm->pipe[pipe].plane[PLANE_SPRITE1]);
+ }
DRM_DEBUG_KMS("Initial watermarks: SR plane=%d, SR cursor=%d level=%d cxsr=%d\n",
wm->sr.plane, wm->sr.cursor, wm->level, wm->cxsr);
}
+void vlv_wm_sanitize(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+ struct intel_crtc *crtc;
+
+ mutex_lock(&dev_priv->wm.wm_mutex);
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc =
+ intel_get_crtc_for_pipe(dev_priv, plane->pipe);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
+ const struct vlv_fifo_state *fifo_state =
+ &crtc_state->wm.vlv.fifo_state;
+ enum plane_id plane_id = plane->id;
+ int level;
+
+ if (plane_state->base.visible)
+ continue;
+
+ for (level = 0; level < wm_state->num_levels; level++) {
+ struct vlv_pipe_wm *raw =
+ &crtc_state->wm.vlv.raw[level];
+
+ raw->plane[plane_id] = 0;
+
+ wm_state->wm[level].plane[plane_id] =
+ vlv_invert_wm_value(raw->plane[plane_id],
+ fifo_state->plane[plane_id]);
+ }
+ }
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ crtc_state->wm.vlv.intermediate =
+ crtc_state->wm.vlv.optimal;
+ crtc->wm.active.vlv = crtc_state->wm.vlv.optimal;
+ }
+
+ vlv_program_watermarks(dev_priv);
+
+ mutex_unlock(&dev_priv->wm.wm_mutex);
+}
+
void ilk_wm_get_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -4680,7 +4927,7 @@ bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
{
u16 rgvswctl;
- assert_spin_locked(&mchdev_lock);
+ lockdep_assert_held(&mchdev_lock);
rgvswctl = I915_READ16(MEMSWCTL);
if (rgvswctl & MEMCTL_CMD_STS) {
@@ -4891,6 +5138,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
break;
}
+	/* Once byt can survive dynamic sw freq adjustments without
+	 * hanging the system, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4911,6 +5164,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4921,8 +5175,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
{
u32 mask = 0;
+	/* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -4934,16 +5189,8 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
/* gen6_set_rps is called to update the frequency request, but should also be
* called when the range (min_delay and max_delay) is modified so that we can
* update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
-static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
+static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- return;
-
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq);
- WARN_ON(val < dev_priv->rps.min_freq);
-
/* min/max delay may still have been modified so be sure to
* write the limits value.
*/
@@ -4969,17 +5216,15 @@ static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val));
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
- POSTING_READ(GEN6_RPNSWREQ);
-
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+
+ return 0;
}
-static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
+static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
- WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- WARN_ON(val > dev_priv->rps.max_freq);
- WARN_ON(val < dev_priv->rps.min_freq);
+ int err;
if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
"Odd GPU freq value\n"))
@@ -4988,13 +5233,17 @@ static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
if (val != dev_priv->rps.cur_freq) {
- vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
- if (!IS_CHERRYVIEW(dev_priv))
- gen6_set_rps_thresholds(dev_priv, val);
+ err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
+ if (err)
+ return err;
+
+ gen6_set_rps_thresholds(dev_priv, val);
}
dev_priv->rps.cur_freq = val;
trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
+
+ return 0;
}
/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down
@@ -5007,6 +5256,7 @@ static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
{
u32 val = dev_priv->rps.idle_freq;
+ int err;
if (dev_priv->rps.cur_freq <= val)
return;
@@ -5024,26 +5274,37 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
* power than the render powerwell.
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
- valleyview_set_rps(dev_priv, val);
+ err = valleyview_set_rps(dev_priv, val);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
+
+ if (err)
+ DRM_ERROR("Failed to set RPS for idle\n");
}
void gen6_rps_busy(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ u8 freq;
+
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
gen6_enable_rps_interrupts(dev_priv);
- /* Ensure we start at the user's desired frequency */
- intel_set_rps(dev_priv,
- clamp(dev_priv->rps.cur_freq,
- dev_priv->rps.min_freq_softlimit,
- dev_priv->rps.max_freq_softlimit));
+ /* Use the user's desired frequency as a guide, but for better
+ * performance, jump directly to RPe as our starting frequency.
+ */
+ freq = max(dev_priv->rps.cur_freq,
+ dev_priv->rps.efficient_freq);
+
+ if (intel_set_rps(dev_priv,
+ clamp(freq,
+ dev_priv->rps.min_freq_softlimit,
+ dev_priv->rps.max_freq_softlimit)))
+			DRM_DEBUG_DRIVER("Failed to set busy frequency\n");
}
mutex_unlock(&dev_priv->rps.hw_lock);
}
@@ -5111,12 +5372,25 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
spin_unlock(&dev_priv->rps.client_lock);
}
-void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
+int intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
{
+ int err;
+
+ lockdep_assert_held(&dev_priv->rps.hw_lock);
+ GEM_BUG_ON(val > dev_priv->rps.max_freq);
+ GEM_BUG_ON(val < dev_priv->rps.min_freq);
+
+ if (!dev_priv->rps.enabled) {
+ dev_priv->rps.cur_freq = val;
+ return 0;
+ }
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- valleyview_set_rps(dev_priv, val);
+ err = valleyview_set_rps(dev_priv, val);
else
- gen6_set_rps(dev_priv, val);
+ err = gen6_set_rps(dev_priv, val);
+
+ return err;
}
static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
@@ -5294,7 +5568,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
- IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ IS_GEN9_BC(dev_priv)) {
u32 ddcc_status = 0;
if (sandybridge_pcode_read(dev_priv,
@@ -5307,7 +5581,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
dev_priv->rps.max_freq);
}
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
/* Store the frequency values in 16.66 MHZ units, which is
* the natural hardware unit for SKL
*/
@@ -5320,7 +5594,7 @@ static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
}
static void reset_rps(struct drm_i915_private *dev_priv,
- void (*set)(struct drm_i915_private *, u8))
+ int (*set)(struct drm_i915_private *, u8))
{
u8 freq = dev_priv->rps.cur_freq;
@@ -5328,7 +5602,8 @@ static void reset_rps(struct drm_i915_private *dev_priv,
dev_priv->rps.power = -1;
dev_priv->rps.cur_freq = -1;
- set(dev_priv, freq);
+ if (set(dev_priv, freq))
+ DRM_ERROR("Failed to reset RPS to initial values\n");
}
/* See the Gen9_GT_PM_Programming_Guide doc for the below */
@@ -5336,22 +5611,6 @@ static void gen9_enable_rps(struct drm_i915_private *dev_priv)
{
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- /*
- * BIOS could leave the Hw Turbo enabled, so need to explicitly
- * clear out the Control register just to avoid inconsitency
- * with debugfs interface, which will show Turbo as enabled
- * only and that is not expected by the User after adding the
- * WaGsvDisableTurbo. Apart from this there is no problem even
- * if the Turbo is left enabled in the Control register, as the
- * Up/Down interrupts would remain masked.
- */
- gen9_disable_rps(dev_priv);
- intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
- return;
- }
-
/* Program defaults and thresholds for RPS*/
I915_WRITE(GEN6_RC_VIDEO_FREQ,
GEN9_FREQUENCY(dev_priv->rps.rp1_freq));
@@ -5411,18 +5670,9 @@ static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
if (intel_enable_rc6() & INTEL_RC6_ENABLE)
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
- /* WaRsUseTimeoutMode:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN7_RC_CTL_TO_MODE |
- rc6_mask);
- } else {
- I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
- }
+ I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_HW_ENABLE | GEN6_RC_CTL_EI_MODE(1) | rc6_mask);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
@@ -5637,7 +5887,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
/* convert DDR frequency from units of 266.6MHz to bandwidth */
min_ring_freq = mult_frac(min_ring_freq, 8, 3);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
/* Convert GT frequency to 50 HZ units */
min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5655,7 +5905,7 @@ static void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
int diff = max_gpu_freq - gpu_freq;
unsigned int ia_freq = 0, ring_freq = 0;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
/*
* ring_freq = 2 * GT. ring_freq is in 100MHz units
* No floor required for ring frequency on SKL.
@@ -5739,6 +5989,17 @@ static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
return rp1;
}
+static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
+{
+ u32 val, rpn;
+
+ val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE);
+ rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) &
+ FB_GFX_FREQ_FUSE_MASK);
+
+ return rpn;
+}
+
static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
{
u32 val, rp1;
@@ -5975,8 +6236,7 @@ static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
dev_priv->rps.rp1_freq);
- /* PUnit validated range is only [RPe, RP0] */
- dev_priv->rps.min_freq = dev_priv->rps.efficient_freq;
+ dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
intel_gpu_freq(dev_priv, dev_priv->rps.min_freq),
dev_priv->rps.min_freq);
@@ -6132,7 +6392,8 @@ static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
/* allows RC6 residency counter to work */
I915_WRITE(VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
VLV_RENDER_RC0_COUNT_EN |
VLV_MEDIA_RC6_COUNT_EN |
VLV_RENDER_RC6_COUNT_EN));
@@ -6199,7 +6460,7 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
unsigned long now = jiffies_to_msecs(jiffies), diff1;
int i;
- assert_spin_locked(&mchdev_lock);
+ lockdep_assert_held(&mchdev_lock);
diff1 = now - dev_priv->ips.last_time1;
@@ -6304,7 +6565,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
u64 now, diff, diffms;
u32 count;
- assert_spin_locked(&mchdev_lock);
+ lockdep_assert_held(&mchdev_lock);
now = ktime_get_raw_ns();
diffms = now - dev_priv->ips.last_time2;
@@ -6349,7 +6610,7 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
unsigned long t, corr, state1, corr2, state2;
u32 pxvid, ext_v;
- assert_spin_locked(&mchdev_lock);
+ lockdep_assert_held(&mchdev_lock);
pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
@@ -6775,7 +7036,7 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
} else if (INTEL_GEN(dev_priv) >= 9) {
gen9_enable_rc6(dev_priv);
gen9_enable_rps(dev_priv);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
gen6_update_ring_freq(dev_priv);
} else if (IS_BROADWELL(dev_priv)) {
gen8_enable_rps(dev_priv);
@@ -6825,7 +7086,7 @@ static void __intel_autoenable_gt_powersave(struct work_struct *work)
rcs->init_context(req);
/* Mark the device busy, calling intel_enable_gt_powersave() */
- i915_add_request_no_flush(req);
+ i915_add_request(req);
unlock:
mutex_unlock(&dev_priv->drm.struct_mutex);
@@ -7260,6 +7521,14 @@ static void broadwell_init_clock_gating(struct drm_i915_private *dev_priv)
| KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
lpt_init_clock_gating(dev_priv);
+
+ /* WaDisableDopClockGating:bdw
+ *
+ * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP
+ * clock gating.
+ */
+ I915_WRITE(GEN6_UCGCTL1,
+ I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
}
static void haswell_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7656,8 +7925,10 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
dev_priv->display.init_clock_gating = skylake_init_clock_gating;
else if (IS_KABYLAKE(dev_priv))
dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
- else if (IS_GEN9_LP(dev_priv))
+ else if (IS_BROXTON(dev_priv))
dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ else if (IS_GEMINILAKE(dev_priv))
+ dev_priv->display.init_clock_gating = glk_init_clock_gating;
else if (IS_BROADWELL(dev_priv))
dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
else if (IS_CHERRYVIEW(dev_priv))
@@ -7727,7 +7998,11 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->display.update_wm = vlv_update_wm;
+ dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
+ dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
+ dev_priv->display.initial_watermarks = vlv_initial_watermarks;
+ dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
+ dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev_priv),
dev_priv->is_ddr3,
@@ -7916,10 +8191,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
* @timeout_base_ms: timeout for polling with preemption enabled
*
* Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
* The request is acknowledged once the PCODE reply dword equals @reply after
* applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 10 ms with
+ * for @timeout_base_ms and if this times out for another 50 ms with
* preemption disabled.
*
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
@@ -7955,14 +8230,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
* worst case) _and_ PCODE was busy for some reason even after a
* (queued) request and @timeout_base_ms delay. As a workaround retry
* the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 10ms to
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
* account for interrupts that could reduce the number of these
- * requests.
+ * requests, and for any quirks of the PCODE firmware that delays
+ * the request completion.
*/
DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
WARN_ON_ONCE(timeout_base_ms > 3);
preempt_disable();
- ret = wait_for_atomic(COND, 10);
+ ret = wait_for_atomic(COND, 50);
preempt_enable();
out:
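
The 10 ms -> 50 ms bump applies to the second phase of skl_pcode_request(): it first polls with preemption enabled for the caller's timeout_base_ms, and only when that fails does it spin with preemption disabled so the maximum number of requests reaches PCODE. A userspace sketch of that two-phase shape; try_request() is a stand-in stub and sleeping substitutes for the kernel's preemption control:

#include <stdbool.h>
#include <stddef.h>
#include <time.h>

/* stands in for one PCODE mailbox exchange; always-busy stub here */
static bool try_request(void)
{
	return false;
}

static long ms_since(const struct timespec *t0)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - t0->tv_sec) * 1000 +
	       (now.tv_nsec - t0->tv_nsec) / 1000000;
}

int poll_request(int timeout_base_ms)
{
	struct timespec t0, nap = { 0, 10 * 1000 * 1000 };	/* 10 ms */

	/* phase 1: poll politely, sleeping between attempts */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	do {
		if (try_request())
			return 0;
		nanosleep(&nap, NULL);
	} while (ms_since(&t0) < timeout_base_ms);

	/* phase 2: retry back-to-back for a fixed 50 ms budget */
	clock_gettime(CLOCK_MONOTONIC, &t0);
	do {
		if (try_request())
			return 0;
	} while (ms_since(&t0) < 50);

	return -1;	/* the kernel returns -ETIMEDOUT */
}

int main(void)
{
	return poll_request(3) ? 1 : 0;
}
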
@@ -8074,3 +8350,79 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
dev_priv->pm.suspended = false;
atomic_set(&dev_priv->pm.wakeref_count, 0);
}
+
+static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg)
+{
+ u32 lower, upper, tmp;
+
+	/* The registers accessed do not need forcewake. We borrow the
+	 * uncore lock to prevent concurrent access to the range reg.
+ */
+ spin_lock_irq(&dev_priv->uncore.lock);
+
+	/* vlv and chv residency counters are 40 bits in width.
+	 * With a control bit, we can choose between the upper and lower
+	 * 32-bit windows into this counter.
+ *
+ * Although we always use the counter in high-range mode elsewhere,
+ * userspace may attempt to read the value before rc6 is initialised,
+ * before we have set the default VLV_COUNTER_CONTROL value. So always
+ * set the high bit to be safe.
+ */
+ I915_WRITE_FW(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = I915_READ_FW(reg);
+ do {
+ tmp = upper;
+
+ I915_WRITE_FW(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ lower = I915_READ_FW(reg);
+
+ I915_WRITE_FW(VLV_COUNTER_CONTROL,
+ _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ upper = I915_READ_FW(reg);
+ } while (upper != tmp);
+
+ /* Everywhere else we always use VLV_COUNTER_CONTROL with the
+ * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set
+ * now.
+ */
+
+ spin_unlock_irq(&dev_priv->uncore.lock);
+
+ return lower | (u64)upper << 8;
+}
+
+u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
+ const i915_reg_t reg)
+{
+ u64 time_hw, units, div;
+
+ if (!intel_enable_rc6())
+ return 0;
+
+ intel_runtime_pm_get(dev_priv);
+
+ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ units = 1000;
+ div = dev_priv->czclk_freq;
+
+ time_hw = vlv_residency_raw(dev_priv, reg);
+ } else if (IS_GEN9_LP(dev_priv)) {
+ units = 1000;
+ div = 1200; /* 833.33ns */
+
+ time_hw = I915_READ(reg);
+ } else {
+ units = 128000; /* 1.28us */
+ div = 100000;
+
+ time_hw = I915_READ(reg);
+ }
+
+ intel_runtime_pm_put(dev_priv);
+ return DIV_ROUND_UP_ULL(time_hw * units, div);
+}
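
vlv_residency_raw() reads a 40-bit counter that is only visible through a switchable 32-bit window: the high window exposes bits 39:8, the low one bits 31:0, so the halves overlap and a carry could ripple between the two reads. Sampling upper/lower/upper and retrying until both upper samples agree rules out a torn value, and intel_rc6_residency_us() then scales ticks to microseconds as time_hw * units / div, rounded up. A standalone model of the stable-read loop, with read_window() simulating the banked register:

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_counter = 0x123456789aULL;	/* simulated 40-bit counter */

/* banked access: high window = bits 39:8, low window = bits 31:0 */
static uint32_t read_window(int high)
{
	return high ? (uint32_t)(hw_counter >> 8) : (uint32_t)hw_counter;
}

static uint64_t read_counter40(void)
{
	uint32_t lower, upper, tmp;

	upper = read_window(1);
	do {
		tmp = upper;
		lower = read_window(0);
		upper = read_window(1);
	} while (upper != tmp);	/* retry if a carry crossed the reads */

	/* the windows overlap in bits 31:8, so the OR is consistent */
	return lower | (uint64_t)upper << 8;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)read_counter40());
	return 0;
}
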
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 91bc4abf5d3e..d9b8d17c3fc6 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -39,7 +39,7 @@
*/
#define LEGACY_REQUEST_SIZE 200
-int __intel_ring_space(int head, int tail, int size)
+static int __intel_ring_space(int head, int tail, int size)
{
int space = head - tail;
if (space <= 0)
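
__intel_ring_space() can go static now that all callers live in this file; the computation is the classic circular-buffer gap from tail to head, adding one full lap when the writer has wrapped, minus a reserve so a completely full ring never degenerates into head == tail, which reads as empty. A standalone model; the 64-byte reserve is an assumed figure:

#include <stdio.h>

static int ring_space(int head, int tail, int size)
{
	int space = head - tail;

	if (space <= 0)
		space += size;	/* wrapped: add one full lap */

	return space - 64;	/* keep a reserve between tail and head */
}

int main(void)
{
	printf("%d\n", ring_space(0, 512, 4096));	/* 3584 - 64 = 3520 */
	printf("%d\n", ring_space(1024, 512, 4096));	/*  512 - 64 =  448 */
	return 0;
}
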
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
/*
* read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
cmd |= MI_INVALIDATE_ISP;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
+ u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
- int ret;
+ u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
@@ -332,37 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
gen7_render_ring_cs_stall_wa(req);
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
-
- return 0;
-}
-
-static int
-gen8_emit_pipe_control(struct drm_i915_gem_request *req,
- u32 flags, u32 scratch_addr)
-{
- struct intel_ring *ring = req->ring;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -370,12 +336,14 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
- int ret;
+ u32 flags;
+ u32 *cs;
- flags |= PIPE_CONTROL_CS_STALL;
+ cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ flags = PIPE_CONTROL_CS_STALL;
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
@@ -394,15 +362,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- ret = gen8_emit_pipe_control(req,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD,
- 0);
- if (ret)
- return ret;
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD,
+ 0);
}
- return gen8_emit_pipe_control(req, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags,
+ i915_ggtt_offset(req->engine->scratch) +
+ 2 * CACHELINE_BYTES);
+
+ intel_ring_advance(req, cs);
+
+ return 0;
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
@@ -657,41 +629,6 @@ static void reset_ring_common(struct intel_engine_cs *engine,
}
}
-static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
-{
- struct intel_ring *ring = req->ring;
- struct i915_workarounds *w = &req->i915->workarounds;
- int ret, i;
-
- if (w->count == 0)
- return 0;
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, (w->count * 2 + 2));
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
- for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
- }
- intel_ring_emit(ring, MI_NOOP);
-
- intel_ring_advance(ring);
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
-
- return 0;
-}
-
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
@@ -707,498 +644,6 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
return 0;
}
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
-{
- const u32 idx = dev_priv->workarounds.count;
-
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
-
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
-
- dev_priv->workarounds.count++;
-
- return 0;
-}
-
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
-
-#define WA_SET_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[engine->id];
-
- if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
- return -EINVAL;
-
- WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[engine->id]++;
-
- return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
-
- /* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
-
- /* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Use Force Non-Coherent whenever executing a 3D context. This is a
- * workaround for a possible hang in the unlikely event a TLB
- * invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:bdw,chv */
- /* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
-
- /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
- * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
- * polygons in the same 8x4 pixel/sample area to be processed without
- * stalling waiting for the earlier ones to write to Hierarchical Z
- * buffer."
- *
- * This optimization is off by default for BDW and CHV; turn it on.
- */
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-
- /* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK,
- GEN6_WIZ_HASHING_16x4);
-
- return 0;
-}
-
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* WaDisableDopClockGating:bdw */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
-
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
-}
-
-static int chv_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
-}
-
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
- /* WaDisableKillLogic:bxt,skl,kbl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
-
- /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
- /* WaDisablePartialInstShootdown:skl,bxt,kbl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
- /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_DG_MIRROR_FIX_ENABLE);
-
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
- /*
- * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
- * but we do that in the per-ctx batchbuffer as there is an issue
- * with this register not getting restored on ctx restore
- */
- }
-
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_GPGPU_PREEMPTION);
-
- /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
- /* WaDisablePartialResolveInVc:skl,bxt,kbl */
- WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
-
- /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
-
- /* WaDisableMaskBasedCammingInRCC:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
- PIXEL_MASK_CAMMING_DISABLE);
-
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
-
- /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
- * both tied to WaForceContextSaveRestoreNonCoherent
- * in some hsds for skl. We keep the tie for all gen9. The
- * documentation is a bit hazy and so we want to get common behaviour,
- * even though there is no clear evidence we would need both on kbl/bxt.
- * This area has been a source of system hangs so we play it safe
- * and mimic the skl regardless of what bspec says.
- *
- * Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
-
- /* WaForceEnableNonCoherent:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl,bxt,kbl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl */
- I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES));
-
- /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
-
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u8 vals[3] = { 0, 0, 0 };
- unsigned int i;
-
- for (i = 0; i < 3; i++) {
- u8 ss;
-
- /*
- * Only consider slices where one, and only one, subslice has 7
- * EUs
- */
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
- continue;
-
- /*
- * subslice_7eu[i] != 0 (because of the check above) and
- * ss_max == 4 (maximum number of subslices possible per slice)
- *
- * -> 0 <= ss <= 3;
- */
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
- vals[i] = 3 - ss;
- }
-
- if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
-
- /* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN9_IZ_HASHING_MASK(2) |
- GEN9_IZ_HASHING_MASK(1) |
- GEN9_IZ_HASHING_MASK(0),
- GEN9_IZ_HASHING(2, vals[2]) |
- GEN9_IZ_HASHING(1, vals[1]) |
- GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
-}
-
-static int skl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /*
- * Actual WA is to disable percontext preemption granularity control
- * until D0 which is the default case so this is equivalent to
- * !WaDisablePerCtxtPreemptionGranularityControl:skl
- */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableGafsUnitClkGating:skl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
- /* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(engine);
-}
-
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaStoreMultiplePTEenable:bxt */
- /* This is a requirement according to the hardware specification */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
- /* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
- }
-
- /* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
-
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
- GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
- }
-
- /* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
- }
-
- /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
- /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
- /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
- /* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
- }
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
- I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
-
- /* WaToEnableHwFixForPushConstHWBug:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaInPlaceDecompressionHang:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- return 0;
-}
-
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- WA_SET_BIT(GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
-
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
-
- /* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaDisableGafsUnitClkGating:kbl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- /* WaInPlaceDecompressionHang:kbl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WARN_ON(engine->id != RCS);
-
- dev_priv->workarounds.count = 0;
- dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
-
- if (IS_BROADWELL(dev_priv))
- return bdw_init_workarounds(engine);
-
- if (IS_CHERRYVIEW(dev_priv))
- return chv_init_workarounds(engine);
-
- if (IS_SKYLAKE(dev_priv))
- return skl_init_workarounds(engine);
-
- if (IS_BROXTON(dev_priv))
- return bxt_init_workarounds(engine);
-
- if (IS_KABYLAKE(dev_priv))
- return kbl_init_workarounds(engine);
-
- return 0;
-}
-
static int init_render_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -1257,7 +702,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
@@ -1268,23 +713,22 @@ static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- *out++ = lower_32_bits(gtt_offset);
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = 0;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = lower_32_bits(gtt_offset);
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = 0;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
@@ -1295,19 +739,19 @@ static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
@@ -1322,16 +766,16 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- *out++ = MI_LOAD_REGISTER_IMM(1);
- *out++ = i915_mmio_reg_offset(mbox_reg);
- *out++ = req->global_seqno;
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(mbox_reg);
+ *cs++ = req->global_seqno;
num_rings++;
}
}
if (num_rings & 1)
- *out++ = MI_NOOP;
+ *cs++ = MI_NOOP;
- return out;
+ return cs;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
@@ -1340,18 +784,19 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
+ GEM_BUG_ON(!IS_ALIGNED(request->tail, 8));
I915_WRITE_TAIL(request->engine, request->tail);
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
- *out++ = MI_STORE_DWORD_INDEX;
- *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *out++ = req->global_seqno;
- *out++ = MI_USER_INTERRUPT;
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *cs++ = req->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
+ GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
}
static const int i9xx_emit_breadcrumb_sz = 4;
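
i9xx_emit_breadcrumb() writes exactly four dwords, and i9xx_emit_breadcrumb_sz records that count so the space can be reserved when the request is allocated — emitting the breadcrumb itself must never fail. The new GEM_BUG_ONs additionally enforce that the resulting tail stays qword aligned. A standalone model of that contract (names are placeholders, and the dword payloads are elided rather than real MI opcode values):

#include <assert.h>
#include <stdint.h>

enum { BREADCRUMB_SZ = 4 };	/* mirrors i9xx_emit_breadcrumb_sz */

static uint32_t *emit_breadcrumb(uint32_t *cs, uint32_t seqno)
{
	*cs++ = 0;		/* MI_STORE_DWORD_INDEX            */
	*cs++ = 0;		/* I915_GEM_HWS_INDEX << shift     */
	*cs++ = seqno;		/* the value waiters will poll for */
	*cs++ = 0;		/* MI_USER_INTERRUPT               */
	return cs;
}

int main(void)
{
	uint32_t ring[BREADCRUMB_SZ];
	uint32_t *end = emit_breadcrumb(ring, 42);

	assert(end - ring == BREADCRUMB_SZ);	/* exactly _sz dwords */
	assert((end - ring) * sizeof(*ring) % 8 == 0); /* qword-aligned tail */
	return 0;
}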
@@ -1364,34 +809,33 @@ static const int i9xx_emit_breadcrumb_sz = 4;
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, out));
+ req->engine->semaphore.signal(req, cs));
}
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+ u32 *cs)
{
struct intel_engine_cs *engine = req->engine;
if (engine->semaphore.signal)
- out = engine->semaphore.signal(req, out);
-
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE);
- *out++ = intel_hws_seqno_address(engine);
- *out++ = 0;
- *out++ = req->global_seqno;
+ cs = engine->semaphore.signal(req, cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_hws_seqno_address(engine);
+ *cs++ = 0;
+ *cs++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- *out++ = 0;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
+ *cs++ = 0;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
+ GEM_BUG_ON(!IS_ALIGNED(req->tail, 8));
}
static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1408,24 +852,21 @@ static int
gen8_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
struct i915_hw_ppgtt *ppgtt;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->global_seqno);
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_advance(ring);
+ *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = signal->global_seqno;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ intel_ring_advance(req, cs);
/* When the !RCS engines idle waiting upon a semaphore, they lose their
* pagetables and we must reload them before executing the batch.
@@ -1442,28 +883,27 @@ static int
gen6_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
- int ret;
+ u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, dw1 | wait_mbox);
+ *cs++ = dw1 | wait_mbox;
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->global_seqno - 1);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = signal->global_seqno - 1;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
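
The signal->global_seqno - 1 above is deliberate: GEM's notion of "seqno passed" is mailbox >= seqno, but the MI_SEMAPHORE_MBOX comparison is strictly greater-than, so the wait is armed one below the target. For any seqno >= 1, mailbox > seqno - 1 is the same condition as mailbox >= seqno. A quick standalone check of the equivalence — modelling the comparison as a plain unsigned compare is an assumption of this sketch:

#include <assert.h>
#include <stdint.h>

/* Modelled hardware condition: proceed when mailbox > operand (strict). */
static int hw_passed(uint32_t mailbox, uint32_t operand)
{
	return mailbox > operand;
}

int main(void)
{
	uint32_t seqno = 100;	/* what GEM actually waits for */

	assert(hw_passed(100, seqno - 1));	/* 100 >= 100: proceed */
	assert(hw_passed(101, seqno - 1));	/* 101 >= 100: proceed */
	assert(!hw_passed(99, seqno - 1));	/* 99  <  100: keep waiting */
	return 0;
}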
@@ -1564,16 +1004,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1639,20 +1078,16 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 length,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- MI_BATCH_GTT |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
+ I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1666,59 +1101,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
- int ret;
+ u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(req, 6 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Blit the batch (which has now all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring,
- BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
/* ... and execute it. */
offset = cs_offset;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1728,17 +1160,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
@@ -1985,7 +1416,7 @@ intel_ring_free(struct intel_ring *ring)
kfree(ring);
}
-static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
+static int context_pin(struct i915_gem_context *ctx)
{
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
@@ -2000,7 +1431,8 @@ static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
return ret;
}
- return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
}
static int intel_ring_context_pin(struct intel_engine_cs *engine,
@@ -2013,15 +1445,10 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
if (ce->pin_count++)
return 0;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (ce->state) {
- unsigned int flags;
-
- flags = 0;
- if (i915_gem_context_is_kernel(ctx))
- flags = PIN_HIGH;
-
- ret = context_pin(ctx, flags);
+ ret = context_pin(ctx);
if (ret)
goto error;
}
@@ -2152,7 +1579,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
- int ret;
+ u32 *cs;
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
@@ -2165,9 +1592,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
GEM_BUG_ON(!request->engine->buffer);
request->ring = request->engine->buffer;
- ret = intel_ring_begin(request, 0);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, 0);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
@@ -2222,7 +1649,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
return 0;
}
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
struct intel_ring *ring = req->ring;
int remain_actual = ring->size - ring->tail;
@@ -2230,6 +1657,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
int bytes = num_dwords * sizeof(u32);
int total_bytes, wait_bytes;
bool need_wrap = false;
+ u32 *cs;
total_bytes = bytes + req->reserved_space;
@@ -2256,7 +1684,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
if (wait_bytes > ring->space) {
int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
- return ret;
+ return ERR_PTR(ret);
}
if (unlikely(need_wrap)) {
@@ -2269,31 +1697,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
ring->space -= remain_actual;
}
+ GEM_BUG_ON(ring->tail > ring->size - bytes);
+ cs = ring->vaddr + ring->tail;
+ ring->tail += bytes;
ring->space -= bytes;
GEM_BUG_ON(ring->space < 0);
- return 0;
+
+ return cs;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
int num_dwords =
- (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
- int ret;
+ (req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ u32 *cs;
if (num_dwords == 0)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- ret = intel_ring_begin(req, num_dwords);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
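
intel_ring_cacheline_align() pads the tail to the next CACHELINE_BYTES boundary with MI_NOOPs; the count is a full cacheline's worth of dwords minus however many dwords the tail already sits past the boundary, and zero if it is already aligned. Worked through standalone (CACHELINE_BYTES = 64 is assumed here):

#include <assert.h>
#include <stdint.h>

#define CACHELINE_BYTES 64

/* How many MI_NOOP dwords intel_ring_cacheline_align() must emit to
 * bring a given tail offset up to the next cacheline boundary. */
static int cacheline_pad_dwords(uint32_t tail)
{
	int partial = (tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);

	return partial ? CACHELINE_BYTES / sizeof(uint32_t) - partial : 0;
}

int main(void)
{
	assert(cacheline_pad_dwords(0x40) == 0);	/* already aligned */
	assert(cacheline_pad_dwords(0x68) == 6);	/* 40 bytes in -> 6 NOOPs */
	return 0;
}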
@@ -2337,13 +1768,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
@@ -2365,16 +1794,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
@@ -2383,23 +1812,21 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+ I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -2409,22 +1836,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -2434,20 +1858,17 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
@@ -2456,13 +1877,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
@@ -2483,17 +1902,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
*/
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring,
- I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
@@ -2633,6 +2051,16 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
}
}
+static void i9xx_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = i9xx_submit_request;
+}
+
+static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = gen6_bsd_submit_request;
+}
+
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
@@ -2663,7 +2091,8 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
engine->emit_breadcrumb_sz++;
}
}
- engine->submit_request = i9xx_submit_request;
+
+ engine->set_default_submission = i9xx_set_default_submission;
if (INTEL_GEN(dev_priv) >= 8)
engine->emit_bb_start = gen8_emit_bb_start;
@@ -2748,7 +2177,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
if (INTEL_GEN(dev_priv) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev_priv))
- engine->submit_request = gen6_bsd_submit_request;
+ engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
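
Rather than assigning engine->submit_request once at init, the engine now records how to restore its default via set_default_submission(); intel_engines_reset_default_submission() (declared in the header change below) can then undo a temporary override, for instance after a reset while a scheduler backend had replaced submit_request. A standalone model of the indirection — struct and function names here are illustrative, not the driver's:

#include <stdio.h>

struct engine {
	void (*submit_request)(struct engine *e);
	void (*set_default_submission)(struct engine *e);
};

static void default_submit(struct engine *e) { puts("legacy ring submit"); }
static void elsp_submit(struct engine *e)    { puts("execlists submit"); }

static void set_default(struct engine *e)
{
	e->submit_request = default_submit;
}

int main(void)
{
	struct engine e = { .set_default_submission = set_default };

	e.set_default_submission(&e);	/* driver init picks the default */
	e.submit_request(&e);

	e.submit_request = elsp_submit;	/* a scheduler takes over */
	e.submit_request(&e);

	e.set_default_submission(&e);	/* reset path restores the default */
	e.submit_request(&e);
	return 0;
}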
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 79c2b8d72322..847aea554464 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -5,6 +5,7 @@
#include "i915_gem_batch_pool.h"
#include "i915_gem_request.h"
#include "i915_gem_timeline.h"
+#include "i915_selftest.h"
#define I915_CMD_HASH_ORDER 9
@@ -144,6 +145,7 @@ struct intel_ring {
u32 head;
u32 tail;
+
int space;
int size;
int effective_size;
@@ -184,26 +186,26 @@ struct i915_ctx_workarounds {
struct drm_i915_gem_request;
struct intel_render_state;
+/*
+ * Engine IDs definitions.
+ * Keep instances of the same type engine together.
+ */
+enum intel_engine_id {
+ RCS = 0,
+ BCS,
+ VCS,
+ VCS2,
+#define _VCS(n) (VCS + (n))
+ VECS
+};
+
struct intel_engine_cs {
struct drm_i915_private *i915;
const char *name;
- enum intel_engine_id {
- RCS = 0,
- BCS,
- VCS,
- VCS2, /* Keep instances of the same type engine together. */
- VECS
- } id;
-#define _VCS(n) (VCS + (n))
+ enum intel_engine_id id;
unsigned int exec_id;
- enum intel_engine_hw_id {
- RCS_HW = 0,
- VCS_HW,
- BCS_HW,
- VECS_HW,
- VCS2_HW
- } hw_id;
- enum intel_engine_hw_id guc_id; /* XXX same as hw_id? */
+ unsigned int hw_id;
+ unsigned int guc_id;
u32 mmio_base;
unsigned int irq_shift;
struct intel_ring *buffer;
@@ -211,6 +213,11 @@ struct intel_engine_cs {
struct intel_render_state *render_state;
+ atomic_t irq_count;
+ unsigned long irq_posted;
+#define ENGINE_IRQ_BREADCRUMB 0
+#define ENGINE_IRQ_EXECLIST 1
+
/* Rather than have every client wait upon all user interrupts,
* with the herd waking after every interrupt and each doing the
* heavyweight seqno dance, we delegate the task (of being the
@@ -228,22 +235,22 @@ struct intel_engine_cs {
* the overhead of waking that client is much preferred.
*/
struct intel_breadcrumbs {
- struct task_struct __rcu *irq_seqno_bh; /* bh for interrupts */
- bool irq_posted;
+ spinlock_t irq_lock; /* protects irq_*; irqsafe */
+ struct intel_wait *irq_wait; /* oldest waiter by retirement */
- spinlock_t lock; /* protects the lists of requests; irqsafe */
+ spinlock_t rb_lock; /* protects the rb and wraps irq_lock */
struct rb_root waiters; /* sorted by retirement, priority */
struct rb_root signals; /* sorted by retirement */
- struct intel_wait *first_wait; /* oldest waiter by retirement */
struct task_struct *signaler; /* used for fence signalling */
- struct drm_i915_gem_request *first_signal;
+ struct drm_i915_gem_request __rcu *first_signal;
struct timer_list fake_irq; /* used after a missed interrupt */
struct timer_list hangcheck; /* detect missed interrupts */
- unsigned long timeout;
+ unsigned int hangcheck_interrupts;
+ bool irq_armed : 1;
bool irq_enabled : 1;
- bool rpm_wakelock : 1;
+ I915_SELFTEST_DECLARE(bool mock : 1);
} breadcrumbs;
/*
@@ -266,6 +273,8 @@ struct intel_engine_cs {
void (*reset_hw)(struct intel_engine_cs *engine,
struct drm_i915_gem_request *req);
+ void (*set_default_submission)(struct intel_engine_cs *engine);
+
int (*context_pin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
@@ -285,7 +294,7 @@ struct intel_engine_cs {
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS BIT(2)
void (*emit_breadcrumb)(struct drm_i915_gem_request *req,
- u32 *out);
+ u32 *cs);
int emit_breadcrumb_sz;
/* Pass the request to the hardware queue (e.g. directly into
@@ -368,7 +377,7 @@ struct intel_engine_cs {
/* AKA wait() */
int (*sync_to)(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal);
- u32 *(*signal)(struct drm_i915_gem_request *req, u32 *out);
+ u32 *(*signal)(struct drm_i915_gem_request *req, u32 *cs);
} semaphore;
/* Execlists */
@@ -376,13 +385,11 @@ struct intel_engine_cs {
struct execlist_port {
struct drm_i915_gem_request *request;
unsigned int count;
+ GEM_DEBUG_DECL(u32 context_id);
} execlist_port[2];
struct rb_root execlist_queue;
struct rb_node *execlist_first;
unsigned int fw_domains;
- bool disable_lite_restore_wa;
- bool preempt_wa;
- u32 ctx_desc_template;
/* Contexts are pinned whilst they are active on the GPU. The last
* context executed remains active whilst the GPU is idle - the
@@ -403,6 +410,9 @@ struct intel_engine_cs {
*/
struct i915_gem_context *legacy_active_context;
+ /* status_notifier: list of callbacks for context-switch changes */
+ struct atomic_notifier_head context_status_notifier;
+
struct intel_engine_hangcheck hangcheck;
bool needs_cmd_parser;
@@ -457,7 +467,11 @@ static inline void
intel_write_status_page(struct intel_engine_cs *engine,
int reg, u32 value)
{
+ mb();
+ clflush(&engine->status_page.page_addr[reg]);
engine->status_page.page_addr[reg] = value;
+ clflush(&engine->status_page.page_addr[reg]);
+ mb();
}
/*
@@ -492,21 +506,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
- *(uint32_t *)(ring->vaddr + ring->tail) = data;
- ring->tail += 4;
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
- intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
-
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
{
/* Dummy function.
*
@@ -516,16 +521,18 @@ static inline void intel_ring_advance(struct intel_ring *ring)
* reserved for the command packet (i.e. the value passed to
* intel_ring_begin()).
*/
+ GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
}
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline u32
+intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
{
/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
- u32 offset = addr - ring->vaddr;
- return offset & (ring->size - 1);
+ u32 offset = addr - req->ring->vaddr;
+ GEM_BUG_ON(offset > req->ring->size);
+ return offset & (req->ring->size - 1);
}
-int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ring *ring);
void intel_engine_init_global_seqno(struct intel_engine_cs *engine, u32 seqno);
@@ -558,10 +565,11 @@ static inline u32 intel_engine_last_submit(struct intel_engine_cs *engine)
* with serialising this hint with anything, so document it as
* a hint and nothing more.
*/
- return READ_ONCE(engine->timeline->last_submitted_seqno);
+ return READ_ONCE(engine->timeline->seqno);
}
int init_workarounds_ring(struct intel_engine_cs *engine);
+int intel_ring_workarounds_emit(struct drm_i915_gem_request *req);
void intel_engine_get_instdone(struct intel_engine_cs *engine,
struct intel_instdone *instdone);
@@ -583,12 +591,51 @@ static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
-static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
+static inline void intel_wait_init(struct intel_wait *wait,
+ struct drm_i915_gem_request *rq)
+{
+ wait->tsk = current;
+ wait->request = rq;
+}
+
+static inline void intel_wait_init_for_seqno(struct intel_wait *wait, u32 seqno)
{
wait->tsk = current;
wait->seqno = seqno;
}
+static inline bool intel_wait_has_seqno(const struct intel_wait *wait)
+{
+ return wait->seqno;
+}
+
+static inline bool
+intel_wait_update_seqno(struct intel_wait *wait, u32 seqno)
+{
+ wait->seqno = seqno;
+ return intel_wait_has_seqno(wait);
+}
+
+static inline bool
+intel_wait_update_request(struct intel_wait *wait,
+ const struct drm_i915_gem_request *rq)
+{
+ return intel_wait_update_seqno(wait, i915_gem_request_global_seqno(rq));
+}
+
+static inline bool
+intel_wait_check_seqno(const struct intel_wait *wait, u32 seqno)
+{
+ return wait->seqno == seqno;
+}
+
+static inline bool
+intel_wait_check_request(const struct intel_wait *wait,
+ const struct drm_i915_gem_request *rq)
+{
+ return intel_wait_check_seqno(wait, i915_gem_request_global_seqno(rq));
+}
+
static inline bool intel_wait_complete(const struct intel_wait *wait)
{
return RB_EMPTY_NODE(&wait->node);
@@ -599,38 +646,38 @@ bool intel_engine_add_wait(struct intel_engine_cs *engine,
void intel_engine_remove_wait(struct intel_engine_cs *engine,
struct intel_wait *wait);
void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
+void intel_engine_cancel_signaling(struct drm_i915_gem_request *request);
static inline bool intel_engine_has_waiter(const struct intel_engine_cs *engine)
{
- return rcu_access_pointer(engine->breadcrumbs.irq_seqno_bh);
+ return READ_ONCE(engine->breadcrumbs.irq_wait);
}
-static inline bool intel_engine_wakeup(const struct intel_engine_cs *engine)
+unsigned int intel_engine_wakeup(struct intel_engine_cs *engine);
+#define ENGINE_WAKEUP_WAITER BIT(0)
+#define ENGINE_WAKEUP_ASLEEP BIT(1)
+
+void __intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine);
+
+void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
+void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
+bool intel_breadcrumbs_busy(struct intel_engine_cs *engine);
+
+static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset)
{
- bool wakeup = false;
+ memset(batch, 0, 6 * sizeof(u32));
- /* Note that for this not to dangerously chase a dangling pointer,
- * we must hold the rcu_read_lock here.
- *
- * Also note that tsk is likely to be in !TASK_RUNNING state so an
- * early test for tsk->state != TASK_RUNNING before wake_up_process()
- * is unlikely to be beneficial.
- */
- if (intel_engine_has_waiter(engine)) {
- struct task_struct *tsk;
-
- rcu_read_lock();
- tsk = rcu_dereference(engine->breadcrumbs.irq_seqno_bh);
- if (tsk)
- wakeup = wake_up_process(tsk);
- rcu_read_unlock();
- }
+ batch[0] = GFX_OP_PIPE_CONTROL(6);
+ batch[1] = flags;
+ batch[2] = offset;
- return wakeup;
+ return batch + 6;
}
-void intel_engine_reset_breadcrumbs(struct intel_engine_cs *engine);
-void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
-unsigned int intel_breadcrumbs_busy(struct drm_i915_private *i915);
+bool intel_engine_is_idle(struct intel_engine_cs *engine);
+bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
+
+void intel_engines_reset_default_submission(struct drm_i915_private *i915);
#endif /* _INTEL_RINGBUFFER_H_ */
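
gen8_emit_pipe_control() has moved into the header as an inline that fills six dwords in any caller-supplied buffer and returns the advanced pointer — the memset() zeroes all six so only the meaningful slots need assignment — which lets callers chain PIPE_CONTROLs into a ring or a batch alike. Its use in gen8_render_ring_flush() above condenses to the following fragment (not independently compilable; flags and scratch_addr as in that function):

u32 *cs = intel_ring_begin(req, 12);	/* room for two 6-dword packets */
if (IS_ERR(cs))
	return PTR_ERR(cs);

/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL |
			    PIPE_CONTROL_STALL_AT_SCOREBOARD, 0);
/* the real flush, written immediately after the workaround packet */
cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
intel_ring_advance(req, cs);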
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index c0b7e95b5b8e..012bc358a33a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,19 +49,6 @@
* present for a given platform.
*/
-#define for_each_power_well(i, power_well, domain_mask, power_domains) \
- for (i = 0; \
- i < (power_domains)->power_well_count && \
- ((power_well) = &(power_domains)->power_wells[i]); \
- i++) \
- for_each_if ((power_well)->domains & (domain_mask))
-
-#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
- for (i = (power_domains)->power_well_count - 1; \
- i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
- i--) \
- for_each_if ((power_well)->domains & (domain_mask))
-
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
int power_well_id);
@@ -106,6 +93,16 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
return "PORT_DDI_D_LANES";
case POWER_DOMAIN_PORT_DDI_E_LANES:
return "PORT_DDI_E_LANES";
+ case POWER_DOMAIN_PORT_DDI_A_IO:
+ return "PORT_DDI_A_IO";
+ case POWER_DOMAIN_PORT_DDI_B_IO:
+ return "PORT_DDI_B_IO";
+ case POWER_DOMAIN_PORT_DDI_C_IO:
+ return "PORT_DDI_C_IO";
+ case POWER_DOMAIN_PORT_DDI_D_IO:
+ return "PORT_DDI_D_IO";
+ case POWER_DOMAIN_PORT_DDI_E_IO:
+ return "PORT_DDI_E_IO";
case POWER_DOMAIN_PORT_DSI:
return "PORT_DSI";
case POWER_DOMAIN_PORT_CRT:
@@ -198,19 +195,15 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
bool is_enabled;
- int i;
if (dev_priv->pm.suspended)
return false;
- power_domains = &dev_priv->power_domains;
-
is_enabled = true;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
+ for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain)) {
if (power_well->always_on)
continue;
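
The mechanical BIT() -> BIT_ULL() conversion below exists because the power-domain mask is widening to 64 bits: BIT(n) is 1UL << n, which on a 32-bit kernel cannot represent domains numbered 32 and up, whereas BIT_ULL(n) is 1ULL << n on every architecture. A standalone illustration of the hazard:

#include <assert.h>
#include <stdint.h>

#define BIT(n)		(1UL << (n))	/* unsigned long: width is arch dependent */
#define BIT_ULL(n)	(1ULL << (n))	/* always 64 bits wide */

int main(void)
{
	uint64_t mask = BIT_ULL(33) | BIT_ULL(1);

	assert(sizeof(BIT_ULL(0)) == 8);	/* safe for domains >= 32 */
	assert((mask >> 33) & 1);
	/* On an ILP32 target sizeof(BIT(0)) == 4, so BIT(33) would
	 * shift past the type's width -- undefined behaviour. */
	return 0;
}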
@@ -385,124 +378,121 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
}
#define SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_A_E_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_E_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-#define SKL_DISPLAY_DDI_D_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_MODESET) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_GMBUS) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DISPLAY_DC_OFF_POWER_DOMAINS ( \
BXT_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_MODESET) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_INIT))
-#define GLK_DISPLAY_DDI_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
+#define GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_IO))
+#define GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_IO))
+#define GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS ( \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_IO))
#define GLK_DPIO_CMN_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_A_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DPIO_CMN_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_A_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_B_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_AUX_C_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define GLK_DISPLAY_DC_OFF_POWER_DOMAINS ( \
GLK_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
- BIT(POWER_DOMAIN_MODESET) | \
- BIT(POWER_DOMAIN_AUX_A) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_MODESET) | \
+ BIT_ULL(POWER_DOMAIN_AUX_A) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
@@ -732,7 +722,7 @@ gen9_sanitize_power_well_requests(struct drm_i915_private *dev_priv,
* other request bits to be set, so WARN for those.
*/
if (power_well_id == SKL_DISP_PW_1 ||
- ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+ (IS_GEN9_BC(dev_priv) &&
power_well_id == SKL_DISP_PW_MISC_IO))
DRM_DEBUG_DRIVER("Clearing auxiliary requests for %s forced on "
"by DMC\n", power_well->name);
@@ -847,14 +837,14 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
-
- /*
- * We're taking over the BIOS, so clear any requests made by it since
- * the driver is in charge now.
- */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
+ /* Take over the request bit if set by BIOS. */
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST) {
+ if (!(I915_READ(HSW_PWR_WELL_DRIVER) &
+ HSW_PWR_WELL_ENABLE_REQUEST))
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+ }
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -881,10 +871,17 @@ static bool skl_power_well_enabled(struct drm_i915_private *dev_priv,
static void skl_power_well_sync_hw(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- skl_set_power_well(dev_priv, power_well, power_well->count > 0);
+ uint32_t mask = SKL_POWER_WELL_REQ(power_well->id);
+ uint32_t bios_req = I915_READ(HSW_PWR_WELL_BIOS);
+
+ /* Take over the request bit if set by BIOS. */
+ if (bios_req & mask) {
+ uint32_t drv_req = I915_READ(HSW_PWR_WELL_DRIVER);
- /* Clear any request made by BIOS as driver is taking over */
- I915_WRITE(HSW_PWR_WELL_BIOS, 0);
+ if (!(drv_req & mask))
+ I915_WRITE(HSW_PWR_WELL_DRIVER, drv_req | mask);
+ I915_WRITE(HSW_PWR_WELL_BIOS, bios_req & ~mask);
+ }
}
static void skl_power_well_enable(struct drm_i915_private *dev_priv,
@@ -917,16 +914,6 @@ static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
return bxt_ddi_phy_is_enabled(dev_priv, power_well->data);
}
-static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- if (power_well->count > 0)
- bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
- else
- bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
-}
-
-
static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
{
struct i915_power_well *power_well;
@@ -964,10 +951,12 @@ static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
+ struct intel_cdclk_state cdclk_state = {};
+
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- WARN_ON(dev_priv->cdclk_freq !=
- dev_priv->display.get_display_clock_speed(dev_priv));
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ WARN_ON(!intel_cdclk_state_compare(&dev_priv->cdclk.hw, &cdclk_state));
gen9_assert_dbuf_enabled(dev_priv);
@@ -987,13 +976,9 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
gen9_enable_dc5(dev_priv);
}
-static void gen9_dc_off_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
+static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
{
- if (power_well->count > 0)
- gen9_dc_off_power_well_enable(dev_priv, power_well);
- else
- gen9_dc_off_power_well_disable(dev_priv, power_well);
}
static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
@@ -1043,12 +1028,6 @@ out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1249,7 +1228,7 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
vlv_set_power_well(dev_priv, power_well, false);
}
-#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
+#define POWER_DOMAIN_MASK (GENMASK_ULL(POWER_DOMAIN_NUM - 1, 0))
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
int power_well_id)
@@ -1659,14 +1638,6 @@ out:
mutex_unlock(&dev_priv->rps.hw_lock);
}
-static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- WARN_ON_ONCE(power_well->id != PIPE_A);
-
- chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
-}
-
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1693,9 +1664,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
- int i;
- for_each_power_well(i, power_well, BIT(domain), power_domains)
+ for_each_power_domain_well(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_get(dev_priv, power_well);
power_domains->domain_use_count[domain]++;
@@ -1779,7 +1749,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
{
struct i915_power_domains *power_domains;
struct i915_power_well *power_well;
- int i;
power_domains = &dev_priv->power_domains;
@@ -1790,7 +1759,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
intel_display_power_domain_str(domain));
power_domains->domain_use_count[domain]--;
- for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
+ for_each_power_domain_well_rev(dev_priv, power_well, BIT_ULL(domain))
intel_power_well_put(dev_priv, power_well);
mutex_unlock(&power_domains->lock);
@@ -1799,134 +1768,134 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
}
#define HSW_DISPLAY_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define BDW_DISPLAY_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | /* DDI E */ \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DISPLAY_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_DSI) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_GMBUS) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_CRT) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_CRT) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DISPLAY_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PIPE_A) | \
- BIT(POWER_DOMAIN_PIPE_B) | \
- BIT(POWER_DOMAIN_PIPE_C) | \
- BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
- BIT(POWER_DOMAIN_TRANSCODER_A) | \
- BIT(POWER_DOMAIN_TRANSCODER_B) | \
- BIT(POWER_DOMAIN_TRANSCODER_C) | \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_PORT_DSI) | \
- BIT(POWER_DOMAIN_VGA) | \
- BIT(POWER_DOMAIN_AUDIO) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_GMBUS) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PIPE_A) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_A_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_PIPE_C_PANEL_FITTER) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_A) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \
+ BIT_ULL(POWER_DOMAIN_TRANSCODER_C) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DSI) | \
+ BIT_ULL(POWER_DOMAIN_VGA) | \
+ BIT_ULL(POWER_DOMAIN_AUDIO) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_GMBUS) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
- BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
- BIT(POWER_DOMAIN_AUX_B) | \
- BIT(POWER_DOMAIN_AUX_C) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_B_LANES) | \
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_C_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_B) | \
+ BIT_ULL(POWER_DOMAIN_AUX_C) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
- BIT(POWER_DOMAIN_PORT_DDI_D_LANES) | \
- BIT(POWER_DOMAIN_AUX_D) | \
- BIT(POWER_DOMAIN_INIT))
+ BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \
+ BIT_ULL(POWER_DOMAIN_AUX_D) | \
+ BIT_ULL(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
- .sync_hw = i9xx_always_on_power_well_noop,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = i9xx_always_on_power_well_noop,
.disable = i9xx_always_on_power_well_noop,
.is_enabled = i9xx_always_on_power_well_enabled,
};
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = chv_pipe_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
};
static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = chv_dpio_cmn_power_well_enable,
.disable = chv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
@@ -1956,14 +1925,14 @@ static const struct i915_power_well_ops skl_power_well_ops = {
};
static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
- .sync_hw = gen9_dc_off_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = gen9_dc_off_power_well_enable,
.disable = gen9_dc_off_power_well_disable,
.is_enabled = gen9_dc_off_power_well_enabled,
};
static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
- .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = bxt_dpio_cmn_power_well_enable,
.disable = bxt_dpio_cmn_power_well_disable,
.is_enabled = bxt_dpio_cmn_power_well_enabled,
@@ -1998,21 +1967,21 @@ static struct i915_power_well bdw_power_wells[] = {
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_display_power_well_enable,
.disable = vlv_display_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_dpio_cmn_power_well_enable,
.disable = vlv_dpio_cmn_power_well_disable,
.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
- .sync_hw = vlv_power_well_sync_hw,
+ .sync_hw = i9xx_power_well_sync_hw_noop,
.enable = vlv_power_well_enable,
.disable = vlv_power_well_disable,
.is_enabled = vlv_power_well_enabled,
@@ -2155,26 +2124,26 @@ static struct i915_power_well skl_power_wells[] = {
.id = SKL_DISP_PW_2,
},
{
- .name = "DDI A/E power well",
- .domains = SKL_DISPLAY_DDI_A_E_POWER_DOMAINS,
+ .name = "DDI A/E IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_A_E_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_A_E,
},
{
- .name = "DDI B power well",
- .domains = SKL_DISPLAY_DDI_B_POWER_DOMAINS,
+ .name = "DDI B IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
- .name = "DDI C power well",
- .domains = SKL_DISPLAY_DDI_C_POWER_DOMAINS,
+ .name = "DDI C IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
{
- .name = "DDI D power well",
- .domains = SKL_DISPLAY_DDI_D_POWER_DOMAINS,
+ .name = "DDI D IO power well",
+ .domains = SKL_DISPLAY_DDI_IO_D_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_D,
},
@@ -2287,20 +2256,20 @@ static struct i915_power_well glk_power_wells[] = {
.id = GLK_DISP_PW_AUX_C,
},
{
- .name = "DDI A power well",
- .domains = GLK_DISPLAY_DDI_A_POWER_DOMAINS,
+ .name = "DDI A IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_A_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = GLK_DISP_PW_DDI_A,
},
{
- .name = "DDI B power well",
- .domains = GLK_DISPLAY_DDI_B_POWER_DOMAINS,
+ .name = "DDI B IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_B_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_B,
},
{
- .name = "DDI C power well",
- .domains = GLK_DISPLAY_DDI_C_POWER_DOMAINS,
+ .name = "DDI C IO power well",
+ .domains = GLK_DISPLAY_DDI_IO_C_POWER_DOMAINS,
.ops = &skl_power_well_ops,
.id = SKL_DISP_PW_DDI_C,
},
@@ -2323,7 +2292,7 @@ static uint32_t get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
int requested_dc;
int max_dc;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
max_dc = 2;
mask = 0;
} else if (IS_GEN9_LP(dev_priv)) {
@@ -2386,7 +2355,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv,
i915.enable_dc);
- BUILD_BUG_ON(POWER_DOMAIN_NUM > 31);
+ BUILD_BUG_ON(POWER_DOMAIN_NUM > 64);
mutex_init(&power_domains->lock);
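
The BIT()→BIT_ULL() churn throughout this file and the relaxed BUILD_BUG_ON above exist because the power-domain count can now exceed 32, so domain masks must be 64 bits wide. A small standalone illustration (the domain index is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)     (1UL << (n))   /* only 32 bits wide on 32-bit kernels */
    #define BIT_ULL(n) (1ULL << (n))  /* always 64 bits wide */

    int main(void)
    {
            int domain = 35; /* hypothetical domain index above 31 */

            /* On a 32-bit build, BIT(35) would shift past the type width
             * (undefined behaviour); BIT_ULL() stays well defined. */
            uint64_t mask = BIT_ULL(domain);

            printf("domain mask = 0x%llx\n", (unsigned long long)mask);
            return 0;
    }
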
@@ -2398,7 +2367,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
set_power_wells(power_domains, hsw_power_wells);
} else if (IS_BROADWELL(dev_priv)) {
set_power_wells(power_domains, bdw_power_wells);
- } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ } else if (IS_GEN9_BC(dev_priv)) {
set_power_wells(power_domains, skl_power_wells);
} else if (IS_BROXTON(dev_priv)) {
set_power_wells(power_domains, bxt_power_wells);
@@ -2454,10 +2423,9 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
- int i;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
+ for_each_power_well(dev_priv, power_well) {
power_well->ops->sync_hw(dev_priv, power_well);
power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
power_well);
@@ -2722,7 +2690,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
* @resume: Called from resume code paths or not
*
* This function initializes the hardware power domain state and enables all
- * power domains using intel_display_set_init_power().
+ * power wells belonging to the INIT power domain. Power wells in the
+ * remaining domains are referenced or disabled during the
+ * modeset state HW readout. After that the reference count of each power well
+ * must match its HW enabled state, see intel_power_domains_verify_state().
*/
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
{
@@ -2730,7 +2701,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
power_domains->initializing = true;
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_GEN9_BC(dev_priv)) {
skl_display_core_init(dev_priv, resume);
} else if (IS_GEN9_LP(dev_priv)) {
bxt_display_core_init(dev_priv, resume);
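
The updated kernel-doc above describes a three-step contract; as pseudocode (the two power-domain calls are real i915 functions, the readout step is summarized in a comment, and the surrounding function is illustrative, not compilable outside the driver):

    static void example_load_sequence(struct drm_i915_private *dev_priv)
    {
            /* 1. Enable every well in the INIT domain, sync SW to HW. */
            intel_power_domains_init_hw(dev_priv, false);

            /* 2. ... modeset HW state readout runs here: it grabs
             *    references for wells that active outputs need and
             *    disables the ones BIOS left on ... */

            /* 3. Now every well's refcount must match its HW state. */
            intel_power_domains_verify_state(dev_priv);
    }
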
@@ -2769,12 +2740,92 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
if (!i915.disable_power_well)
intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+ if (IS_GEN9_BC(dev_priv))
skl_display_core_uninit(dev_priv);
else if (IS_GEN9_LP(dev_priv))
bxt_display_core_uninit(dev_priv);
}
+static void intel_power_domains_dump_info(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+
+ for_each_power_well(dev_priv, power_well) {
+ enum intel_display_power_domain domain;
+
+ DRM_DEBUG_DRIVER("%-25s %d\n",
+ power_well->name, power_well->count);
+
+ for_each_power_domain(domain, power_well->domains)
+ DRM_DEBUG_DRIVER(" %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
+ }
+}
+
+/**
+ * intel_power_domains_verify_state - verify the HW/SW state for all power wells
+ * @dev_priv: i915 device instance
+ *
+ * Verify that the reference count of each power well matches its HW enabled
+ * state and the total refcount of the domains it belongs to. This must be
+ * called after modeset HW state sanitization, which is responsible for
+ * acquiring reference counts for any power wells in use and disabling the
+ * ones left on by BIOS but not required by any active output.
+ */
+void intel_power_domains_verify_state(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ bool dump_domain_info;
+
+ mutex_lock(&power_domains->lock);
+
+ dump_domain_info = false;
+ for_each_power_well(dev_priv, power_well) {
+ enum intel_display_power_domain domain;
+ int domains_count;
+ bool enabled;
+
+ /*
+ * Power wells not belonging to any domain (like the MISC_IO
+ * and PW1 power wells) are under FW control, so ignore them,
+ * since their state can change asynchronously.
+ */
+ if (!power_well->domains)
+ continue;
+
+ enabled = power_well->ops->is_enabled(dev_priv, power_well);
+ if ((power_well->count || power_well->always_on) != enabled)
+ DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->name, power_well->count, enabled);
+
+ domains_count = 0;
+ for_each_power_domain(domain, power_well->domains)
+ domains_count += power_domains->domain_use_count[domain];
+
+ if (power_well->count != domains_count) {
+ DRM_ERROR("power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->name, power_well->count,
+ domains_count);
+ dump_domain_info = true;
+ }
+ }
+
+ if (dump_domain_info) {
+ static bool dumped;
+
+ if (!dumped) {
+ intel_power_domains_dump_info(dev_priv);
+ dumped = true;
+ }
+ }
+
+ mutex_unlock(&power_domains->lock);
+}
+
/**
* intel_runtime_pm_get - grab a runtime pm reference
* @dev_priv: i915 device instance
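
The new intel_power_domains_verify_state() checks two invariants per well. The same checks over a toy structure (a deliberate simplification of i915_power_well, for illustration only):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_well {
            const char *name;
            int count;         /* SW reference count */
            bool always_on;
            bool hw_enabled;   /* what ->is_enabled() reported */
            int domain_refs;   /* sum of use counts of all its domains */
    };

    static void toy_verify(const struct toy_well *w)
    {
            /* Invariant 1: refcount (or always-on) matches HW state. */
            if ((w->count || w->always_on) != w->hw_enabled)
                    printf("%s: state mismatch (refcount %d/enabled %d)\n",
                           w->name, w->count, w->hw_enabled);

            /* Invariant 2: well refcount equals the per-domain sum. */
            if (w->count != w->domain_refs)
                    printf("%s: refcount/domain refcount mismatch (%d/%d)\n",
                           w->name, w->count, w->domain_refs);
    }
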
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2ad13903a054..816a6f5a3fd9 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -2981,6 +2981,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
intel_encoder->type = INTEL_OUTPUT_SDVO;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = port;
drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
&intel_sdvo_enc_funcs, 0,
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 1a840bf92eea..7d971cb56116 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -60,8 +60,7 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
}
I915_WRITE(VLV_IOSF_ADDR, addr);
- if (!is_read)
- I915_WRITE(VLV_IOSF_DATA, *val);
+ I915_WRITE(VLV_IOSF_DATA, is_read ? 0 : *val);
I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
if (intel_wait_for_register(dev_priv,
@@ -74,7 +73,6 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
if (is_read)
*val = I915_READ(VLV_IOSF_DATA);
- I915_WRITE(VLV_IOSF_DATA, 0);
return 0;
}
@@ -93,14 +91,18 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr)
return val;
}
-void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
+int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val)
{
+ int err;
+
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
mutex_lock(&dev_priv->sb_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
+ err = vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
mutex_unlock(&dev_priv->sb_lock);
+
+ return err;
}
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg)
@@ -214,6 +216,7 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
}
I915_WRITE(SBI_ADDR, (reg << 16));
+ I915_WRITE(SBI_DATA, 0);
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
@@ -223,10 +226,15 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
if (intel_wait_for_register(dev_priv,
SBI_CTL_STAT,
- SBI_BUSY | SBI_RESPONSE_FAIL,
+ SBI_BUSY,
0,
100)) {
- DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
+ DRM_ERROR("timeout waiting for SBI to complete read\n");
+ return 0;
+ }
+
+ if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
+ DRM_ERROR("error during SBI read of reg %x\n", reg);
return 0;
}
@@ -258,10 +266,16 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
if (intel_wait_for_register(dev_priv,
SBI_CTL_STAT,
- SBI_BUSY | SBI_RESPONSE_FAIL,
+ SBI_BUSY,
0,
100)) {
- DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
+ DRM_ERROR("timeout waiting for SBI to complete write\n");
+ return;
+ }
+
+ if (I915_READ(SBI_CTL_STAT) & SBI_RESPONSE_FAIL) {
+ DRM_ERROR("error during SBI write of %x to reg %x\n",
+ value, reg);
return;
}
}
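
The SBI change above stops folding RESPONSE_FAIL into the busy-wait and instead classifies it after completion, so timeouts and NAKs get distinct error messages. The same split as a standalone sketch (every register helper here is a fake stand-in, not an i915 symbol):

    #include <errno.h>

    #define BUSY_BIT          (1u << 0)
    #define RESPONSE_FAIL_BIT (1u << 1)

    static unsigned int fake_stat;                 /* stand-in register */
    static unsigned int read_stat(void) { return fake_stat; }

    static int wait_for_clear(unsigned int bit)
    {
            /* A real implementation polls with a timeout. */
            return (read_stat() & bit) ? -ETIMEDOUT : 0;
    }

    static int sbi_transfer(void)
    {
            if (wait_for_clear(BUSY_BIT))
                    return -ETIMEDOUT;  /* engine never finished */

            if (read_stat() & RESPONSE_FAIL_BIT)
                    return -EIO;        /* finished, but the target NAKed */

            return 0;
    }
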
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 9ef54688872a..b931d0bd7a64 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -65,6 +65,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
1000 * adjusted_mode->crtc_htotal);
}
+#define VBLANK_EVASION_TIME_US 100
+
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
@@ -92,7 +94,8 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
vblank_start = DIV_ROUND_UP(vblank_start, 2);
/* FIXME needs to be calibrated sensibly */
- min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100);
+ min = vblank_start - intel_usecs_to_scanlines(adjusted_mode,
+ VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
local_irq_disable();
@@ -158,6 +161,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
int scanline_end = intel_get_crtc_scanline(crtc);
u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
ktime_t end_vbl_time = ktime_get();
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
if (work) {
work->flip_queued_vblank = end_vbl_count;
@@ -183,6 +187,9 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
local_irq_enable();
+ if (intel_vgpu_active(dev_priv))
+ return;
+
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
@@ -191,7 +198,12 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
crtc->debug.min_vbl, crtc->debug.max_vbl,
crtc->debug.scanline_start, scanline_end);
- }
+ } else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
+ VBLANK_EVASION_TIME_US)
+ DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ pipe_name(pipe),
+ ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
+ VBLANK_EVASION_TIME_US);
}
static void
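
The new VBLANK_EVASION_TIME_US constant feeds intel_usecs_to_scanlines(), which rounds usecs * pixel_clock / (1000 * htotal) up to whole scanlines. Worked standalone with made-up 1080p60 timings (the numbers are examples, not from the patch):

    #include <stdio.h>

    #define VBLANK_EVASION_TIME_US 100

    int main(void)
    {
            int crtc_clock = 148500;   /* pixel clock in kHz */
            int crtc_htotal = 2200;    /* pixels per scanline */
            int vblank_start = 1080;   /* first vblank scanline */

            /* DIV_ROUND_UP(us * clock, 1000 * htotal): 100us -> 7 lines */
            int scanlines = (VBLANK_EVASION_TIME_US * crtc_clock +
                             1000 * crtc_htotal - 1) / (1000 * crtc_htotal);

            printf("evade window: scanlines %d..%d\n",
                   vblank_start - scanlines, vblank_start - 1);
            return 0;
    }
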
@@ -218,22 +230,21 @@ skl_update_plane(struct drm_plane *drm_plane,
uint32_t y = plane_state->main.y;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
- plane_ctl = PLANE_CTL_ENABLE |
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE;
+ plane_ctl = PLANE_CTL_ENABLE;
+
+ if (!IS_GEMINILAKE(dev_priv)) {
+ plane_ctl |=
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE |
+ PLANE_CTL_PLANE_GAMMA_DISABLE;
+ }
plane_ctl |= skl_plane_ctl_format(fb->format->format);
plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
-
plane_ctl |= skl_plane_ctl_rotation(rotation);
- if (key->flags) {
- I915_WRITE(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE(PLANE_KEYMAX(pipe, plane_id), key->max_value);
- I915_WRITE(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
- }
-
if (key->flags & I915_SET_COLORKEY_DESTINATION)
plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
else if (key->flags & I915_SET_COLORKEY_SOURCE)
@@ -245,36 +256,50 @@ skl_update_plane(struct drm_plane *drm_plane,
crtc_w--;
crtc_h--;
- I915_WRITE(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
- I915_WRITE(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ if (IS_GEMINILAKE(dev_priv)) {
+ I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id),
+ PLANE_COLOR_PIPE_GAMMA_ENABLE |
+ PLANE_COLOR_PIPE_CSC_ENABLE |
+ PLANE_COLOR_PLANE_GAMMA_DISABLE);
+ }
+
+ if (key->flags) {
+ I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), key->max_value);
+ I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), key->channel_mask);
+ }
+
+ I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
+ I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
/* program plane scaler */
if (plane_state->scaler_id >= 0) {
int scaler_id = plane_state->scaler_id;
const struct intel_scaler *scaler;
- DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n",
- plane_id, PS_PLANE_SEL(plane_id));
-
scaler = &crtc_state->scaler_state.scalers[scaler_id];
- I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
- I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
- I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
- ((crtc_w + 1) << 16)|(crtc_h + 1));
+ I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane_id) | scaler->mode);
+ I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+ I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id),
+ ((crtc_w + 1) << 16)|(crtc_h + 1));
- I915_WRITE(PLANE_POS(pipe, plane_id), 0);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0);
} else {
- I915_WRITE(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
}
- I915_WRITE(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
- POSTING_READ(PLANE_SURF(pipe, plane_id));
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
+ I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
+ POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
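
Every plane hunk in this file follows the same shape: take uncore.lock once around the whole register burst and switch to the *_FW accessors, which skip the per-write forcewake and locking overhead. Condensed into one sketch (driver-internal symbols, shown for shape only, not compilable standalone):

    static void example_plane_commit(struct drm_i915_private *dev_priv,
                                     enum pipe pipe, enum plane_id plane_id,
                                     u32 ctl, u32 surf)
    {
            unsigned long irqflags;

            /* One lock for the whole burst instead of one per register. */
            spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

            I915_WRITE_FW(PLANE_CTL(pipe, plane_id), ctl);
            I915_WRITE_FW(PLANE_SURF(pipe, plane_id), surf);
            POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); /* flush writes */

            spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
    }
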
static void
@@ -285,11 +310,16 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(dplane);
enum plane_id plane_id = intel_plane->id;
enum pipe pipe = intel_plane->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE(PLANE_CTL(pipe, plane_id), 0);
+ I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+ POSTING_READ_FW(PLANE_SURF(pipe, plane_id));
- I915_WRITE(PLANE_SURF(pipe, plane_id), 0);
- POSTING_READ(PLANE_SURF(pipe, plane_id));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -312,23 +342,23 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
* Cb and Cr apparently come in as signed already, so no
* need for any offset. For Y we need to remove the offset.
*/
- I915_WRITE(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
- I915_WRITE(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
- I915_WRITE(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
- I915_WRITE(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
- I915_WRITE(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
- I915_WRITE(SPCSCC8(plane_id), SPCSC_C0(8263));
-
- I915_WRITE(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
- I915_WRITE(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
- I915_WRITE(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
-
- I915_WRITE(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
+ I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
+ I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
+ I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
+ I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
+ I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
+
+ I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
+ I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+ I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
+
+ I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
static void
@@ -354,6 +384,7 @@ vlv_update_plane(struct drm_plane *dplane,
uint32_t y = plane_state->base.src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
sprctl = SP_ENABLE;
@@ -415,6 +446,9 @@ vlv_update_plane(struct drm_plane *dplane,
if (rotation & DRM_REFLECT_X)
sprctl |= SP_MIRROR;
+ if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SP_SOURCE_KEY;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -433,33 +467,33 @@ vlv_update_plane(struct drm_plane *dplane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
- if (key->flags) {
- I915_WRITE(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE(SPKEYMAXVAL(pipe, plane_id), key->max_value);
- I915_WRITE(SPKEYMSK(pipe, plane_id), key->channel_mask);
- }
-
- if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SP_SOURCE_KEY;
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(intel_plane, fb->format->format);
- I915_WRITE(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
- I915_WRITE(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
+ if (key->flags) {
+ I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
+ I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
+ }
+ I915_WRITE_FW(SPSTRIDE(pipe, plane_id), fb->pitches[0]);
+ I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+ I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
else
- I915_WRITE(SPLINOFF(pipe, plane_id), linear_offset);
+ I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
+
+ I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
- I915_WRITE(SPCONSTALPHA(pipe, plane_id), 0);
+ I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
+ I915_WRITE_FW(SPSURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ POSTING_READ_FW(SPSURF(pipe, plane_id));
- I915_WRITE(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
- I915_WRITE(SPCNTR(pipe, plane_id), sprctl);
- I915_WRITE(SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ(SPSURF(pipe, plane_id));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -470,11 +504,16 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
struct intel_plane *intel_plane = to_intel_plane(dplane);
enum pipe pipe = intel_plane->pipe;
enum plane_id plane_id = intel_plane->id;
+ unsigned long irqflags;
- I915_WRITE(SPCNTR(pipe, plane_id), 0);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE(SPSURF(pipe, plane_id), 0);
- POSTING_READ(SPSURF(pipe, plane_id));
+ I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
+
+ I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+ POSTING_READ_FW(SPSURF(pipe, plane_id));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -499,6 +538,7 @@ ivb_update_plane(struct drm_plane *plane,
uint32_t y = plane_state->base.src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
sprctl = SPRITE_ENABLE;
@@ -545,6 +585,11 @@ ivb_update_plane(struct drm_plane *plane,
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -566,36 +611,35 @@ ivb_update_plane(struct drm_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
if (key->flags) {
- I915_WRITE(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE(SPRKEYMAX(pipe), key->max_value);
- I915_WRITE(SPRKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
+ I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+ I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
}
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- sprctl |= SPRITE_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- sprctl |= SPRITE_SOURCE_KEY;
-
- I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
+ I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
else if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
else
- I915_WRITE(SPRLINOFF(pipe), linear_offset);
+ I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
- I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (intel_plane->can_scale)
- I915_WRITE(SPRSCALE(pipe), sprscale);
- I915_WRITE(SPRCTL(pipe), sprctl);
- I915_WRITE(SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
- POSTING_READ(SPRSURF(pipe));
+ I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ I915_WRITE_FW(SPRCTL(pipe), sprctl);
+ I915_WRITE_FW(SPRSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ POSTING_READ_FW(SPRSURF(pipe));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -605,14 +649,19 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
+ unsigned long irqflags;
- I915_WRITE(SPRCTL(pipe), 0);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ I915_WRITE_FW(SPRCTL(pipe), 0);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
- I915_WRITE(SPRSCALE(pipe), 0);
+ I915_WRITE_FW(SPRSCALE(pipe), 0);
+
+ I915_WRITE_FW(SPRSURF(pipe), 0);
+ POSTING_READ_FW(SPRSURF(pipe));
- I915_WRITE(SPRSURF(pipe), 0);
- POSTING_READ(SPRSURF(pipe));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -637,6 +686,7 @@ ilk_update_plane(struct drm_plane *plane,
uint32_t y = plane_state->base.src.y1 >> 16;
uint32_t src_w = drm_rect_width(&plane_state->base.src) >> 16;
uint32_t src_h = drm_rect_height(&plane_state->base.src) >> 16;
+ unsigned long irqflags;
dvscntr = DVS_ENABLE;
@@ -678,6 +728,11 @@ ilk_update_plane(struct drm_plane *plane,
if (IS_GEN6(dev_priv))
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -698,31 +753,30 @@ ilk_update_plane(struct drm_plane *plane,
linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
if (key->flags) {
- I915_WRITE(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE(DVSKEYMAX(pipe), key->max_value);
- I915_WRITE(DVSKEYMSK(pipe), key->channel_mask);
+ I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
+ I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+ I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
}
- if (key->flags & I915_SET_COLORKEY_DESTINATION)
- dvscntr |= DVS_DEST_KEY;
- else if (key->flags & I915_SET_COLORKEY_SOURCE)
- dvscntr |= DVS_SOURCE_KEY;
-
- I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
- I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ I915_WRITE_FW(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (fb->modifier == I915_FORMAT_MOD_X_TILED)
- I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
else
- I915_WRITE(DVSLINOFF(pipe), linear_offset);
-
- I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE(DVSSCALE(pipe), dvsscale);
- I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_WRITE(DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
- POSTING_READ(DVSSURF(pipe));
+ I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
+
+ I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
+ I915_WRITE_FW(DVSSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ POSTING_READ_FW(DVSSURF(pipe));
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static void
@@ -732,13 +786,18 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE(DVSCNTR(pipe), 0);
+ I915_WRITE_FW(DVSCNTR(pipe), 0);
/* Disable the scaler */
- I915_WRITE(DVSSCALE(pipe), 0);
+ I915_WRITE_FW(DVSSCALE(pipe), 0);
+
+ I915_WRITE_FW(DVSSURF(pipe), 0);
+ POSTING_READ_FW(DVSSURF(pipe));
- I915_WRITE(DVSSURF(pipe), 0);
- POSTING_READ(DVSSURF(pipe));
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
static int
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index eb692e4ffe01..6ed1a3ce47b7 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1621,6 +1621,7 @@ intel_tv_init(struct drm_i915_private *dev_priv)
intel_connector_attach_encoder(intel_connector, intel_encoder);
intel_encoder->type = INTEL_OUTPUT_TVOUT;
+ intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER;
intel_encoder->port = PORT_NONE;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
intel_encoder->cloneable = 0;
diff --git a/drivers/gpu/drm/i915/intel_uc.c b/drivers/gpu/drm/i915/intel_uc.c
index c46bc8594f22..d15a7d9d4eb0 100644
--- a/drivers/gpu/drm/i915/intel_uc.c
+++ b/drivers/gpu/drm/i915/intel_uc.c
@@ -24,12 +24,167 @@
#include "i915_drv.h"
#include "intel_uc.h"
+#include <linux/firmware.h>
+
+/* Reset GuC, providing us with fresh state for both GuC and HuC. */
+static int __intel_uc_reset_hw(struct drm_i915_private *dev_priv)
+{
+ int ret;
+ u32 guc_status;
+
+ ret = intel_guc_reset(dev_priv);
+ if (ret) {
+ DRM_ERROR("GuC reset failed, ret = %d\n", ret);
+ return ret;
+ }
+
+ guc_status = I915_READ(GUC_STATUS);
+ WARN(!(guc_status & GS_MIA_IN_RESET),
+ "GuC status: 0x%x, MIA core expected to be in reset\n",
+ guc_status);
+
+ return ret;
+}
+
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv)
+{
+ if (!HAS_GUC(dev_priv)) {
+ if (i915.enable_guc_loading > 0 ||
+ i915.enable_guc_submission > 0)
+ DRM_INFO("Ignoring GuC options, no hardware\n");
+
+ i915.enable_guc_loading = 0;
+ i915.enable_guc_submission = 0;
+ return;
+ }
+
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_loading < 0)
+ i915.enable_guc_loading = HAS_GUC_UCODE(dev_priv);
+
+ /* Verify firmware version */
+ if (i915.enable_guc_loading) {
+ if (HAS_HUC_UCODE(dev_priv))
+ intel_huc_select_fw(&dev_priv->huc);
+
+ if (intel_guc_select_fw(&dev_priv->guc))
+ i915.enable_guc_loading = 0;
+ }
+
+ /* Can't enable GuC submission without GuC loaded */
+ if (!i915.enable_guc_loading)
+ i915.enable_guc_submission = 0;
+
+ /* A negative value means "use platform default" */
+ if (i915.enable_guc_submission < 0)
+ i915.enable_guc_submission = HAS_GUC_SCHED(dev_priv);
+}
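
intel_uc_sanitize_options() above resolves three interacting knobs: a negative value means "platform default", loading requires working firmware, and submission requires loading. The same cascade reduced to a toy function (the flags are stand-ins for the HAS_GUC_UCODE()/HAS_GUC_SCHED() checks):

    static void toy_sanitize(int *loading, int *submission,
                             int has_ucode, int has_sched)
    {
            if (*loading < 0)          /* negative: platform default */
                    *loading = has_ucode;

            if (!*loading)             /* can't submit without loading */
                    *submission = 0;

            if (*submission < 0)       /* negative: platform default */
                    *submission = has_sched;
    }
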
void intel_uc_init_early(struct drm_i915_private *dev_priv)
{
mutex_init(&dev_priv->guc.send_mutex);
}
+void intel_uc_init_fw(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->huc.fw.path)
+ intel_uc_prepare_fw(dev_priv, &dev_priv->huc.fw);
+
+ if (dev_priv->guc.fw.path)
+ intel_uc_prepare_fw(dev_priv, &dev_priv->guc.fw);
+}
+
+int intel_uc_init_hw(struct drm_i915_private *dev_priv)
+{
+ int ret, attempts;
+
+ /* GuC not enabled, nothing to do */
+ if (!i915.enable_guc_loading)
+ return 0;
+
+ gen9_reset_guc_interrupts(dev_priv);
+
+ /* We need to notify the GuC whenever we change the GGTT */
+ i915_ggtt_enable_guc(dev_priv);
+
+ if (i915.enable_guc_submission) {
+ ret = i915_guc_submission_init(dev_priv);
+ if (ret)
+ goto err;
+ }
+
+ /* WaEnableuKernelHeaderValidFix:skl */
+ /* WaEnableGuCBootHashCheckNotSet:skl,bxt,kbl */
+ if (IS_GEN9(dev_priv))
+ attempts = 3;
+ else
+ attempts = 1;
+
+ while (attempts--) {
+ /*
+ * Always reset the GuC just before (re)loading, so
+ * that the state and timing are fairly predictable
+ */
+ ret = __intel_uc_reset_hw(dev_priv);
+ if (ret)
+ goto err_submission;
+
+ intel_huc_init_hw(&dev_priv->huc);
+ ret = intel_guc_init_hw(&dev_priv->guc);
+ if (ret != -EAGAIN)
+ break;
+
+ DRM_DEBUG_DRIVER("GuC fw load failed: %d; will reset and "
+ "retry %d more time(s)\n", ret, attempts);
+ }
+
+ /* Did we succeed or run out of retries? */
+ if (ret)
+ goto err_submission;
+
+ intel_guc_auth_huc(dev_priv);
+ if (i915.enable_guc_submission) {
+ if (i915.guc_log_level >= 0)
+ gen9_enable_guc_interrupts(dev_priv);
+
+ ret = i915_guc_submission_enable(dev_priv);
+ if (ret)
+ goto err_submission;
+ }
+
+ return 0;
+
+ /*
+ * We've failed to load the firmware :(
+ *
+ * Decide whether to disable GuC submission and fall back to
+ * execlist mode, and whether to hide the error by returning
+ * zero or to return -EIO, which the caller will treat as a
+ * nonfatal error (i.e. it doesn't prevent driver load, but
+ * marks the GPU as wedged until reset).
+ */
+err_submission:
+ if (i915.enable_guc_submission)
+ i915_guc_submission_fini(dev_priv);
+
+err:
+ i915_ggtt_disable_guc(dev_priv);
+
+ DRM_ERROR("GuC init failed\n");
+ if (i915.enable_guc_loading > 1 || i915.enable_guc_submission > 1)
+ ret = -EIO;
+ else
+ ret = 0;
+
+ if (i915.enable_guc_submission) {
+ i915.enable_guc_submission = 0;
+ DRM_NOTE("Falling back from GuC submission to execlist mode\n");
+ }
+
+ return ret;
+}
+
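
The load loop above resets before every attempt and retries only on -EAGAIN, per the WaEnableGuCBootHashCheckNotSet workaround. The retry skeleton in isolation (both helpers are placeholders, not i915 functions):

    #include <errno.h>

    static void placeholder_reset(void) { }
    static int placeholder_load(void) { return 0; }

    static int load_with_retries(int attempts)
    {
            int ret = -EAGAIN;

            while (attempts--) {
                    placeholder_reset();      /* fresh, predictable state */
                    ret = placeholder_load();
                    if (ret != -EAGAIN)       /* success or hard failure */
                            break;
            }

            return ret;                       /* -EAGAIN if retries ran out */
    }
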
/*
* Read GuC command/status register (SOFT_SCRATCH_0)
* Return true if it contains a response rather than a command
@@ -114,3 +269,135 @@ int intel_guc_sample_forcewake(struct intel_guc *guc)
return intel_guc_send(guc, action, ARRAY_SIZE(action));
}
+void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw)
+{
+ struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct drm_i915_gem_object *obj;
+ const struct firmware *fw = NULL;
+ struct uc_css_header *css;
+ size_t size;
+ int err;
+
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_PENDING;
+
+ DRM_DEBUG_DRIVER("before requesting firmware: uC fw fetch status %s\n",
+ intel_uc_fw_status_repr(uc_fw->fetch_status));
+
+ err = request_firmware(&fw, uc_fw->path, &pdev->dev);
+ if (err)
+ goto fail;
+ if (!fw)
+ goto fail;
+
+ DRM_DEBUG_DRIVER("fetch uC fw from %s succeeded, fw %p\n",
+ uc_fw->path, fw);
+
+ /* Check the size of the blob before examining buffer contents */
+ if (fw->size < sizeof(struct uc_css_header)) {
+ DRM_NOTE("Firmware header is missing\n");
+ goto fail;
+ }
+
+ css = (struct uc_css_header *)fw->data;
+
+ /* Firmware bits always start from header */
+ uc_fw->header_offset = 0;
+ uc_fw->header_size = (css->header_size_dw - css->modulus_size_dw -
+ css->key_size_dw - css->exponent_size_dw) * sizeof(u32);
+
+ if (uc_fw->header_size != sizeof(struct uc_css_header)) {
+ DRM_NOTE("CSS header definition mismatch\n");
+ goto fail;
+ }
+
+ /* then, uCode */
+ uc_fw->ucode_offset = uc_fw->header_offset + uc_fw->header_size;
+ uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
+
+ /* now RSA */
+ if (css->key_size_dw != UOS_RSA_SCRATCH_MAX_COUNT) {
+ DRM_NOTE("RSA key size is bad\n");
+ goto fail;
+ }
+ uc_fw->rsa_offset = uc_fw->ucode_offset + uc_fw->ucode_size;
+ uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
+
+ /* At a minimum the blob must contain the header, uCode and RSA key; check their combined size. */
+ size = uc_fw->header_size + uc_fw->ucode_size + uc_fw->rsa_size;
+ if (fw->size < size) {
+ DRM_NOTE("Missing firmware components\n");
+ goto fail;
+ }
+
+ /*
+ * The GuC firmware image has the version number embedded at a
+ * well-known offset within the firmware blob; note that major / minor
+ * versions are TWO bytes each (i.e. u16), although all pointers and
+ * offsets are defined in terms of bytes (u8).
+ */
+ switch (uc_fw->type) {
+ case INTEL_UC_FW_TYPE_GUC:
+ /* Header and uCode will be loaded to WOPCM. Size of the two. */
+ size = uc_fw->header_size + uc_fw->ucode_size;
+
+ /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
+ if (size > intel_guc_wopcm_size(dev_priv)) {
+ DRM_ERROR("Firmware is too large to fit in WOPCM\n");
+ goto fail;
+ }
+ uc_fw->major_ver_found = css->guc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->guc.sw_version & 0xFFFF;
+ break;
+
+ case INTEL_UC_FW_TYPE_HUC:
+ uc_fw->major_ver_found = css->huc.sw_version >> 16;
+ uc_fw->minor_ver_found = css->huc.sw_version & 0xFFFF;
+ break;
+
+ default:
+ DRM_ERROR("Unknown firmware type %d\n", uc_fw->type);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ if (uc_fw->major_ver_wanted == 0 && uc_fw->minor_ver_wanted == 0) {
+ DRM_NOTE("Skipping uC firmware version check\n");
+ } else if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
+ uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
+ DRM_NOTE("uC firmware version %d.%d, required %d.%d\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+ err = -ENOEXEC;
+ goto fail;
+ }
+
+ DRM_DEBUG_DRIVER("firmware version %d.%d OK (minimum %d.%d)\n",
+ uc_fw->major_ver_found, uc_fw->minor_ver_found,
+ uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
+
+ obj = i915_gem_object_create_from_data(dev_priv, fw->data, fw->size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto fail;
+ }
+
+ uc_fw->obj = obj;
+ uc_fw->size = fw->size;
+
+ DRM_DEBUG_DRIVER("uC fw fetch status SUCCESS, obj %p\n",
+ uc_fw->obj);
+
+ release_firmware(fw);
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_SUCCESS;
+ return;
+
+fail:
+ DRM_WARN("Failed to fetch valid uC firmware from %s (error %d)\n",
+ uc_fw->path, err);
+ DRM_DEBUG_DRIVER("uC fw fetch status FAIL; err %d, fw %p, obj %p\n",
+ err, fw, uc_fw->obj);
+
+ release_firmware(fw); /* OK even if fw is NULL */
+ uc_fw->fetch_status = INTEL_UC_FIRMWARE_FAIL;
+}
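For reference, the offset arithmetic above lays the blob out as
[CSS header | uCode | RSA key], each size taken from the header's dword
counts, and the sw_version word packs the major/minor numbers into its
high/low u16 halves. A standalone sketch of the unpacking (illustrative
fragment only):

	u32 sw_version = css->guc.sw_version;	/* or css->huc.sw_version */
	u16 major = sw_version >> 16;		/* high two bytes */
	u16 minor = sw_version & 0xffff;	/* low two bytes */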
diff --git a/drivers/gpu/drm/i915/intel_uc.h b/drivers/gpu/drm/i915/intel_uc.h
index d74f4d3ad8dc..a35ededfaa40 100644
--- a/drivers/gpu/drm/i915/intel_uc.h
+++ b/drivers/gpu/drm/i915/intel_uc.h
@@ -121,7 +121,7 @@ struct intel_uc_fw {
uint16_t major_ver_found;
uint16_t minor_ver_found;
- enum intel_uc_fw_type fw;
+ enum intel_uc_fw_type type;
uint32_t header_size;
uint32_t header_offset;
uint32_t rsa_size;
@@ -184,19 +184,22 @@ struct intel_huc {
};
/* intel_uc.c */
+void intel_uc_sanitize_options(struct drm_i915_private *dev_priv);
void intel_uc_init_early(struct drm_i915_private *dev_priv);
+void intel_uc_init_fw(struct drm_i915_private *dev_priv);
+int intel_uc_init_hw(struct drm_i915_private *dev_priv);
+void intel_uc_prepare_fw(struct drm_i915_private *dev_priv,
+ struct intel_uc_fw *uc_fw);
int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len);
int intel_guc_sample_forcewake(struct intel_guc *guc);
/* intel_guc_loader.c */
-extern void intel_guc_init(struct drm_i915_private *dev_priv);
-extern int intel_guc_setup(struct drm_i915_private *dev_priv);
-extern void intel_guc_fini(struct drm_i915_private *dev_priv);
-extern const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
-extern int intel_guc_suspend(struct drm_i915_private *dev_priv);
-extern int intel_guc_resume(struct drm_i915_private *dev_priv);
-void intel_uc_fw_fetch(struct drm_i915_private *dev_priv,
- struct intel_uc_fw *uc_fw);
+int intel_guc_select_fw(struct intel_guc *guc);
+int intel_guc_init_hw(struct intel_guc *guc);
+void intel_guc_fini(struct drm_i915_private *dev_priv);
+const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status);
+int intel_guc_suspend(struct drm_i915_private *dev_priv);
+int intel_guc_resume(struct drm_i915_private *dev_priv);
u32 intel_guc_wopcm_size(struct drm_i915_private *dev_priv);
/* i915_guc_submission.c */
@@ -223,9 +226,9 @@ static inline u32 guc_ggtt_offset(struct i915_vma *vma)
}
/* intel_huc.c */
-void intel_huc_init(struct drm_i915_private *dev_priv);
+void intel_huc_select_fw(struct intel_huc *huc);
void intel_huc_fini(struct drm_i915_private *dev_priv);
-int intel_huc_load(struct drm_i915_private *dev_priv);
+int intel_huc_init_hw(struct intel_huc *huc);
void intel_guc_auth_huc(struct drm_i915_private *dev_priv);
#endif
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index abe08885a5ba..09f5f02d7901 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -25,6 +25,7 @@
#include "intel_drv.h"
#include "i915_vgpu.h"
+#include <asm/iosf_mbi.h>
#include <linux/pm_runtime.h>
#define FORCEWAKE_ACK_TIMEOUT_MS 50
@@ -119,6 +120,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
for_each_fw_domain_masked(d, fw_domains, dev_priv)
fw_domain_wait_ack(d);
+
+ dev_priv->uncore.fw_domains_active |= fw_domains;
}
static void
@@ -130,6 +133,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma
fw_domain_put(d);
fw_domain_posting_read(d);
}
+
+ dev_priv->uncore.fw_domains_active &= ~fw_domains;
}
static void
@@ -240,18 +245,16 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
if (WARN_ON(domain->wake_count == 0))
domain->wake_count++;
- if (--domain->wake_count == 0) {
+ if (--domain->wake_count == 0)
dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask);
- dev_priv->uncore.fw_domains_active &= ~domain->mask;
- }
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
return HRTIMER_NORESTART;
}
-void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
- bool restore)
+static void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
+ bool restore)
{
unsigned long irqflags;
struct intel_uncore_forcewake_domain *domain;
@@ -427,10 +430,18 @@ static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
}
-void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
- bool restore_forcewake)
+void intel_uncore_suspend(struct drm_i915_private *dev_priv)
{
- __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
+ iosf_mbi_unregister_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+ intel_uncore_forcewake_reset(dev_priv, false);
+}
+
+void intel_uncore_resume_early(struct drm_i915_private *dev_priv)
+{
+ __intel_uncore_early_sanitize(dev_priv, true);
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
}
@@ -454,10 +465,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
fw_domains &= ~domain->mask;
}
- if (fw_domains) {
+ if (fw_domains)
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
- }
}
/**
@@ -499,7 +508,7 @@ void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
- assert_spin_locked(&dev_priv->uncore.lock);
+ lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_get)
return;
@@ -557,7 +566,7 @@ void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains fw_domains)
{
- assert_spin_locked(&dev_priv->uncore.lock);
+ lockdep_assert_held(&dev_priv->uncore.lock);
if (!dev_priv->uncore.funcs.force_wake_put)
return;
@@ -635,33 +644,6 @@ find_fw_domain(struct drm_i915_private *dev_priv, u32 offset)
return entry->domains;
}
-static void
-intel_fw_table_check(struct drm_i915_private *dev_priv)
-{
- const struct intel_forcewake_range *ranges;
- unsigned int num_ranges;
- s32 prev;
- unsigned int i;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- return;
-
- ranges = dev_priv->uncore.fw_domains_table;
- if (!ranges)
- return;
-
- num_ranges = dev_priv->uncore.fw_domains_table_entries;
-
- for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
- WARN_ON_ONCE(IS_GEN9(dev_priv) &&
- (prev + 1) != (s32)ranges->start);
- WARN_ON_ONCE(prev >= (s32)ranges->start);
- prev = ranges->start;
- WARN_ON_ONCE(prev >= (s32)ranges->end);
- prev = ranges->end;
- }
-}
-
#define GEN_FW_RANGE(s, e, d) \
{ .start = (s), .end = (e), .domains = (d) }
@@ -700,23 +682,6 @@ static const i915_reg_t gen8_shadowed_regs[] = {
/* TODO: Other registers are not yet used */
};
-static void intel_shadow_table_check(void)
-{
- const i915_reg_t *reg = gen8_shadowed_regs;
- s32 prev;
- u32 offset;
- unsigned int i;
-
- if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- return;
-
- for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
- offset = i915_mmio_reg_offset(*reg);
- WARN_ON_ONCE(prev >= (s32)offset);
- prev = offset;
- }
-}
-
static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
{
u32 offset = i915_mmio_reg_offset(*reg);
@@ -968,7 +933,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv,
fw_domain_arm_timer(domain);
dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains);
- dev_priv->uncore.fw_domains_active |= fw_domains;
}
static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
@@ -985,29 +949,19 @@ static inline void __force_wake_auto(struct drm_i915_private *dev_priv,
___force_wake_auto(dev_priv, fw_domains);
}
-#define __gen6_read(x) \
-static u##x \
-gen6_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_READ_HEADER(x); \
- fw_engine = __gen6_reg_read_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- val = __raw_i915_read##x(dev_priv, reg); \
- GEN6_READ_FOOTER; \
-}
-
-#define __fwtable_read(x) \
+#define __gen_read(func, x) \
static u##x \
-fwtable_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
+func##_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __fwtable_reg_read_fw_domains(offset); \
+ fw_engine = __##func##_reg_read_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
val = __raw_i915_read##x(dev_priv, reg); \
GEN6_READ_FOOTER; \
}
+#define __gen6_read(x) __gen_read(gen6, x)
+#define __fwtable_read(x) __gen_read(fwtable, x)
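To make the consolidation concrete: __fwtable_read(32) now expands to
roughly the following (reconstructed by hand from the macro above):

	static u32
	fwtable_read32(struct drm_i915_private *dev_priv,
		       i915_reg_t reg, bool trace)
	{
		enum forcewake_domains fw_engine;
		GEN6_READ_HEADER(32);
		fw_engine = __fwtable_reg_read_fw_domains(offset);
		if (fw_engine)
			__force_wake_auto(dev_priv, fw_engine);
		val = __raw_i915_read32(dev_priv, reg);
		GEN6_READ_FOOTER;
	}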
#define __gen9_decoupled_read(x) \
static u##x \
@@ -1045,34 +999,6 @@ __gen6_read(64)
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
-#define VGPU_READ_HEADER(x) \
- unsigned long irqflags; \
- u##x val = 0; \
- assert_rpm_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
-#define VGPU_READ_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
- return val
-
-#define __vgpu_read(x) \
-static u##x \
-vgpu_read##x(struct drm_i915_private *dev_priv, i915_reg_t reg, bool trace) { \
- VGPU_READ_HEADER(x); \
- val = __raw_i915_read##x(dev_priv, reg); \
- VGPU_READ_FOOTER; \
-}
-
-__vgpu_read(8)
-__vgpu_read(16)
-__vgpu_read(32)
-__vgpu_read(64)
-
-#undef __vgpu_read
-#undef VGPU_READ_FOOTER
-#undef VGPU_READ_HEADER
-
#define GEN2_WRITE_HEADER \
trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
assert_rpm_wakelock_held(dev_priv); \
@@ -1136,29 +1062,19 @@ gen6_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool
GEN6_WRITE_FOOTER; \
}
-#define __gen8_write(x) \
+#define __gen_write(func, x) \
static void \
-gen8_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
+func##_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __gen8_reg_write_fw_domains(offset); \
- if (fw_engine) \
- __force_wake_auto(dev_priv, fw_engine); \
- __raw_i915_write##x(dev_priv, reg, val); \
- GEN6_WRITE_FOOTER; \
-}
-
-#define __fwtable_write(x) \
-static void \
-fwtable_write##x(struct drm_i915_private *dev_priv, i915_reg_t reg, u##x val, bool trace) { \
- enum forcewake_domains fw_engine; \
- GEN6_WRITE_HEADER; \
- fw_engine = __fwtable_reg_write_fw_domains(offset); \
+ fw_engine = __##func##_reg_write_fw_domains(offset); \
if (fw_engine) \
__force_wake_auto(dev_priv, fw_engine); \
__raw_i915_write##x(dev_priv, reg, val); \
GEN6_WRITE_FOOTER; \
}
+#define __gen8_write(x) __gen_write(gen8, x)
+#define __fwtable_write(x) __gen_write(fwtable, x)
#define __gen9_decoupled_write(x) \
static void \
@@ -1195,31 +1111,6 @@ __gen6_write(32)
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
-#define VGPU_WRITE_HEADER \
- unsigned long irqflags; \
- trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
- assert_rpm_device_not_suspended(dev_priv); \
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)
-
-#define VGPU_WRITE_FOOTER \
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags)
-
-#define __vgpu_write(x) \
-static void vgpu_write##x(struct drm_i915_private *dev_priv, \
- i915_reg_t reg, u##x val, bool trace) { \
- VGPU_WRITE_HEADER; \
- __raw_i915_write##x(dev_priv, reg, val); \
- VGPU_WRITE_FOOTER; \
-}
-
-__vgpu_write(8)
-__vgpu_write(16)
-__vgpu_write(32)
-
-#undef __vgpu_write
-#undef VGPU_WRITE_FOOTER
-#undef VGPU_WRITE_HEADER
-
#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
do { \
dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
@@ -1289,7 +1180,7 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
{
- if (INTEL_INFO(dev_priv)->gen <= 5)
+ if (INTEL_GEN(dev_priv) <= 5 || intel_vgpu_active(dev_priv))
return;
if (IS_GEN9(dev_priv)) {
@@ -1386,6 +1277,32 @@ static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
dev_priv->uncore.fw_domains_table_entries = ARRAY_SIZE((d)); \
}
+static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct drm_i915_private *dev_priv = container_of(nb,
+ struct drm_i915_private, uncore.pmic_bus_access_nb);
+
+ switch (action) {
+ case MBI_PMIC_BUS_ACCESS_BEGIN:
+ /*
+ * Take forcewake on all domains now, so that we never need to
+ * acquire it later; on the systems where this notifier fires,
+ * acquiring forcewake requires the punit to access the shared
+ * PMIC i2c bus, which will be busy after this notification,
+ * leading to:
+ * "render: timed out waiting for forcewake ack request."
+ * errors.
+ */
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ break;
+ case MBI_PMIC_BUS_ACCESS_END:
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
void intel_uncore_init(struct drm_i915_private *dev_priv)
{
i915_check_vgpu(dev_priv);
@@ -1395,23 +1312,25 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
__intel_uncore_early_sanitize(dev_priv, false);
dev_priv->uncore.unclaimed_mmio_check = 1;
+ dev_priv->uncore.pmic_bus_access_nb.notifier_call =
+ i915_pmic_bus_access_notifier;
- switch (INTEL_INFO(dev_priv)->gen) {
- default:
- case 9:
- ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
- ASSIGN_READ_MMIO_VFUNCS(fwtable);
- if (HAS_DECOUPLED_MMIO(dev_priv)) {
- dev_priv->uncore.funcs.mmio_readl =
- gen9_decoupled_read32;
- dev_priv->uncore.funcs.mmio_readq =
- gen9_decoupled_read64;
- dev_priv->uncore.funcs.mmio_writel =
- gen9_decoupled_write32;
+ if (IS_GEN(dev_priv, 2, 4) || intel_vgpu_active(dev_priv)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(gen2);
+ ASSIGN_READ_MMIO_VFUNCS(gen2);
+ } else if (IS_GEN5(dev_priv)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(gen5);
+ ASSIGN_READ_MMIO_VFUNCS(gen5);
+ } else if (IS_GEN(dev_priv, 6, 7)) {
+ ASSIGN_WRITE_MMIO_VFUNCS(gen6);
+
+ if (IS_VALLEYVIEW(dev_priv)) {
+ ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ } else {
+ ASSIGN_READ_MMIO_VFUNCS(gen6);
}
- break;
- case 8:
+ } else if (IS_GEN8(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
ASSIGN_FW_DOMAINS_TABLE(__chv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
@@ -1421,38 +1340,22 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
ASSIGN_WRITE_MMIO_VFUNCS(gen8);
ASSIGN_READ_MMIO_VFUNCS(gen6);
}
- break;
- case 7:
- case 6:
- ASSIGN_WRITE_MMIO_VFUNCS(gen6);
-
- if (IS_VALLEYVIEW(dev_priv)) {
- ASSIGN_FW_DOMAINS_TABLE(__vlv_fw_ranges);
- ASSIGN_READ_MMIO_VFUNCS(fwtable);
- } else {
- ASSIGN_READ_MMIO_VFUNCS(gen6);
+ } else {
+ ASSIGN_FW_DOMAINS_TABLE(__gen9_fw_ranges);
+ ASSIGN_WRITE_MMIO_VFUNCS(fwtable);
+ ASSIGN_READ_MMIO_VFUNCS(fwtable);
+ if (HAS_DECOUPLED_MMIO(dev_priv)) {
+ dev_priv->uncore.funcs.mmio_readl =
+ gen9_decoupled_read32;
+ dev_priv->uncore.funcs.mmio_readq =
+ gen9_decoupled_read64;
+ dev_priv->uncore.funcs.mmio_writel =
+ gen9_decoupled_write32;
}
- break;
- case 5:
- ASSIGN_WRITE_MMIO_VFUNCS(gen5);
- ASSIGN_READ_MMIO_VFUNCS(gen5);
- break;
- case 4:
- case 3:
- case 2:
- ASSIGN_WRITE_MMIO_VFUNCS(gen2);
- ASSIGN_READ_MMIO_VFUNCS(gen2);
- break;
}
- intel_fw_table_check(dev_priv);
- if (INTEL_GEN(dev_priv) >= 8)
- intel_shadow_table_check();
-
- if (intel_vgpu_active(dev_priv)) {
- ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
- ASSIGN_READ_MMIO_VFUNCS(vgpu);
- }
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
i915_check_and_clear_faults(dev_priv);
}
@@ -1461,6 +1364,9 @@ void intel_uncore_init(struct drm_i915_private *dev_priv)
void intel_uncore_fini(struct drm_i915_private *dev_priv)
{
+ iosf_mbi_unregister_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+
/* Paranoia: make sure we have disabled everything before we exit. */
intel_uncore_sanitize(dev_priv);
intel_uncore_forcewake_reset(dev_priv, false);
@@ -1971,3 +1877,7 @@ intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
return fw_domains;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftests/intel_uncore.c"
+#endif
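Including the selftest sources into the same translation unit (rather than
linking them separately) is what lets them exercise the static functions
above; presumably the intel_fw_table_check() and intel_shadow_table_check()
sanity checks deleted from this file live on there. A sketch of the expected
shape of such a file, following the SUBTEST pattern used by the other
selftest files in this diff (names assumed):

	/* selftests/intel_uncore.c (assumed shape) */
	static int intel_fw_table_check(void *arg)
	{
		/* walk uncore.fw_domains_table checking that the ranges
		 * are sorted and non-overlapping, as the removed
		 * debug-only version did; return -EINVAL on disorder */
		return 0;
	}

	int intel_uncore_mock_selftests(void)
	{
		static const struct i915_subtest tests[] = {
			SUBTEST(intel_fw_table_check),
		};

		return i915_subtests(tests, NULL);
	}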
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
new file mode 100644
index 000000000000..4e681fc13be4
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "huge_gem_object.h"
+
+static void huge_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ unsigned long nreal = obj->scratch / PAGE_SIZE;
+ struct scatterlist *sg;
+
+ for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
+ __free_page(sg_page(sg));
+
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static struct sg_table *
+huge_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+ const unsigned long nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct scatterlist *sg, *src, *end;
+ struct sg_table *pages;
+ unsigned long n;
+
+ pages = kmalloc(sizeof(*pages), GFP);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ if (sg_alloc_table(pages, npages, GFP)) {
+ kfree(pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ sg = pages->sgl;
+ for (n = 0; n < nreal; n++) {
+ struct page *page;
+
+ page = alloc_page(GFP | __GFP_HIGHMEM);
+ if (!page) {
+ sg_mark_end(sg);
+ goto err;
+ }
+
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ sg = __sg_next(sg);
+ }
+ if (nreal < npages) {
+ for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
+ sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
+ src = __sg_next(src);
+ if (src == end)
+ src = pages->sgl;
+ }
+ }
+
+ if (i915_gem_gtt_prepare_pages(obj, pages))
+ goto err;
+
+ return pages;
+
+err:
+ huge_free_pages(obj, pages);
+ return ERR_PTR(-ENOMEM);
+#undef GFP
+}
+
+static void huge_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ i915_gem_gtt_finish_pages(obj, pages);
+ huge_free_pages(obj, pages);
+
+ obj->mm.dirty = false;
+}
+
+static const struct drm_i915_gem_object_ops huge_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
+ I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = huge_get_pages,
+ .put_pages = huge_put_pages,
+};
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!phys_size || phys_size > dma_size);
+ GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
+ GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(dma_size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ return ERR_PTR(-ENOMEM);
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
+ i915_gem_object_init(obj, &huge_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ obj->scratch = phys_size;
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/selftests/huge_gem_object.h b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
new file mode 100644
index 000000000000..a6133a9e8029
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/huge_gem_object.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HUGE_GEM_OBJECT_H
+#define __HUGE_GEM_OBJECT_H
+
+struct drm_i915_gem_object *
+huge_gem_object(struct drm_i915_private *i915,
+ phys_addr_t phys_size,
+ dma_addr_t dma_size);
+
+static inline phys_addr_t
+huge_gem_object_phys_size(struct drm_i915_gem_object *obj)
+{
+ return obj->scratch;
+}
+
+static inline dma_addr_t
+huge_gem_object_dma_size(struct drm_i915_gem_object *obj)
+{
+ return obj->base.size;
+}
+
+#endif /* !__HUGE_GEM_OBJECT_H */
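As a usage sketch: huge_gem_object() decouples the physical backing from the
GTT footprint, so a test can instantiate an object that the GTT sees as huge
while only a handful of pages are really allocated (SZ_2M comes from
<linux/sizes.h>; the sizes are illustrative):

	struct drm_i915_gem_object *obj;

	/* one real page, replicated across a 2MiB GTT range */
	obj = huge_gem_object(i915, PAGE_SIZE, SZ_2M);
	if (IS_ERR(obj))
		return PTR_ERR(obj);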
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
new file mode 100644
index 000000000000..f08d0179b3df
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+static int cpu_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ unsigned int needs_clflush;
+ struct page *page;
+ typeof(v) *map;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
+ if (err)
+ return err;
+
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ map = kmap_atomic(page);
+ if (needs_clflush & CLFLUSH_BEFORE)
+ clflush(map + offset_in_page(offset) / sizeof(*map));
+ map[offset_in_page(offset) / sizeof(*map)] = v;
+ if (needs_clflush & CLFLUSH_AFTER)
+ clflush(map + offset_in_page(offset) / sizeof(*map));
+ kunmap_atomic(map);
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return 0;
+}
+
+static int cpu_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ unsigned int needs_clflush;
+ struct page *page;
+ typeof(v) map;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
+ if (err)
+ return err;
+
+ page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ map = kmap_atomic(page);
+ if (needs_clflush & CLFLUSH_BEFORE)
+ clflush(map + offset_in_page(offset) / sizeof(*map));
+ *v = map[offset_in_page(offset) / sizeof(*map)];
+ kunmap_atomic(map);
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return 0;
+}
+
+static int gtt_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ struct i915_vma *vma;
+ typeof(v) *map;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map[offset / sizeof(*map)] = v;
+ i915_vma_unpin_iomap(vma);
+
+ return 0;
+}
+
+static int gtt_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ struct i915_vma *vma;
+ typeof(v) map;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ map = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ *v = map[offset / sizeof(*map)];
+ i915_vma_unpin_iomap(vma);
+
+ return 0;
+}
+
+static int wc_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ typeof(v) *map;
+ int err;
+
+ /* XXX A GTT write followed by a WC write can go missing */
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ map[offset / sizeof(*map)] = v;
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int wc_get(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 *v)
+{
+ typeof(v) map;
+ int err;
+
+ /* XXX A WC write followed by a GTT write can go missing */
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ map = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ *v = map[offset / sizeof(*map)];
+ i915_gem_object_unpin_map(obj);
+
+ return 0;
+}
+
+static int gpu_set(struct drm_i915_gem_object *obj,
+ unsigned long offset,
+ u32 v)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ __i915_add_request(rq, false);
+ i915_vma_unpin(vma);
+ return PTR_ERR(cs);
+ }
+
+ if (INTEL_GEN(i915) >= 8) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
+ *cs++ = v;
+ } else if (INTEL_GEN(i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cs++ = i915_ggtt_offset(vma) + offset;
+ *cs++ = v;
+ *cs++ = MI_NOOP;
+ }
+ intel_ring_advance(rq, cs);
+
+ i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unpin(vma);
+
+ reservation_object_lock(obj->resv, NULL);
+ reservation_object_add_excl_fence(obj->resv, &rq->fence);
+ reservation_object_unlock(obj->resv);
+
+ __i915_add_request(rq, true);
+
+ return 0;
+}
+
+static bool always_valid(struct drm_i915_private *i915)
+{
+ return true;
+}
+
+static bool needs_mi_store_dword(struct drm_i915_private *i915)
+{
+ return igt_can_mi_store_dword_imm(i915);
+}
+
+static const struct igt_coherency_mode {
+ const char *name;
+ int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v);
+ int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v);
+ bool (*valid)(struct drm_i915_private *i915);
+} igt_coherency_mode[] = {
+ { "cpu", cpu_set, cpu_get, always_valid },
+ { "gtt", gtt_set, gtt_get, always_valid },
+ { "wc", wc_set, wc_get, always_valid },
+ { "gpu", gpu_set, NULL, needs_mi_store_dword },
+ { },
+};
+
+static int igt_gem_coherency(void *arg)
+{
+ const unsigned int ncachelines = PAGE_SIZE/64;
+ I915_RND_STATE(prng);
+ struct drm_i915_private *i915 = arg;
+ const struct igt_coherency_mode *read, *write, *over;
+ struct drm_i915_gem_object *obj;
+ unsigned long count, n;
+ u32 *offsets, *values;
+ int err = 0;
+
+ /* We repeatedly write, overwrite and read from a sequence of
+ * cachelines in order to try and detect incoherency (unflushed writes
+ * from either the CPU or GPU). Each setter/getter uses our cache
+ * domain API which should prevent incoherency.
+ */
+
+ offsets = kmalloc_array(ncachelines, 2*sizeof(u32), GFP_KERNEL);
+ if (!offsets)
+ return -ENOMEM;
+ for (count = 0; count < ncachelines; count++)
+ offsets[count] = count * 64 + 4 * (count % 16);
+
+ values = offsets + ncachelines;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for (over = igt_coherency_mode; over->name; over++) {
+ if (!over->set)
+ continue;
+
+ if (!over->valid(i915))
+ continue;
+
+ for (write = igt_coherency_mode; write->name; write++) {
+ if (!write->set)
+ continue;
+
+ if (!write->valid(i915))
+ continue;
+
+ for (read = igt_coherency_mode; read->name; read++) {
+ if (!read->get)
+ continue;
+
+ if (!read->valid(i915))
+ continue;
+
+ for_each_prime_number_from(count, 1, ncachelines) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto unlock;
+ }
+
+ i915_random_reorder(offsets, ncachelines, &prng);
+ for (n = 0; n < count; n++)
+ values[n] = prandom_u32_state(&prng);
+
+ for (n = 0; n < count; n++) {
+ err = over->set(obj, offsets[n], ~values[n]);
+ if (err) {
+ pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, over->name, err);
+ goto put_object;
+ }
+ }
+
+ for (n = 0; n < count; n++) {
+ err = write->set(obj, offsets[n], values[n]);
+ if (err) {
+ pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, write->name, err);
+ goto put_object;
+ }
+ }
+
+ for (n = 0; n < count; n++) {
+ u32 found;
+
+ err = read->get(obj, offsets[n], &found);
+ if (err) {
+ pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n",
+ n, count, read->name, err);
+ goto put_object;
+ }
+
+ if (found != values[n]) {
+ pr_err("Value[%ld/%ld] mismatch, (overwrite with %s) wrote [%s] %x read [%s] %x (inverse %x), at offset %x\n",
+ n, count, over->name,
+ write->name, values[n],
+ read->name, found,
+ ~values[n], offsets[n]);
+ err = -EINVAL;
+ goto put_object;
+ }
+ }
+
+ __i915_gem_object_release_unless_active(obj);
+ }
+ }
+ }
+ }
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ kfree(offsets);
+ return err;
+
+put_object:
+ __i915_gem_object_release_unless_active(obj);
+ goto unlock;
+}
+
+int i915_gem_coherency_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_coherency),
+ };
+
+ return i915_subtests(tests, i915);
+}
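i915_gem_coherency_live_selftests() is the entry point the selftest harness
calls; each such entry point in this series is presumably listed in a central
table header (file name assumed) from which the framework enumerates the
tests:

	/* i915_live_selftests.h (assumed): one entry per live selftest,
	 * run in declaration order */
	selftest(coherency, i915_gem_coherency_live_selftests)
	selftest(contexts, i915_gem_context_live_selftests)
	selftest(dmabuf, i915_gem_dmabuf_live_selftests)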
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
new file mode 100644
index 000000000000..1afb8b06e3e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_drm.h"
+#include "huge_gem_object.h"
+
+#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
+
+static struct i915_vma *
+gpu_fill_dw(struct i915_vma *vma, u64 offset, unsigned long count, u32 value)
+{
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(vma->vm->i915);
+ unsigned long n, size;
+ u32 *cmd;
+ int err;
+
+ GEM_BUG_ON(!igt_can_mi_store_dword_imm(vma->vm->i915));
+
+ size = (4 * count + 1) * sizeof(u32);
+ size = round_up(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(vma->vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
+ offset += vma->node.start;
+
+ for (n = 0; n < count; n++) {
+ if (gen >= 8) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4;
+ *cmd++ = lower_32_bits(offset);
+ *cmd++ = upper_32_bits(offset);
+ *cmd++ = value;
+ } else if (gen >= 4) {
+ *cmd++ = MI_STORE_DWORD_IMM_GEN4 |
+ (gen < 6 ? 1 << 22 : 0);
+ *cmd++ = 0;
+ *cmd++ = offset;
+ *cmd++ = value;
+ } else {
+ *cmd++ = MI_STORE_DWORD_IMM | 1 << 22;
+ *cmd++ = offset;
+ *cmd++ = value;
+ }
+ offset += PAGE_SIZE;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, vma->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
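A quick worked example of the batch sizing above, assuming count = 1024
dword writes:

	/*
	 *   size = (4 * 1024 + 1) * sizeof(u32) = 16388 bytes
	 *   round_up(16388, PAGE_SIZE)          = 20480 bytes (5 pages)
	 *
	 * Four dwords per store covers the largest (gen4+) encoding;
	 * the three-dword gen2/3 encoding simply leaves slack before
	 * the MI_BATCH_BUFFER_END terminator.
	 */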
+static unsigned long real_page_count(struct drm_i915_gem_object *obj)
+{
+ return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
+}
+
+static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
+{
+ return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
+}
+
+static int gpu_fill(struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx,
+ struct intel_engine_cs *engine,
+ unsigned int dw)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ struct i915_vma *batch;
+ unsigned int flags;
+ int err;
+
+ GEM_BUG_ON(obj->base.size > vm->total);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
+ if (err)
+ return err;
+
+ /* Within the GTT the huge object maps every page onto one of
+ * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
+ * We set the nth dword within the page using the nth
+ * mapping via the GTT - this should exercise the GTT mapping
+ * whilst checking that each context provides a unique view
+ * into the object.
+ */
+ batch = gpu_fill_dw(vma,
+ (dw * real_page_count(obj)) << PAGE_SHIFT |
+ (dw * sizeof(u32)),
+ real_page_count(obj),
+ dw);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto err_vma;
+ }
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto err_request;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto err_request;
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = engine->emit_bb_start(rq,
+ batch->node.start, batch->node.size,
+ flags);
+ if (err)
+ goto err_request;
+
+ i915_vma_move_to_active(batch, rq, 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_unpin(batch);
+ i915_vma_close(batch);
+
+ i915_vma_move_to_active(vma, rq, 0);
+ i915_vma_unpin(vma);
+
+ reservation_object_lock(obj->resv, NULL);
+ reservation_object_add_excl_fence(obj->resv, &rq->fence);
+ reservation_object_unlock(obj->resv);
+
+ __i915_add_request(rq, true);
+
+ return 0;
+
+err_request:
+ __i915_add_request(rq, false);
+err_batch:
+ i915_vma_unpin(batch);
+err_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
+{
+ const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
+ unsigned int n, m, need_flush;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_write(obj, &need_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < real_page_count(obj); n++) {
+ u32 *map;
+
+ map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ for (m = 0; m < DW_PER_PAGE; m++)
+ map[m] = value;
+ if (!has_llc)
+ drm_clflush_virt_range(map, PAGE_SIZE);
+ kunmap_atomic(map);
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = 0;
+ return 0;
+}
+
+static int cpu_check(struct drm_i915_gem_object *obj, unsigned int max)
+{
+ unsigned int n, m, needs_flush;
+ int err;
+
+ err = i915_gem_obj_prepare_shmem_read(obj, &needs_flush);
+ if (err)
+ return err;
+
+ for (n = 0; n < real_page_count(obj); n++) {
+ u32 *map;
+
+ map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ if (needs_flush & CLFLUSH_BEFORE)
+ drm_clflush_virt_range(map, PAGE_SIZE);
+
+ for (m = 0; m < max; m++) {
+ if (map[m] != m) {
+ pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+ n, m, map[m], m);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+ for (; m < DW_PER_PAGE; m++) {
+ if (map[m] != 0xdeadbeef) {
+ pr_err("Invalid value at page %d, offset %d: found %x expected %x\n",
+ n, m, map[m], 0xdeadbeef);
+ err = -EINVAL;
+ goto out_unmap;
+ }
+ }
+
+out_unmap:
+ kunmap_atomic(map);
+ if (err)
+ break;
+ }
+
+ i915_gem_obj_finish_shmem_access(obj);
+ return err;
+}
+
+static struct drm_i915_gem_object *
+create_test_object(struct i915_gem_context *ctx,
+ struct drm_file *file,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm =
+ ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base;
+ u64 size;
+ u32 handle;
+ int err;
+
+ size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
+ size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
+
+ obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size);
+ if (IS_ERR(obj))
+ return obj;
+
+ /* tie the handle to the drm_file for easy reaping */
+ err = drm_gem_handle_create(file, &obj->base, &handle);
+ i915_gem_object_put(obj);
+ if (err)
+ return ERR_PTR(err);
+
+ err = cpu_fill(obj, 0xdeadbeef);
+ if (err) {
+ pr_err("Failed to fill object with cpu, err=%d\n",
+ err);
+ return ERR_PTR(err);
+ }
+
+ list_add_tail(&obj->st_link, objects);
+ return obj;
+}
+
+static unsigned long max_dwords(struct drm_i915_gem_object *obj)
+{
+ unsigned long npages = fake_page_count(obj);
+
+ GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
+ return npages / DW_PER_PAGE;
+}
+
+static int igt_ctx_exec(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct drm_file *file;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(objects);
+ unsigned long ncontexts, ndwords, dw;
+ bool first_shared_gtt = true;
+ int err;
+
+ /* Create a few different contexts (with different mm) and write
+ * through each ctx/mm using the GPU making sure those writes end
+ * up in the expected pages of our obj.
+ */
+
+ file = mock_file(i915);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ ncontexts = 0;
+ ndwords = 0;
+ dw = 0;
+ while (!time_after(jiffies, end_time)) {
+ struct intel_engine_cs *engine;
+ struct i915_gem_context *ctx;
+ unsigned int id;
+
+ if (first_shared_gtt) {
+ ctx = __create_hw_context(i915, file->driver_priv);
+ first_shared_gtt = false;
+ } else {
+ ctx = i915_gem_create_context(i915, file->driver_priv);
+ }
+ if (IS_ERR(ctx)) {
+ err = PTR_ERR(ctx);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (dw == 0) {
+ obj = create_test_object(ctx, file, &objects);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+ }
+
+ err = gpu_fill(obj, ctx, engine, dw);
+ if (err) {
+ pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
+ ndwords, dw, max_dwords(obj),
+ engine->name, ctx->hw_id,
+ yesno(!!ctx->ppgtt), err);
+ goto out_unlock;
+ }
+
+ if (++dw == max_dwords(obj))
+ dw = 0;
+ ndwords++;
+ }
+ ncontexts++;
+ }
+ pr_info("Submitted %lu contexts (across %u engines), filling %lu dwords\n",
+ ncontexts, INTEL_INFO(i915)->num_rings, ndwords);
+
+ dw = 0;
+ list_for_each_entry(obj, &objects, st_link) {
+ unsigned int rem =
+ min_t(unsigned int, ndwords - dw, max_dwords(obj));
+
+ err = cpu_check(obj, rem);
+ if (err)
+ break;
+
+ dw += rem;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mock_file_free(i915, file);
+ return err;
+}
+
+static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ err = i915_gem_init_aliasing_ppgtt(i915);
+ if (err)
+ return err;
+
+ list_for_each_entry(obj, &i915->mm.bound_list, global_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ vma->flags &= ~I915_VMA_LOCAL_BIND;
+ }
+
+ return 0;
+}
+
+static void fake_aliasing_ppgtt_disable(struct drm_i915_private *i915)
+{
+ i915_gem_fini_aliasing_ppgtt(i915);
+}
+
+int i915_gem_context_live_selftests(struct drm_i915_private *dev_priv)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ctx_exec),
+ };
+ bool fake_alias = false;
+ int err;
+
+ /* Install a fake aliasing gtt for exercise */
+ if (USES_PPGTT(dev_priv) && !dev_priv->mm.aliasing_ppgtt) {
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ err = fake_aliasing_ppgtt_enable(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(!dev_priv->mm.aliasing_ppgtt);
+ fake_alias = true;
+ }
+
+ err = i915_subtests(tests, dev_priv);
+
+ if (fake_alias) {
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ fake_aliasing_ppgtt_disable(dev_priv);
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ }
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
new file mode 100644
index 000000000000..817bef74bbcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -0,0 +1,303 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "mock_dmabuf.h"
+
+static int igt_dmabuf_export(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ i915_gem_object_put(obj);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ return PTR_ERR(dmabuf);
+ }
+
+ dma_buf_put(dmabuf);
+ return 0;
+}
+
+static int igt_dmabuf_import_self(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct drm_gem_object *import;
+ struct dma_buf *dmabuf;
+ int err;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ err = PTR_ERR(dmabuf);
+ goto out;
+ }
+
+ import = i915_gem_prime_import(&i915->drm, dmabuf);
+ if (IS_ERR(import)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(import));
+ err = PTR_ERR(import);
+ goto out_dmabuf;
+ }
+
+ if (import != &obj->base) {
+ pr_err("i915_gem_prime_import created a new object!\n");
+ err = -EINVAL;
+ goto out_import;
+ }
+
+ err = 0;
+out_import:
+ i915_gem_object_put(to_intel_bo(import));
+out_dmabuf:
+ dma_buf_put(dmabuf);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int igt_dmabuf_import(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *obj_map, *dma_map;
+ u32 pattern[] = { 0, 0xaa, 0xcc, 0x55, 0xff };
+ int err, i;
+
+ dmabuf = mock_dmabuf(1);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
+ if (IS_ERR(obj)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(obj));
+ err = PTR_ERR(obj);
+ goto out_dmabuf;
+ }
+
+ if (obj->base.dev != &i915->drm) {
+ pr_err("i915_gem_prime_import created a non-i915 object!\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (obj->base.size != PAGE_SIZE) {
+ pr_err("i915_gem_prime_import is wrong size found %lld, expected %ld\n",
+ (long long)obj->base.size, PAGE_SIZE);
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ dma_map = dma_buf_vmap(dmabuf);
+ if (!dma_map) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
+ goto out_obj;
+ }
+
+ if (0) { /* Cannot yet map a dmabuf import */
+ obj_map = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(obj_map)) {
+ err = PTR_ERR(obj_map);
+ pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+ goto out_dma_map;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pattern); i++) {
+ memset(dma_map, pattern[i], PAGE_SIZE);
+ if (memchr_inv(obj_map, pattern[i], PAGE_SIZE)) {
+ err = -EINVAL;
+ pr_err("imported vmap not all set to %x!\n", pattern[i]);
+ i915_gem_object_unpin_map(obj);
+ goto out_dma_map;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(pattern); i++) {
+ memset(obj_map, pattern[i], PAGE_SIZE);
+ if (memchr_inv(dma_map, pattern[i], PAGE_SIZE)) {
+ err = -EINVAL;
+ pr_err("exported vmap not all set to %x!\n", pattern[i]);
+ i915_gem_object_unpin_map(obj);
+ goto out_dma_map;
+ }
+ }
+
+ i915_gem_object_unpin_map(obj);
+ }
+
+ err = 0;
+out_dma_map:
+ dma_buf_vunmap(dmabuf, dma_map);
+out_obj:
+ i915_gem_object_put(obj);
+out_dmabuf:
+ dma_buf_put(dmabuf);
+ return err;
+}
+
+static int igt_dmabuf_import_ownership(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *ptr;
+ int err;
+
+ dmabuf = mock_dmabuf(1);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ ptr = dma_buf_vmap(dmabuf);
+ if (!ptr) {
+ pr_err("dma_buf_vmap failed\n");
+ err = -ENOMEM;
+ goto err_dmabuf;
+ }
+
+ memset(ptr, 0xc5, PAGE_SIZE);
+ dma_buf_vunmap(dmabuf, ptr);
+
+ obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
+ if (IS_ERR(obj)) {
+ pr_err("i915_gem_prime_import failed with err=%d\n",
+ (int)PTR_ERR(obj));
+ err = PTR_ERR(obj);
+ goto err_dmabuf;
+ }
+
+ dma_buf_put(dmabuf);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("i915_gem_object_pin_pages failed with err=%d\n", err);
+ goto out_obj;
+ }
+
+ err = 0;
+ i915_gem_object_unpin_pages(obj);
+out_obj:
+ i915_gem_object_put(obj);
+ return err;
+
+err_dmabuf:
+ dma_buf_put(dmabuf);
+ return err;
+}
+
+static int igt_dmabuf_export_vmap(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct dma_buf *dmabuf;
+ void *ptr;
+ int err;
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+ if (IS_ERR(dmabuf)) {
+ pr_err("i915_gem_prime_export failed with err=%d\n",
+ (int)PTR_ERR(dmabuf));
+ err = PTR_ERR(dmabuf);
+ goto err_obj;
+ }
+ i915_gem_object_put(obj);
+
+ ptr = dma_buf_vmap(dmabuf);
+ if (IS_ERR(ptr)) {
+ err = PTR_ERR(ptr);
+ pr_err("dma_buf_vmap failed with err=%d\n", err);
+ goto out;
+ }
+
+ if (memchr_inv(ptr, 0, dmabuf->size)) {
+ pr_err("Exported object not initialiased to zero!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ memset(ptr, 0xc5, dmabuf->size);
+
+ err = 0;
+ dma_buf_vunmap(dmabuf, ptr);
+out:
+ dma_buf_put(dmabuf);
+ return err;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+int i915_gem_dmabuf_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_dmabuf_export),
+ SUBTEST(igt_dmabuf_import_self),
+ SUBTEST(igt_dmabuf_import),
+ SUBTEST(igt_dmabuf_import_ownership),
+ SUBTEST(igt_dmabuf_export_vmap),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_dmabuf_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_dmabuf_export),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
new file mode 100644
index 000000000000..14e9c2fbc4e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+
+static int populate_ggtt(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ u64 size;
+
+ for (size = 0;
+ size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ size += I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+ }
+
+ if (!list_empty(&i915->mm.unbound_list)) {
+ size = 0;
+ list_for_each_entry(obj, &i915->mm.unbound_list, global_link)
+ size++;
+
+ pr_err("Found %lld objects unbound!\n", size);
+ return -EINVAL;
+ }
+
+ if (list_empty(&i915->ggtt.base.inactive_list)) {
+ pr_err("No objects on the GGTT inactive list!\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void unpin_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link)
+ i915_vma_unpin(vma);
+}
+
+static void cleanup_objects(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj, *on;
+
+ list_for_each_entry_safe(obj, on, &i915->mm.unbound_list, global_link)
+ i915_gem_object_put(obj);
+
+ list_for_each_entry_safe(obj, on, &i915->mm.bound_list, global_link)
+ i915_gem_object_put(obj);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+}
+
+static int igt_evict_something(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict one. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_something(&ggtt->base,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
+ /* Everything is unpinned, we should be able to evict something */
+ err = i915_gem_evict_something(&ggtt->base,
+ I915_GTT_PAGE_SIZE, 0, 0,
+ 0, U64_MAX,
+ 0);
+ if (err) {
+ pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static int igt_overcommit(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ /* Fill the GGTT with pinned objects and then try to pin one more.
+ * We expect it to fail.
+ */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+
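+ /* Move the fresh object onto the unbound list, presumably so that
+ * cleanup_objects(), which only walks the bound and unbound lists,
+ * releases it.
+ */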
+ list_move(&obj->global_link, &i915->mm.unbound_list);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (!IS_ERR(vma) || PTR_ERR(vma) != -ENOSPC) {
+ pr_err("Failed to evict+insert, i915_gem_object_ggtt_pin returned err=%d\n", (int)PTR_ERR(vma));
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static int igt_evict_for_vma(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_mm_node target = {
+ .start = 0,
+ .size = 4096,
+ };
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict a range. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err != -ENOSPC) {
+ pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
+ /* Everything is unpinned, we should be able to evict the node */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err) {
+ pr_err("i915_gem_evict_for_node returned err=%d\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+static void mock_color_adjust(const struct drm_mm_node *node,
+ unsigned long color,
+ u64 *start,
+ u64 *end)
+{
+}
+
+static int igt_evict_for_cache_color(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ const unsigned long flags = PIN_OFFSET_FIXED;
+ struct drm_mm_node target = {
+ .start = I915_GTT_PAGE_SIZE * 2,
+ .size = I915_GTT_PAGE_SIZE,
+ .color = I915_CACHE_LLC,
+ };
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ /* Currently the use of color_adjust is limited to cache domains within
+ * the ggtt, and the driver assumes that any mm.color_adjust callback
+ * is i915_gtt_color_adjust, so a no-op mock colour adjust works just
+ * fine for our purposes.
+ */
+ ggtt->base.mm.color_adjust = mock_color_adjust;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ I915_GTT_PAGE_SIZE | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[0]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto cleanup;
+ }
+ i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+
+ /* Neighbouring; same colour - should fit */
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ (I915_GTT_PAGE_SIZE * 2) | flags);
+ if (IS_ERR(vma)) {
+ pr_err("[1]i915_gem_object_ggtt_pin failed\n");
+ err = PTR_ERR(vma);
+ goto cleanup;
+ }
+
+ i915_vma_unpin(vma);
+
+ /* Remove just the second vma */
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (err) {
+ pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
+ goto cleanup;
+ }
+
+ /* Attempt to remove the first *pinned* vma, by removing the (empty)
+ * neighbour -- this should fail.
+ */
+ target.color = I915_CACHE_L3_LLC;
+
+ err = i915_gem_evict_for_node(&ggtt->base, &target, 0);
+ if (!err) {
+ pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
+ err = -EINVAL;
+ goto cleanup;
+ }
+
+ err = 0;
+
+cleanup:
+ unpin_ggtt(i915);
+ cleanup_objects(i915);
+ ggtt->base.mm.color_adjust = NULL;
+ return err;
+}
+
+static int igt_evict_vm(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ int err;
+
+ /* Fill the GGTT with pinned objects and try to evict everything. */
+
+ err = populate_ggtt(i915);
+ if (err)
+ goto cleanup;
+
+ /* Everything is pinned, nothing should happen */
+ err = i915_gem_evict_vm(&ggtt->base, false);
+ if (err) {
+ pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
+ err);
+ goto cleanup;
+ }
+
+ unpin_ggtt(i915);
+
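+ /* Everything is unpinned, so evicting the vm should now succeed */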
+ err = i915_gem_evict_vm(&ggtt->base, false);
+ if (err) {
+ pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
+ err);
+ goto cleanup;
+ }
+
+cleanup:
+ cleanup_objects(i915);
+ return err;
+}
+
+int i915_gem_evict_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_evict_something),
+ SUBTEST(igt_evict_for_vma),
+ SUBTEST(igt_evict_for_cache_color),
+ SUBTEST(igt_evict_vm),
+ SUBTEST(igt_overcommit),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
new file mode 100644
index 000000000000..50710e3f1caa
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -0,0 +1,1562 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/list_sort.h>
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_context.h"
+#include "mock_drm.h"
+#include "mock_gem_device.h"
+
+static void fake_free_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ sg_free_table(pages);
+ kfree(pages);
+}
+
+static struct sg_table *
+fake_get_pages(struct drm_i915_gem_object *obj)
+{
+#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
+#define PFN_BIAS 0x1000
+ struct sg_table *pages;
+ struct scatterlist *sg;
+ typeof(obj->base.size) rem;
+
+ pages = kmalloc(sizeof(*pages), GFP);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
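+ /* Each scatterlist entry can describe at most 2GiB (BIT(31)), so we
+ * need size / 2GiB entries, rounded up -- e.g. a 5GiB object needs
+ * 3 entries. Every entry maps the same fake page (PFN_BIAS); the
+ * backing store is never actually touched.
+ */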
+ rem = round_up(obj->base.size, BIT(31)) >> 31;
+ if (sg_alloc_table(pages, rem, GFP)) {
+ kfree(pages);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ rem = obj->base.size;
+ for (sg = pages->sgl; sg; sg = sg_next(sg)) {
+ unsigned long len = min_t(typeof(rem), rem, BIT(31));
+
+ GEM_BUG_ON(!len);
+ sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
+ sg_dma_address(sg) = page_to_phys(sg_page(sg));
+ sg_dma_len(sg) = len;
+
+ rem -= len;
+ }
+ GEM_BUG_ON(rem);
+
+ obj->mm.madv = I915_MADV_DONTNEED;
+ return pages;
+#undef GFP
+}
+
+static void fake_put_pages(struct drm_i915_gem_object *obj,
+ struct sg_table *pages)
+{
+ fake_free_pages(obj, pages);
+ obj->mm.dirty = false;
+ obj->mm.madv = I915_MADV_WILLNEED;
+}
+
+static const struct drm_i915_gem_object_ops fake_ops = {
+ .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
+ .get_pages = fake_get_pages,
+ .put_pages = fake_put_pages,
+};
+
+static struct drm_i915_gem_object *
+fake_dma_object(struct drm_i915_private *i915, u64 size)
+{
+ struct drm_i915_gem_object *obj;
+
+ GEM_BUG_ON(!size);
+ GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
+
+ if (overflows_type(size, obj->base.size))
+ return ERR_PTR(-E2BIG);
+
+ obj = i915_gem_object_alloc(i915);
+ if (!obj)
+ goto err;
+
+ drm_gem_private_object_init(&i915->drm, &obj->base, size);
+ i915_gem_object_init(obj, &fake_ops);
+
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->cache_level = I915_CACHE_NONE;
+
+ /* Preallocate the "backing storage" */
+ if (i915_gem_object_pin_pages(obj))
+ goto err_obj;
+
+ i915_gem_object_unpin_pages(obj);
+ return obj;
+
+err_obj:
+ i915_gem_object_put(obj);
+err:
+ return ERR_PTR(-ENOMEM);
+}
+
+static int igt_ppgtt_alloc(void *arg)
+{
+ struct drm_i915_private *dev_priv = arg;
+ struct i915_hw_ppgtt *ppgtt;
+ u64 size, last;
+ int err;
+
+ /* Allocate a ppgtt and try to fill the entire range */
+
+ if (!USES_PPGTT(dev_priv))
+ return 0;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return -ENOMEM;
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ err = __hw_ppgtt_init(ppgtt, dev_priv);
+ if (err)
+ goto err_ppgtt;
+
+ if (!ppgtt->base.allocate_va_range)
+ goto err_ppgtt_cleanup;
+
+ /* Check we can allocate the entire range */
+ for (size = 4096;
+ size <= ppgtt->base.total;
+ size <<= 2) {
+ err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
+ size, ilog2(size));
+ err = 0; /* virtual space too large! */
+ }
+ goto err_ppgtt_cleanup;
+ }
+
+ ppgtt->base.clear_range(&ppgtt->base, 0, size);
+ }
+
+ /* Check we can incrementally allocate the entire range */
+ for (last = 0, size = 4096;
+ size <= ppgtt->base.total;
+ last = size, size <<= 2) {
+ err = ppgtt->base.allocate_va_range(&ppgtt->base,
+ last, size - last);
+ if (err) {
+ if (err == -ENOMEM) {
+ pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
+ last, size - last, ilog2(size));
+ err = 0; /* virtual space too large! */
+ }
+ goto err_ppgtt_cleanup;
+ }
+ }
+
+err_ppgtt_cleanup:
+ ppgtt->base.cleanup(&ppgtt->base);
+err_ppgtt:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+ kfree(ppgtt);
+ return err;
+}
+
+static int lowlevel_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ I915_RND_STATE(seed_prng);
+ unsigned int size;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ I915_RND_SUBSTATE(prng, seed_prng);
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ u64 hole_size;
+
+ hole_size = (hole_end - hole_start) >> size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size;
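+ /* Halve the object count until we can allocate a random
+ * permutation of that many slots.
+ */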
+ do {
+ count >>= 1;
+ order = i915_random_order(count, &prng);
+ } while (!order && count);
+ if (!order)
+ break;
+
+ GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
+ GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ GEM_BUG_ON(obj->base.size != BIT_ULL(size));
+
+ if (i915_gem_object_pin_pages(obj)) {
+ i915_gem_object_put(obj);
+ kfree(order);
+ break;
+ }
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+
+ if (igt_timeout(end_time,
+ "%s timed out before %d/%d\n",
+ __func__, n, count)) {
+ hole_end = hole_start; /* quit */
+ break;
+ }
+
+ if (vm->allocate_va_range &&
+ vm->allocate_va_range(vm, addr, BIT_ULL(size)))
+ break;
+
+ vm->insert_entries(vm, obj->mm.pages, addr,
+ I915_CACHE_NONE, 0);
+ }
+ count = n;
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
+ vm->clear_range(vm, addr, BIT_ULL(size));
+ }
+
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+
+ kfree(order);
+ }
+
+ return 0;
+}
+
+static void close_object_list(struct list_head *objects,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_gem_object *obj, *on;
+ int ignored;
+
+ list_for_each_entry_safe(obj, on, objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (!IS_ERR(vma))
+ ignored = i915_vma_unbind(vma);
+ /* Only ppgtt vma may be closed before the object is freed */
+ if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+
+ list_del(&obj->st_link);
+ i915_gem_object_put(obj);
+ }
+}
+
+static int fill_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ struct drm_i915_gem_object *obj;
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
+ const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
+ unsigned long npages, prime, flags;
+ struct i915_vma *vma;
+ LIST_HEAD(objects);
+ int err;
+
+ /* Try binding many VMA working inwards from either edge */
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_prime_number_from(prime, 2, max_step) {
+ for (npages = 1; npages <= max_pages; npages *= prime) {
+ const u64 full_size = npages << PAGE_SHIFT;
+ const struct {
+ const char *name;
+ u64 offset;
+ int step;
+ } phases[] = {
+ { "top-down", hole_end, -1, },
+ { "bottom-up", hole_start, 1, },
+ { }
+ }, *p;
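+ /* Each phase packs the objects against one edge of the hole:
+ * top-down steps offsets downwards from hole_end, bottom-up
+ * steps upwards from hole_start.
+ */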
+
+ obj = fake_dma_object(i915, full_size);
+ if (IS_ERR(obj))
+ break;
+
+ list_add(&obj->st_link, &objects);
+
+ /* Align differing sized objects against the edges, and
+ * check we don't walk off into the void when binding
+ * them into the GTT.
+ */
+ for (p = phases; p->name; p++) {
+ u64 offset;
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, offset | flags);
+ if (err) {
+ pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
+ __func__, p->name, err, npages, prime, offset);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+
+ offset = p->offset;
+ list_for_each_entry_reverse(obj, &objects, st_link) {
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ continue;
+
+ if (p->step < 0) {
+ if (offset < hole_start + obj->base.size)
+ break;
+ offset -= obj->base.size;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, offset | flags)) {
+ pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
+ __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
+ offset);
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
+ __func__, p->name, vma->node.start, vma->node.size,
+ err);
+ goto err;
+ }
+
+ if (p->step > 0) {
+ if (offset + obj->base.size > hole_end)
+ break;
+ offset += obj->base.size;
+ }
+ }
+ }
+
+ if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
+ __func__, npages, prime)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ }
+
+ return 0;
+
+err:
+ close_object_list(&objects, vm);
+ return err;
+}
+
+static int walk_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ const u64 hole_size = hole_end - hole_start;
+ const unsigned long max_pages =
+ min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
+ unsigned long flags;
+ u64 size;
+
+ /* Try binding a single VMA in different positions within the hole */
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ for_each_prime_number_from(size, 1, max_pages) {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u64 addr;
+ int err = 0;
+
+ obj = fake_dma_object(i915, size << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ break;
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put;
+ }
+
+ for (addr = hole_start;
+ addr + obj->base.size < hole_end;
+ addr += obj->base.size) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s bind failed at %llx + %llx [hole %llx- %llx] with err=%d\n",
+ __func__, addr, vma->size,
+ hole_start, hole_end, err);
+ goto err_close;
+ }
+ i915_vma_unpin(vma);
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("%s unbind failed at %llx + %llx with err=%d\n",
+ __func__, addr, vma->size, err);
+ goto err_close;
+ }
+
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
+
+ if (igt_timeout(end_time,
+ "%s timed out at %llx\n",
+ __func__, addr)) {
+ err = -EINTR;
+ goto err_close;
+ }
+ }
+
+err_close:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_put:
+ i915_gem_object_put(obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int pot_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ unsigned long flags;
+ unsigned int pot;
+ int err = 0;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ /* Insert a pair of pages across every pot boundary within the hole */
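+ /* e.g. for pot == 20, the object is placed one page below each 1MiB
+ * boundary so that its two pages straddle the boundary.
+ */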
+ for (pot = fls64(hole_end - 1) - 1;
+ pot > ilog2(2 * I915_GTT_PAGE_SIZE);
+ pot--) {
+ u64 step = BIT_ULL(pot);
+ u64 addr;
+
+ for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
+ addr += step) {
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr,
+ hole_start, hole_end,
+ err);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, vma->size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ GEM_BUG_ON(err);
+ }
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, pot, fls64(hole_end - 1) - 1)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+err:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static int drunk_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ I915_RND_STATE(prng);
+ unsigned int size;
+ unsigned long flags;
+
+ flags = PIN_OFFSET_FIXED | PIN_USER;
+ if (i915_is_ggtt(vm))
+ flags |= PIN_GLOBAL;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (size = 12; (hole_end - hole_start) >> size; size++) {
+ struct drm_i915_gem_object *obj;
+ unsigned int *order, count, n;
+ struct i915_vma *vma;
+ u64 hole_size;
+ int err;
+
+ hole_size = (hole_end - hole_start) >> size;
+ if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
+ hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
+ count = hole_size;
+ do {
+ count >>= 1;
+ order = i915_random_order(count, &prng);
+ } while (!order && count);
+ if (!order)
+ break;
+
+ /* Ignore allocation failures (i.e. don't report them as
+ * a test failure) as we are purposefully allocating very
+ * large objects without checking that we have sufficient
+ * memory. We expect to hit -ENOMEM.
+ */
+
+ obj = fake_dma_object(i915, BIT_ULL(size));
+ if (IS_ERR(obj)) {
+ kfree(order);
+ break;
+ }
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ GEM_BUG_ON(vma->size != BIT_ULL(size));
+
+ for (n = 0; n < count; n++) {
+ u64 addr = hole_start + order[n] * BIT_ULL(size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__,
+ addr, BIT_ULL(size),
+ hole_start, hole_end,
+ err);
+ goto err;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, BIT_ULL(size));
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ goto err;
+ }
+
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ GEM_BUG_ON(err);
+
+ if (igt_timeout(end_time,
+ "%s timed out after %d/%d\n",
+ __func__, n, count)) {
+ err = -EINTR;
+ goto err;
+ }
+ }
+
+err:
+ if (!i915_vma_is_ggtt(vma))
+ i915_vma_close(vma);
+err_obj:
+ i915_gem_object_put(obj);
+ kfree(order);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __shrink_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ struct drm_i915_gem_object *obj;
+ unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
+ unsigned int order = 12;
+ LIST_HEAD(objects);
+ int err = 0;
+ u64 addr;
+
+ /* Keep creating larger objects until one cannot fit into the hole */
+ for (addr = hole_start; addr < hole_end; ) {
+ struct i915_vma *vma;
+ u64 size = BIT_ULL(order++);
+
+ size = min(size, hole_end - addr);
+ obj = fake_dma_object(i915, size);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ break;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ break;
+ }
+
+ GEM_BUG_ON(vma->size != size);
+
+ err = i915_vma_pin(vma, 0, 0, addr | flags);
+ if (err) {
+ pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
+ __func__, addr, size, hole_start, hole_end, err);
+ break;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node) ||
+ i915_vma_misplaced(vma, 0, 0, addr | flags)) {
+ pr_err("%s incorrect at %llx + %llx\n",
+ __func__, addr, size);
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ err = -EINVAL;
+ break;
+ }
+
+ i915_vma_unpin(vma);
+ addr += size;
+
+ if (igt_timeout(end_time,
+ "%s timed out at ofset %llx [%llx - %llx]\n",
+ __func__, addr, hole_start, hole_end)) {
+ err = -EINTR;
+ break;
+ }
+ }
+
+ close_object_list(&objects, vm);
+ return err;
+}
+
+static int shrink_hole(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time)
+{
+ unsigned long prime;
+ int err;
+
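+ /* Inject allocation failures into the address space: with the fault
+ * probability maxed out, every interval-th allocation fails, which
+ * exercises the error unwind paths of __shrink_hole().
+ */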
+ vm->fault_attr.probability = 999;
+ atomic_set(&vm->fault_attr.times, -1);
+
+ for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
+ vm->fault_attr.interval = prime;
+ err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
+ if (err)
+ break;
+ }
+
+ memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
+
+ return err;
+}
+
+static int exercise_ppgtt(struct drm_i915_private *dev_priv,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct drm_file *file;
+ struct i915_hw_ppgtt *ppgtt;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ if (!USES_FULL_PPGTT(dev_priv))
+ return 0;
+
+ file = mock_file(dev_priv);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ mutex_lock(&dev_priv->drm.struct_mutex);
+ ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv, "mock");
+ if (IS_ERR(ppgtt)) {
+ err = PTR_ERR(ppgtt);
+ goto out_unlock;
+ }
+ GEM_BUG_ON(offset_in_page(ppgtt->base.total));
+ GEM_BUG_ON(ppgtt->base.closed);
+
+ err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time);
+
+ i915_ppgtt_close(&ppgtt->base);
+ i915_ppgtt_put(ppgtt);
+out_unlock:
+ mutex_unlock(&dev_priv->drm.struct_mutex);
+
+ mock_file_free(dev_priv, file);
+ return err;
+}
+
+static int igt_ppgtt_fill(void *arg)
+{
+ return exercise_ppgtt(arg, fill_hole);
+}
+
+static int igt_ppgtt_walk(void *arg)
+{
+ return exercise_ppgtt(arg, walk_hole);
+}
+
+static int igt_ppgtt_pot(void *arg)
+{
+ return exercise_ppgtt(arg, pot_hole);
+}
+
+static int igt_ppgtt_drunk(void *arg)
+{
+ return exercise_ppgtt(arg, drunk_hole);
+}
+
+static int igt_ppgtt_lowlevel(void *arg)
+{
+ return exercise_ppgtt(arg, lowlevel_hole);
+}
+
+static int igt_ppgtt_shrink(void *arg)
+{
+ return exercise_ppgtt(arg, shrink_hole);
+}
+
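+/* Order the hole list by ascending start address so that exercise_ggtt()
+ * walks the holes from low to high and can use 'last' to skip those it
+ * has already visited after a restart.
+ */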
+static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
+{
+ struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
+ struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
+
+ if (a->start < b->start)
+ return -1;
+ else
+ return 1;
+}
+
+static int exercise_ggtt(struct drm_i915_private *i915,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ u64 hole_start, hole_end, last = 0;
+ struct drm_mm_node *node;
+ IGT_TIMEOUT(end_time);
+ int err = 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+restart:
+ list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes);
+ drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) {
+ if (hole_start < last)
+ continue;
+
+ if (ggtt->base.mm.color_adjust)
+ ggtt->base.mm.color_adjust(node, 0,
+ &hole_start, &hole_end);
+ if (hole_start >= hole_end)
+ continue;
+
+ err = func(i915, &ggtt->base, hole_start, hole_end, end_time);
+ if (err)
+ break;
+
+ /* As we have manipulated the drm_mm, the hole list may be stale; restart the walk from the top */
+ last = hole_end;
+ goto restart;
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ return err;
+}
+
+static int igt_ggtt_fill(void *arg)
+{
+ return exercise_ggtt(arg, fill_hole);
+}
+
+static int igt_ggtt_walk(void *arg)
+{
+ return exercise_ggtt(arg, walk_hole);
+}
+
+static int igt_ggtt_pot(void *arg)
+{
+ return exercise_ggtt(arg, pot_hole);
+}
+
+static int igt_ggtt_drunk(void *arg)
+{
+ return exercise_ggtt(arg, drunk_hole);
+}
+
+static int igt_ggtt_lowlevel(void *arg)
+{
+ return exercise_ggtt(arg, lowlevel_hole);
+}
+
+static int igt_ggtt_page(void *arg)
+{
+ const unsigned int count = PAGE_SIZE/sizeof(u32);
+ I915_RND_STATE(prng);
+ struct drm_i915_private *i915 = arg;
+ struct i915_ggtt *ggtt = &i915->ggtt;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node tmp;
+ unsigned int *order, n;
+ int err;
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out_unlock;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err)
+ goto out_free;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp,
+ 1024 * PAGE_SIZE, 0,
+ I915_COLOR_UNEVICTABLE,
+ 0, ggtt->mappable_end,
+ DRM_MM_INSERT_LOW);
+ if (err)
+ goto out_unpin;
+
+ order = i915_random_order(count, &prng);
+ if (!order) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
+
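+ /* Insert the object's single page at random offsets within the
+ * reserved node, writing the iteration index into a distinct dword
+ * of that page each time, ready for the readback pass below.
+ */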
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ iowrite32(n, vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+
+ wmb();
+ ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+ }
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+ u32 val;
+
+ ggtt->base.insert_page(&ggtt->base,
+ i915_gem_object_get_dma_address(obj, 0),
+ offset, I915_CACHE_NONE, 0);
+
+ vaddr = io_mapping_map_atomic_wc(&ggtt->mappable, offset);
+ val = ioread32(vaddr + n);
+ io_mapping_unmap_atomic(vaddr);
+
+ ggtt->base.clear_range(&ggtt->base, offset, PAGE_SIZE);
+
+ if (val != n) {
+ pr_err("insert page failed: found %d, expected %d\n",
+ val, n);
+ err = -EINVAL;
+ break;
+ }
+ }
+
+ kfree(order);
+out_remove:
+ drm_mm_remove_node(&tmp);
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out_free:
+ i915_gem_object_put(obj);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
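+/* Fake the side effects of binding: bump the object's bind count, pin its
+ * pages and move the vma onto the vm's inactive list, so that the mock vma
+ * appears bound (and evictable) without touching any hardware.
+ */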
+static void track_vma_bind(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+
+ obj->bind_count++; /* track for eviction later */
+ __i915_gem_object_pin_pages(obj);
+
+ vma->pages = obj->mm.pages;
+ list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+}
+
+static int exercise_mock(struct drm_i915_private *i915,
+ int (*func)(struct drm_i915_private *i915,
+ struct i915_address_space *vm,
+ u64 hole_start, u64 hole_end,
+ unsigned long end_time))
+{
+ struct i915_gem_context *ctx;
+ struct i915_hw_ppgtt *ppgtt;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ctx = mock_context(i915, "mock");
+ if (!ctx)
+ return -ENOMEM;
+
+ ppgtt = ctx->ppgtt;
+ GEM_BUG_ON(!ppgtt);
+
+ err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time);
+
+ mock_context_close(ctx);
+ return err;
+}
+
+static int igt_mock_fill(void *arg)
+{
+ return exercise_mock(arg, fill_hole);
+}
+
+static int igt_mock_walk(void *arg)
+{
+ return exercise_mock(arg, walk_hole);
+}
+
+static int igt_mock_pot(void *arg)
+{
+ return exercise_mock(arg, pot_hole);
+}
+
+static int igt_mock_drunk(void *arg)
+{
+ return exercise_mock(arg, drunk_hole);
+}
+
+static int igt_gtt_reserve(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ LIST_HEAD(objects);
+ u64 total;
+ int err;
+
+ /* i915_gem_gtt_reserve() tries to reserve the precise range
+ * for the node, and evicts if it has to. So our test checks that
+ * it can give us the requested space and prevent overlaps.
+ */
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ total,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* Now we start forcing evictions */
+ for (total = I915_GTT_PAGE_SIZE;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ total,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != total ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ total, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then try at random */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ offset = random_offset(0, i915->ggtt.base.total,
+ 2*I915_GTT_PAGE_SIZE,
+ I915_GTT_MIN_ALIGNMENT);
+
+ err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node,
+ obj->base.size,
+ offset,
+ obj->cache_level,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset ||
+ vma->node.size != 2*I915_GTT_PAGE_SIZE) {
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ vma->node.start, vma->node.size,
+ offset, 2*I915_GTT_PAGE_SIZE);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+static int igt_gtt_insert(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ struct drm_mm_node tmp = {};
+ const struct invalid_insert {
+ u64 size;
+ u64 alignment;
+ u64 start, end;
+ } invalid_insert[] = {
+ {
+ i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0,
+ 0, i915->ggtt.base.total,
+ },
+ {
+ 2*I915_GTT_PAGE_SIZE, 0,
+ 0, I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)I915_GTT_PAGE_SIZE, 0,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
+ 0, 4*I915_GTT_PAGE_SIZE,
+ },
+ {
+ I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
+ I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
+ },
+ {}
+ }, *ii;
+ LIST_HEAD(objects);
+ u64 total;
+ int err;
+
+ /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
+ * for the node, evicting if required.
+ */
+
+ /* Check a couple of obviously invalid requests */
+ for (ii = invalid_insert; ii->size; ii++) {
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp,
+ ii->size, ii->alignment,
+ I915_COLOR_UNEVICTABLE,
+ ii->start, ii->end,
+ 0);
+ if (err != -ENOSPC) {
+ pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
+ ii->size, ii->alignment, ii->start, ii->end,
+ err);
+ return -EINVAL;
+ }
+ }
+
+ /* Start by filling the GGTT */
+ for (total = 0;
+ total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err == -ENOSPC) {
+ /* maxed out the GGTT space */
+ i915_gem_object_put(obj);
+ break;
+ }
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+ __i915_vma_pin(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+ list_for_each_entry(obj, &objects, st_link) {
+ struct i915_vma *vma;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ if (!drm_mm_node_allocated(&vma->node)) {
+ pr_err("VMA was unexpectedly evicted!\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ __i915_vma_unpin(vma);
+ }
+
+ /* If we then reinsert, we should find the same hole */
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ struct i915_vma *vma;
+ u64 offset;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ offset = vma->node.start;
+
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("i915_vma_unbind failed with err=%d!\n", err);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ if (vma->node.start != offset) {
+ pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
+ offset, vma->node.start);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+ /* And then force evictions */
+ for (total = 0;
+ total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total;
+ total += 2*I915_GTT_PAGE_SIZE) {
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ i915_gem_object_put(obj);
+ goto out;
+ }
+
+ list_add(&obj->st_link, &objects);
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out;
+ }
+
+ err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node,
+ obj->base.size, 0, obj->cache_level,
+ 0, i915->ggtt.base.total,
+ 0);
+ if (err) {
+ pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
+ total, i915->ggtt.base.total, err);
+ goto out;
+ }
+ track_vma_bind(vma);
+
+ GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+ }
+
+out:
+ list_for_each_entry_safe(obj, on, &objects, st_link) {
+ i915_gem_object_unpin_pages(obj);
+ i915_gem_object_put(obj);
+ }
+ return err;
+}
+
+int i915_gem_gtt_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_mock_drunk),
+ SUBTEST(igt_mock_walk),
+ SUBTEST(igt_mock_pot),
+ SUBTEST(igt_mock_fill),
+ SUBTEST(igt_gtt_reserve),
+ SUBTEST(igt_gtt_insert),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ppgtt_alloc),
+ SUBTEST(igt_ppgtt_lowlevel),
+ SUBTEST(igt_ppgtt_drunk),
+ SUBTEST(igt_ppgtt_walk),
+ SUBTEST(igt_ppgtt_pot),
+ SUBTEST(igt_ppgtt_fill),
+ SUBTEST(igt_ppgtt_shrink),
+ SUBTEST(igt_ggtt_lowlevel),
+ SUBTEST(igt_ggtt_drunk),
+ SUBTEST(igt_ggtt_walk),
+ SUBTEST(igt_ggtt_pot),
+ SUBTEST(igt_ggtt_fill),
+ SUBTEST(igt_ggtt_page),
+ };
+
+ GEM_BUG_ON(offset_in_page(i915->ggtt.base.total));
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_object.c b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
new file mode 100644
index 000000000000..67d82bf1407f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_object.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "huge_gem_object.h"
+
+static int igt_gem_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err = -ENOMEM;
+
+ /* Basic test to ensure we can create an object */
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ err = 0;
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_phys_object(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ /* Create an object and bind it to a contiguous set of physical pages,
+ * i.e. exercise the i915_gem_object_phys API.
+ */
+
+ obj = i915_gem_object_create(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ pr_err("i915_gem_object_create failed, err=%d\n", err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_attach_phys failed, err=%d\n", err);
+ goto out_obj;
+ }
+
+ if (obj->ops != &i915_gem_phys_ops) {
+ pr_err("i915_gem_object_attach_phys did not create a phys object\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ if (!atomic_read(&obj->mm.pages_pin_count)) {
+ pr_err("i915_gem_object_attach_phys did not pin its phys pages\n");
+ err = -EINVAL;
+ goto out_obj;
+ }
+
+ /* Make the object dirty so that put_pages must copy the data back */
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("i915_gem_object_set_to_gtt_domain failed with err=%d\n",
+ err);
+ goto out_obj;
+ }
+
+out_obj:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static int igt_gem_huge(void *arg)
+{
+ const unsigned int nreal = 509; /* just to be awkward */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ unsigned int n;
+ int err;
+
+ /* Basic sanity check of our huge fake object allocation */
+
+ obj = huge_gem_object(i915,
+ nreal * PAGE_SIZE,
+ i915->ggtt.base.total + PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ for (n = 0; n < obj->base.size / PAGE_SIZE; n++) {
+ if (i915_gem_object_get_page(obj, n) !=
+ i915_gem_object_get_page(obj, n % nreal)) {
+ pr_err("Page lookup mismatch at index %u [%u]\n",
+ n, n % nreal);
+ err = -EINVAL;
+ goto out_unpin;
+ }
+ }
+
+out_unpin:
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+struct tile {
+ unsigned int width;
+ unsigned int height;
+ unsigned int stride;
+ unsigned int size;
+ unsigned int tiling;
+ unsigned int swizzle;
+};
+
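+/* Fold the selected address bit down onto bit 6, mirroring the hardware
+ * bit-6 swizzle applied to tiled surfaces.
+ */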
+static u64 swizzle_bit(unsigned int bit, u64 offset)
+{
+ return (offset & BIT_ULL(bit)) >> (bit - 6);
+}
+
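+/* Translate a linear byte offset into the offset at which a write through
+ * a tiled mapping lands, including the platform swizzle, so that the CPU
+ * can locate a value written through the GTT.
+ */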
+static u64 tiled_offset(const struct tile *tile, u64 v)
+{
+ u64 x, y;
+
+ if (tile->tiling == I915_TILING_NONE)
+ return v;
+
+ y = div64_u64_rem(v, tile->stride, &x);
+ v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;
+
+ if (tile->tiling == I915_TILING_X) {
+ v += y * tile->width;
+ v += div64_u64_rem(x, tile->width, &x) << tile->size;
+ v += x;
+ } else {
+ const unsigned int ytile_span = 16;
+ const unsigned int ytile_height = 32 * ytile_span;
+
+ v += y * ytile_span;
+ v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
+ v += x;
+ }
+
+ switch (tile->swizzle) {
+ case I915_BIT_6_SWIZZLE_9:
+ v ^= swizzle_bit(9, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
+ break;
+ case I915_BIT_6_SWIZZLE_9_10_11:
+ v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
+ break;
+ }
+
+ return v;
+}
+
+static int check_partial_mapping(struct drm_i915_gem_object *obj,
+ const struct tile *tile,
+ unsigned long end_time)
+{
+ const unsigned int nreal = obj->scratch / PAGE_SIZE;
+ const unsigned long npages = obj->base.size / PAGE_SIZE;
+ struct i915_vma *vma;
+ unsigned long page;
+ int err;
+
+ if (igt_timeout(end_time,
+ "%s: timed out before tiling=%d stride=%d\n",
+ __func__, tile->tiling, tile->stride))
+ return -EINTR;
+
+ err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
+ if (err)
+ return err;
+
+ GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
+ GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);
+
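+ /* For a prime-strided selection of pages, write the page index through
+ * a fenced partial GTT mapping and read it back through the CPU,
+ * using tiled_offset() to predict where the write should land.
+ */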
+ for_each_prime_number_from(page, 1, npages) {
+ struct i915_ggtt_view view =
+ compute_partial_view(obj, page, MIN_CHUNK_PAGES);
+ u32 __iomem *io;
+ struct page *p;
+ unsigned int n;
+ u64 offset;
+ u32 *cpu;
+
+ GEM_BUG_ON(view.partial.size > nreal);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ return err;
+
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ pr_err("Failed to pin partial view: offset=%lu\n",
+ page);
+ return PTR_ERR(vma);
+ }
+
+ n = page - view.partial.offset;
+ GEM_BUG_ON(n >= view.partial.size);
+
+ io = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+ if (IS_ERR(io)) {
+ pr_err("Failed to iomap partial view: offset=%lu\n",
+ page);
+ return PTR_ERR(io);
+ }
+
+ err = i915_vma_get_fence(vma);
+ if (err) {
+ pr_err("Failed to get fence for partial view: offset=%lu\n",
+ page);
+ i915_vma_unpin_iomap(vma);
+ return err;
+ }
+
+ iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
+ i915_vma_unpin_iomap(vma);
+
+ offset = tiled_offset(tile, page << PAGE_SHIFT);
+ if (offset >= obj->base.size)
+ continue;
+
+ i915_gem_object_flush_gtt_write_domain(obj);
+
+ p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
+ cpu = kmap(p) + offset_in_page(offset);
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ if (*cpu != (u32)page) {
+ pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
+ page, n,
+ view.partial.offset,
+ view.partial.size,
+ vma->size >> PAGE_SHIFT,
+ tile_row_pages(obj),
+ vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
+ offset >> PAGE_SHIFT,
+ (unsigned int)offset_in_page(offset),
+ offset,
+ (u32)page, *cpu);
+ err = -EINVAL;
+ }
+ *cpu = 0;
+ drm_clflush_virt_range(cpu, sizeof(*cpu));
+ kunmap(p);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int igt_partial_tiling(void *arg)
+{
+ const unsigned int nreal = 1 << 12; /* largest tile row x2 */
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj;
+ int tiling;
+ int err;
+
+ /* We want to check the page mapping and fencing of a large object
+ * mmapped through the GTT. The object we create is larger than can
+ * possibly be mmapped as a whole, and so we must use partial GGTT vmas.
+ * We then check that a write through each partial GGTT vma ends up
+ * in the right set of pages within the object, and with the expected
+ * tiling, which we verify by manual swizzling.
+ */
+
+ obj = huge_gem_object(i915,
+ nreal << PAGE_SHIFT,
+ (1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ err = i915_gem_object_pin_pages(obj);
+ if (err) {
+ pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
+ nreal, obj->base.size / PAGE_SIZE, err);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ if (1) {
+ IGT_TIMEOUT(end);
+ struct tile tile;
+
+ tile.height = 1;
+ tile.width = 1;
+ tile.size = 0;
+ tile.stride = 0;
+ tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
+ tile.tiling = I915_TILING_NONE;
+
+ err = check_partial_mapping(obj, &tile, end);
+ if (err && err != -EINTR)
+ goto out_unlock;
+ }
+
+ for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
+ IGT_TIMEOUT(end);
+ unsigned int max_pitch;
+ unsigned int pitch;
+ struct tile tile;
+
+ tile.tiling = tiling;
+ switch (tiling) {
+ case I915_TILING_X:
+ tile.swizzle = i915->mm.bit_6_swizzle_x;
+ break;
+ case I915_TILING_Y:
+ tile.swizzle = i915->mm.bit_6_swizzle_y;
+ break;
+ }
+
+ if (tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN ||
+ tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
+ continue;
+
+ if (INTEL_GEN(i915) <= 2) {
+ tile.height = 16;
+ tile.width = 128;
+ tile.size = 11;
+ } else if (tile.tiling == I915_TILING_Y &&
+ HAS_128_BYTE_Y_TILING(i915)) {
+ tile.height = 32;
+ tile.width = 128;
+ tile.size = 12;
+ } else {
+ tile.height = 8;
+ tile.width = 512;
+ tile.size = 12;
+ }
+
+ if (INTEL_GEN(i915) < 4)
+ max_pitch = 8192 / tile.width;
+ else if (INTEL_GEN(i915) < 7)
+ max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width;
+ else
+ max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width;
+
+ for (pitch = max_pitch; pitch; pitch >>= 1) {
+ tile.stride = tile.width * pitch;
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+
+ if (pitch > 2 && INTEL_GEN(i915) >= 4) {
+ tile.stride = tile.width * (pitch - 1);
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+
+ if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
+ tile.stride = tile.width * (pitch + 1);
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+ }
+
+ if (INTEL_GEN(i915) >= 4) {
+ for_each_prime_number(pitch, max_pitch) {
+ tile.stride = tile.width * pitch;
+ err = check_partial_mapping(obj, &tile, end);
+ if (err == -EINTR)
+ goto next_tiling;
+ if (err)
+ goto out_unlock;
+ }
+ }
+
+next_tiling: ;
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ i915_gem_object_unpin_pages(obj);
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
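+/* Submit a request against the object so that it is still active when we
+ * drop our reference; it must then be reaped after retirement instead of
+ * being freed immediately.
+ */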
+static int make_obj_busy(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_gem_request *rq;
+ struct i915_vma *vma;
+ int err;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ rq = i915_gem_request_alloc(i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ i915_vma_unpin(vma);
+ return PTR_ERR(rq);
+ }
+
+ i915_vma_move_to_active(vma, rq, 0);
+ i915_add_request(rq);
+
+ i915_gem_object_set_active_reference(obj);
+ i915_vma_unpin(vma);
+ return 0;
+}
+
+static bool assert_mmap_offset(struct drm_i915_private *i915,
+ unsigned long size,
+ int expected)
+{
+ struct drm_i915_gem_object *obj;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, size);
+ if (IS_ERR(obj))
+ return false; /* cannot create the object, so the check cannot run */
+
+ err = i915_gem_object_create_mmap_offset(obj);
+ i915_gem_object_put(obj);
+
+ return err == expected;
+}
+
+static int igt_mmap_offset_exhaustion(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
+ struct drm_i915_gem_object *obj;
+ struct drm_mm_node resv, *hole;
+ u64 hole_start, hole_end;
+ int loop, err;
+
+ /* Trim the device mmap space to only a page */
+ memset(&resv, 0, sizeof(resv));
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ resv.start = hole_start;
+ resv.size = hole_end - hole_start - 1; /* in PAGE_SIZE units, leaving a single page free */
+ err = drm_mm_reserve_node(mm, &resv);
+ if (err) {
+ pr_err("Failed to trim VMA manager, err=%d\n", err);
+ return err;
+ }
+ break;
+ }
+
+ /* Just fits! */
+ if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
+ pr_err("Unable to insert object into single page hole\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Too large */
+ if (!assert_mmap_offset(i915, 2*PAGE_SIZE, -ENOSPC)) {
+ pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* Fill the hole, further allocation attempts should then fail */
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ err = i915_gem_object_create_mmap_offset(obj);
+ if (err) {
+ pr_err("Unable to insert object into reclaimed hole\n");
+ goto err_obj;
+ }
+
+ if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
+		pr_err("Unexpectedly succeeded in inserting object with no hole remaining!\n");
+ err = -EINVAL;
+ goto err_obj;
+ }
+
+ i915_gem_object_put(obj);
+
+ /* Now fill with busy dead objects that we expect to reap */
+ for (loop = 0; loop < 3; loop++) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj)) {
+ err = PTR_ERR(obj);
+ goto out;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = make_obj_busy(obj);
+ mutex_unlock(&i915->drm.struct_mutex);
+ if (err) {
+ pr_err("[loop %d] Failed to busy the object\n", loop);
+ goto err_obj;
+ }
+
+ GEM_BUG_ON(!i915_gem_object_is_active(obj));
+ err = i915_gem_object_create_mmap_offset(obj);
+ if (err) {
+ pr_err("[loop %d] i915_gem_object_create_mmap_offset failed with err=%d\n",
+ loop, err);
+ goto out;
+ }
+ }
+
+out:
+ drm_mm_remove_node(&resv);
+ return err;
+err_obj:
+ i915_gem_object_put(obj);
+ goto out;
+}
+
+int i915_gem_object_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_object),
+ SUBTEST(igt_phys_object),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
+int i915_gem_object_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_gem_huge),
+ SUBTEST(igt_partial_tiling),
+ SUBTEST(igt_mmap_offset_exhaustion),
+ };
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
new file mode 100644
index 000000000000..926b24c117d6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -0,0 +1,882 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_context.h"
+#include "mock_gem_device.h"
+
+static int igt_add_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -ENOMEM;
+
+ /* Basic preliminary test to create a request and let it loose! */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS],
+ i915->kernel_context,
+ HZ / 10);
+ if (!request)
+ goto out_unlock;
+
+ i915_add_request(request);
+
+ err = 0;
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_wait_request(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, then wait upon it */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout before submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("request completed before submit!!\n");
+ goto out_unlock;
+ }
+
+ i915_add_request(request);
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, 0) != -ETIME) {
+ pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("request completed immediately!\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T / 2) != -ETIME) {
+ pr_err("request wait succeeded (expected timeout!)\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ pr_err("request wait timed out!\n");
+ goto out_unlock;
+ }
+
+ if (!i915_gem_request_completed(request)) {
+ pr_err("request not complete after waiting!\n");
+ goto out_unlock;
+ }
+
+ if (i915_wait_request(request, I915_WAIT_LOCKED, T) == -ETIME) {
+ pr_err("request wait timed out when already complete!\n");
+ goto out_unlock;
+ }
+
+ err = 0;
+out_unlock:
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_fence_wait(void *arg)
+{
+ const long T = HZ / 4;
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request;
+ int err = -EINVAL;
+
+ /* Submit a request, treat it as a fence and wait upon it */
+
+ mutex_lock(&i915->drm.struct_mutex);
+ request = mock_request(i915->engine[RCS], i915->kernel_context, T);
+ if (!request) {
+ err = -ENOMEM;
+ goto out_locked;
+ }
+ mutex_unlock(&i915->drm.struct_mutex); /* safe as we are single user */
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) {
+ pr_err("fence wait success before submit (expected timeout)!\n");
+ goto out_device;
+ }
+
+ mutex_lock(&i915->drm.struct_mutex);
+ i915_add_request(request);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence signaled immediately!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) {
+ pr_err("fence wait success after submit (expected timeout)!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out (expected success)!\n");
+ goto out_device;
+ }
+
+ if (!dma_fence_is_signaled(&request->fence)) {
+ pr_err("fence unsignaled after waiting!\n");
+ goto out_device;
+ }
+
+ if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) {
+ pr_err("fence wait timed out when complete (expected success)!\n");
+ goto out_device;
+ }
+
+ err = 0;
+out_device:
+ mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_request_rewind(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request, *vip;
+ struct i915_gem_context *ctx[2];
+ int err = -EINVAL;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ ctx[0] = mock_context(i915, "A");
+ request = mock_request(i915->engine[RCS], ctx[0], 2 * HZ);
+ if (!request) {
+ err = -ENOMEM;
+ goto err_context_0;
+ }
+
+ i915_gem_request_get(request);
+ i915_add_request(request);
+
+ ctx[1] = mock_context(i915, "B");
+ vip = mock_request(i915->engine[RCS], ctx[1], 0);
+ if (!vip) {
+ err = -ENOMEM;
+ goto err_context_1;
+ }
+
+ /* Simulate preemption by manual reordering */
+ if (!mock_cancel_request(request)) {
+ pr_err("failed to cancel request (already executed)!\n");
+ i915_add_request(vip);
+ goto err_context_1;
+ }
+ i915_gem_request_get(vip);
+ i915_add_request(vip);
+ request->engine->submit_request(request);
+
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ if (i915_wait_request(vip, 0, HZ) == -ETIME) {
+ pr_err("timed out waiting for high priority request, vip.seqno=%d, current seqno=%d\n",
+ vip->global_seqno, intel_engine_get_seqno(i915->engine[RCS]));
+ goto err;
+ }
+
+ if (i915_gem_request_completed(request)) {
+ pr_err("low priority request already completed\n");
+ goto err;
+ }
+
+ err = 0;
+err:
+ i915_gem_request_put(vip);
+ mutex_lock(&i915->drm.struct_mutex);
+err_context_1:
+ mock_context_close(ctx[1]);
+ i915_gem_request_put(request);
+err_context_0:
+ mock_context_close(ctx[0]);
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+int i915_gem_request_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_add_request),
+ SUBTEST(igt_wait_request),
+ SUBTEST(igt_fence_wait),
+ SUBTEST(igt_request_rewind),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915);
+ drm_dev_unref(&i915->drm);
+
+ return err;
+}
+
+struct live_test {
+ struct drm_i915_private *i915;
+ const char *func;
+ const char *name;
+
+ unsigned int reset_count;
+};
+
+static int begin_live_test(struct live_test *t,
+ struct drm_i915_private *i915,
+ const char *func,
+ const char *name)
+{
+ int err;
+
+ t->i915 = i915;
+ t->func = func;
+ t->name = name;
+
+ err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
+ if (err) {
+ pr_err("%s(%s): failed to idle before, with err=%d!",
+ func, name, err);
+ return err;
+ }
+
+ i915_gem_retire_requests(i915);
+
+ i915->gpu_error.missed_irq_rings = 0;
+ t->reset_count = i915_reset_count(&i915->gpu_error);
+
+ return 0;
+}
+
+static int end_live_test(struct live_test *t)
+{
+ struct drm_i915_private *i915 = t->i915;
+
+ if (wait_for(intel_engines_are_idle(i915), 1)) {
+ pr_err("%s(%s): GPU not idle\n", t->func, t->name);
+ return -EIO;
+ }
+
+ if (t->reset_count != i915_reset_count(&i915->gpu_error)) {
+ pr_err("%s(%s): GPU was reset %d times!\n",
+ t->func, t->name,
+ i915_reset_count(&i915->gpu_error) - t->reset_count);
+ return -EIO;
+ }
+
+ if (i915->gpu_error.missed_irq_rings) {
+ pr_err("%s(%s): Missed interrupts on engines %lx\n",
+ t->func, t->name, i915->gpu_error.missed_irq_rings);
+ return -EIO;
+ }
+
+ return 0;
+}
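+
+/*
+ * begin_live_test()/end_live_test() bracket each live subtest: we start
+ * from an idle GPU with the GPU-reset counter sampled and the missed
+ * interrupt mask cleared, then fail the subtest if the GPU does not
+ * idle again afterwards, was reset in the meantime, or missed an
+ * interrupt. Typical usage, as in the tests below:
+ *
+ *	err = begin_live_test(&t, i915, __func__, engine->name);
+ *	... submit and wait upon requests ...
+ *	err = end_live_test(&t);
+ */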
+
+static int live_nop_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+	/* Submit variously sized batches of nop requests (no payload) to
+	 * each engine individually, and wait for each batch to complete.
+	 * This measures the overhead of allocating and submitting requests
+	 * to the hardware.
+	 */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ struct drm_i915_gem_request *request;
+ unsigned long n, prime;
+ ktime_t times[2] = {};
+
+ err = begin_live_test(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_unlock;
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ request = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_unlock;
+ }
+
+ /* This space is left intentionally blank.
+ *
+ * We do not actually want to perform any
+ * action with this request, we just want
+ * to measure the latency in allocation
+ * and submission of our breadcrumbs -
+ * ensuring that the bare request is sufficient
+ * for the system to work (i.e. proper HEAD
+ * tracking of the rings, interrupt handling,
+ * etc). It also gives us the lowest bounds
+ * for latency.
+ */
+
+ i915_add_request(request);
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = end_live_test(&t);
+ if (err)
+ goto out_unlock;
+
+ pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_vma *empty_batch(struct drm_i915_private *i915)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+ *cmd = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ err = i915_gem_object_set_to_gtt_domain(obj, false);
+ if (err)
+ goto err;
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_GLOBAL);
+ if (err)
+ goto err;
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static struct drm_i915_gem_request *
+empty_request(struct intel_engine_cs *engine,
+ struct i915_vma *batch)
+{
+ struct drm_i915_gem_request *request;
+ int err;
+
+ request = i915_gem_request_alloc(engine,
+ engine->i915->kernel_context);
+ if (IS_ERR(request))
+ return request;
+
+ err = engine->emit_flush(request, EMIT_INVALIDATE);
+ if (err)
+ goto out_request;
+
+ err = i915_switch_context(request);
+ if (err)
+ goto out_request;
+
+ err = engine->emit_bb_start(request,
+ batch->node.start,
+ batch->node.size,
+ I915_DISPATCH_SECURE);
+ if (err)
+ goto out_request;
+
+out_request:
+ __i915_add_request(request, err == 0);
+ return err ? ERR_PTR(err) : request;
+}
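+
+/*
+ * Note that empty_request() always calls __i915_add_request(), even on
+ * error: once allocated, a request has claimed space in the ring and
+ * must be submitted rather than abandoned. The boolean argument
+ * (err == 0) is the flush_caches flag, skipping the post-batch flush
+ * when emission failed; the error itself is reported via ERR_PTR().
+ */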
+
+static int live_empty_request(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ struct i915_vma *batch;
+ unsigned int id;
+ int err = 0;
+
+	/* Submit variously sized batches of requests executing an empty
+	 * (MI_BATCH_BUFFER_END only) batch buffer on each engine
+	 * individually, and wait for each batch to complete. This measures
+	 * the overhead of dispatching a trivial batch via a request.
+	 */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ batch = empty_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ IGT_TIMEOUT(end_time);
+ struct drm_i915_gem_request *request;
+ unsigned long n, prime;
+ ktime_t times[2] = {};
+
+ err = begin_live_test(&t, i915, __func__, engine->name);
+ if (err)
+ goto out_batch;
+
+ /* Warmup / preload */
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_batch;
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ for_each_prime_number_from(prime, 1, 8192) {
+ times[1] = ktime_get_raw();
+
+ for (n = 0; n < prime; n++) {
+ request = empty_request(engine, batch);
+ if (IS_ERR(request)) {
+ err = PTR_ERR(request);
+ goto out_batch;
+ }
+ }
+ i915_wait_request(request,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+
+ times[1] = ktime_sub(ktime_get_raw(), times[1]);
+ if (prime == 1)
+ times[0] = times[1];
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+ }
+
+ err = end_live_test(&t);
+ if (err)
+ goto out_batch;
+
+ pr_info("Batch latencies on %s: 1 = %lluns, %lu = %lluns\n",
+ engine->name,
+ ktime_to_ns(times[0]),
+ prime, div64_u64(ktime_to_ns(times[1]), prime));
+ }
+
+out_batch:
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
+{
+ struct i915_gem_context *ctx = i915->kernel_context;
+ struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ const int gen = INTEL_GEN(i915);
+ struct i915_vma *vma;
+ u32 *cmd;
+ int err;
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ goto err;
+
+ err = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (err)
+ goto err;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto err;
+ }
+
+ if (gen >= 8) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *cmd++ = lower_32_bits(vma->node.start);
+ *cmd++ = upper_32_bits(vma->node.start);
+ } else if (gen >= 6) {
+ *cmd++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *cmd++ = lower_32_bits(vma->node.start);
+ } else if (gen >= 4) {
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cmd++ = lower_32_bits(vma->node.start);
+ } else {
+ *cmd++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | 1;
+ *cmd++ = lower_32_bits(vma->node.start);
+ }
+ *cmd++ = MI_BATCH_BUFFER_END; /* terminate early in case of error */
+
+ wmb();
+ i915_gem_object_unpin_map(obj);
+
+ return vma;
+
+err:
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+}
+
+static int recursive_batch_resolve(struct i915_vma *batch)
+{
+ u32 *cmd;
+
+ cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cmd))
+ return PTR_ERR(cmd);
+
+ *cmd = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_object_unpin_map(batch->obj);
+
+ return 0;
+}
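+
+/*
+ * The batch built by recursive_batch() is deliberately an infinite
+ * loop: its first instruction is an MI_BATCH_BUFFER_START pointing back
+ * at the batch itself, so any request executing it keeps the GPU busy
+ * until recursive_batch_resolve() overwrites that first dword with
+ * MI_BATCH_BUFFER_END (the wmb() making the CPU write visible before
+ * we next inspect the request). This is how the tests below hold
+ * requests in flight for as long as they need.
+ */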
+
+static int live_all_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+	struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
+ struct i915_vma *batch;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+ /* Check we can submit requests to all engines simultaneously. We
+ * send a recursive batch to each engine - checking that we don't
+ * block doing so, and that they don't complete too soon.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
+ goto out_unlock;
+ }
+
+ for_each_engine(engine, i915, id) {
+ request[id] = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request[id])) {
+ err = PTR_ERR(request[id]);
+ pr_err("%s: Request allocation failed with err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ err = engine->emit_flush(request[id], EMIT_INVALIDATE);
+ GEM_BUG_ON(err);
+
+ err = i915_switch_context(request[id]);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[id],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[id]->batch = batch;
+
+ if (!i915_gem_object_has_active_reference(batch->obj)) {
+ i915_gem_object_get(batch->obj);
+ i915_gem_object_set_active_reference(batch->obj);
+ }
+
+ i915_vma_move_to_active(batch, request[id], 0);
+ i915_gem_request_get(request[id]);
+ i915_add_request(request[id]);
+ }
+
+ for_each_engine(engine, i915, id) {
+ if (i915_gem_request_completed(request[id])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+ }
+
+ err = recursive_batch_resolve(batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n", __func__, err);
+ goto out_request;
+ }
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ timeout = i915_wait_request(request[id],
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ i915_gem_request_put(request[id]);
+ request[id] = NULL;
+ }
+
+ err = end_live_test(&t);
+
+out_request:
+ for_each_engine(engine, i915, id)
+ if (request[id])
+ i915_gem_request_put(request[id]);
+ i915_vma_unpin(batch);
+ i915_vma_put(batch);
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int live_sequential_engines(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *request[I915_NUM_ENGINES] = {};
+ struct drm_i915_gem_request *prev = NULL;
+ struct intel_engine_cs *engine;
+ struct live_test t;
+ unsigned int id;
+ int err;
+
+ /* Check we can submit requests to all engines sequentially, such
+ * that each successive request waits for the earlier ones. This
+ * tests that we don't execute requests out of order, even though
+ * they are running on independent engines.
+ */
+
+ mutex_lock(&i915->drm.struct_mutex);
+
+ err = begin_live_test(&t, i915, __func__, "");
+ if (err)
+ goto out_unlock;
+
+ for_each_engine(engine, i915, id) {
+ struct i915_vma *batch;
+
+ batch = recursive_batch(i915);
+ if (IS_ERR(batch)) {
+ err = PTR_ERR(batch);
+ pr_err("%s: Unable to create batch for %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_unlock;
+ }
+
+ request[id] = i915_gem_request_alloc(engine,
+ i915->kernel_context);
+ if (IS_ERR(request[id])) {
+ err = PTR_ERR(request[id]);
+ pr_err("%s: Request allocation failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ if (prev) {
+ err = i915_gem_request_await_dma_fence(request[id],
+ &prev->fence);
+ if (err) {
+ i915_add_request(request[id]);
+ pr_err("%s: Request await failed for %s with err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+ }
+
+ err = engine->emit_flush(request[id], EMIT_INVALIDATE);
+ GEM_BUG_ON(err);
+
+ err = i915_switch_context(request[id]);
+ GEM_BUG_ON(err);
+
+ err = engine->emit_bb_start(request[id],
+ batch->node.start,
+ batch->node.size,
+ 0);
+ GEM_BUG_ON(err);
+ request[id]->batch = batch;
+
+ i915_vma_move_to_active(batch, request[id], 0);
+ i915_gem_object_set_active_reference(batch->obj);
+ i915_vma_get(batch);
+
+ i915_gem_request_get(request[id]);
+ i915_add_request(request[id]);
+
+ prev = request[id];
+ }
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ if (i915_gem_request_completed(request[id])) {
+ pr_err("%s(%s): request completed too early!\n",
+ __func__, engine->name);
+ err = -EINVAL;
+ goto out_request;
+ }
+
+ err = recursive_batch_resolve(request[id]->batch);
+ if (err) {
+ pr_err("%s: failed to resolve batch, err=%d\n",
+ __func__, err);
+ goto out_request;
+ }
+
+ timeout = i915_wait_request(request[id],
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("%s: error waiting for request on %s, err=%d\n",
+ __func__, engine->name, err);
+ goto out_request;
+ }
+
+ GEM_BUG_ON(!i915_gem_request_completed(request[id]));
+ }
+
+ err = end_live_test(&t);
+
+out_request:
+ for_each_engine(engine, i915, id) {
+ u32 *cmd;
+
+ if (!request[id])
+ break;
+
+ cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+ I915_MAP_WC);
+ if (!IS_ERR(cmd)) {
+ *cmd = MI_BATCH_BUFFER_END;
+ wmb();
+ i915_gem_object_unpin_map(request[id]->batch->obj);
+ }
+
+ i915_vma_put(request[id]->batch);
+ i915_gem_request_put(request[id]);
+ }
+out_unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+int i915_gem_request_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_nop_request),
+ SUBTEST(live_all_engines),
+ SUBTEST(live_sequential_engines),
+ SUBTEST(live_empty_request),
+ };
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
new file mode 100644
index 000000000000..18b174d855ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -0,0 +1,19 @@
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(struct drm_i915_private *).
+ * It may be conditionally compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/drv_selftest
+ */
+selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
+selftest(uncore, intel_uncore_live_selftests)
+selftest(requests, i915_gem_request_live_selftests)
+selftest(objects, i915_gem_object_live_selftests)
+selftest(dmabuf, i915_gem_dmabuf_live_selftests)
+selftest(coherency, i915_gem_coherency_live_selftests)
+selftest(gtt, i915_gem_gtt_live_selftests)
+selftest(contexts, i915_gem_context_live_selftests)
+selftest(hangcheck, intel_hangcheck_live_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
new file mode 100644
index 000000000000..be9a9ebf5692
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -0,0 +1,20 @@
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(DRM_I915_SELFTEST).
+ *
+ * Tests are executed in order by igt/drv_selftest
+ */
+selftest(sanitycheck, i915_mock_sanitycheck) /* keep first (igt selfcheck) */
+selftest(scatterlist, scatterlist_mock_selftests)
+selftest(uncore, intel_uncore_mock_selftests)
+selftest(breadcrumbs, intel_breadcrumbs_mock_selftests)
+selftest(requests, i915_gem_request_mock_selftests)
+selftest(objects, i915_gem_object_mock_selftests)
+selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
+selftest(vma, i915_vma_mock_selftests)
+selftest(evict, i915_gem_evict_mock_selftests)
+selftest(gtt, i915_gem_gtt_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c
new file mode 100644
index 000000000000..c17c83c30637
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include "i915_random.h"
+
+static inline u32 i915_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state)
+{
+ return upper_32_bits((u64)prandom_u32_state(state) * ep_ro);
+}
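+
+/*
+ * The multiply-and-take-the-upper-word trick above maps a uniform 32bit
+ * sample into [0, ep_ro) without a modulus: (u64)rand * ep_ro is less
+ * than 2^32 * ep_ro, so its upper 32 bits fall in 0..ep_ro-1 (with the
+ * usual tiny bias when ep_ro does not divide 2^32). It mirrors the
+ * kernel's prandom_u32_max(), but is driven from a caller-supplied
+ * rnd_state so that the selftests remain reproducible from one seed.
+ */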
+
+void i915_random_reorder(unsigned int *order, unsigned int count,
+ struct rnd_state *state)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < count; i++) {
+ BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32));
+ j = i915_prandom_u32_max_state(count, state);
+ swap(order[i], order[j]);
+ }
+}
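+
+/*
+ * Strictly speaking, the loop above performs count random
+ * transpositions rather than a textbook Fisher-Yates shuffle (j is
+ * drawn from the full [0, count) range instead of [i, count)), so the
+ * resulting permutation is very slightly biased - harmless here, where
+ * the tests only need "random enough" orderings.
+ */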
+
+unsigned int *i915_random_order(unsigned int count, struct rnd_state *state)
+{
+ unsigned int *order, i;
+
+ order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY);
+ if (!order)
+ return order;
+
+ for (i = 0; i < count; i++)
+ order[i] = i;
+
+ i915_random_reorder(order, count, state);
+ return order;
+}
diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h
new file mode 100644
index 000000000000..b9c334ce6cd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_random.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_SELFTESTS_RANDOM_H__
+#define __I915_SELFTESTS_RANDOM_H__
+
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
+#define I915_RND_STATE_INITIALIZER(x) ({ \
+ struct rnd_state state__; \
+ prandom_seed_state(&state__, (x)); \
+ state__; \
+})
+
+#define I915_RND_STATE(name__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed)
+
+#define I915_RND_SUBSTATE(name__, parent__) \
+ struct rnd_state name__ = I915_RND_STATE_INITIALIZER(prandom_u32_state(&(parent__)))
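+
+/*
+ * Typical usage, as in the breadcrumbs selftests:
+ *
+ *	I915_RND_STATE(prng);
+ *	unsigned int *order;
+ *
+ *	order = i915_random_order(count, &prng);
+ *
+ * Every state is seeded from i915_selftest.random_seed, so a failing
+ * run can be replayed exactly by feeding the reported st_random_seed
+ * module parameter back in; I915_RND_SUBSTATE() seeds a child state
+ * from the parent's next output, advancing the parent.
+ */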
+
+unsigned int *i915_random_order(unsigned int count,
+ struct rnd_state *state);
+void i915_random_reorder(unsigned int *order,
+ unsigned int count,
+ struct rnd_state *state);
+
+#endif /* !__I915_SELFTESTS_RANDOM_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c
new file mode 100644
index 000000000000..addc5a599c4a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/random.h>
+
+#include "../i915_drv.h"
+#include "../i915_selftest.h"
+
+struct i915_selftest i915_selftest __read_mostly = {
+ .timeout_ms = 1000,
+};
+
+int i915_mock_sanitycheck(void)
+{
+ pr_info(DRIVER_NAME ": %s() - ok!\n", __func__);
+ return 0;
+}
+
+int i915_live_sanitycheck(struct drm_i915_private *i915)
+{
+ pr_info("%s: %s() - ok!\n", i915->drm.driver->name, __func__);
+ return 0;
+}
+
+enum {
+#define selftest(name, func) mock_##name,
+#include "i915_mock_selftests.h"
+#undef selftest
+};
+
+enum {
+#define selftest(name, func) live_##name,
+#include "i915_live_selftests.h"
+#undef selftest
+};
+
+struct selftest {
+ bool enabled;
+ const char *name;
+ union {
+ int (*mock)(void);
+ int (*live)(struct drm_i915_private *);
+ };
+};
+
+#define selftest(n, f) [mock_##n] = { .name = #n, { .mock = f } },
+static struct selftest mock_selftests[] = {
+#include "i915_mock_selftests.h"
+};
+#undef selftest
+
+#define selftest(n, f) [live_##n] = { .name = #n, { .live = f } },
+static struct selftest live_selftests[] = {
+#include "i915_live_selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define selftest(n, func) selftest_0(n, func, param(n))
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __mock_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, mock_selftests[mock_##n].enabled, bool, 0400);
+#include "i915_mock_selftests.h"
+#undef selftest_0
+#undef param
+
+#define param(n) __PASTE(igt__, __PASTE(__LINE__, __live_##n))
+#define selftest_0(n, func, id) \
+module_param_named(id, live_selftests[live_##n].enabled, bool, 0400);
+#include "i915_live_selftests.h"
+#undef selftest_0
+#undef param
+#undef selftest
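+
+/*
+ * The repeated #includes above are a classic X-macro: each listing
+ * header is pulled in three times under a different definition of
+ * selftest(), first to build an enum of test ids, then to build the
+ * table of test descriptors, and finally to stamp out one boolean
+ * module parameter per test (named igt__<line>__mock_<name> or
+ * igt__<line>__live_<name>) so individual tests can be selected on the
+ * kernel command line.
+ */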
+
+static void set_default_test_all(struct selftest *st, unsigned int count)
+{
+ unsigned int i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int __run_selftests(const char *name,
+ struct selftest *st,
+ unsigned int count,
+ void *data)
+{
+ int err = 0;
+
+ while (!i915_selftest.random_seed)
+ i915_selftest.random_seed = get_random_int();
+
+ i915_selftest.timeout_jiffies =
+ i915_selftest.timeout_ms ?
+ msecs_to_jiffies_timeout(i915_selftest.timeout_ms) :
+ MAX_SCHEDULE_TIMEOUT;
+
+ set_default_test_all(st, count);
+
+ pr_info(DRIVER_NAME ": Performing %s selftests with st_random_seed=0x%x st_timeout=%u\n",
+ name, i915_selftest.random_seed, i915_selftest.timeout_ms);
+
+ /* Tests are listed in order in i915_*_selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ pr_debug(DRIVER_NAME ": Running %s\n", st->name);
+ if (data)
+ err = st->live(data);
+ else
+ err = st->mock();
+ if (err == -EINTR && !signal_pending(current))
+ err = 0;
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+#define run_selftests(x, data) \
+ __run_selftests(#x, x##_selftests, ARRAY_SIZE(x##_selftests), data)
+
+int i915_mock_selftests(void)
+{
+ int err;
+
+ if (!i915_selftest.mock)
+ return 0;
+
+ err = run_selftests(mock, NULL);
+ if (err) {
+ i915_selftest.mock = err;
+ return err;
+ }
+
+ if (i915_selftest.mock < 0) {
+ i915_selftest.mock = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int i915_live_selftests(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!i915_selftest.live)
+ return 0;
+
+ err = run_selftests(live, to_i915(pci_get_drvdata(pdev)));
+ if (err) {
+ i915_selftest.live = err;
+ return err;
+ }
+
+ if (i915_selftest.live < 0) {
+ i915_selftest.live = -ENOTTY;
+ return 1;
+ }
+
+ return 0;
+}
+
+int __i915_subtests(const char *caller,
+ const struct i915_subtest *st,
+ unsigned int count,
+ void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ pr_debug(DRIVER_NAME ": Running %s/%s\n", caller, st->name);
+ err = st->func(data);
+ if (err && err != -EINTR) {
+ pr_err(DRIVER_NAME "/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
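+
+/*
+ * Tests reach this via the i915_subtests() macro from i915_selftest.h,
+ * which - presumably, much like the run_selftests() wrapper above -
+ * supplies the caller's name and the ARRAY_SIZE() of its subtest table
+ * automatically.
+ */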
+
+bool __igt_timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
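+
+/*
+ * The IGT_TIMEOUT(name) declarations used throughout the tests pair
+ * with this helper; the macro (from i915_selftest.h) is assumed to
+ * expand to roughly
+ *
+ *	unsigned long name = jiffies + i915_selftest.timeout_jiffies;
+ *
+ * after which __igt_timeout() (and the igt_timeout() wrapper) reports
+ * whether the deadline has passed, printing the supplied message once
+ * when it has.
+ */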
+
+module_param_named(st_random_seed, i915_selftest.random_seed, uint, 0400);
+module_param_named(st_timeout, i915_selftest.timeout_ms, uint, 0400);
+
+module_param_named_unsafe(mock_selftests, i915_selftest.mock, int, 0400);
+MODULE_PARM_DESC(mock_selftests, "Run selftests before loading, using mock hardware (0:disabled [default], 1:run tests then load driver, -1:run tests then exit module)");
+
+module_param_named_unsafe(live_selftests, i915_selftest.live, int, 0400);
+MODULE_PARM_DESC(live_selftests, "Run selftests after driver initialisation on the live system (0:disabled [default], 1:run tests then continue, -1:run tests then exit module)");
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
new file mode 100644
index 000000000000..ad56566e24db
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -0,0 +1,746 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/prime_numbers.h>
+
+#include "../i915_selftest.h"
+
+#include "mock_gem_device.h"
+#include "mock_context.h"
+
+static bool assert_vma(struct i915_vma *vma,
+ struct drm_i915_gem_object *obj,
+ struct i915_gem_context *ctx)
+{
+ bool ok = true;
+
+ if (vma->vm != &ctx->ppgtt->base) {
+ pr_err("VMA created with wrong VM\n");
+ ok = false;
+ }
+
+ if (vma->size != obj->base.size) {
+ pr_err("VMA created with wrong size, found %llu, expected %zu\n",
+ vma->size, obj->base.size);
+ ok = false;
+ }
+
+ if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ pr_err("VMA created with wrong type [%d]\n",
+ vma->ggtt_view.type);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static struct i915_vma *
+checked_vma_instance(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ struct i915_ggtt_view *view)
+{
+ struct i915_vma *vma;
+ bool ok = true;
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ return vma;
+
+ /* Manual checks, will be reinforced by i915_vma_compare! */
+ if (vma->vm != vm) {
+ pr_err("VMA's vm [%p] does not match request [%p]\n",
+ vma->vm, vm);
+ ok = false;
+ }
+
+ if (i915_is_ggtt(vm) != i915_vma_is_ggtt(vma)) {
+ pr_err("VMA ggtt status [%d] does not match parent [%d]\n",
+ i915_vma_is_ggtt(vma), i915_is_ggtt(vm));
+ ok = false;
+ }
+
+ if (i915_vma_compare(vma, vm, view)) {
+		pr_err("i915_vma_compare failed with create parameters!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (i915_vma_compare(vma, vma->vm,
+ i915_vma_is_ggtt(vma) ? &vma->ggtt_view : NULL)) {
+ pr_err("i915_vma_compare failed with itself\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ok) {
+ pr_err("i915_vma_compare failed to detect the difference!\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vma;
+}
+
+static int create_vmas(struct drm_i915_private *i915,
+ struct list_head *objects,
+ struct list_head *contexts)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_gem_context *ctx;
+ int pinned;
+
+ list_for_each_entry(obj, objects, st_link) {
+ for (pinned = 0; pinned <= 1; pinned++) {
+ list_for_each_entry(ctx, contexts, link) {
+ struct i915_address_space *vm =
+ &ctx->ppgtt->base;
+ struct i915_vma *vma;
+ int err;
+
+ vma = checked_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ if (!assert_vma(vma, obj, ctx)) {
+ pr_err("VMA lookup/create failed\n");
+ return -EINVAL;
+ }
+
+ if (!pinned) {
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ pr_err("Failed to pin VMA\n");
+ return err;
+ }
+ } else {
+ i915_vma_unpin(vma);
+ }
+ }
+ }
+ }
+
+ return 0;
+}
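+
+/*
+ * Note the double pass above: the first pass (pinned == 0) looks up and
+ * pins every vma, the second pass (pinned == 1) looks each one up again
+ * - exercising lookup of an already-bound vma - and unpins it, leaving
+ * everything in its original state for the next caller.
+ */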
+
+static int igt_vma_create(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_object *obj, *on;
+ struct i915_gem_context *ctx, *cn;
+ unsigned long num_obj, num_ctx;
+ unsigned long no, nc;
+ IGT_TIMEOUT(end_time);
+ LIST_HEAD(contexts);
+ LIST_HEAD(objects);
+	int err = -ENOMEM;
+
+	/* Exercise creating many VMAs amongst many objects, checking the
+	 * vma creation and lookup routines.
+	 */
+
+ no = 0;
+ for_each_prime_number(num_obj, ULONG_MAX - 1) {
+ for (; no < num_obj; no++) {
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+			if (IS_ERR(obj)) {
+				err = PTR_ERR(obj);
+				goto out;
+			}
+
+ list_add(&obj->st_link, &objects);
+ }
+
+ nc = 0;
+ for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) {
+ for (; nc < num_ctx; nc++) {
+ ctx = mock_context(i915, "mock");
+				if (!ctx) {
+					err = -ENOMEM;
+					goto out;
+				}
+
+ list_move(&ctx->link, &contexts);
+ }
+
+ err = create_vmas(i915, &objects, &contexts);
+ if (err)
+ goto out;
+
+ if (igt_timeout(end_time,
+ "%s timed out: after %lu objects in %lu contexts\n",
+ __func__, no, nc))
+ goto end;
+ }
+
+ list_for_each_entry_safe(ctx, cn, &contexts, link)
+ mock_context_close(ctx);
+ }
+
+end:
+ /* Final pass to lookup all created contexts */
+ err = create_vmas(i915, &objects, &contexts);
+out:
+ list_for_each_entry_safe(ctx, cn, &contexts, link)
+ mock_context_close(ctx);
+
+ list_for_each_entry_safe(obj, on, &objects, st_link)
+ i915_gem_object_put(obj);
+ return err;
+}
+
+struct pin_mode {
+ u64 size;
+ u64 flags;
+ bool (*assert)(const struct i915_vma *,
+ const struct pin_mode *mode,
+ int result);
+ const char *string;
+};
+
+static bool assert_pin_valid(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ if (result)
+ return false;
+
+ if (i915_vma_misplaced(vma, mode->size, 0, mode->flags))
+ return false;
+
+ return true;
+}
+
+__maybe_unused
+static bool assert_pin_e2big(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -E2BIG;
+}
+
+__maybe_unused
+static bool assert_pin_enospc(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -ENOSPC;
+}
+
+__maybe_unused
+static bool assert_pin_einval(const struct i915_vma *vma,
+ const struct pin_mode *mode,
+ int result)
+{
+ return result == -EINVAL;
+}
+
+static int igt_vma_pin1(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ const struct pin_mode modes[] = {
+#define VALID(sz, fl) { .size = (sz), .flags = (fl), .assert = assert_pin_valid, .string = #sz ", " #fl ", (valid) " }
+#define __INVALID(sz, fl, check, eval) { .size = (sz), .flags = (fl), .assert = (check), .string = #sz ", " #fl ", (invalid " #eval ")" }
+#define INVALID(sz, fl) __INVALID(sz, fl, assert_pin_einval, EINVAL)
+#define TOOBIG(sz, fl) __INVALID(sz, fl, assert_pin_e2big, E2BIG)
+#define NOSPACE(sz, fl) __INVALID(sz, fl, assert_pin_enospc, ENOSPC)
+ VALID(0, PIN_GLOBAL),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE),
+
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 4096),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+
+ VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
+ VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total),
+ INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
+
+ VALID(4096, PIN_GLOBAL),
+ VALID(8192, PIN_GLOBAL),
+ VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
+ TOOBIG(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
+ VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL),
+ VALID(i915->ggtt.base.total, PIN_GLOBAL),
+ TOOBIG(i915->ggtt.base.total + 4096, PIN_GLOBAL),
+ TOOBIG(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
+ INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)),
+ INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
+
+ VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+
+#if !IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
+ /* Misusing BIAS is a programming error (it is not controllable
+ * from userspace) so when debugging is enabled, it explodes.
+ * However, the tests are still quite interesting for checking
+ * variable start, end and size.
+ */
+ NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
+ NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total),
+ NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
+ NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)),
+#endif
+ { },
+#undef NOSPACE
+#undef TOOBIG
+#undef INVALID
+#undef __INVALID
+#undef VALID
+ }, *m;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err = -EINVAL;
+
+ /* Exercise all the weird and wonderful i915_vma_pin requests,
+ * focusing on error handling of boundary conditions.
+ */
+
+ GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm));
+
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = checked_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ goto out;
+
+ for (m = modes; m->assert; m++) {
+ err = i915_vma_pin(vma, m->size, 0, m->flags);
+ if (!m->assert(vma, m, err)) {
+ pr_err("%s to pin single page into GGTT with mode[%d:%s]: size=%llx flags=%llx, err=%d\n",
+ m->assert == assert_pin_valid ? "Failed" : "Unexpectedly succeeded",
+ (int)(m - modes), m->string, m->size, m->flags,
+ err);
+ if (!err)
+ i915_vma_unpin(vma);
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (!err) {
+ i915_vma_unpin(vma);
+ err = i915_vma_unbind(vma);
+ if (err) {
+ pr_err("Failed to unbind single page from GGTT, err=%d\n", err);
+ goto out;
+ }
+ }
+ }
+
+ err = 0;
+out:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static unsigned long rotated_index(const struct intel_rotation_info *r,
+ unsigned int n,
+ unsigned int x,
+ unsigned int y)
+{
+ return (r->plane[n].stride * (r->plane[n].height - y - 1) +
+ r->plane[n].offset + x);
+}
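+
+/*
+ * rotated_index() names the source page that should back position
+ * (x, y) of plane n in the rotated view: columns are walked bottom-up,
+ * so y counts from the last row of the source. For a 2x2 plane with
+ * stride 2 and offset 0 the expected mapping is:
+ *
+ *	(x=0,y=0) -> 2	(x=1,y=0) -> 3
+ *	(x=0,y=1) -> 0	(x=1,y=1) -> 1
+ */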
+
+static struct scatterlist *
+assert_rotated(struct drm_i915_gem_object *obj,
+ const struct intel_rotation_info *r, unsigned int n,
+ struct scatterlist *sg)
+{
+ unsigned int x, y;
+
+ for (x = 0; x < r->plane[n].width; x++) {
+ for (y = 0; y < r->plane[n].height; y++) {
+ unsigned long src_idx;
+ dma_addr_t src;
+
+ if (!sg) {
+ pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+ n, x, y);
+ return ERR_PTR(-EINVAL);
+ }
+
+ src_idx = rotated_index(r, n, x, y);
+ src = i915_gem_object_get_dma_address(obj, src_idx);
+
+ if (sg_dma_len(sg) != PAGE_SIZE) {
+ pr_err("Invalid sg.length, found %d, expected %lu for rotated page (%d, %d) [src index %lu]\n",
+ sg_dma_len(sg), PAGE_SIZE,
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (sg_dma_address(sg) != src) {
+ pr_err("Invalid address for rotated page (%d, %d) [src index %lu]\n",
+ x, y, src_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sg = sg_next(sg);
+ }
+ }
+
+ return sg;
+}
+
+static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
+ const struct intel_rotation_plane_info *b)
+{
+ return a->width * a->height + b->width * b->height;
+}
+
+static int igt_vma_rotate(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm = &i915->ggtt.base;
+ struct drm_i915_gem_object *obj;
+ const struct intel_rotation_plane_info planes[] = {
+ { .width = 1, .height = 1, .stride = 1 },
+ { .width = 2, .height = 2, .stride = 2 },
+ { .width = 4, .height = 4, .stride = 4 },
+ { .width = 8, .height = 8, .stride = 8 },
+
+ { .width = 3, .height = 5, .stride = 3 },
+ { .width = 3, .height = 5, .stride = 4 },
+ { .width = 3, .height = 5, .stride = 5 },
+
+ { .width = 5, .height = 3, .stride = 5 },
+ { .width = 5, .height = 3, .stride = 7 },
+ { .width = 5, .height = 3, .stride = 9 },
+
+ { .width = 4, .height = 6, .stride = 6 },
+ { .width = 6, .height = 4, .stride = 6 },
+ { }
+ }, *a, *b;
+ const unsigned int max_pages = 64;
+ int err = -ENOMEM;
+
+	/* Create VMAs for many different combinations of planes and check
+	 * that the page layout within the rotated VMA matches our
+	 * expectations.
+	 */
+
+ obj = i915_gem_object_create_internal(i915, max_pages * PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (a = planes; a->width; a++) {
+ for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
+ struct i915_ggtt_view view;
+ unsigned int n, max_offset;
+
+ max_offset = max(a->stride * a->height,
+ b->stride * b->height);
+ GEM_BUG_ON(max_offset > max_pages);
+ max_offset = max_pages - max_offset;
+
+ view.type = I915_GGTT_VIEW_ROTATED;
+ view.rotated.plane[0] = *a;
+ view.rotated.plane[1] = *b;
+
+ for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
+ for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
+ struct scatterlist *sg;
+ struct i915_vma *vma;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err) {
+ pr_err("Failed to pin VMA, err=%d\n", err);
+ goto out_object;
+ }
+
+ if (vma->size != rotated_size(a, b) * PAGE_SIZE) {
+ pr_err("VMA is wrong size, expected %lu, found %llu\n",
+ PAGE_SIZE * rotated_size(a, b), vma->size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages->nents != rotated_size(a, b)) {
+					pr_err("sg table is wrong size, expected %u, found %u nents\n",
+ rotated_size(a, b), vma->pages->nents);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("VMA binding too small, expected %llu, found %llu\n",
+ vma->size, vma->node.size);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (vma->pages == obj->mm.pages) {
+ pr_err("VMA using unrotated object pages!\n");
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ sg = vma->pages->sgl;
+ for (n = 0; n < ARRAY_SIZE(view.rotated.plane); n++) {
+ sg = assert_rotated(obj, &view.rotated, n, sg);
+ if (IS_ERR(sg)) {
+ pr_err("Inconsistent VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n", n,
+ view.rotated.plane[0].width,
+ view.rotated.plane[0].height,
+ view.rotated.plane[0].stride,
+ view.rotated.plane[0].offset,
+ view.rotated.plane[1].width,
+ view.rotated.plane[1].height,
+ view.rotated.plane[1].stride,
+ view.rotated.plane[1].offset);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+ i915_vma_unpin(vma);
+ }
+ }
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+static bool assert_partial(struct drm_i915_gem_object *obj,
+ struct i915_vma *vma,
+ unsigned long offset,
+ unsigned long size)
+{
+ struct sgt_iter sgt;
+ dma_addr_t dma;
+
+ for_each_sgt_dma(dma, sgt, vma->pages) {
+ dma_addr_t src;
+
+ if (!size) {
+ pr_err("Partial scattergather list too long\n");
+ return false;
+ }
+
+ src = i915_gem_object_get_dma_address(obj, offset);
+ if (src != dma) {
+ pr_err("DMA mismatch for partial page offset %lu\n",
+ offset);
+ return false;
+ }
+
+ offset++;
+ size--;
+ }
+
+ return true;
+}
+
+static bool assert_pin(struct i915_vma *vma,
+ struct i915_ggtt_view *view,
+ u64 size,
+ const char *name)
+{
+ bool ok = true;
+
+ if (vma->size != size) {
+ pr_err("(%s) VMA is wrong size, expected %llu, found %llu\n",
+ name, size, vma->size);
+ ok = false;
+ }
+
+ if (vma->node.size < vma->size) {
+ pr_err("(%s) VMA binding too small, expected %llu, found %llu\n",
+ name, vma->size, vma->node.size);
+ ok = false;
+ }
+
+ if (view && view->type != I915_GGTT_VIEW_NORMAL) {
+ if (memcmp(&vma->ggtt_view, view, sizeof(*view))) {
+ pr_err("(%s) VMA mismatch upon creation!\n",
+ name);
+ ok = false;
+ }
+
+ if (vma->pages == vma->obj->mm.pages) {
+ pr_err("(%s) VMA using original object pages!\n",
+ name);
+ ok = false;
+ }
+ } else {
+ if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
+ pr_err("Not the normal ggtt view! Found %d\n",
+ vma->ggtt_view.type);
+ ok = false;
+ }
+
+ if (vma->pages != vma->obj->mm.pages) {
+ pr_err("VMA not using object pages!\n");
+ ok = false;
+ }
+ }
+
+ return ok;
+}
+
+static int igt_vma_partial(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct i915_address_space *vm = &i915->ggtt.base;
+ const unsigned int npages = 1021; /* prime! */
+ struct drm_i915_gem_object *obj;
+ const struct phase {
+ const char *name;
+ } phases[] = {
+ { "create" },
+ { "lookup" },
+ { },
+ }, *p;
+ unsigned int sz, offset;
+ struct i915_vma *vma;
+ int err = -ENOMEM;
+
+	/* Create lots of different VMAs for the object and check that
+	 * we are returned the same VMA when we later request the same range.
+	 */
+
+ obj = i915_gem_object_create_internal(i915, npages*PAGE_SIZE);
+ if (IS_ERR(obj))
+ goto out;
+
+ for (p = phases; p->name; p++) { /* exercise both create/lookup */
+ unsigned int count, nvma;
+
+ nvma = 0;
+ for_each_prime_number_from(sz, 1, npages) {
+ for_each_prime_number_from(offset, 0, npages - sz) {
+ struct i915_ggtt_view view;
+
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.partial.offset = offset;
+ view.partial.size = sz;
+
+ if (sz == npages)
+ view.type = I915_GGTT_VIEW_NORMAL;
+
+ vma = checked_vma_instance(obj, vm, &view);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, &view, sz*PAGE_SIZE, p->name)) {
+ pr_err("(%s) Inconsistent partial pinning for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ if (!assert_partial(obj, vma, offset, sz)) {
+ pr_err("(%s) Inconsistent partial pages for (offset=%d, size=%d)\n",
+ p->name, offset, sz);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+ nvma++;
+ }
+ }
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
+ count++;
+ if (count != nvma) {
+ pr_err("(%s) All partial vma were not recorded on the obj->vma_list: found %u, expected %u\n",
+ p->name, count, nvma);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ /* Check that we did create the whole object mapping */
+ vma = checked_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_object;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_object;
+
+ if (!assert_pin(vma, NULL, obj->base.size, p->name)) {
+ pr_err("(%s) inconsistent full pin\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+
+ i915_vma_unpin(vma);
+
+ count = 0;
+ list_for_each_entry(vma, &obj->vma_list, obj_link)
+ count++;
+ if (count != nvma) {
+ pr_err("(%s) allocated an extra full vma!\n", p->name);
+ err = -EINVAL;
+ goto out_object;
+ }
+ }
+
+out_object:
+ i915_gem_object_put(obj);
+out:
+ return err;
+}
+
+int i915_vma_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_vma_create),
+ SUBTEST(igt_vma_pin1),
+ SUBTEST(igt_vma_rotate),
+ SUBTEST(igt_vma_partial),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = i915_subtests(tests, i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drm_dev_unref(&i915->drm);
+ return err;
+}
+
diff --git a/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
new file mode 100644
index 000000000000..19860a372d90
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_breadcrumbs.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+#include "i915_random.h"
+
+#include "mock_gem_device.h"
+#include "mock_engine.h"
+
+static int check_rbtree(struct intel_engine_cs *engine,
+ const unsigned long *bitmap,
+ const struct intel_wait *waiters,
+ const int count)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+ struct rb_node *rb;
+ int n;
+
+ if (&b->irq_wait->node != rb_first(&b->waiters)) {
+ pr_err("First waiter does not match first element of wait-tree\n");
+ return -EINVAL;
+ }
+
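+ /* Walk the tree in order, pairing each node with the next set bit:
+ * the rbtree must contain exactly the waiters marked in the bitmap,
+ * in index (and hence seqno) order.
+ */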
+ n = find_first_bit(bitmap, count);
+ for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
+ struct intel_wait *w = container_of(rb, typeof(*w), node);
+ int idx = w - waiters;
+
+ if (!test_bit(idx, bitmap)) {
+ pr_err("waiter[%d, seqno=%d] removed but still in wait-tree\n",
+ idx, w->seqno);
+ return -EINVAL;
+ }
+
+ if (n != idx) {
+ pr_err("waiter[%d, seqno=%d] does not match expected next element in tree [%d]\n",
+ idx, w->seqno, n);
+ return -EINVAL;
+ }
+
+ n = find_next_bit(bitmap, count, n + 1);
+ }
+
+ return 0;
+}
+
+static int check_completion(struct intel_engine_cs *engine,
+ const unsigned long *bitmap,
+ const struct intel_wait *waiters,
+ const int count)
+{
+ int n;
+
+ for (n = 0; n < count; n++) {
+ if (intel_wait_complete(&waiters[n]) != !!test_bit(n, bitmap))
+ continue;
+
+ pr_err("waiter[%d, seqno=%d] is %s, but expected %s\n",
+ n, waiters[n].seqno,
+ intel_wait_complete(&waiters[n]) ? "complete" : "active",
+ test_bit(n, bitmap) ? "active" : "complete");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_rbtree_empty(struct intel_engine_cs *engine)
+{
+ struct intel_breadcrumbs *b = &engine->breadcrumbs;
+
+ if (b->irq_wait) {
+ pr_err("Empty breadcrumbs still has a waiter\n");
+ return -EINVAL;
+ }
+
+ if (!RB_EMPTY_ROOT(&b->waiters)) {
+ pr_err("Empty breadcrumbs, but wait-tree not empty\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int igt_random_insert_remove(void *arg)
+{
+ const u32 seqno_bias = 0x1000;
+ I915_RND_STATE(prng);
+ struct intel_engine_cs *engine = arg;
+ struct intel_wait *waiters;
+ const int count = 4096;
+ unsigned int *order;
+ unsigned long *bitmap;
+ int err = -ENOMEM;
+ int n;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto out_waiters;
+
+ order = i915_random_order(count, &prng);
+ if (!order)
+ goto out_bitmap;
+
+ for (n = 0; n < count; n++)
+ intel_wait_init_for_seqno(&waiters[n], seqno_bias + n);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+
+ /* Add waiters to and remove them from the rbtree in random order.
+ * At each step, we verify that the rbtree is correctly ordered.
+ */
+ for (n = 0; n < count; n++) {
+ int i = order[n];
+
+ intel_engine_add_wait(engine, &waiters[i]);
+ __set_bit(i, bitmap);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+ }
+
+ i915_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ int i = order[n];
+
+ intel_engine_remove_wait(engine, &waiters[i]);
+ __clear_bit(i, bitmap);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_order;
+ }
+
+ err = check_rbtree_empty(engine);
+out_order:
+ kfree(order);
+out_bitmap:
+ kfree(bitmap);
+out_waiters:
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+static int igt_insert_complete(void *arg)
+{
+ const u32 seqno_bias = 0x1000;
+ struct intel_engine_cs *engine = arg;
+ struct intel_wait *waiters;
+ const int count = 4096;
+ unsigned long *bitmap;
+ int err = -ENOMEM;
+ int n, m;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ bitmap = kcalloc(DIV_ROUND_UP(count, BITS_PER_LONG), sizeof(*bitmap),
+ GFP_TEMPORARY);
+ if (!bitmap)
+ goto out_waiters;
+
+ for (n = 0; n < count; n++) {
+ intel_wait_init_for_seqno(&waiters[n], n + seqno_bias);
+ intel_engine_add_wait(engine, &waiters[n]);
+ __set_bit(n, bitmap);
+ }
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err)
+ goto out_bitmap;
+
+ /* On each step, we advance the seqno so that several waiters are then
+ * complete (we increase the seqno by increasingly larger values to
+ * retire more and more waiters at once). All retired waiters should
+ * be woken and removed from the rbtree, which is what we then check.
+ */
+ for (n = 0; n < count; n = m) {
+ int seqno = 2 * n;
+
+ GEM_BUG_ON(find_first_bit(bitmap, count) != n);
+
+ if (intel_wait_complete(&waiters[n])) {
+ pr_err("waiter[%d, seqno=%d] completed too early\n",
+ n, waiters[n].seqno);
+ err = -EINVAL;
+ goto out_bitmap;
+ }
+
+ /* complete the waiters up to and including this seqno */
+ mock_seqno_advance(engine, seqno + seqno_bias);
+ for (m = n; m <= seqno; m++) {
+ if (m == count)
+ break;
+
+ GEM_BUG_ON(!test_bit(m, bitmap));
+ __clear_bit(m, bitmap);
+ }
+
+ intel_engine_remove_wait(engine, &waiters[n]);
+ RB_CLEAR_NODE(&waiters[n].node);
+
+ err = check_rbtree(engine, bitmap, waiters, count);
+ if (err) {
+ pr_err("rbtree corrupt after seqno advance to %d\n",
+ seqno + seqno_bias);
+ goto out_bitmap;
+ }
+
+ err = check_completion(engine, bitmap, waiters, count);
+ if (err) {
+ pr_err("completions after seqno advance to %d failed\n",
+ seqno + seqno_bias);
+ goto out_bitmap;
+ }
+ }
+
+ err = check_rbtree_empty(engine);
+out_bitmap:
+ kfree(bitmap);
+out_waiters:
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+struct igt_wakeup {
+ struct task_struct *tsk;
+ atomic_t *ready, *set, *done;
+ struct intel_engine_cs *engine;
+ unsigned long flags;
+#define STOP 0
+#define IDLE 1
+ wait_queue_head_t *wq;
+ u32 seqno;
+};
+
+static int wait_atomic(atomic_t *p)
+{
+ schedule();
+ return 0;
+}
+
+static int wait_atomic_timeout(atomic_t *p)
+{
+ return schedule_timeout(10 * HZ) ? 0 : -ETIMEDOUT;
+}
+
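+/* Park the waiter between test steps. The three atomics form a simple
+ * barrier: "done" counts threads that have gone idle, "ready" gates
+ * their release, and "set" counts threads that have woken up again.
+ * igt_wake_all_sync() drives this handshake from the test body.
+ */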
+static bool wait_for_ready(struct igt_wakeup *w)
+{
+ DEFINE_WAIT(ready);
+
+ set_bit(IDLE, &w->flags);
+ if (atomic_dec_and_test(w->done))
+ wake_up_atomic_t(w->done);
+
+ if (test_bit(STOP, &w->flags))
+ goto out;
+
+ for (;;) {
+ prepare_to_wait(w->wq, &ready, TASK_INTERRUPTIBLE);
+ if (atomic_read(w->ready) == 0)
+ break;
+
+ schedule();
+ }
+ finish_wait(w->wq, &ready);
+
+out:
+ clear_bit(IDLE, &w->flags);
+ if (atomic_dec_and_test(w->set))
+ wake_up_atomic_t(w->set);
+
+ return !test_bit(STOP, &w->flags);
+}
+
+static int igt_wakeup_thread(void *arg)
+{
+ struct igt_wakeup *w = arg;
+ struct intel_wait wait;
+
+ while (wait_for_ready(w)) {
+ GEM_BUG_ON(kthread_should_stop());
+
+ intel_wait_init_for_seqno(&wait, w->seqno);
+ intel_engine_add_wait(w->engine, &wait);
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (i915_seqno_passed(intel_engine_get_seqno(w->engine),
+ w->seqno))
+ break;
+
+ if (test_bit(STOP, &w->flags)) /* emergency escape */
+ break;
+
+ schedule();
+ }
+ intel_engine_remove_wait(w->engine, &wait);
+ __set_current_state(TASK_RUNNING);
+ }
+
+ return 0;
+}
+
+static void igt_wake_all_sync(atomic_t *ready,
+ atomic_t *set,
+ atomic_t *done,
+ wait_queue_head_t *wq,
+ int count)
+{
+ atomic_set(set, count);
+ atomic_set(ready, 0);
+ wake_up_all(wq);
+
+ wait_on_atomic_t(set, wait_atomic, TASK_UNINTERRUPTIBLE);
+ atomic_set(ready, count);
+ atomic_set(done, count);
+}
+
+static int igt_wakeup(void *arg)
+{
+ I915_RND_STATE(prng);
+ const int state = TASK_UNINTERRUPTIBLE;
+ struct intel_engine_cs *engine = arg;
+ struct igt_wakeup *waiters;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
+ const int count = 4096;
+ const u32 max_seqno = count / 4;
+ atomic_t ready, set, done;
+ int err = -ENOMEM;
+ int n, step;
+
+ mock_engine_reset(engine);
+
+ waiters = drm_malloc_gfp(count, sizeof(*waiters), GFP_TEMPORARY);
+ if (!waiters)
+ goto out_engines;
+
+ /* Create a large number of threads, each waiting on a random seqno.
+ * Multiple waiters will be waiting for the same seqno.
+ */
+ atomic_set(&ready, count);
+ for (n = 0; n < count; n++) {
+ waiters[n].wq = &wq;
+ waiters[n].ready = &ready;
+ waiters[n].set = &set;
+ waiters[n].done = &done;
+ waiters[n].engine = engine;
+ waiters[n].flags = BIT(IDLE);
+
+ waiters[n].tsk = kthread_run(igt_wakeup_thread, &waiters[n],
+ "i915/igt:%d", n);
+ if (IS_ERR(waiters[n].tsk))
+ goto out_waiters;
+
+ get_task_struct(waiters[n].tsk);
+ }
+
+ for (step = 1; step <= max_seqno; step <<= 1) {
+ u32 seqno;
+
+ /* The waiter threads start paused as we assign them a random
+ * seqno and reset the engine. Once the engine is reset,
+ * we signal that the threads may begin their wait upon their
+ * seqno.
+ */
+ for (n = 0; n < count; n++) {
+ GEM_BUG_ON(!test_bit(IDLE, &waiters[n].flags));
+ waiters[n].seqno =
+ 1 + prandom_u32_state(&prng) % max_seqno;
+ }
+ mock_seqno_advance(engine, 0);
+ igt_wake_all_sync(&ready, &set, &done, &wq, count);
+
+ /* Simulate the GPU doing chunks of work, with one or more
+ * seqno appearing to finish at the same time. A random number
+ * of threads will be waiting upon the update and hopefully be
+ * woken.
+ */
+ for (seqno = 1; seqno <= max_seqno + step; seqno += step) {
+ usleep_range(50, 500);
+ mock_seqno_advance(engine, seqno);
+ }
+ GEM_BUG_ON(intel_engine_get_seqno(engine) < 1 + max_seqno);
+
+ /* With the seqno now beyond any of the waiting threads, they
+ * should all be woken: each sees that it is complete and signals
+ * that it is ready for the next test. We wait until all threads
+ * are idle and waiting for us (i.e. for a wakeup, not on a seqno).
+ */
+ err = wait_on_atomic_t(&done, wait_atomic_timeout, state);
+ if (err) {
+ pr_err("Timed out waiting for %d remaining waiters\n",
+ atomic_read(&done));
+ break;
+ }
+
+ err = check_rbtree_empty(engine);
+ if (err)
+ break;
+ }
+
+out_waiters:
+ for (n = 0; n < count; n++) {
+ if (IS_ERR(waiters[n].tsk))
+ break;
+
+ set_bit(STOP, &waiters[n].flags);
+ }
+ mock_seqno_advance(engine, INT_MAX); /* wakeup any broken waiters */
+ igt_wake_all_sync(&ready, &set, &done, &wq, n);
+
+ for (n = 0; n < count; n++) {
+ if (IS_ERR(waiters[n].tsk))
+ break;
+
+ kthread_stop(waiters[n].tsk);
+ put_task_struct(waiters[n].tsk);
+ }
+
+ drm_free_large(waiters);
+out_engines:
+ mock_engine_flush(engine);
+ return err;
+}
+
+int intel_breadcrumbs_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_random_insert_remove),
+ SUBTEST(igt_insert_complete),
+ SUBTEST(igt_wakeup),
+ };
+ struct drm_i915_private *i915;
+ int err;
+
+ i915 = mock_gem_device();
+ if (!i915)
+ return -ENOMEM;
+
+ err = i915_subtests(tests, i915->engine[RCS]);
+ drm_dev_unref(&i915->drm);
+
+ return err;
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
new file mode 100644
index 000000000000..6ec7c731a267
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -0,0 +1,543 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
+struct hang {
+ struct drm_i915_private *i915;
+ struct drm_i915_gem_object *hws;
+ struct drm_i915_gem_object *obj;
+ u32 *seqno;
+ u32 *batch;
+};
+
+static int hang_init(struct hang *h, struct drm_i915_private *i915)
+{
+ void *vaddr;
+ int err;
+
+ memset(h, 0, sizeof(*h));
+ h->i915 = i915;
+
+ h->hws = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(h->hws))
+ return PTR_ERR(h->hws);
+
+ h->obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(h->obj)) {
+ err = PTR_ERR(h->obj);
+ goto err_hws;
+ }
+
+ i915_gem_object_set_cache_level(h->hws, I915_CACHE_LLC);
+ vaddr = i915_gem_object_pin_map(h->hws, I915_MAP_WB);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_obj;
+ }
+ h->seqno = memset(vaddr, 0xff, PAGE_SIZE);
+
+ vaddr = i915_gem_object_pin_map(h->obj,
+ HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ err = PTR_ERR(vaddr);
+ goto err_unpin_hws;
+ }
+ h->batch = vaddr;
+
+ return 0;
+
+err_unpin_hws:
+ i915_gem_object_unpin_map(h->hws);
+err_obj:
+ i915_gem_object_put(h->obj);
+err_hws:
+ i915_gem_object_put(h->hws);
+ return err;
+}
+
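+/* Each context writes its breadcrumb to its own dword of the shared
+ * hws page, indexed by the request's fence context, so that hangs on
+ * different contexts do not overwrite each other's seqno.
+ */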
+static u64 hws_address(const struct i915_vma *hws,
+ const struct drm_i915_gem_request *rq)
+{
+ return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
+}
+
+static int emit_recurse_batch(struct hang *h,
+ struct drm_i915_gem_request *rq)
+{
+ struct drm_i915_private *i915 = h->i915;
+ struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+ struct i915_vma *hws, *vma;
+ unsigned int flags;
+ u32 *batch;
+ int err;
+
+ vma = i915_vma_instance(h->obj, vm, NULL);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ hws = i915_vma_instance(h->hws, vm, NULL);
+ if (IS_ERR(hws))
+ return PTR_ERR(hws);
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err)
+ return err;
+
+ err = i915_vma_pin(hws, 0, 0, PIN_USER);
+ if (err)
+ goto unpin_vma;
+
+ err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
+ if (err)
+ goto unpin_hws;
+
+ err = i915_switch_context(rq);
+ if (err)
+ goto unpin_hws;
+
+ i915_vma_move_to_active(vma, rq, 0);
+ if (!i915_gem_object_has_active_reference(vma->obj)) {
+ i915_gem_object_get(vma->obj);
+ i915_gem_object_set_active_reference(vma->obj);
+ }
+
+ i915_vma_move_to_active(hws, rq, 0);
+ if (!i915_gem_object_has_active_reference(hws->obj)) {
+ i915_gem_object_get(hws->obj);
+ i915_gem_object_set_active_reference(hws->obj);
+ }
+
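+ /* Build a self-referencing batch: report the request's seqno to the
+ * hws page (so we can tell the batch has started), then jump back to
+ * the start of the batch and spin until someone rewrites it with
+ * MI_BATCH_BUFFER_END. The exact encoding varies with gen.
+ */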
+ batch = h->batch;
+ if (INTEL_GEN(i915) >= 8) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = upper_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ *batch++ = upper_32_bits(vma->node.start);
+ } else if (INTEL_GEN(i915) >= 6) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 1 << 8;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else if (INTEL_GEN(i915) >= 4) {
+ *batch++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+ *batch++ = 0;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6;
+ *batch++ = lower_32_bits(vma->node.start);
+ } else {
+ *batch++ = MI_STORE_DWORD_IMM;
+ *batch++ = lower_32_bits(hws_address(hws, rq));
+ *batch++ = rq->fence.seqno;
+ *batch++ = MI_BATCH_BUFFER_START | 2 << 6 | 1;
+ *batch++ = lower_32_bits(vma->node.start);
+ }
+ *batch++ = MI_BATCH_BUFFER_END; /* not reached */
+
+ flags = 0;
+ if (INTEL_GEN(vm->i915) <= 5)
+ flags |= I915_DISPATCH_SECURE;
+
+ err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
+
+unpin_hws:
+ i915_vma_unpin(hws);
+unpin_vma:
+ i915_vma_unpin(vma);
+ return err;
+}
+
+static struct drm_i915_gem_request *
+hang_create_request(struct hang *h,
+ struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ struct drm_i915_gem_request *rq;
+ int err;
+
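+ /* If the GPU is still spinning inside the old batch, swap in a
+ * fresh object rather than rewrite a batch that is being executed.
+ */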
+ if (i915_gem_object_is_active(h->obj)) {
+ struct drm_i915_gem_object *obj;
+ void *vaddr;
+
+ obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vaddr = i915_gem_object_pin_map(obj,
+ HAS_LLC(h->i915) ? I915_MAP_WB : I915_MAP_WC);
+ if (IS_ERR(vaddr)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(vaddr);
+ }
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ h->obj = obj;
+ h->batch = vaddr;
+ }
+
+ rq = i915_gem_request_alloc(engine, ctx);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = emit_recurse_batch(h, rq);
+ if (err) {
+ __i915_add_request(rq, false);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static u32 hws_seqno(const struct hang *h,
+ const struct drm_i915_gem_request *rq)
+{
+ return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
+}
+
+static void hang_fini(struct hang *h)
+{
+ *h->batch = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_object_unpin_map(h->obj);
+ i915_gem_object_put(h->obj);
+
+ i915_gem_object_unpin_map(h->hws);
+ i915_gem_object_put(h->hws);
+
+ i915_gem_wait_for_idle(h->i915, I915_WAIT_LOCKED);
+ i915_gem_retire_requests(h->i915);
+}
+
+static int igt_hang_sanitycheck(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *rq;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Basic check that we can execute our hanging batch */
+
+ if (!igt_can_mi_store_dword_imm(i915))
+ return 0;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ for_each_engine(engine, i915, id) {
+ long timeout;
+
+ rq = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ pr_err("Failed to create request for %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ __i915_add_request(rq, true);
+
+ timeout = i915_wait_request(rq,
+ I915_WAIT_LOCKED,
+ MAX_SCHEDULE_TIMEOUT);
+ i915_gem_request_put(rq);
+
+ if (timeout < 0) {
+ err = timeout;
+ pr_err("Wait for request failed on %s, err=%d\n",
+ engine->name, err);
+ goto fini;
+ }
+ }
+
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ return err;
+}
+
+static int igt_global_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ unsigned int reset_count;
+ int err = 0;
+
+ /* Check that we can issue a global GPU reset */
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ set_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ reset_count = i915_reset_count(&i915->gpu_error);
+
+ i915_reset(i915);
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ }
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ if (i915_terminally_wedged(&i915->gpu_error))
+ err = -EIO;
+
+ return err;
+}
+
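+/* Pretend hangcheck has fired: mark the request's engine as stalled at
+ * its current seqno and signal the reset handoff so that whoever is
+ * waiting on the request performs the actual GPU reset.
+ */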
+static u32 fake_hangcheck(struct drm_i915_gem_request *rq)
+{
+ u32 reset_count;
+
+ rq->engine->hangcheck.stalled = true;
+ rq->engine->hangcheck.seqno = intel_engine_get_seqno(rq->engine);
+
+ reset_count = i915_reset_count(&rq->i915->gpu_error);
+
+ set_bit(I915_RESET_HANDOFF, &rq->i915->gpu_error.flags);
+ wake_up_all(&rq->i915->gpu_error.wait_queue);
+
+ return reset_count;
+}
+
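+/* Busy-wait briefly, then poll for up to a second, for the hanging
+ * batch to write its seqno to the hws page, i.e. for the spinner to
+ * have actually started running on the GPU.
+ */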
+static bool wait_for_hang(struct hang *h, struct drm_i915_gem_request *rq)
+{
+ return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 10) &&
+ wait_for(i915_seqno_passed(hws_seqno(h, rq),
+ rq->fence.seqno),
+ 1000));
+}
+
+static int igt_wait_reset(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct drm_i915_gem_request *rq;
+ unsigned int reset_count;
+ struct hang h;
+ long timeout;
+ int err;
+
+ /* Check that we detect a stuck waiter and issue a reset */
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ rq = hang_create_request(&h, i915->engine[RCS], i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+ if (!wait_for_hang(&h, rq)) {
+ pr_err("Failed to start request %x\n", rq->fence.seqno);
+ err = -EIO;
+ goto out_rq;
+ }
+
+ reset_count = fake_hangcheck(rq);
+
+ timeout = i915_wait_request(rq, I915_WAIT_LOCKED, 10);
+ if (timeout < 0) {
+ pr_err("i915_wait_request failed on a stuck request: err=%ld\n",
+ timeout);
+ err = timeout;
+ goto out_rq;
+ }
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF, &i915->gpu_error.flags));
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ err = -EINVAL;
+ goto out_rq;
+ }
+
+out_rq:
+ i915_gem_request_put(rq);
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+static int igt_reset_queue(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct hang h;
+ int err;
+
+ /* Check that we replay pending requests following a hang */
+
+ if (!igt_can_mi_store_dword_imm(i915))
+ return 0;
+
+ set_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+ mutex_lock(&i915->drm.struct_mutex);
+ err = hang_init(&h, i915);
+ if (err)
+ goto unlock;
+
+ for_each_engine(engine, i915, id) {
+ struct drm_i915_gem_request *prev;
+ IGT_TIMEOUT(end_time);
+ unsigned int count;
+
+ prev = hang_create_request(&h, engine, i915->kernel_context);
+ if (IS_ERR(prev)) {
+ err = PTR_ERR(prev);
+ goto fini;
+ }
+
+ i915_gem_request_get(prev);
+ __i915_add_request(prev, true);
+
+ count = 0;
+ do {
+ struct drm_i915_gem_request *rq;
+ unsigned int reset_count;
+
+ rq = hang_create_request(&h,
+ engine,
+ i915->kernel_context);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto fini;
+ }
+
+ i915_gem_request_get(rq);
+ __i915_add_request(rq, true);
+
+ if (!wait_for_hang(&h, prev)) {
+ pr_err("Failed to start request %x\n",
+ prev->fence.seqno);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EIO;
+ goto fini;
+ }
+
+ reset_count = fake_hangcheck(prev);
+
+ i915_reset(i915);
+
+ GEM_BUG_ON(test_bit(I915_RESET_HANDOFF,
+ &i915->gpu_error.flags));
+
+ if (prev->fence.error != -EIO) {
+ pr_err("GPU reset not recorded on hanging request [fence.error=%d]!\n",
+ prev->fence.error);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ if (rq->fence.error) {
+ pr_err("Fence error status not zero [%d] after unrelated reset\n",
+ rq->fence.error);
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ if (i915_reset_count(&i915->gpu_error) == reset_count) {
+ pr_err("No GPU reset recorded!\n");
+ i915_gem_request_put(rq);
+ i915_gem_request_put(prev);
+ err = -EINVAL;
+ goto fini;
+ }
+
+ i915_gem_request_put(prev);
+ prev = rq;
+ count++;
+ } while (time_before(jiffies, end_time));
+ pr_info("%s: Completed %d resets\n", engine->name, count);
+
+ *h.batch = MI_BATCH_BUFFER_END;
+ wmb();
+
+ i915_gem_request_put(prev);
+ }
+
+fini:
+ hang_fini(&h);
+unlock:
+ mutex_unlock(&i915->drm.struct_mutex);
+ clear_bit(I915_RESET_BACKOFF, &i915->gpu_error.flags);
+
+ if (i915_terminally_wedged(&i915->gpu_error))
+ return -EIO;
+
+ return err;
+}
+
+int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_hang_sanitycheck),
+ SUBTEST(igt_global_reset),
+ SUBTEST(igt_wait_reset),
+ SUBTEST(igt_reset_queue),
+ };
+
+ if (!intel_has_gpu_reset(i915))
+ return 0;
+
+ return i915_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
new file mode 100644
index 000000000000..2d0fef2cfca6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -0,0 +1,182 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "../i915_selftest.h"
+
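+/* Sanity-check a forcewake table: entries must be sorted and non-empty,
+ * and where the table is used as a direct lookup (gen9+) it must also
+ * be watertight, i.e. leave no gaps between consecutive ranges.
+ */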
+static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
+ unsigned int num_ranges,
+ bool is_watertight)
+{
+ unsigned int i;
+ s32 prev;
+
+ for (i = 0, prev = -1; i < num_ranges; i++, ranges++) {
+ /* Check that the table is watertight */
+ if (is_watertight && (prev + 1) != (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is not watertight to previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the table never goes backwards */
+ if (prev >= (s32)ranges->start) {
+ pr_err("%s: entry[%d]:(%x, %x) is less than the previous (%x)\n",
+ __func__, i, ranges->start, ranges->end, prev);
+ return -EINVAL;
+ }
+
+ /* Check that the entry is valid */
+ if (ranges->start >= ranges->end) {
+ pr_err("%s: entry[%d]:(%x, %x) has negative length\n",
+ __func__, i, ranges->start, ranges->end);
+ return -EINVAL;
+ }
+
+ prev = ranges->end;
+ }
+
+ return 0;
+}
+
+static int intel_shadow_table_check(void)
+{
+ const i915_reg_t *reg = gen8_shadowed_regs;
+ unsigned int i;
+ s32 prev;
+
+ for (i = 0, prev = -1; i < ARRAY_SIZE(gen8_shadowed_regs); i++, reg++) {
+ u32 offset = i915_mmio_reg_offset(*reg);
+
+ if (prev >= (s32)offset) {
+ pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
+ __func__, i, offset, prev);
+ return -EINVAL;
+ }
+
+ prev = offset;
+ }
+
+ return 0;
+}
+
+int intel_uncore_mock_selftests(void)
+{
+ struct {
+ const struct intel_forcewake_range *ranges;
+ unsigned int num_ranges;
+ bool is_watertight;
+ } fw[] = {
+ { __vlv_fw_ranges, ARRAY_SIZE(__vlv_fw_ranges), false },
+ { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false },
+ { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true },
+ };
+ int err, i;
+
+ for (i = 0; i < ARRAY_SIZE(fw); i++) {
+ err = intel_fw_table_check(fw[i].ranges,
+ fw[i].num_ranges,
+ fw[i].is_watertight);
+ if (err)
+ return err;
+ }
+
+ err = intel_shadow_table_check();
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int intel_uncore_check_forcewake_domains(struct drm_i915_private *dev_priv)
+{
+#define FW_RANGE 0x40000
+ unsigned long *valid;
+ u32 offset;
+ int err;
+
+ if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) &&
+ !IS_VALLEYVIEW(dev_priv) &&
+ !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ if (IS_VALLEYVIEW(dev_priv)) /* XXX system lockup! */
+ return 0;
+
+ if (IS_BROADWELL(dev_priv)) /* XXX random GPU hang afterwards! */
+ return 0;
+
+ valid = kzalloc(BITS_TO_LONGS(FW_RANGE) * sizeof(*valid),
+ GFP_TEMPORARY);
+ if (!valid)
+ return -ENOMEM;
+
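+ /* First pass: with every forcewake domain held, record which mmio
+ * offsets can be read without tripping the unclaimed-access detection.
+ */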
+ intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+
+ check_for_unclaimed_mmio(dev_priv);
+ for (offset = 0; offset < FW_RANGE; offset += 4) {
+ i915_reg_t reg = { offset };
+
+ (void)I915_READ_FW(reg);
+ if (!check_for_unclaimed_mmio(dev_priv))
+ set_bit(offset, valid);
+ }
+
+ intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+
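+ /* Second pass: re-read each readable offset with forcewake released;
+ * an unclaimed-mmio hit here means the forcewake table failed to wake
+ * the domain that register actually lives in.
+ */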
+ err = 0;
+ for_each_set_bit(offset, valid, FW_RANGE) {
+ i915_reg_t reg = { offset };
+
+ intel_uncore_forcewake_reset(dev_priv, false);
+ check_for_unclaimed_mmio(dev_priv);
+
+ (void)I915_READ(reg);
+ if (check_for_unclaimed_mmio(dev_priv)) {
+ pr_err("Unclaimed mmio read to register 0x%04x\n",
+ offset);
+ err = -EINVAL;
+ }
+ }
+
+ kfree(valid);
+ return err;
+}
+
+int intel_uncore_live_selftests(struct drm_i915_private *i915)
+{
+ int err;
+
+ /* Confirm the table we load is still valid */
+ err = intel_fw_table_check(i915->uncore.fw_domains_table,
+ i915->uncore.fw_domains_table_entries,
+ INTEL_GEN(i915) >= 9);
+ if (err)
+ return err;
+
+ err = intel_uncore_check_forcewake_domains(i915);
+ if (err)
+ return err;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
new file mode 100644
index 000000000000..8d3a90c3f8ac
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_context.h"
+#include "mock_gtt.h"
+
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct i915_gem_context *ctx;
+ int ret;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return NULL;
+
+ kref_init(&ctx->ref);
+ INIT_LIST_HEAD(&ctx->link);
+ ctx->i915 = i915;
+
+ ret = ida_simple_get(&i915->context_hw_ida,
+ 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
+ if (ret < 0)
+ goto err_free;
+ ctx->hw_id = ret;
+
+ if (name) {
+ ctx->name = kstrdup(name, GFP_KERNEL);
+ if (!ctx->name)
+ goto err_put;
+
+ ctx->ppgtt = mock_ppgtt(i915, name);
+ if (!ctx->ppgtt)
+ goto err_put;
+ }
+
+ return ctx;
+
+err_free:
+ kfree(ctx);
+ return NULL;
+
+err_put:
+ i915_gem_context_set_closed(ctx);
+ i915_gem_context_put(ctx);
+ return NULL;
+}
+
+void mock_context_close(struct i915_gem_context *ctx)
+{
+ i915_gem_context_set_closed(ctx);
+
+ i915_ppgtt_close(&ctx->ppgtt->base);
+
+ i915_gem_context_put(ctx);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.h b/drivers/gpu/drm/i915/selftests/mock_context.h
new file mode 100644
index 000000000000..2427e5c0916a
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_context.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_CONTEXT_H
+#define __MOCK_CONTEXT_H
+
+struct i915_gem_context *
+mock_context(struct drm_i915_private *i915,
+ const char *name);
+
+void mock_context_close(struct i915_gem_context *ctx);
+
+#endif /* !__MOCK_CONTEXT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.c b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
new file mode 100644
index 000000000000..99da8f4ef497
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.c
@@ -0,0 +1,176 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_dmabuf.h"
+
+static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
+ struct sg_table *st;
+ struct scatterlist *sg;
+ int i, err;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return ERR_PTR(-ENOMEM);
+
+ err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
+ if (err)
+ goto err_free;
+
+ sg = st->sgl;
+ for (i = 0; i < mock->npages; i++) {
+ sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
+ sg = sg_next(sg);
+ }
+
+ if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ err = -ENOMEM;
+ goto err_st;
+ }
+
+ return st;
+
+err_st:
+ sg_free_table(st);
+err_free:
+ kfree(st);
+ return ERR_PTR(err);
+}
+
+static void mock_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *st,
+ enum dma_data_direction dir)
+{
+ dma_unmap_sg(attachment->dev, st->sgl, st->nents, dir);
+ sg_free_table(st);
+ kfree(st);
+}
+
+static void mock_dmabuf_release(struct dma_buf *dma_buf)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+ int i;
+
+ for (i = 0; i < mock->npages; i++)
+ put_page(mock->pages[i]);
+
+ kfree(mock);
+}
+
+static void *mock_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return vm_map_ram(mock->pages, mock->npages, 0, PAGE_KERNEL);
+}
+
+static void mock_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ vm_unmap_ram(vaddr, mock->npages);
+}
+
+static void *mock_dmabuf_kmap_atomic(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return kmap_atomic(mock->pages[page_num]);
+}
+
+static void mock_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+ kunmap_atomic(addr);
+}
+
+static void *mock_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return kmap(mock->pages[page_num]);
+}
+
+static void mock_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
+{
+ struct mock_dmabuf *mock = to_mock(dma_buf);
+
+ return kunmap(mock->pages[page_num]);
+}
+
+static int mock_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+{
+ return -ENODEV;
+}
+
+static const struct dma_buf_ops mock_dmabuf_ops = {
+ .map_dma_buf = mock_map_dma_buf,
+ .unmap_dma_buf = mock_unmap_dma_buf,
+ .release = mock_dmabuf_release,
+ .kmap = mock_dmabuf_kmap,
+ .kmap_atomic = mock_dmabuf_kmap_atomic,
+ .kunmap = mock_dmabuf_kunmap,
+ .kunmap_atomic = mock_dmabuf_kunmap_atomic,
+ .mmap = mock_dmabuf_mmap,
+ .vmap = mock_dmabuf_vmap,
+ .vunmap = mock_dmabuf_vunmap,
+};
+
+static struct dma_buf *mock_dmabuf(int npages)
+{
+ struct mock_dmabuf *mock;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+ int i;
+
+ mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),
+ GFP_KERNEL);
+ if (!mock)
+ return ERR_PTR(-ENOMEM);
+
+ mock->npages = npages;
+ for (i = 0; i < npages; i++) {
+ mock->pages[i] = alloc_page(GFP_KERNEL);
+ if (!mock->pages[i])
+ goto err;
+ }
+
+ exp_info.ops = &mock_dmabuf_ops;
+ exp_info.size = npages * PAGE_SIZE;
+ exp_info.flags = O_CLOEXEC;
+ exp_info.priv = mock;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf))
+ goto err;
+
+ return dmabuf;
+
+err:
+ while (i--)
+ put_page(mock->pages[i]);
+ kfree(mock);
+ return ERR_PTR(-ENOMEM);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_dmabuf.h b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
new file mode 100644
index 000000000000..ec80613159b9
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_dmabuf.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_DMABUF_H__
+#define __MOCK_DMABUF_H__
+
+#include <linux/dma-buf.h>
+
+struct mock_dmabuf {
+ int npages;
+ struct page *pages[];
+};
+
+static struct mock_dmabuf *to_mock(struct dma_buf *buf)
+{
+ return buf->priv;
+}
+
+#endif /* !__MOCK_DMABUF_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.c b/drivers/gpu/drm/i915/selftests/mock_drm.c
new file mode 100644
index 000000000000..113dec05c7dc
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_drm.h"
+
+static inline struct inode fake_inode(struct drm_i915_private *i915)
+{
+ return (struct inode){ .i_rdev = i915->drm.primary->index };
+}
+
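+/* Open a drm_file without a real chardev: drm_open() only consults the
+ * inode for the minor's device number and uses the file to stash its
+ * private_data, so fakes on the stack suffice here.
+ */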
+struct drm_file *mock_file(struct drm_i915_private *i915)
+{
+ struct inode inode = fake_inode(i915);
+ struct file filp = {};
+ struct drm_file *file;
+ int err;
+
+ err = drm_open(&inode, &filp);
+ if (unlikely(err))
+ return ERR_PTR(err);
+
+ file = filp.private_data;
+ file->authenticated = true;
+ return file;
+}
+
+void mock_file_free(struct drm_i915_private *i915, struct drm_file *file)
+{
+ struct inode inode = fake_inode(i915);
+ struct file filp = { .private_data = file };
+
+ drm_release(&inode, &filp);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_drm.h b/drivers/gpu/drm/i915/selftests/mock_drm.h
new file mode 100644
index 000000000000..b39beee9f8f6
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_drm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_DRM_H
+#define __MOCK_DRM_H
+
+struct drm_file *mock_file(struct drm_i915_private *i915);
+void mock_file_free(struct drm_i915_private *i915, struct drm_file *file);
+
+#endif /* !__MOCK_DRM_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
new file mode 100644
index 000000000000..8d5ba037064c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_engine.h"
+#include "mock_request.h"
+
+static struct mock_request *first_request(struct mock_engine *engine)
+{
+ return list_first_entry_or_null(&engine->hw_queue,
+ struct mock_request,
+ link);
+}
+
+static void hw_delay_complete(unsigned long data)
+{
+ struct mock_engine *engine = (typeof(engine))data;
+ struct mock_request *request;
+
+ spin_lock(&engine->hw_lock);
+
+ request = first_request(engine);
+ if (request) {
+ list_del_init(&request->link);
+ mock_seqno_advance(&engine->base, request->base.global_seqno);
+ }
+
+ request = first_request(engine);
+ if (request)
+ mod_timer(&engine->hw_delay, jiffies + request->delay);
+
+ spin_unlock(&engine->hw_lock);
+}
+
+static int mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ i915_gem_context_get(ctx);
+ return 0;
+}
+
+static void mock_context_unpin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
+{
+ i915_gem_context_put(ctx);
+}
+
+static int mock_request_alloc(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+
+ INIT_LIST_HEAD(&mock->link);
+ mock->delay = 0;
+
+ request->ring = request->engine->buffer;
+ return 0;
+}
+
+static int mock_emit_flush(struct drm_i915_gem_request *request,
+ unsigned int flags)
+{
+ return 0;
+}
+
+static void mock_emit_breadcrumb(struct drm_i915_gem_request *request,
+ u32 *flags)
+{
+}
+
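+/* "Execute" requests with a timer: queue the request and, if it is the
+ * first on the queue, arm the delay timer. hw_delay_complete() then
+ * retires it by advancing the fake seqno and rearms itself for the
+ * next queued request.
+ */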
+static void mock_submit_request(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+
+ i915_gem_request_submit(request);
+ GEM_BUG_ON(!request->global_seqno);
+
+ spin_lock_irq(&engine->hw_lock);
+ list_add_tail(&mock->link, &engine->hw_queue);
+ if (mock->link.prev == &engine->hw_queue)
+ mod_timer(&engine->hw_delay, jiffies + mock->delay);
+ spin_unlock_irq(&engine->hw_lock);
+}
+
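+/* A mock ring: the "buffer" into which requests emit their (ignored)
+ * commands is simply the tail of the same allocation as the struct.
+ */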
+static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
+{
+ const unsigned long sz = roundup_pow_of_two(sizeof(struct intel_ring));
+ struct intel_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ ring->engine = engine;
+ ring->size = sz;
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+
+ INIT_LIST_HEAD(&ring->request_list);
+ ring->last_retired_head = -1;
+ intel_ring_update_space(ring);
+
+ return ring;
+}
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct mock_engine *engine;
+ static int id;
+
+ engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
+ if (!engine)
+ return NULL;
+
+ engine->base.buffer = mock_ring(&engine->base);
+ if (!engine->base.buffer) {
+ kfree(engine);
+ return NULL;
+ }
+
+ /* minimal engine setup for requests */
+ engine->base.i915 = i915;
+ engine->base.name = name;
+ engine->base.id = id++;
+ engine->base.status_page.page_addr = (void *)(engine + 1);
+
+ engine->base.context_pin = mock_context_pin;
+ engine->base.context_unpin = mock_context_unpin;
+ engine->base.request_alloc = mock_request_alloc;
+ engine->base.emit_flush = mock_emit_flush;
+ engine->base.emit_breadcrumb = mock_emit_breadcrumb;
+ engine->base.submit_request = mock_submit_request;
+
+ engine->base.timeline =
+ &i915->gt.global_timeline.engine[engine->base.id];
+
+ intel_engine_init_breadcrumbs(&engine->base);
+ engine->base.breadcrumbs.mock = true; /* prevent touching HW for irqs */
+
+ /* fake hw queue */
+ spin_lock_init(&engine->hw_lock);
+ setup_timer(&engine->hw_delay,
+ hw_delay_complete,
+ (unsigned long)engine);
+ INIT_LIST_HEAD(&engine->hw_queue);
+
+ return &engine->base;
+}
+
+void mock_engine_flush(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+ struct mock_request *request, *rn;
+
+ del_timer_sync(&mock->hw_delay);
+
+ spin_lock_irq(&mock->hw_lock);
+ list_for_each_entry_safe(request, rn, &mock->hw_queue, link) {
+ list_del_init(&request->link);
+ mock_seqno_advance(&mock->base, request->base.global_seqno);
+ }
+ spin_unlock_irq(&mock->hw_lock);
+}
+
+void mock_engine_reset(struct intel_engine_cs *engine)
+{
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, 0);
+}
+
+void mock_engine_free(struct intel_engine_cs *engine)
+{
+ struct mock_engine *mock =
+ container_of(engine, typeof(*mock), base);
+
+ GEM_BUG_ON(timer_pending(&mock->hw_delay));
+
+ if (engine->last_retired_context)
+ engine->context_unpin(engine, engine->last_retired_context);
+
+ intel_engine_fini_breadcrumbs(engine);
+
+ kfree(engine->buffer);
+ kfree(engine);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.h b/drivers/gpu/drm/i915/selftests/mock_engine.h
new file mode 100644
index 000000000000..e5e240216ba3
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_ENGINE_H__
+#define __MOCK_ENGINE_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+
+#include "../intel_ringbuffer.h"
+
+struct mock_engine {
+ struct intel_engine_cs base;
+
+ spinlock_t hw_lock;
+ struct list_head hw_queue;
+ struct timer_list hw_delay;
+};
+
+struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
+ const char *name);
+void mock_engine_flush(struct intel_engine_cs *engine);
+void mock_engine_reset(struct intel_engine_cs *engine);
+void mock_engine_free(struct intel_engine_cs *engine);
+
+static inline void mock_seqno_advance(struct intel_engine_cs *engine, u32 seqno)
+{
+ intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
+ intel_engine_wakeup(engine);
+}
+
+#endif /* !__MOCK_ENGINE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
new file mode 100644
index 000000000000..6a8258eacdcb
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/pm_runtime.h>
+
+#include "mock_engine.h"
+#include "mock_context.h"
+#include "mock_request.h"
+#include "mock_gem_device.h"
+#include "mock_gem_object.h"
+#include "mock_gtt.h"
+
+void mock_device_flush(struct drm_i915_private *i915)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ lockdep_assert_held(&i915->drm.struct_mutex);
+
+ for_each_engine(engine, i915, id)
+ mock_engine_flush(engine);
+
+ i915_gem_retire_requests(i915);
+}
+
+static void mock_device_release(struct drm_device *dev)
+{
+ struct drm_i915_private *i915 = to_i915(dev);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ mock_device_flush(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ cancel_delayed_work_sync(&i915->gt.retire_work);
+ cancel_delayed_work_sync(&i915->gt.idle_work);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ for_each_engine(engine, i915, id)
+ mock_engine_free(engine);
+ i915_gem_context_fini(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ drain_workqueue(i915->wq);
+ i915_gem_drain_freed_objects(i915);
+
+ mutex_lock(&i915->drm.struct_mutex);
+ mock_fini_ggtt(i915);
+ i915_gem_timeline_fini(&i915->gt.global_timeline);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ destroy_workqueue(i915->wq);
+
+ kmem_cache_destroy(i915->dependencies);
+ kmem_cache_destroy(i915->requests);
+ kmem_cache_destroy(i915->vmas);
+ kmem_cache_destroy(i915->objects);
+
+ drm_dev_fini(&i915->drm);
+ put_device(&i915->drm.pdev->dev);
+}
+
+static struct drm_driver mock_driver = {
+ .name = "mock",
+ .driver_features = DRIVER_GEM,
+ .release = mock_device_release,
+
+ .gem_close_object = i915_gem_close_object,
+ .gem_free_object_unlocked = i915_gem_free_object,
+};
+
+static void release_dev(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+
+ kfree(pdev);
+}
+
+static void mock_retire_work_handler(struct work_struct *work)
+{
+}
+
+static void mock_idle_work_handler(struct work_struct *work)
+{
+}
+
+struct drm_i915_private *mock_gem_device(void)
+{
+ struct drm_i915_private *i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct pci_dev *pdev;
+ int err;
+
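+ /* Allocate the fake pci_dev and the i915 device as a single block;
+ * the drm_i915_private lives immediately after the pci_dev and never
+ * touches real hardware.
+ */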
+ pdev = kzalloc(sizeof(*pdev) + sizeof(*i915), GFP_KERNEL);
+ if (!pdev)
+ goto err;
+
+ device_initialize(&pdev->dev);
+ pdev->dev.release = release_dev;
+ dev_set_name(&pdev->dev, "mock");
+ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ i915 = (struct drm_i915_private *)(pdev + 1);
+ pci_set_drvdata(pdev, i915);
+
+ err = drm_dev_init(&i915->drm, &mock_driver, &pdev->dev);
+ if (err) {
+ pr_err("Failed to initialise mock GEM device: err=%d\n", err);
+ goto put_device;
+ }
+ i915->drm.pdev = pdev;
+ i915->drm.dev_private = i915;
+
+ /* Using the global GTT may ask questions about KMS users, so prepare the KMS state up front */
+ drm_mode_config_init(&i915->drm);
+
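+ /* the mock corresponds to no real hardware generation */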
+ mkwrite_device_info(i915)->gen = -1;
+
+ spin_lock_init(&i915->mm.object_stat_lock);
+
+ init_waitqueue_head(&i915->gpu_error.wait_queue);
+ init_waitqueue_head(&i915->gpu_error.reset_queue);
+
+ i915->wq = alloc_ordered_workqueue("mock", 0);
+ if (!i915->wq)
+ goto put_device;
+
+ INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
+ init_llist_head(&i915->mm.free_list);
+ INIT_LIST_HEAD(&i915->mm.unbound_list);
+ INIT_LIST_HEAD(&i915->mm.bound_list);
+
+ ida_init(&i915->context_hw_ida);
+
+ INIT_DELAYED_WORK(&i915->gt.retire_work, mock_retire_work_handler);
+ INIT_DELAYED_WORK(&i915->gt.idle_work, mock_idle_work_handler);
+
+ i915->gt.awake = true;
+
+ i915->objects = KMEM_CACHE(mock_object, SLAB_HWCACHE_ALIGN);
+ if (!i915->objects)
+ goto err_wq;
+
+ i915->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+ if (!i915->vmas)
+ goto err_objects;
+
+ i915->requests = KMEM_CACHE(mock_request,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_DESTROY_BY_RCU);
+ if (!i915->requests)
+ goto err_vmas;
+
+ i915->dependencies = KMEM_CACHE(i915_dependency,
+ SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT);
+ if (!i915->dependencies)
+ goto err_requests;
+
+ mutex_lock(&i915->drm.struct_mutex);
+ INIT_LIST_HEAD(&i915->gt.timelines);
+ err = i915_gem_timeline_init__global(i915);
+ if (err) {
+ mutex_unlock(&i915->drm.struct_mutex);
+ goto err_dependencies;
+ }
+
+ mock_init_ggtt(i915);
+ mutex_unlock(&i915->drm.struct_mutex);
+
+ mkwrite_device_info(i915)->ring_mask = BIT(0);
+ i915->engine[RCS] = mock_engine(i915, "mock");
+ if (!i915->engine[RCS])
+ goto err_dependencies;
+
+ i915->kernel_context = mock_context(i915, NULL);
+ if (!i915->kernel_context)
+ goto err_engine;
+
+ return i915;
+
+err_engine:
+ for_each_engine(engine, i915, id)
+ mock_engine_free(engine);
+err_dependencies:
+ kmem_cache_destroy(i915->dependencies);
+err_requests:
+ kmem_cache_destroy(i915->requests);
+err_vmas:
+ kmem_cache_destroy(i915->vmas);
+err_objects:
+ kmem_cache_destroy(i915->objects);
+err_wq:
+ destroy_workqueue(i915->wq);
+put_device:
+ put_device(&pdev->dev);
+err:
+ return NULL;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.h b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
new file mode 100644
index 000000000000..4cca4d57f52c
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.h
@@ -0,0 +1,9 @@
+#ifndef __MOCK_GEM_DEVICE_H__
+#define __MOCK_GEM_DEVICE_H__
+
+struct drm_i915_private;
+
+struct drm_i915_private *mock_gem_device(void);
+void mock_device_flush(struct drm_i915_private *i915);
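+
+/*
+ * Typical use in a selftest (sketch):
+ *
+ * i915 = mock_gem_device();
+ * if (!i915)
+ * return -ENOMEM;
+ * ... exercise GEM internals ...
+ * drm_dev_unref(&i915->drm);
+ */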
+
+#endif /* !__MOCK_GEM_DEVICE_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_object.h b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
new file mode 100644
index 000000000000..9fbf67321662
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_object.h
@@ -0,0 +1,8 @@
+#ifndef __MOCK_GEM_OBJECT_H__
+#define __MOCK_GEM_OBJECT_H__
+
+struct mock_object {
+ struct drm_i915_gem_object base;
+};
+
+#endif /* !__MOCK_GEM_OBJECT_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
new file mode 100644
index 000000000000..a61309c7cb3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_gtt.h"
+
+static void mock_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+}
+
+static void mock_insert_entries(struct i915_address_space *vm,
+ struct sg_table *st,
+ u64 start,
+ enum i915_cache_level level, u32 flags)
+{
+}
+
+static int mock_bind_ppgtt(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND);
+ vma->pages = vma->obj->mm.pages;
+ vma->flags |= I915_VMA_LOCAL_BIND;
+ return 0;
+}
+
+static void mock_unbind_ppgtt(struct i915_vma *vma)
+{
+}
+
+static void mock_cleanup(struct i915_address_space *vm)
+{
+}
+
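+/*
+ * The mock ppgtt spans (almost) the whole 64-bit address space, so tests
+ * can exercise the drm_mm range manager without hardware address limits.
+ */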
+struct i915_hw_ppgtt *
+mock_ppgtt(struct drm_i915_private *i915,
+ const char *name)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
+ if (!ppgtt)
+ return NULL;
+
+ kref_init(&ppgtt->ref);
+ ppgtt->base.i915 = i915;
+ ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE);
+ ppgtt->base.file = ERR_PTR(-ENODEV);
+
+ INIT_LIST_HEAD(&ppgtt->base.active_list);
+ INIT_LIST_HEAD(&ppgtt->base.inactive_list);
+ INIT_LIST_HEAD(&ppgtt->base.unbound_list);
+
+ INIT_LIST_HEAD(&ppgtt->base.global_link);
+ drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total);
+ i915_gem_timeline_init(i915, &ppgtt->base.timeline, name);
+
+ ppgtt->base.clear_range = nop_clear_range;
+ ppgtt->base.insert_page = mock_insert_page;
+ ppgtt->base.insert_entries = mock_insert_entries;
+ ppgtt->base.bind_vma = mock_bind_ppgtt;
+ ppgtt->base.unbind_vma = mock_unbind_ppgtt;
+ ppgtt->base.cleanup = mock_cleanup;
+
+ return ppgtt;
+}
+
+static int mock_bind_ggtt(struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ int err;
+
+ err = i915_get_ggtt_vma_pages(vma);
+ if (err)
+ return err;
+
+ vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+ return 0;
+}
+
+static void mock_unbind_ggtt(struct i915_vma *vma)
+{
+}
+
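+/*
+ * A deliberately small global GTT (an 8MiB mappable window in a 16MiB
+ * total, assuming 4KiB pages) so tests can fill and evict it cheaply.
+ */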
+void mock_init_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ INIT_LIST_HEAD(&i915->vm_list);
+
+ ggtt->base.i915 = i915;
+
+ ggtt->mappable_base = 0;
+ ggtt->mappable_end = 2048 * PAGE_SIZE;
+ ggtt->base.total = 4096 * PAGE_SIZE;
+
+ ggtt->base.clear_range = nop_clear_range;
+ ggtt->base.insert_page = mock_insert_page;
+ ggtt->base.insert_entries = mock_insert_entries;
+ ggtt->base.bind_vma = mock_bind_ggtt;
+ ggtt->base.unbind_vma = mock_unbind_ggtt;
+ ggtt->base.cleanup = mock_cleanup;
+
+ i915_address_space_init(&ggtt->base, i915, "global");
+}
+
+void mock_fini_ggtt(struct drm_i915_private *i915)
+{
+ struct i915_ggtt *ggtt = &i915->ggtt;
+
+ i915_address_space_fini(&ggtt->base);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.h b/drivers/gpu/drm/i915/selftests/mock_gtt.h
new file mode 100644
index 000000000000..9a0a833bb545
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_GTT_H
+#define __MOCK_GTT_H
+
+void mock_init_ggtt(struct drm_i915_private *i915);
+void mock_fini_ggtt(struct drm_i915_private *i915);
+
+struct i915_hw_ppgtt *
+mock_ppgtt(struct drm_i915_private *i915,
+ const char *name);
+
+#endif /* !__MOCK_GTT_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c
new file mode 100644
index 000000000000..0e8d2e7f8c70
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "mock_engine.h"
+#include "mock_request.h"
+
+struct drm_i915_gem_request *
+mock_request(struct intel_engine_cs *engine,
+ struct i915_gem_context *context,
+ unsigned long delay)
+{
+ struct drm_i915_gem_request *request;
+ struct mock_request *mock;
+
+ /* NB the i915->requests slab cache is enlarged to fit mock_request */
+ request = i915_gem_request_alloc(engine, context);
+ if (!request)
+ return NULL;
+
+ mock = container_of(request, typeof(*mock), base);
+ mock->delay = delay;
+
+ return &mock->base;
+}
+
+bool mock_cancel_request(struct drm_i915_gem_request *request)
+{
+ struct mock_request *mock = container_of(request, typeof(*mock), base);
+ struct mock_engine *engine =
+ container_of(request->engine, typeof(*engine), base);
+ bool was_queued;
+
+ spin_lock_irq(&engine->hw_lock);
+ was_queued = !list_empty(&mock->link);
+ list_del_init(&mock->link);
+ spin_unlock_irq(&engine->hw_lock);
+
+ if (was_queued)
+ i915_gem_request_unsubmit(request);
+
+ return was_queued;
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_request.h b/drivers/gpu/drm/i915/selftests/mock_request.h
new file mode 100644
index 000000000000..4dea74c8e96d
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/mock_request.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MOCK_REQUEST__
+#define __MOCK_REQUEST__
+
+#include <linux/list.h>
+
+#include "../i915_gem_request.h"
+
+struct mock_request {
+ struct drm_i915_gem_request base;
+
+ struct list_head link;
+ unsigned long delay;
+};
+
+struct drm_i915_gem_request *
+mock_request(struct intel_engine_cs *engine,
+ struct i915_gem_context *context,
+ unsigned long delay);
+
+bool mock_cancel_request(struct drm_i915_gem_request *request);
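+
+/*
+ * Sketch of intended use: build a request on the mock engine, submit it,
+ * then optionally pull it back before the fake hardware completes it:
+ *
+ * rq = mock_request(i915->engine[RCS], i915->kernel_context, HZ);
+ * i915_add_request(rq);
+ * ...
+ * mock_cancel_request(rq);
+ */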
+
+#endif /* !__MOCK_REQUEST__ */
diff --git a/drivers/gpu/drm/i915/selftests/scatterlist.c b/drivers/gpu/drm/i915/selftests/scatterlist.c
new file mode 100644
index 000000000000..eb2cda8e2b9f
--- /dev/null
+++ b/drivers/gpu/drm/i915/selftests/scatterlist.c
@@ -0,0 +1,355 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/prime_numbers.h>
+#include <linux/random.h>
+
+#include "../i915_selftest.h"
+
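+/* arbitrary bias so the synthetic tables never reference pfn 0 */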
+#define PFN_BIAS (1 << 10)
+
+struct pfn_table {
+ struct sg_table st;
+ unsigned long start, end;
+};
+
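+/*
+ * An npages_fn_t decides how many pages the n-th scatterlist entry covers,
+ * letting each test vary the coalescing pattern (constant, growing,
+ * shrinking or pseudo-random).
+ */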
+typedef unsigned int (*npages_fn_t)(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd);
+
+static noinline int expect_pfn_sg(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ struct scatterlist *sg;
+ unsigned long pfn, n;
+
+ pfn = pt->start;
+ for_each_sg(pt->st.sgl, sg, pt->st.nents, n) {
+ struct page *page = sg_page(sg);
+ unsigned int npages = npages_fn(n, pt->st.nents, rnd);
+
+ if (page_to_pfn(page) != pfn) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (sg->length != npages * PAGE_SIZE) {
+ pr_err("%s: %s copied wrong sg length, expected size %lu, found %u (using for_each_sg)\n",
+ __func__, who, npages * PAGE_SIZE, sg->length);
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn += npages;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static noinline int expect_pfn_sg_page_iter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sg_page_iter sgiter;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sg_page(pt->st.sgl, &sgiter, pt->st.nents, 0) {
+ struct page *page = sg_page_iter_page(&sgiter);
+
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sg_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static noinline int expect_pfn_sgtiter(struct pfn_table *pt,
+ const char *who,
+ unsigned long timeout)
+{
+ struct sgt_iter sgt;
+ struct page *page;
+ unsigned long pfn;
+
+ pfn = pt->start;
+ for_each_sgt_page(page, sgt, &pt->st) {
+ if (page != pfn_to_page(pfn)) {
+ pr_err("%s: %s left pages out of order, expected pfn %lu, found pfn %lu (using for_each_sgt_page)\n",
+ __func__, who, pfn, page_to_pfn(page));
+ return -EINVAL;
+ }
+
+ if (igt_timeout(timeout, "%s timed out\n", who))
+ return -EINTR;
+
+ pfn++;
+ }
+ if (pfn != pt->end) {
+ pr_err("%s: %s finished on wrong pfn, expected %lu, found %lu\n",
+ __func__, who, pt->end, pfn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
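+/*
+ * Walk the same table with all three iterator styles (for_each_sg,
+ * for_each_sg_page and for_each_sgt_page) and check that they agree on
+ * page order and extent.
+ */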
+static int expect_pfn_sgtable(struct pfn_table *pt,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ const char *who,
+ unsigned long timeout)
+{
+ int err;
+
+ err = expect_pfn_sg(pt, npages_fn, rnd, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sg_page_iter(pt, who, timeout);
+ if (err)
+ return err;
+
+ err = expect_pfn_sgtiter(pt, who, timeout);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static unsigned int one(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1;
+}
+
+static unsigned int grow(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return n + 1;
+}
+
+static unsigned int shrink(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return count - n;
+}
+
+static unsigned int random(unsigned long n,
+ unsigned long count,
+ struct rnd_state *rnd)
+{
+ return 1 + (prandom_u32_state(rnd) % 1024);
+}
+
+static int alloc_table(struct pfn_table *pt,
+ unsigned long count, unsigned long max,
+ npages_fn_t npages_fn,
+ struct rnd_state *rnd,
+ int alloc_error)
+{
+ struct scatterlist *sg;
+ unsigned long n, pfn;
+
+ if (sg_alloc_table(&pt->st, max,
+ GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN))
+ return alloc_error;
+
+ /* callers keep count below 1 << 20, so count * PAGE_SIZE cannot overflow sg->length */
+ GEM_BUG_ON(overflows_type(count * PAGE_SIZE, sg->length));
+
+ /* Construct a table where each scatterlist contains different number
+ * of entries. The idea is to check that we can iterate the individual
+ * pages from inside the coalesced lists.
+ */
+ pt->start = PFN_BIAS;
+ pfn = pt->start;
+ sg = pt->st.sgl;
+ for (n = 0; n < count; n++) {
+ unsigned long npages = npages_fn(n, count, rnd);
+
+ /*
+ * Nobody expects the Sparse Memmap! The struct pages must be
+ * contiguous for a single sg entry to cover the whole run.
+ */
+ if (pfn_to_page(pfn + npages) != pfn_to_page(pfn) + npages) {
+ sg_free_table(&pt->st);
+ return -ENOSPC;
+ }
+
+ if (n)
+ sg = sg_next(sg);
+ sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, 0);
+
+ GEM_BUG_ON(page_to_pfn(sg_page(sg)) != pfn);
+ GEM_BUG_ON(sg->length != npages * PAGE_SIZE);
+ GEM_BUG_ON(sg->offset != 0);
+
+ pfn += npages;
+ }
+ sg_mark_end(sg);
+ pt->st.nents = n;
+ pt->end = pfn;
+
+ return 0;
+}
+
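+/* NULL-terminated so callers can walk the list without a separate count */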
+static const npages_fn_t npages_funcs[] = {
+ one,
+ grow,
+ shrink,
+ random,
+ NULL,
+};
+
+static int igt_sg_alloc(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max_order = 20; /* approximating a 4GiB object */
+ struct rnd_state prng;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max_order) {
+ unsigned long size = BIT(prime);
+ int offset;
+
+ for (offset = -1; offset <= 1; offset++) {
+ unsigned long sz = size + offset;
+ const npages_fn_t *npages;
+ struct pfn_table pt;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = alloc_table(&pt, sz, sz, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt, *npages, &prng,
+ "sg_alloc_table",
+ end_time);
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (size > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
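+/*
+ * Allocate an oversized table, then check that i915_sg_trim() compacts it
+ * down to exactly the used entries without disturbing the pages.
+ */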
+static int igt_sg_trim(void *ignored)
+{
+ IGT_TIMEOUT(end_time);
+ const unsigned long max = PAGE_SIZE; /* not prime! */
+ struct pfn_table pt;
+ unsigned long prime;
+ int alloc_error = -ENOMEM;
+
+ for_each_prime_number(prime, max) {
+ const npages_fn_t *npages;
+ int err;
+
+ for (npages = npages_funcs; *npages; npages++) {
+ struct rnd_state prng;
+
+ prandom_seed_state(&prng, i915_selftest.random_seed);
+ err = alloc_table(&pt, prime, max, *npages, &prng,
+ alloc_error);
+ if (err == -ENOSPC)
+ break;
+ if (err)
+ return err;
+
+ if (i915_sg_trim(&pt.st)) {
+ if (pt.st.orig_nents != prime ||
+ pt.st.nents != prime) {
+ pr_err("i915_sg_trim failed (nents %u, orig_nents %u), expected %lu\n",
+ pt.st.nents, pt.st.orig_nents, prime);
+ err = -EINVAL;
+ } else {
+ prandom_seed_state(&prng,
+ i915_selftest.random_seed);
+ err = expect_pfn_sgtable(&pt,
+ *npages, &prng,
+ "i915_sg_trim",
+ end_time);
+ }
+ }
+ sg_free_table(&pt.st);
+ if (err)
+ return err;
+ }
+
+ /* Test at least one continuation before accepting oom */
+ if (prime > SG_MAX_SINGLE_ALLOC)
+ alloc_error = -ENOSPC;
+ }
+
+ return 0;
+}
+
+int scatterlist_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_sg_alloc),
+ SUBTEST(igt_sg_trim),
+ };
+
+ return i915_subtests(tests, NULL);
+}
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 70736615983c..1888bf3920fc 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -30,14 +30,10 @@
#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
+#include "ipuv3-plane.h"
#define MAX_CRTC 4
-struct imx_drm_component {
- struct device_node *of_node;
- struct list_head list;
-};
-
struct imx_drm_device {
struct drm_device *drm;
unsigned int pipes;
@@ -100,6 +96,11 @@ static int imx_drm_atomic_check(struct drm_device *dev,
if (ret)
return ret;
+ /* Assign PRG/PRE channels and check that all constraints are satisfied. */
+ ret = ipu_planes_assign_pre(dev, state);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -113,6 +114,10 @@ static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
+ struct drm_plane *plane;
+ struct drm_plane_state *old_plane_state;
+ bool plane_disabling = false;
+ int i;
drm_atomic_helper_commit_modeset_disables(dev, state);
@@ -122,11 +127,20 @@ static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
- drm_atomic_helper_commit_hw_done(state);
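+ /*
+ * Complete any deferred plane disables only after the next vblank,
+ * via ipu_plane_disable_deferred(), so scanout has stopped before
+ * the channels are torn down.
+ */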
+ for_each_plane_in_state(state, plane, old_plane_state, i) {
+ if (drm_atomic_plane_disabling(old_plane_state, plane->state))
+ plane_disabling = true;
+ }
+
+ if (plane_disabling) {
+ drm_atomic_helper_wait_for_vblanks(dev, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ for_each_plane_in_state(state, plane, old_plane_state, i)
+ ipu_plane_disable_deferred(plane);
- drm_atomic_helper_cleanup_planes(dev, state);
+ }
+
+ drm_atomic_helper_commit_hw_done(state);
}
static const struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index cc003334505d..295434b199db 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -39,4 +39,7 @@ int imx_drm_encoder_parse_of(struct drm_device *drm,
void imx_drm_connector_destroy(struct drm_connector *connector);
void imx_drm_encoder_destroy(struct drm_encoder *encoder);
+int ipu_planes_assign_pre(struct drm_device *dev,
+ struct drm_atomic_state *state);
+
#endif /* _IMX_DRM_H_ */
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index a3f2843b78cd..dab9d50ffd8c 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -55,11 +55,32 @@ static void ipu_crtc_enable(struct drm_crtc *crtc)
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
+ ipu_prg_enable(ipu);
ipu_dc_enable(ipu);
ipu_dc_enable_channel(ipu_crtc->dc);
ipu_di_enable(ipu_crtc->di);
}
+static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ bool disable_partial = false;
+ bool disable_full = false;
+ struct drm_plane *plane;
+
+ drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) {
+ if (plane == &ipu_crtc->plane[0]->base)
+ disable_full = true;
+ if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base)
+ disable_partial = true;
+ }
+
+ if (disable_partial)
+ ipu_plane_disable(ipu_crtc->plane[1], true);
+ if (disable_full)
+ ipu_plane_disable(ipu_crtc->plane[0], false);
+}
+
static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
@@ -73,8 +94,9 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
* attached IDMACs will be left in undefined state, possibly hanging
* the IPU or even system.
*/
- drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
+ ipu_crtc_disable_planes(ipu_crtc, old_crtc_state);
ipu_dc_disable(ipu);
+ ipu_prg_disable(ipu);
spin_lock_irq(&crtc->dev->event_lock);
if (crtc->state->event) {
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 8b5294d47cee..d63e853a0300 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -23,6 +23,17 @@
#include "video/imx-ipu-v3.h"
#include "ipuv3-plane.h"
+struct ipu_plane_state {
+ struct drm_plane_state base;
+ bool use_pre;
+};
+
+static inline struct ipu_plane_state *
+to_ipu_plane_state(struct drm_plane_state *p)
+{
+ return container_of(p, struct ipu_plane_state, base);
+}
+
static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
{
return container_of(p, struct ipu_plane, base);
@@ -57,6 +68,12 @@ static const uint32_t ipu_plane_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV16,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB565_A8,
+ DRM_FORMAT_BGR565_A8,
+ DRM_FORMAT_RGB888_A8,
+ DRM_FORMAT_BGR888_A8,
+ DRM_FORMAT_RGBX8888_A8,
+ DRM_FORMAT_BGRX8888_A8,
};
int ipu_plane_irq(struct ipu_plane *ipu_plane)
@@ -66,18 +83,18 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
}
static inline unsigned long
-drm_plane_state_to_eba(struct drm_plane_state *state)
+drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
- int x = state->src_x >> 16;
- int y = state->src_y >> 16;
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
+ cma_obj = drm_fb_cma_get_gem_obj(fb, plane);
BUG_ON(!cma_obj);
- return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y +
- fb->format->cpp[0] * x;
+ return cma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
+ fb->format->cpp[plane] * x;
}
static inline unsigned long
@@ -85,9 +102,9 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
- unsigned long eba = drm_plane_state_to_eba(state);
- int x = state->src_x >> 16;
- int y = state->src_y >> 16;
+ unsigned long eba = drm_plane_state_to_eba(state, 0);
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
BUG_ON(!cma_obj);
@@ -104,9 +121,9 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_gem_cma_object *cma_obj;
- unsigned long eba = drm_plane_state_to_eba(state);
- int x = state->src_x >> 16;
- int y = state->src_y >> 16;
+ unsigned long eba = drm_plane_state_to_eba(state, 0);
+ int x = state->src.x1 >> 16;
+ int y = state->src.y1 >> 16;
cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
BUG_ON(!cma_obj);
@@ -126,11 +143,14 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
ipu_dmfc_put(ipu_plane->dmfc);
if (!IS_ERR_OR_NULL(ipu_plane->ipu_ch))
ipu_idmac_put(ipu_plane->ipu_ch);
+ if (!IS_ERR_OR_NULL(ipu_plane->alpha_ch))
+ ipu_idmac_put(ipu_plane->alpha_ch);
}
int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
{
int ret;
+ int alpha_ch;
ipu_plane->ipu_ch = ipu_idmac_get(ipu_plane->ipu, ipu_plane->dma);
if (IS_ERR(ipu_plane->ipu_ch)) {
@@ -139,6 +159,17 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane)
return ret;
}
+ alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma);
+ if (alpha_ch >= 0) {
+ ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch);
+ if (IS_ERR(ipu_plane->alpha_ch)) {
+ ret = PTR_ERR(ipu_plane->alpha_ch);
+ DRM_ERROR("failed to get alpha idmac channel %d: %d\n",
+ alpha_ch, ret);
+ return ret;
+ }
+ }
+
ipu_plane->dmfc = ipu_dmfc_get(ipu_plane->ipu, ipu_plane->dma);
if (IS_ERR(ipu_plane->dmfc)) {
ret = PTR_ERR(ipu_plane->dmfc);
@@ -162,33 +193,61 @@ err_out:
return ret;
}
+static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane)
+{
+ switch (ipu_plane->base.state->fb->format->format) {
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ return true;
+ default:
+ return false;
+ }
+}
+
static void ipu_plane_enable(struct ipu_plane *ipu_plane)
{
if (ipu_plane->dp)
ipu_dp_enable(ipu_plane->ipu);
ipu_dmfc_enable_channel(ipu_plane->dmfc);
ipu_idmac_enable_channel(ipu_plane->ipu_ch);
+ if (ipu_plane_separate_alpha(ipu_plane))
+ ipu_idmac_enable_channel(ipu_plane->alpha_ch);
if (ipu_plane->dp)
ipu_dp_enable_channel(ipu_plane->dp);
}
-static int ipu_disable_plane(struct drm_plane *plane)
+void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel)
{
- struct ipu_plane *ipu_plane = to_ipu_plane(plane);
-
DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
- if (ipu_plane->dp)
- ipu_dp_disable_channel(ipu_plane->dp);
+ if (ipu_plane->dp && disable_dp_channel)
+ ipu_dp_disable_channel(ipu_plane->dp, false);
ipu_idmac_disable_channel(ipu_plane->ipu_ch);
+ if (ipu_plane->alpha_ch)
+ ipu_idmac_disable_channel(ipu_plane->alpha_ch);
ipu_dmfc_disable_channel(ipu_plane->dmfc);
if (ipu_plane->dp)
ipu_dp_disable(ipu_plane->ipu);
+ if (ipu_prg_present(ipu_plane->ipu))
+ ipu_prg_channel_disable(ipu_plane->ipu_ch);
+}
- return 0;
+void ipu_plane_disable_deferred(struct drm_plane *plane)
+{
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ if (ipu_plane->disabling) {
+ ipu_plane->disabling = false;
+ ipu_plane_disable(ipu_plane, false);
+ }
}
+EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred);
static void ipu_plane_destroy(struct drm_plane *plane)
{
@@ -200,13 +259,56 @@ static void ipu_plane_destroy(struct drm_plane *plane)
kfree(ipu_plane);
}
+void ipu_plane_state_reset(struct drm_plane *plane)
+{
+ struct ipu_plane_state *ipu_state;
+
+ if (plane->state) {
+ ipu_state = to_ipu_plane_state(plane->state);
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(ipu_state);
+ }
+
+ ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
+
+ if (ipu_state) {
+ ipu_state->base.plane = plane;
+ ipu_state->base.rotation = DRM_ROTATE_0;
+ }
+
+ plane->state = &ipu_state->base;
+}
+
+struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct ipu_plane_state *state;
+
+ if (WARN_ON(!plane->state))
+ return NULL;
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+
+ return &state->base;
+}
+
+void ipu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
+
+ __drm_atomic_helper_plane_destroy_state(state);
+ kfree(ipu_state);
+}
+
static const struct drm_plane_funcs ipu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = ipu_plane_destroy,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .reset = ipu_plane_state_reset,
+ .atomic_duplicate_state = ipu_plane_duplicate_state,
+ .atomic_destroy_state = ipu_plane_destroy_state,
};
static int ipu_plane_atomic_check(struct drm_plane *plane,
@@ -217,8 +319,11 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
struct device *dev = plane->dev->dev;
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
- unsigned long eba, ubo, vbo, old_ubo, old_vbo;
+ unsigned long eba, ubo, vbo, old_ubo, old_vbo, alpha_eba;
+ bool can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
+ struct drm_rect clip;
int hsub, vsub;
+ int ret;
/* Ok to disable */
if (!fb)
@@ -232,44 +337,35 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (WARN_ON(!crtc_state))
return -EINVAL;
+ clip.x1 = 0;
+ clip.y1 = 0;
+ clip.x2 = crtc_state->adjusted_mode.hdisplay;
+ clip.y2 = crtc_state->adjusted_mode.vdisplay;
+ ret = drm_plane_helper_check_state(state, &clip,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ can_position, true);
+ if (ret)
+ return ret;
+
/* CRTC should be enabled */
if (!crtc_state->enable)
return -EINVAL;
- /* no scaling */
- if (state->src_w >> 16 != state->crtc_w ||
- state->src_h >> 16 != state->crtc_h)
- return -EINVAL;
-
switch (plane->type) {
case DRM_PLANE_TYPE_PRIMARY:
- /* full plane doesn't support partial off screen */
- if (state->crtc_x || state->crtc_y ||
- state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
- state->crtc_h != crtc_state->adjusted_mode.vdisplay)
- return -EINVAL;
-
/* full plane minimum width is 13 pixels */
- if (state->crtc_w < 13)
+ if (drm_rect_width(&state->dst) < 13)
return -EINVAL;
break;
case DRM_PLANE_TYPE_OVERLAY:
- if (state->crtc_x < 0 || state->crtc_y < 0)
- return -EINVAL;
-
- if (state->crtc_x + state->crtc_w >
- crtc_state->adjusted_mode.hdisplay)
- return -EINVAL;
- if (state->crtc_y + state->crtc_h >
- crtc_state->adjusted_mode.vdisplay)
- return -EINVAL;
break;
default:
- dev_warn(dev, "Unsupported plane type\n");
+ dev_warn(dev, "Unsupported plane type %d\n", plane->type);
return -EINVAL;
}
- if (state->crtc_h < 2)
+ if (drm_rect_height(&state->dst) < 2)
return -EINVAL;
/*
@@ -279,12 +375,13 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
* callback. The planes will be reenabled in plane's ->atomic_update
* callback.
*/
- if (old_fb && (state->src_w != old_state->src_w ||
- state->src_h != old_state->src_h ||
- fb->format != old_fb->format))
+ if (old_fb &&
+ (drm_rect_width(&state->dst) != drm_rect_width(&old_state->dst) ||
+ drm_rect_height(&state->dst) != drm_rect_height(&old_state->dst) ||
+ fb->format != old_fb->format))
crtc_state->mode_changed = true;
- eba = drm_plane_state_to_eba(state);
+ eba = drm_plane_state_to_eba(state, 0);
if (eba & 0x7)
return -EINVAL;
@@ -350,9 +447,26 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
*/
hsub = drm_format_horz_chroma_subsampling(fb->format->format);
vsub = drm_format_vert_chroma_subsampling(fb->format->format);
- if (((state->src_x >> 16) & (hsub - 1)) ||
- ((state->src_y >> 16) & (vsub - 1)))
+ if (((state->src.x1 >> 16) & (hsub - 1)) ||
+ ((state->src.y1 >> 16) & (vsub - 1)))
return -EINVAL;
+ break;
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ alpha_eba = drm_plane_state_to_eba(state, 1);
+ if (alpha_eba & 0x7)
+ return -EINVAL;
+
+ if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
+ return -EINVAL;
+
+ if (old_fb && old_fb->pitches[1] != fb->pitches[1])
+ crtc_state->mode_changed = true;
+ break;
}
return 0;
@@ -361,7 +475,25 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
static void ipu_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- ipu_disable_plane(plane);
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ if (ipu_plane->dp)
+ ipu_dp_disable_channel(ipu_plane->dp, true);
+ ipu_plane->disabling = true;
+}
+
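+/*
+ * Give each of the three IPU sync channels its own AXI ID (0 is the
+ * default), presumably so the PRG can tell the memory flows apart.
+ */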
+static int ipu_chan_assign_axi_id(int ipu_chan)
+{
+ switch (ipu_chan) {
+ case IPUV3_CHANNEL_MEM_BG_SYNC:
+ return 1;
+ case IPUV3_CHANNEL_MEM_FG_SYNC:
+ return 2;
+ case IPUV3_CHANNEL_MEM_DC_SYNC:
+ return 3;
+ default:
+ return 0;
+ }
}
static void ipu_plane_atomic_update(struct drm_plane *plane,
@@ -369,18 +501,47 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
{
struct ipu_plane *ipu_plane = to_ipu_plane(plane);
struct drm_plane_state *state = plane->state;
+ struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
struct drm_crtc_state *crtc_state = state->crtc->state;
struct drm_framebuffer *fb = state->fb;
+ struct drm_rect *dst = &state->dst;
unsigned long eba, ubo, vbo;
+ unsigned long alpha_eba = 0;
enum ipu_color_space ics;
+ unsigned int axi_id = 0;
int active;
- eba = drm_plane_state_to_eba(state);
+ if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_FG)
+ ipu_dp_set_window_pos(ipu_plane->dp, dst->x1, dst->y1);
+
+ eba = drm_plane_state_to_eba(state, 0);
+
+ /*
+ * Configure PRG channel and attached PRE, this changes the EBA to an
+ * internal SRAM location.
+ */
+ if (ipu_state->use_pre) {
+ axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
+ ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16,
+ state->fb->pitches[0],
+ state->fb->format->format, &eba);
+ }
if (old_state->fb && !drm_atomic_crtc_needs_modeset(crtc_state)) {
+ /* nothing to do if PRE is used */
+ if (ipu_state->use_pre)
+ return;
active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
+ if (ipu_plane_separate_alpha(ipu_plane)) {
+ active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
+ alpha_eba);
+ ipu_idmac_select_buffer(ipu_plane->alpha_ch, !active);
+ }
return;
}
@@ -395,8 +556,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format);
ipu_dp_setup_channel(ipu_plane->dp, ics,
IPUV3_COLORSPACE_UNKNOWN);
- ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
- state->crtc_y);
/* Enable local alpha on partial plane */
switch (state->fb->format->format) {
case DRM_FORMAT_ARGB1555:
@@ -408,6 +567,12 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
break;
default:
@@ -416,15 +581,17 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
}
}
- ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
+ ipu_dmfc_config_wait4eot(ipu_plane->dmfc, drm_rect_width(dst));
ipu_cpmem_zero(ipu_plane->ipu_ch);
- ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
- state->src_h >> 16);
+ ipu_cpmem_set_resolution(ipu_plane->ipu_ch,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16);
ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format);
ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
+ ipu_cpmem_set_axi_id(ipu_plane->ipu_ch, axi_id);
switch (fb->format->format) {
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
@@ -444,7 +611,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
dev_dbg(ipu_plane->base.dev->dev,
"phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
- state->src_x >> 16, state->src_y >> 16);
+ state->src.x1 >> 16, state->src.y1 >> 16);
break;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV16:
@@ -455,11 +622,37 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
dev_dbg(ipu_plane->base.dev->dev,
"phy = %lu %lu, x = %d, y = %d", eba, ubo,
- state->src_x >> 16, state->src_y >> 16);
+ state->src.x1 >> 16, state->src.y1 >> 16);
+ break;
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ alpha_eba = drm_plane_state_to_eba(state, 1);
+
+ dev_dbg(ipu_plane->base.dev->dev, "phys = %lu %lu, x = %d, y = %d",
+ eba, alpha_eba, state->src.x1 >> 16, state->src.y1 >> 16);
+
+ ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
+
+ ipu_cpmem_zero(ipu_plane->alpha_ch);
+ ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
+ drm_rect_width(&state->src) >> 16,
+ drm_rect_height(&state->src) >> 16);
+ ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
+ ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
+ ipu_idmac_set_double_buffer(ipu_plane->alpha_ch, 1);
+ ipu_cpmem_set_stride(ipu_plane->alpha_ch,
+ state->fb->pitches[1]);
+ ipu_cpmem_set_burstsize(ipu_plane->alpha_ch, 16);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 0, alpha_eba);
+ ipu_cpmem_set_buffer(ipu_plane->alpha_ch, 1, alpha_eba);
break;
default:
dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
- eba, state->src_x >> 16, state->src_y >> 16);
+ eba, state->src.x1 >> 16, state->src.y1 >> 16);
break;
}
ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
@@ -474,6 +667,35 @@ static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
.atomic_update = ipu_plane_atomic_update,
};
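+/*
+ * Hand out the limited pool of PRE units: planes whose framebuffer format
+ * and modifier the PRG can handle get one, first come first served, until
+ * ipu_prg_max_active_channels() are in use.
+ */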
+int ipu_planes_assign_pre(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *plane_state;
+ struct drm_plane *plane;
+ int available_pres = ipu_prg_max_active_channels();
+ int i;
+
+ for_each_plane_in_state(state, plane, plane_state, i) {
+ struct ipu_plane_state *ipu_state =
+ to_ipu_plane_state(plane_state);
+ struct ipu_plane *ipu_plane = to_ipu_plane(plane);
+
+ if (ipu_prg_present(ipu_plane->ipu) && available_pres &&
+ plane_state->fb &&
+ ipu_prg_format_supported(ipu_plane->ipu,
+ plane_state->fb->format->format,
+ plane_state->fb->modifier)) {
+ ipu_state->use_pre = true;
+ available_pres--;
+ } else {
+ ipu_state->use_pre = false;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_planes_assign_pre);
+
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
int dma, int dp, unsigned int possible_crtcs,
enum drm_plane_type type)
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 338b88a74eb6..596b24ddbf65 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -18,11 +18,14 @@ struct ipu_plane {
struct ipu_soc *ipu;
struct ipuv3_channel *ipu_ch;
+ struct ipuv3_channel *alpha_ch;
struct dmfc_channel *dmfc;
struct ipu_dp *dp;
int dma;
int dp_flow;
+
+ bool disabling;
};
struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -42,4 +45,7 @@ void ipu_plane_put_resources(struct ipu_plane *plane);
int ipu_plane_irq(struct ipu_plane *plane);
+void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel);
+void ipu_plane_disable_deferred(struct drm_plane *plane);
+
#endif
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
index e10a4eda4078..1144e0c9e894 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c
@@ -65,13 +65,11 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
switch (format) {
case DRM_FORMAT_RGB565:
dev_dbg(drm->dev, "Setting up RGB565 mode\n");
- ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
ctrl |= CTRL_SET_WORD_LENGTH(0);
ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0xf);
break;
case DRM_FORMAT_XRGB8888:
dev_dbg(drm->dev, "Setting up XRGB8888 mode\n");
- ctrl |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
ctrl |= CTRL_SET_WORD_LENGTH(3);
/* Do not use packed pixels = one pixel per word instead. */
ctrl1 |= CTRL1_SET_BYTE_PACKAGING(0x7);
@@ -87,6 +85,36 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb)
return 0;
}
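+/*
+ * Program the LCDIF bus width from the connector's reported media bus
+ * format, defaulting to 24-bit RGB888 when none is given.
+ */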
+static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb)
+{
+ struct drm_crtc *crtc = &mxsfb->pipe.crtc;
+ struct drm_device *drm = crtc->dev;
+ u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ u32 reg;
+
+ reg = readl(mxsfb->base + LCDC_CTRL);
+
+ if (mxsfb->connector.display_info.num_bus_formats)
+ bus_format = mxsfb->connector.display_info.bus_formats[0];
+
+ reg &= ~CTRL_BUS_WIDTH_MASK;
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_16BIT);
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_18BIT);
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ reg |= CTRL_SET_BUS_WIDTH(STMLCDIF_24BIT);
+ break;
+ default:
+ dev_err(drm->dev, "Unknown media bus format %d\n", bus_format);
+ break;
+ }
+ writel(reg, mxsfb->base + LCDC_CTRL);
+}
+
static void mxsfb_enable_controller(struct mxsfb_drm_private *mxsfb)
{
u32 reg;
@@ -168,13 +196,22 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
vdctrl0 |= VDCTRL0_HSYNC_ACT_HIGH;
if (m->flags & DRM_MODE_FLAG_PVSYNC)
vdctrl0 |= VDCTRL0_VSYNC_ACT_HIGH;
- if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ /* Make sure Data Enable is active high by default */
+ if (!(bus_flags & DRM_BUS_FLAG_DE_LOW))
vdctrl0 |= VDCTRL0_ENABLE_ACT_HIGH;
- if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ /*
+ * The DRM_BUS_FLAG_PIXDATA_ defines are controller centric, while the
+ * controller's VDCTRL0_DOTCLK setting is display centric:
+ * driving on the positive edge -> the display samples on the falling edge
+ * DRM_BUS_FLAG_PIXDATA_POSEDGE -> VDCTRL0_DOTCLK_ACT_FALLING
+ */
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
vdctrl0 |= VDCTRL0_DOTCLK_ACT_FALLING;
writel(vdctrl0, mxsfb->base + LCDC_VDCTRL0);
+ mxsfb_set_bus_fmt(mxsfb);
+
/* Frame length in lines. */
writel(m->crtc_vtotal, mxsfb->base + LCDC_VDCTRL1);
@@ -184,8 +221,8 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
VDCTRL2_SET_HSYNC_PERIOD(m->crtc_htotal),
mxsfb->base + LCDC_VDCTRL2);
- writel(SET_HOR_WAIT_CNT(m->crtc_hblank_end - m->crtc_hsync_end) |
- SET_VERT_WAIT_CNT(m->crtc_vblank_end - m->crtc_vsync_end),
+ writel(SET_HOR_WAIT_CNT(m->crtc_htotal - m->crtc_hsync_start) |
+ SET_VERT_WAIT_CNT(m->crtc_vtotal - m->crtc_vsync_start),
mxsfb->base + LCDC_VDCTRL3);
writel(SET_DOTCLK_H_VALID_DATA_CNT(m->hdisplay),
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 5ac712325c75..d1b9c34c7c00 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -102,14 +102,18 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe,
{
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ drm_panel_prepare(mxsfb->panel);
mxsfb_crtc_enable(mxsfb);
+ drm_panel_enable(mxsfb->panel);
}
static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe)
{
struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe);
+ drm_panel_disable(mxsfb->panel);
mxsfb_crtc_disable(mxsfb);
+ drm_panel_unprepare(mxsfb->panel);
}
static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c
index fa8d17399407..b8e81422d4e2 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_out.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c
@@ -112,6 +112,7 @@ static int mxsfb_attach_endpoint(struct drm_device *drm,
int mxsfb_create_output(struct drm_device *drm)
{
+ struct mxsfb_drm_private *mxsfb = drm->dev_private;
struct device_node *ep_np = NULL;
struct of_endpoint ep;
int ret;
@@ -127,5 +128,8 @@ int mxsfb_create_output(struct drm_device *drm)
}
}
+ if (!mxsfb->panel)
+ return -EPROBE_DEFER;
+
return 0;
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_regs.h b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
index 31d62cd0d3d7..66a6ba9ec533 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_regs.h
+++ b/drivers/gpu/drm/mxsfb/mxsfb_regs.h
@@ -44,6 +44,7 @@
#define CTRL_DATA_SELECT (1 << 16)
#define CTRL_SET_BUS_WIDTH(x) (((x) & 0x3) << 10)
#define CTRL_GET_BUS_WIDTH(x) (((x) >> 10) & 0x3)
+#define CTRL_BUS_WIDTH_MASK (0x3 << 10)
#define CTRL_SET_WORD_LENGTH(x) (((x) & 0x3) << 8)
#define CTRL_GET_WORD_LENGTH(x) (((x) >> 8) & 0x3)
#define CTRL_MASTER (1 << 5)
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 3a2c0137d4b4..d08da82ba7ed 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -125,6 +125,7 @@
#define MAXWELL_B /* cl9097.h */ 0x0000b197
#define PASCAL_A /* cl9097.h */ 0x0000c097
+#define PASCAL_B /* cl9097.h */ 0x0000c197
#define NV74_BSP 0x000074b0
@@ -163,6 +164,7 @@
#define MAXWELL_COMPUTE_A 0x0000b0c0
#define MAXWELL_COMPUTE_B 0x0000b1c0
#define PASCAL_COMPUTE_A 0x0000c0c0
+#define PASCAL_COMPUTE_B 0x0000c1c0
#define NV74_CIPHER 0x000074c1
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index d426b86e2712..bb4c214f1046 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -59,6 +59,7 @@ enum nvkm_devidx {
NVKM_ENGINE_NVDEC,
NVKM_ENGINE_PM,
NVKM_ENGINE_SEC,
+ NVKM_ENGINE_SEC2,
NVKM_ENGINE_SW,
NVKM_ENGINE_VIC,
NVKM_ENGINE_VP,
@@ -155,9 +156,10 @@ struct nvkm_device {
struct nvkm_engine *msppp;
struct nvkm_engine *msvld;
struct nvkm_engine *nvenc[3];
- struct nvkm_engine *nvdec;
+ struct nvkm_nvdec *nvdec;
struct nvkm_pm *pm;
struct nvkm_engine *sec;
+ struct nvkm_sec2 *sec2;
struct nvkm_sw *sw;
struct nvkm_engine *vic;
struct nvkm_engine *vp;
@@ -225,9 +227,10 @@ struct nvkm_device_chip {
int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
- int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_nvdec **);
int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
+ int (*sec2 )(struct nvkm_device *, int idx, struct nvkm_sec2 **);
int (*sw )(struct nvkm_device *, int idx, struct nvkm_sw **);
int (*vic )(struct nvkm_device *, int idx, struct nvkm_engine **);
int (*vp )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
new file mode 100644
index 000000000000..fac0824197f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/msgqueue.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_CORE_MSGQUEUE_H
+#define __NVKM_CORE_MSGQUEUE_H
+
+#include <core/os.h>
+
+struct nvkm_falcon;
+struct nvkm_msgqueue;
+enum nvkm_secboot_falcon;
+
+/* Hopefully we will never have firmware arguments larger than that... */
+#define NVKM_MSGQUEUE_CMDLINE_SIZE 0x100
+
+int nvkm_msgqueue_new(u32, struct nvkm_falcon *, struct nvkm_msgqueue **);
+void nvkm_msgqueue_del(struct nvkm_msgqueue **);
+void nvkm_msgqueue_recv(struct nvkm_msgqueue *);
+int nvkm_msgqueue_reinit(struct nvkm_msgqueue *);
+
+/* useful if we run an NVIDIA-signed firmware */
+void nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *, void *);
+
+/* interface to ACR unit running on falcon (NVIDIA signed firmware) */
+int nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *,
+ enum nvkm_secboot_falcon);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
index 7e498e65b1e8..e1a854e2ade1 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/falcon.h
@@ -10,6 +10,7 @@ enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_PHYS_VID = 2,
FALCON_DMAIDX_PHYS_SYS_COH = 3,
FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
+ FALCON_SEC2_DMAIDX_UCODE = 6,
};
struct nvkm_falcon {
@@ -19,11 +20,13 @@ struct nvkm_falcon {
u32 addr;
struct mutex mutex;
+ struct mutex dmem_mutex;
const struct nvkm_subdev *user;
u8 version;
u8 secret;
bool debug;
+ bool has_emem;
struct nvkm_memory *core;
bool external;
@@ -45,8 +48,14 @@ struct nvkm_falcon {
struct nvkm_engine engine;
};
+/* This constructor must be called from the owner's oneinit() hook and
+ * *not* its constructor. This is to ensure that DEVINIT has been
+ * completed, and that the device is correctly enabled before we touch
+ * falcon registers.
+ */
int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
struct nvkm_falcon **);
+
void nvkm_falcon_del(struct nvkm_falcon **);
int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 89cf99307828..0a636833e0eb 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -43,4 +43,5 @@ int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
+int gp102_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
index 30b76d13fdcb..00b2b227ff41 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/nvdec.h
@@ -1,4 +1,12 @@
#ifndef __NVKM_NVDEC_H__
#define __NVKM_NVDEC_H__
+#define nvkm_nvdec(p) container_of((p), struct nvkm_nvdec, engine)
#include <core/engine.h>
+
+struct nvkm_nvdec {
+ struct nvkm_engine engine;
+ struct nvkm_falcon *falcon;
+};
+
+int gp102_nvdec_new(struct nvkm_device *, int, struct nvkm_nvdec **);
#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
new file mode 100644
index 000000000000..d3db1b1e75c4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/sec2.h
@@ -0,0 +1,13 @@
+#ifndef __NVKM_SEC2_H__
+#define __NVKM_SEC2_H__
+#include <core/engine.h>
+
+struct nvkm_sec2 {
+ struct nvkm_engine engine;
+ struct nvkm_falcon *falcon;
+ struct nvkm_msgqueue *queue;
+ struct work_struct work;
+};
+
+int gp102_sec2_new(struct nvkm_device *, int, struct nvkm_sec2 **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 0b26a4c860ec..891497a0fe3b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -89,6 +89,7 @@ int gt215_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int mcp77_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int mcp89_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gf100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
+int gf108_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
@@ -146,6 +147,12 @@ struct nvkm_ram {
};
struct nvkm_ram_func {
+ u64 upper;
+ u32 (*probe_fbp)(const struct nvkm_ram_func *, struct nvkm_device *,
+ int fbp, int *pltcs);
+ u32 (*probe_fbp_amount)(const struct nvkm_ram_func *, u32 fbpao,
+ struct nvkm_device *, int fbp, int *pltcs);
+ u32 (*probe_fbpa_amount)(struct nvkm_device *, int fbpa);
void *(*dtor)(struct nvkm_ram *);
int (*init)(struct nvkm_ram *);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
index a63c5ac69f66..ce23cc6c672e 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
@@ -64,7 +64,7 @@ void nvkm_i2c_aux_monitor(struct nvkm_i2c_aux *, bool monitor);
int nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *);
void nvkm_i2c_aux_release(struct nvkm_i2c_aux *);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
- u32 addr, u8 *data, u8 size);
+ u32 addr, u8 *data, u8 *size);
int nvkm_i2c_aux_lnk_ctl(struct nvkm_i2c_aux *, int link_nr, int link_bw,
bool enhanced_framing);
@@ -162,9 +162,11 @@ nvkm_probe_i2c(struct i2c_adapter *adap, u8 addr)
static inline int
nvkm_rdaux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
{
+ const u8 xfer = size;
int ret = nvkm_i2c_aux_acquire(aux);
if (ret == 0) {
- ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, size);
+ ret = nvkm_i2c_aux_xfer(aux, true, 9, addr, data, &size);
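+		/* size is in/out now - warn if the transfer came up short */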
+ WARN_ON(!ret && size != xfer);
nvkm_i2c_aux_release(aux);
}
return ret;
@@ -175,7 +177,7 @@ nvkm_wraux(struct nvkm_i2c_aux *aux, u32 addr, u8 *data, u8 size)
{
int ret = nvkm_i2c_aux_acquire(aux);
if (ret == 0) {
- ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, size);
+ ret = nvkm_i2c_aux_xfer(aux, true, 8, addr, data, &size);
nvkm_i2c_aux_release(aux);
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
index 179b6ed3f595..e7f04732a425 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pmu.h
@@ -7,6 +7,7 @@ struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
struct nvkm_falcon *falcon;
+ struct nvkm_msgqueue *queue;
struct {
u32 base;
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index 5dbd8aa4f8c2..d6a4bdb6573b 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -30,10 +30,13 @@ enum nvkm_secboot_falcon {
NVKM_SECBOOT_FALCON_RESERVED = 1,
NVKM_SECBOOT_FALCON_FECS = 2,
NVKM_SECBOOT_FALCON_GPCCS = 3,
- NVKM_SECBOOT_FALCON_END = 4,
+ NVKM_SECBOOT_FALCON_SEC2 = 7,
+ NVKM_SECBOOT_FALCON_END = 8,
NVKM_SECBOOT_FALCON_INVALID = 0xffffffff,
};
+extern const char *nvkm_secboot_falcon_name[];
+
/**
* @wpr_set: whether the WPR region is currently set
*/
@@ -42,6 +45,7 @@ struct nvkm_secboot {
struct nvkm_acr *acr;
struct nvkm_subdev subdev;
struct nvkm_falcon *boot_falcon;
+ struct nvkm_falcon *halt_falcon;
u64 wpr_addr;
u32 wpr_size;
@@ -55,5 +59,6 @@ int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
+int gp102_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index f5add64c093f..f802bcd94457 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1147,6 +1147,7 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
container_of(obj, typeof(*nv_connector), aux);
struct nouveau_encoder *nv_encoder;
struct nvkm_i2c_aux *aux;
+ u8 size = msg->size;
int ret;
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
@@ -1162,11 +1163,11 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
return ret;
ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
- msg->buffer, msg->size);
+ msg->buffer, &size);
nvkm_i2c_aux_release(aux);
if (ret >= 0) {
msg->reply = ret;
- return msg->size;
+ return size;
}
return ret;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index a58c53639c18..418872b493a3 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -3628,7 +3628,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
struct nvkm_i2c_aux *aux =
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
if (aux) {
- nv_encoder->i2c = &aux->i2c;
+ nv_encoder->i2c = &nv_connector->aux.ddc;
nv_encoder->aux = aux;
}
@@ -3778,6 +3778,7 @@ nv50_pior_func = {
static int
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
struct nvkm_i2c_bus *bus = NULL;
@@ -3795,7 +3796,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
break;
case DCB_OUTPUT_DP:
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
- ddc = aux ? &aux->i2c : NULL;
+ ddc = aux ? &nv_connector->aux.ddc : NULL;
type = DRM_MODE_ENCODER_TMDS;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index 19044aba265e..a134d225f958 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -78,6 +78,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
[NVKM_ENGINE_NVDEC ] = "nvdec",
[NVKM_ENGINE_PM ] = "pm",
[NVKM_ENGINE_SEC ] = "sec",
+ [NVKM_ENGINE_SEC2 ] = "sec2",
[NVKM_ENGINE_SW ] = "sw",
[NVKM_ENGINE_VIC ] = "vic",
[NVKM_ENGINE_VP ] = "vp",
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
index c2c8d2ac01b8..78571e8b01c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/Kbuild
@@ -18,6 +18,7 @@ include $(src)/nvkm/engine/nvenc/Kbuild
include $(src)/nvkm/engine/nvdec/Kbuild
include $(src)/nvkm/engine/pm/Kbuild
include $(src)/nvkm/engine/sec/Kbuild
+include $(src)/nvkm/engine/sec2/Kbuild
include $(src)/nvkm/engine/sw/Kbuild
include $(src)/nvkm/engine/vic/Kbuild
include $(src)/nvkm/engine/vp/Kbuild
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 273562dd6bbd..1076949b802a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -1379,7 +1379,7 @@ nvc1_chipset = {
.bus = gf100_bus_new,
.clk = gf100_clk_new,
.devinit = gf100_devinit_new,
- .fb = gf100_fb_new,
+ .fb = gf108_fb_new,
.fuse = gf100_fuse_new,
.gpio = g94_gpio_new,
.i2c = g94_i2c_new,
@@ -2200,6 +2200,9 @@ nv132_chipset = {
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
+ .secboot = gp102_secboot_new,
+ .sec2 = gp102_sec2_new,
+ .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2211,6 +2214,8 @@ nv132_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
+ .gr = gp102_gr_new,
+ .sw = gf100_sw_new,
};
static const struct nvkm_device_chip
@@ -2229,6 +2234,9 @@ nv134_chipset = {
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
+ .secboot = gp102_secboot_new,
+ .sec2 = gp102_sec2_new,
+ .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2240,6 +2248,8 @@ nv134_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
+ .gr = gp102_gr_new,
+ .sw = gf100_sw_new,
};
static const struct nvkm_device_chip
@@ -2258,6 +2268,9 @@ nv136_chipset = {
.ltc = gp100_ltc_new,
.mc = gp100_mc_new,
.mmu = gf100_mmu_new,
+ .secboot = gp102_secboot_new,
+ .sec2 = gp102_sec2_new,
+ .nvdec = gp102_nvdec_new,
.pci = gp100_pci_new,
.pmu = gp102_pmu_new,
.timer = gk20a_timer_new,
@@ -2269,6 +2282,8 @@ nv136_chipset = {
.disp = gp102_disp_new,
.dma = gf119_dma_new,
.fifo = gp100_fifo_new,
+ .gr = gp102_gr_new,
+ .sw = gf100_sw_new,
};
static int
@@ -2362,9 +2377,10 @@ nvkm_device_engine(struct nvkm_device *device, int index)
_(NVENC0 , device->nvenc[0], device->nvenc[0]);
_(NVENC1 , device->nvenc[1], device->nvenc[1]);
_(NVENC2 , device->nvenc[2], device->nvenc[2]);
- _(NVDEC , device->nvdec , device->nvdec);
+ _(NVDEC , device->nvdec , &device->nvdec->engine);
_(PM , device->pm , &device->pm->engine);
_(SEC , device->sec , device->sec);
+ _(SEC2 , device->sec2 , &device->sec2->engine);
_(SW , device->sw , &device->sw->engine);
_(VIC , device->vic , device->vic);
_(VP , device->vp , device->vp);
@@ -2812,6 +2828,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
_(NVKM_ENGINE_NVDEC , nvdec);
_(NVKM_ENGINE_PM , pm);
_(NVKM_ENGINE_SEC , sec);
+ _(NVKM_ENGINE_SEC2 , sec2);
_(NVKM_ENGINE_SW , sw);
_(NVKM_ENGINE_VIC , vic);
_(NVKM_ENGINE_VP , vp);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
index 1a06ac175f55..6c16f3835f44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/priv.h
@@ -41,6 +41,7 @@
#include <engine/nvdec.h>
#include <engine/pm.h>
#include <engine/sec.h>
+#include <engine/sec2.h>
#include <engine/sw.h>
#include <engine/vic.h>
#include <engine/vp.h>
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index f1c494182248..2938ad5aca40 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -32,6 +32,7 @@ nvkm-y += nvkm/engine/gr/gm107.o
nvkm-y += nvkm/engine/gr/gm200.o
nvkm-y += nvkm/engine/gr/gm20b.o
nvkm-y += nvkm/engine/gr/gp100.o
+nvkm-y += nvkm/engine/gr/gp102.o
nvkm-y += nvkm/engine/gr/ctxnv40.o
nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -50,3 +51,4 @@ nvkm-y += nvkm/engine/gr/ctxgm107.o
nvkm-y += nvkm/engine/gr/ctxgm200.o
nvkm-y += nvkm/engine/gr/ctxgm20b.o
nvkm-y += nvkm/engine/gr/ctxgp100.o
+nvkm-y += nvkm/engine/gr/ctxgp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index 52048b5a5274..0ae032fa2909 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -102,6 +102,10 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
extern const struct gf100_grctx_func gm20b_grctx;
extern const struct gf100_grctx_func gp100_grctx;
+void gp100_grctx_generate_main(struct gf100_gr *, struct gf100_grctx *);
+void gp100_grctx_generate_pagepool(struct gf100_grctx *);
+
+extern const struct gf100_grctx_func gp102_grctx;
/* context init value lists */
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
index 3d1ae7ddf7dd..7833bc777a29 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -29,7 +29,7 @@
* PGRAPH context implementation
******************************************************************************/
-static void
+void
gp100_grctx_generate_pagepool(struct gf100_grctx *info)
{
const struct gf100_grctx_func *grctx = info->gr->func->grctx;
@@ -123,7 +123,7 @@ gp100_grctx_generate_405b60(struct gf100_gr *gr)
nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
}
-static void
+void
gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
new file mode 100644
index 000000000000..ee26d64af73a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp102.c
@@ -0,0 +1,98 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ctxgf100.h"
+
+#include <subdev/fb.h>
+
+/*******************************************************************************
+ * PGRAPH context implementation
+ ******************************************************************************/
+
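+/* Carve the attribute (beta) and alpha circular buffers out of VRAM and
+ * program the per-PPC base/size registers; the sizing is derived from the
+ * attrib_nr_max/alpha_nr_max limits declared in gp102_grctx below.
+ */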
+static void
+gp102_grctx_generate_attrib(struct gf100_grctx *info)
+{
+ struct gf100_gr *gr = info->gr;
+ const struct gf100_grctx_func *grctx = gr->func->grctx;
+ const u32 alpha = grctx->alpha_nr;
+ const u32 attrib = grctx->attrib_nr;
+ const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
+ const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
+ const u32 access = NV_MEM_ACCESS_RW;
+ const int s = 12;
+ const int b = mmio_vram(info, size, (1 << s), access);
+ const int max_batches = 0xffff;
+ u32 ao = 0;
+ u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
+ int gpc, ppc, n = 0;
+
+ mmio_refn(info, 0x418810, 0x80000000, s, b);
+ mmio_refn(info, 0x419848, 0x10000000, s, b);
+ mmio_refn(info, 0x419c2c, 0x10000000, s, b);
+ mmio_refn(info, 0x419b00, 0x00000000, s, b);
+ mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
+ mmio_wr32(info, 0x405830, attrib);
+ mmio_wr32(info, 0x40585c, alpha);
+ mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
+ const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
+ const u32 u = 0x418ea0 + (n * 0x04);
+ const u32 o = PPC_UNIT(gpc, ppc, 0);
+ const u32 p = GPC_UNIT(gpc, 0xc44 + (ppc * 4));
+ if (!(gr->ppc_mask[gpc] & (1 << ppc)))
+ continue;
+ mmio_wr32(info, o + 0xc0, bs);
+ mmio_wr32(info, p, bs);
+ mmio_wr32(info, o + 0xf4, bo);
+ mmio_wr32(info, o + 0xf0, bs);
+ bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, o + 0xe4, as);
+ mmio_wr32(info, o + 0xf8, ao);
+ ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
+ mmio_wr32(info, u, bs);
+ }
+ }
+
+ mmio_wr32(info, 0x4181e4, 0x00000100);
+ mmio_wr32(info, 0x41befc, 0x00000100);
+}
+
+const struct gf100_grctx_func
+gp102_grctx = {
+ .main = gp100_grctx_generate_main,
+ .unkn = gk104_grctx_generate_unkn,
+ .bundle = gm107_grctx_generate_bundle,
+ .bundle_size = 0x3000,
+ .bundle_min_gpm_fifo_depth = 0x180,
+ .bundle_token_limit = 0x900,
+ .pagepool = gp100_grctx_generate_pagepool,
+ .pagepool_size = 0x20000,
+ .attrib = gp102_grctx_generate_attrib,
+ .attrib_nr_max = 0x5d4,
+ .attrib_nr = 0x320,
+ .alpha_nr_max = 0xc00,
+ .alpha_nr = 0x800,
+};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index f9acb8a944d2..a4410ef19db5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1647,8 +1647,18 @@ static int
gf100_gr_oneinit(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
- struct nvkm_device *device = gr->base.engine.subdev.device;
+ struct nvkm_subdev *subdev = &gr->base.engine.subdev;
+ struct nvkm_device *device = subdev->device;
int i, j;
+ int ret;
+
+ ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
+ if (ret)
+ return ret;
+
+ ret = nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+ if (ret)
+ return ret;
nvkm_pmu_pgob(device->pmu, false);
@@ -1856,24 +1866,13 @@ int
gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
int index, struct gf100_gr *gr)
{
- struct nvkm_subdev *subdev = &gr->base.engine.subdev;
- int ret;
-
gr->func = func;
gr->firmware = nvkm_boolopt(device->cfgopt, "NvGrUseFW",
func->fecs.ucode == NULL);
- ret = nvkm_gr_ctor(&gf100_gr_, device, index,
- gr->firmware || func->fecs.ucode != NULL,
- &gr->base);
- if (ret)
- return ret;
-
- ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
- if (ret)
- return ret;
-
- return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
+ return nvkm_gr_ctor(&gf100_gr_, device, index,
+ gr->firmware || func->fecs.ucode != NULL,
+ &gr->base);
}
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index db6ee3b06841..1d2101af2a87 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -124,6 +124,7 @@ struct gf100_gr_func {
void (*init_gpc_mmu)(struct gf100_gr *);
void (*init_rop_active_fbps)(struct gf100_gr *);
void (*init_ppc_exceptions)(struct gf100_gr *);
+ void (*init_swdx_pes_mask)(struct gf100_gr *);
void (*set_hww_esr_report_mask)(struct gf100_gr *);
const struct gf100_gr_pack *mmio;
struct {
@@ -150,6 +151,9 @@ int gk20a_gr_init(struct gf100_gr *);
int gm200_gr_init(struct gf100_gr *);
int gm200_gr_rops(struct gf100_gr *);
+int gp100_gr_init(struct gf100_gr *);
+void gp100_gr_init_rop_active_fbps(struct gf100_gr *);
+
#define gf100_gr_chan(p) container_of((p), struct gf100_gr_chan, object)
struct gf100_gr_chan {
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
index 26ad79def0ff..94ed7debb714 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -30,7 +30,7 @@
* PGRAPH engine/subdev functions
******************************************************************************/
-static void
+void
gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -40,7 +40,7 @@ gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
}
-static int
+int
gp100_gr_init(struct gf100_gr *gr)
{
struct nvkm_device *device = gr->base.engine.subdev.device;
@@ -85,6 +85,8 @@ gp100_gr_init(struct gf100_gr *gr)
nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
gr->func->init_rop_active_fbps(gr);
+ if (gr->func->init_swdx_pes_mask)
+ gr->func->init_swdx_pes_mask(gr);
nvkm_wr32(device, 0x400500, 0x00010001);
nvkm_wr32(device, 0x400100, 0xffffffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
new file mode 100644
index 000000000000..1d5117a16299
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp102.c
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ctxgf100.h"
+
+#include <nvif/class.h>
+
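+/* Gather each GPC's 4-bit SWDX PES configuration into a single broadcast
+ * mask register.
+ */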
+static void
+gp102_gr_init_swdx_pes_mask(struct gf100_gr *gr)
+{
+ struct nvkm_device *device = gr->base.engine.subdev.device;
+ u32 mask = 0, data, gpc;
+
+ for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
+ data = nvkm_rd32(device, GPC_UNIT(gpc, 0x0c50)) & 0x0000000f;
+ mask |= data << (gpc * 4);
+ }
+
+ nvkm_wr32(device, 0x4181d0, mask);
+}
+
+static const struct gf100_gr_func
+gp102_gr = {
+ .init = gp100_gr_init,
+ .init_gpc_mmu = gm200_gr_init_gpc_mmu,
+ .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
+ .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
+ .init_swdx_pes_mask = gp102_gr_init_swdx_pes_mask,
+ .rops = gm200_gr_rops,
+ .ppc_nr = 3,
+ .grctx = &gp102_grctx,
+ .sclass = {
+ { -1, -1, FERMI_TWOD_A },
+ { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
+ { -1, -1, PASCAL_B, &gf100_fermi },
+ { -1, -1, PASCAL_COMPUTE_B },
+ {}
+ }
+};
+
+int
+gp102_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
+{
+ return gm200_gr_new_(&gp102_gr, device, index, pgr);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
index 13b7c71ff900..98477beb823a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/Kbuild
@@ -1 +1,2 @@
-#nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/base.o
+nvkm-y += nvkm/engine/nvdec/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
new file mode 100644
index 000000000000..4807021fd990
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/base.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <engine/falcon.h>
+
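+/* The falcon is created from the oneinit hook, once DEVINIT has completed
+ * (see the comment above nvkm_falcon_v1_new() in engine/falcon.h).
+ */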
+static int
+nvkm_nvdec_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+ return nvkm_falcon_v1_new(&nvdec->engine.subdev, "NVDEC", 0x84000,
+ &nvdec->falcon);
+}
+
+static void *
+nvkm_nvdec_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_nvdec *nvdec = nvkm_nvdec(engine);
+ nvkm_falcon_del(&nvdec->falcon);
+ return nvdec;
+}
+
+static const struct nvkm_engine_func
+nvkm_nvdec = {
+ .dtor = nvkm_nvdec_dtor,
+ .oneinit = nvkm_nvdec_oneinit,
+};
+
+int
+nvkm_nvdec_new_(struct nvkm_device *device, int index,
+ struct nvkm_nvdec **pnvdec)
+{
+ struct nvkm_nvdec *nvdec;
+
+ if (!(nvdec = *pnvdec = kzalloc(sizeof(*nvdec), GFP_KERNEL)))
+ return -ENOMEM;
+
+ return nvkm_engine_ctor(&nvkm_nvdec, device, index, true,
+ &nvdec->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
new file mode 100644
index 000000000000..fde6328c6d71
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/gp102.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+gp102_nvdec_new(struct nvkm_device *device, int index,
+ struct nvkm_nvdec **pnvdec)
+{
+ return nvkm_nvdec_new_(device, index, pnvdec);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
new file mode 100644
index 000000000000..353b94f51205
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/nvdec/priv.h
@@ -0,0 +1,6 @@
+#ifndef __NVKM_NVDEC_PRIV_H__
+#define __NVKM_NVDEC_PRIV_H__
+#include <engine/nvdec.h>
+
+int nvkm_nvdec_new_(struct nvkm_device *, int, struct nvkm_nvdec **);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
new file mode 100644
index 000000000000..4b17254cfbd0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/Kbuild
@@ -0,0 +1,2 @@
+nvkm-y += nvkm/engine/sec2/base.o
+nvkm-y += nvkm/engine/sec2/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
new file mode 100644
index 000000000000..f865d2a3e184
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/base.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+#include "priv.h"
+
+#include <core/msgqueue.h>
+#include <engine/falcon.h>
+
+static void *
+nvkm_sec2_dtor(struct nvkm_engine *engine)
+{
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ nvkm_msgqueue_del(&sec2->queue);
+ nvkm_falcon_del(&sec2->falcon);
+ return sec2;
+}
+
+static void
+nvkm_sec2_intr(struct nvkm_engine *engine)
+{
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ struct nvkm_subdev *subdev = &engine->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 disp = nvkm_rd32(device, 0x8701c);
+ u32 intr = nvkm_rd32(device, 0x87008) & disp & ~(disp >> 16);
+
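+	/* bit 6 signals a pending message from the falcon - defer to a worker */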
+ if (intr & 0x00000040) {
+ schedule_work(&sec2->work);
+ nvkm_wr32(device, 0x87004, 0x00000040);
+ intr &= ~0x00000040;
+ }
+
+ if (intr) {
+ nvkm_error(subdev, "unhandled intr %08x\n", intr);
+ nvkm_wr32(device, 0x87004, intr);
+ }
+}
+
+static void
+nvkm_sec2_recv(struct work_struct *work)
+{
+ struct nvkm_sec2 *sec2 = container_of(work, typeof(*sec2), work);
+
+ if (!sec2->queue) {
+ nvkm_warn(&sec2->engine.subdev,
+ "recv function called while no firmware set!\n");
+ return;
+ }
+
+ nvkm_msgqueue_recv(sec2->queue);
+}
+
+static int
+nvkm_sec2_oneinit(struct nvkm_engine *engine)
+{
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ return nvkm_falcon_v1_new(&sec2->engine.subdev, "SEC2", 0x87000,
+ &sec2->falcon);
+}
+
+static int
+nvkm_sec2_fini(struct nvkm_engine *engine, bool suspend)
+{
+ struct nvkm_sec2 *sec2 = nvkm_sec2(engine);
+ flush_work(&sec2->work);
+ return 0;
+}
+
+static const struct nvkm_engine_func
+nvkm_sec2 = {
+ .dtor = nvkm_sec2_dtor,
+ .oneinit = nvkm_sec2_oneinit,
+ .fini = nvkm_sec2_fini,
+ .intr = nvkm_sec2_intr,
+};
+
+int
+nvkm_sec2_new_(struct nvkm_device *device, int index,
+ struct nvkm_sec2 **psec2)
+{
+ struct nvkm_sec2 *sec2;
+
+ if (!(sec2 = *psec2 = kzalloc(sizeof(*sec2), GFP_KERNEL)))
+ return -ENOMEM;
+ INIT_WORK(&sec2->work, nvkm_sec2_recv);
+
+ return nvkm_engine_ctor(&nvkm_sec2, device, index, true, &sec2->engine);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
new file mode 100644
index 000000000000..9be1524c08f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/gp102.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "priv.h"
+
+int
+gp102_sec2_new(struct nvkm_device *device, int index,
+ struct nvkm_sec2 **psec2)
+{
+ return nvkm_sec2_new_(device, index, psec2);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
new file mode 100644
index 000000000000..7ecc9d4724dc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/sec2/priv.h
@@ -0,0 +1,9 @@
+#ifndef __NVKM_SEC2_PRIV_H__
+#define __NVKM_SEC2_PRIV_H__
+#include <engine/sec2.h>
+
+#define nvkm_sec2(p) container_of((p), struct nvkm_sec2, engine)
+
+int nvkm_sec2_new_(struct nvkm_device *, int, struct nvkm_sec2 **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
index 584863db9bfc..2aa040ba39e5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild
@@ -1,2 +1,5 @@
nvkm-y += nvkm/falcon/base.o
nvkm-y += nvkm/falcon/v1.o
+nvkm-y += nvkm/falcon/msgqueue.o
+nvkm-y += nvkm/falcon/msgqueue_0137c63d.o
+nvkm-y += nvkm/falcon/msgqueue_0148cdec.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
index 4852f313762f..1b7f48efd8b1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c
@@ -41,14 +41,22 @@ void
nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u8 port)
{
+ mutex_lock(&falcon->dmem_mutex);
+
falcon->func->load_dmem(falcon, data, start, size, port);
+
+ mutex_unlock(&falcon->dmem_mutex);
}
void
nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port,
void *data)
{
+ mutex_lock(&falcon->dmem_mutex);
+
falcon->func->read_dmem(falcon, start, size, port, data);
+
+ mutex_unlock(&falcon->dmem_mutex);
}
void
@@ -129,6 +137,9 @@ nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask)
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
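+	/* tolerate NULL so callers need not check whether a falcon was created */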
+ if (unlikely(!falcon))
+ return;
+
mutex_lock(&falcon->mutex);
if (falcon->user == user) {
nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
@@ -159,6 +170,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
struct nvkm_subdev *subdev, const char *name, u32 addr,
struct nvkm_falcon *falcon)
{
+ u32 debug_reg;
u32 reg;
falcon->func = func;
@@ -166,6 +178,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
falcon->name = name;
falcon->addr = addr;
mutex_init(&falcon->mutex);
+ mutex_init(&falcon->dmem_mutex);
reg = nvkm_falcon_rd32(falcon, 0x12c);
falcon->version = reg & 0xf;
@@ -177,8 +190,31 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func,
falcon->code.limit = (reg & 0x1ff) << 8;
falcon->data.limit = (reg & 0x3fe00) >> 1;
- reg = nvkm_falcon_rd32(falcon, 0xc08);
- falcon->debug = (reg >> 20) & 0x1;
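+	/* the offset of the DEBUG register depends on which falcon this is */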
+ switch (subdev->index) {
+ case NVKM_ENGINE_GR:
+ debug_reg = 0x0;
+ break;
+ case NVKM_SUBDEV_PMU:
+ debug_reg = 0xc08;
+ break;
+ case NVKM_ENGINE_NVDEC:
+ debug_reg = 0xd00;
+ break;
+ case NVKM_ENGINE_SEC2:
+ debug_reg = 0x408;
+ falcon->has_emem = true;
+ break;
+ default:
+ nvkm_warn(subdev, "unsupported falcon %s!\n",
+ nvkm_subdev_name[subdev->index]);
+ debug_reg = 0;
+ break;
+ }
+
+ if (debug_reg) {
+ u32 val = nvkm_falcon_rd32(falcon, debug_reg);
+ falcon->debug = (val >> 20) & 0x1;
+ }
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
new file mode 100644
index 000000000000..982efedb4b13
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.c
@@ -0,0 +1,552 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "msgqueue.h"
+#include <engine/falcon.h>
+
+#include <subdev/secboot.h>
+
+
+#define HDR_SIZE sizeof(struct nvkm_msgqueue_hdr)
+#define QUEUE_ALIGNMENT 4
+/* max size of the messages we can receive */
+#define MSG_BUF_SIZE 128
+
+static int
+msg_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+
+ mutex_lock(&queue->mutex);
+
+ queue->position = nvkm_falcon_rd32(falcon, queue->tail_reg);
+
+ return 0;
+}
+
+static void
+msg_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ bool commit)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, queue->tail_reg, queue->position);
+
+ mutex_unlock(&queue->mutex);
+}
+
+static bool
+msg_queue_empty(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+ u32 head, tail;
+
+ head = nvkm_falcon_rd32(falcon, queue->head_reg);
+ tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
+
+ return head == tail;
+}
+
+static int
+msg_queue_pop(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ void *data, u32 size)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ u32 head, tail, available;
+
+ head = nvkm_falcon_rd32(falcon, queue->head_reg);
+ /* has the buffer looped? */
+ if (head < queue->position)
+ queue->position = queue->offset;
+
+ tail = queue->position;
+
+ available = head - tail;
+
+ if (available == 0) {
+ nvkm_warn(subdev, "no message data available\n");
+ return 0;
+ }
+
+ if (size > available) {
+ nvkm_warn(subdev, "message data smaller than read request\n");
+ size = available;
+ }
+
+ nvkm_falcon_read_dmem(priv->falcon, tail, size, 0, data);
+ queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+
+ return size;
+}
+
+static int
+msg_queue_read(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ int err;
+
+ err = msg_queue_open(priv, queue);
+ if (err) {
+ nvkm_error(subdev, "fail to open queue %d\n", queue->index);
+ return err;
+ }
+
+ if (msg_queue_empty(priv, queue)) {
+ err = 0;
+ goto close;
+ }
+
+ err = msg_queue_pop(priv, queue, hdr, HDR_SIZE);
+ if (err >= 0 && err != HDR_SIZE)
+ err = -EINVAL;
+ if (err < 0) {
+ nvkm_error(subdev, "failed to read message header: %d\n", err);
+ goto close;
+ }
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+ err = -ENOSPC;
+ goto close;
+ }
+
+ if (hdr->size > HDR_SIZE) {
+ u32 read_size = hdr->size - HDR_SIZE;
+
+ err = msg_queue_pop(priv, queue, (hdr + 1), read_size);
+ if (err >= 0 && err != read_size)
+ err = -EINVAL;
+ if (err < 0) {
+ nvkm_error(subdev, "failed to read message: %d\n", err);
+ goto close;
+ }
+ }
+
+close:
+ msg_queue_close(priv, queue, (err >= 0));
+
+ return err;
+}
+
+static bool
+cmd_queue_has_room(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ u32 size, bool *rewind)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+ u32 head, tail, free;
+
+ size = ALIGN(size, QUEUE_ALIGNMENT);
+
+ head = nvkm_falcon_rd32(falcon, queue->head_reg);
+ tail = nvkm_falcon_rd32(falcon, queue->tail_reg);
+
+ if (head >= tail) {
+ free = queue->offset + queue->size - head;
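+		/* keep room for the REWIND command that wraps the queue around */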
+ free -= HDR_SIZE;
+
+ if (size > free) {
+ *rewind = true;
+ head = queue->offset;
+ }
+ }
+
+ if (head < tail)
+ free = tail - head - 1;
+
+ return size <= free;
+}
+
+static int
+cmd_queue_push(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ void *data, u32 size)
+{
+ nvkm_falcon_load_dmem(priv->falcon, data, queue->position, size, 0);
+ queue->position += ALIGN(size, QUEUE_ALIGNMENT);
+
+ return 0;
+}
+
+/* REWIND unit is always 0x00 */
+#define MSGQUEUE_UNIT_REWIND 0x00
+
+static void
+cmd_queue_rewind(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue)
+{
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ struct nvkm_msgqueue_hdr cmd;
+ int err;
+
+ cmd.unit_id = MSGQUEUE_UNIT_REWIND;
+ cmd.size = sizeof(cmd);
+ err = cmd_queue_push(priv, queue, &cmd, cmd.size);
+ if (err)
+ nvkm_error(subdev, "queue %d rewind failed\n", queue->index);
+ else
+		nvkm_error(subdev, "queue %d rewound\n", queue->index);
+
+ queue->position = queue->offset;
+}
+
+static int
+cmd_queue_open(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ u32 size)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ bool rewind = false;
+
+ mutex_lock(&queue->mutex);
+
+ if (!cmd_queue_has_room(priv, queue, size, &rewind)) {
+ nvkm_error(subdev, "queue full\n");
+ mutex_unlock(&queue->mutex);
+ return -EAGAIN;
+ }
+
+ queue->position = nvkm_falcon_rd32(falcon, queue->head_reg);
+
+ if (rewind)
+ cmd_queue_rewind(priv, queue);
+
+ return 0;
+}
+
+static void
+cmd_queue_close(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_queue *queue,
+ bool commit)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+
+ if (commit)
+ nvkm_falcon_wr32(falcon, queue->head_reg, queue->position);
+
+ mutex_unlock(&queue->mutex);
+}
+
+static int
+cmd_write(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *cmd,
+ struct nvkm_msgqueue_queue *queue)
+{
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ static unsigned long timeout = ~0;
+ unsigned long end_jiffies = jiffies + msecs_to_jiffies(timeout);
+ int ret = -EAGAIN;
+ bool commit = true;
+
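+	/* retry until the queue has room for the command, or we time out */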
+ while (ret == -EAGAIN && time_before(jiffies, end_jiffies))
+ ret = cmd_queue_open(priv, queue, cmd->size);
+ if (ret) {
+ nvkm_error(subdev, "pmu_queue_open_write failed\n");
+ return ret;
+ }
+
+ ret = cmd_queue_push(priv, queue, cmd, cmd->size);
+ if (ret) {
+ nvkm_error(subdev, "pmu_queue_push failed\n");
+ commit = false;
+ }
+
+ cmd_queue_close(priv, queue, commit);
+
+ return ret;
+}
+
+static struct nvkm_msgqueue_seq *
+msgqueue_seq_acquire(struct nvkm_msgqueue *priv)
+{
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ struct nvkm_msgqueue_seq *seq;
+ u32 index;
+
+ mutex_lock(&priv->seq_lock);
+
+ index = find_first_zero_bit(priv->seq_tbl, NVKM_MSGQUEUE_NUM_SEQUENCES);
+
+ if (index >= NVKM_MSGQUEUE_NUM_SEQUENCES) {
+ nvkm_error(subdev, "no free sequence available\n");
+ mutex_unlock(&priv->seq_lock);
+ return ERR_PTR(-EAGAIN);
+ }
+
+ set_bit(index, priv->seq_tbl);
+
+ mutex_unlock(&priv->seq_lock);
+
+ seq = &priv->seq[index];
+ seq->state = SEQ_STATE_PENDING;
+
+ return seq;
+}
+
+static void
+msgqueue_seq_release(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_seq *seq)
+{
+ /* no need to acquire seq_lock since clear_bit is atomic */
+ seq->state = SEQ_STATE_FREE;
+ seq->callback = NULL;
+ seq->completion = NULL;
+ clear_bit(seq->id, priv->seq_tbl);
+}
+
+/* specifies that we want to know the command status in the answer message */
+#define CMD_FLAGS_STATUS BIT(0)
+/* specifies that we want an interrupt when the answer message is queued */
+#define CMD_FLAGS_INTR BIT(1)
+
+int
+nvkm_msgqueue_post(struct nvkm_msgqueue *priv, enum msgqueue_msg_priority prio,
+ struct nvkm_msgqueue_hdr *cmd, nvkm_msgqueue_callback cb,
+ struct completion *completion, bool wait_init)
+{
+ struct nvkm_msgqueue_seq *seq;
+ struct nvkm_msgqueue_queue *queue;
+ int ret;
+
+ if (wait_init && !wait_for_completion_timeout(&priv->init_done,
+ msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ queue = priv->func->cmd_queue(priv, prio);
+ if (IS_ERR(queue))
+ return PTR_ERR(queue);
+
+ seq = msgqueue_seq_acquire(priv);
+ if (IS_ERR(seq))
+ return PTR_ERR(seq);
+
+ cmd->seq_id = seq->id;
+ cmd->ctrl_flags = CMD_FLAGS_STATUS | CMD_FLAGS_INTR;
+
+ seq->callback = cb;
+ seq->state = SEQ_STATE_USED;
+ seq->completion = completion;
+
+ ret = cmd_write(priv, cmd, queue);
+ if (ret) {
+ seq->state = SEQ_STATE_PENDING;
+ msgqueue_seq_release(priv, seq);
+ }
+
+ return ret;
+}
+
+static int
+msgqueue_msg_handle(struct nvkm_msgqueue *priv, struct nvkm_msgqueue_hdr *hdr)
+{
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ struct nvkm_msgqueue_seq *seq;
+
+ seq = &priv->seq[hdr->seq_id];
+ if (seq->state != SEQ_STATE_USED && seq->state != SEQ_STATE_CANCELLED) {
+		nvkm_error(subdev, "msg for unknown sequence %d\n", seq->id);
+ return -EINVAL;
+ }
+
+ if (seq->state == SEQ_STATE_USED) {
+ if (seq->callback)
+ seq->callback(priv, hdr);
+ }
+
+ if (seq->completion)
+ complete(seq->completion);
+
+ msgqueue_seq_release(priv, seq);
+
+ return 0;
+}
+
+static int
+msgqueue_handle_init_msg(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct nvkm_falcon *falcon = priv->falcon;
+ const struct nvkm_subdev *subdev = falcon->owner;
+ u32 tail;
+ u32 tail_reg;
+ int ret;
+
+ /*
+ * Of course the message queue registers vary depending on the falcon
+ * used...
+ */
+ switch (falcon->owner->index) {
+ case NVKM_SUBDEV_PMU:
+ tail_reg = 0x4cc;
+ break;
+ case NVKM_ENGINE_SEC2:
+ tail_reg = 0xa34;
+ break;
+ default:
+ nvkm_error(subdev, "falcon %s unsupported for msgqueue!\n",
+ nvkm_subdev_name[falcon->owner->index]);
+ return -EINVAL;
+ }
+
+ /*
+ * Read the message - queues are not initialized yet so we cannot rely
+ * on msg_queue_read()
+ */
+ tail = nvkm_falcon_rd32(falcon, tail_reg);
+ nvkm_falcon_read_dmem(falcon, tail, HDR_SIZE, 0, hdr);
+
+ if (hdr->size > MSG_BUF_SIZE) {
+ nvkm_error(subdev, "message too big (%d bytes)\n", hdr->size);
+ return -ENOSPC;
+ }
+
+ nvkm_falcon_read_dmem(falcon, tail + HDR_SIZE, hdr->size - HDR_SIZE, 0,
+ (hdr + 1));
+
+ tail += ALIGN(hdr->size, QUEUE_ALIGNMENT);
+ nvkm_falcon_wr32(falcon, tail_reg, tail);
+
+ ret = priv->func->init_func->init_callback(priv, hdr);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_queue *queue)
+{
+ /*
+ * We are invoked from a worker thread, so normally we have plenty of
+ * stack space to work with.
+ */
+ u8 msg_buffer[MSG_BUF_SIZE];
+ struct nvkm_msgqueue_hdr *hdr = (void *)msg_buffer;
+ int ret;
+
+ /* the first message we receive must be the init message */
+	if (!priv->init_msg_received) {
+ ret = msgqueue_handle_init_msg(priv, hdr);
+ if (!ret)
+ priv->init_msg_received = true;
+ } else {
+ while (msg_queue_read(priv, queue, hdr) > 0)
+ msgqueue_msg_handle(priv, hdr);
+ }
+}
+
+void
+nvkm_msgqueue_write_cmdline(struct nvkm_msgqueue *queue, void *buf)
+{
+ if (!queue || !queue->func || !queue->func->init_func)
+ return;
+
+ queue->func->init_func->gen_cmdline(queue, buf);
+}
+
+int
+nvkm_msgqueue_acr_boot_falcon(struct nvkm_msgqueue *queue, enum nvkm_secboot_falcon falcon)
+{
+ if (!queue || !queue->func->acr_func || !queue->func->acr_func->boot_falcon)
+ return -ENODEV;
+
+ return queue->func->acr_func->boot_falcon(queue, falcon);
+}
+
+int
+nvkm_msgqueue_new(u32 version, struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+{
+ const struct nvkm_subdev *subdev = falcon->owner;
+ int ret = -EINVAL;
+
+ switch (version) {
+ case 0x0137c63d:
+ ret = msgqueue_0137c63d_new(falcon, queue);
+ break;
+ case 0x0148cdec:
+ ret = msgqueue_0148cdec_new(falcon, queue);
+ break;
+ default:
+ nvkm_error(subdev, "unhandled firmware version 0x%08x\n",
+ version);
+ break;
+ }
+
+ if (ret == 0) {
+ nvkm_debug(subdev, "firmware version: 0x%08x\n", version);
+ (*queue)->fw_version = version;
+ }
+
+ return ret;
+}
+
+void
+nvkm_msgqueue_del(struct nvkm_msgqueue **queue)
+{
+ if (*queue) {
+ (*queue)->func->dtor(*queue);
+ *queue = NULL;
+ }
+}
+
+void
+nvkm_msgqueue_recv(struct nvkm_msgqueue *queue)
+{
+ if (!queue->func || !queue->func->recv) {
+ const struct nvkm_subdev *subdev = queue->falcon->owner;
+
+ nvkm_warn(subdev, "missing msgqueue recv function\n");
+ return;
+ }
+
+ queue->func->recv(queue);
+}
+
+int
+nvkm_msgqueue_reinit(struct nvkm_msgqueue *queue)
+{
+ /* firmware not set yet... */
+ if (!queue)
+ return 0;
+
+ queue->init_msg_received = false;
+ reinit_completion(&queue->init_done);
+
+ return 0;
+}
+
+void
+nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *func,
+ struct nvkm_falcon *falcon,
+ struct nvkm_msgqueue *queue)
+{
+ int i;
+
+ queue->func = func;
+ queue->falcon = falcon;
+ mutex_init(&queue->seq_lock);
+ for (i = 0; i < NVKM_MSGQUEUE_NUM_SEQUENCES; i++)
+ queue->seq[i].id = i;
+
+ init_completion(&queue->init_done);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
new file mode 100644
index 000000000000..f37afe963d3e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NVKM_CORE_FALCON_MSGQUEUE_H
+#define __NVKM_CORE_FALCON_MSGQUEUE_H
+
+#include <core/msgqueue.h>
+
+/*
+ * The struct nvkm_msgqueue (named so for lack of a better candidate) manages
+ * a firmware (typically, NVIDIA signed firmware) running under a given falcon.
+ *
+ * Such firmwares expect to receive commands (through one or several command
+ * queues) and will reply to such command by sending messages (using one
+ * message queue).
+ *
+ * Each firmware can support one or several units - ACR for managing secure
+ * falcons, PMU for power management, etc. A unit can be seen as a class to
+ * which commands can be sent.
+ *
+ * One usage example would be to send a command to the SEC falcon to ask it to
+ * reset a secure falcon. The SEC falcon will receive the command, process it,
+ * and send a message to signal success or failure. Only when the corresponding
+ * message is received can the requester assume the request has been processed.
+ *
+ * Since we expect many variations between the firmwares NVIDIA will release
+ * across GPU generations, this library is built in a very modular way. Message
+ * formats and queue details (such as the number of queues and their usage) are left to
+ * specializations of struct nvkm_msgqueue, while the functions in msgqueue.c
+ * take care of posting commands and processing messages in a fashion that is
+ * universal.
+ *
+ */
+
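+/*
+ * Illustrative sketch of how a command is posted (the unit id and command
+ * layout below are hypothetical - real commands are defined by the
+ * firmware-specific specializations in msgqueue_*.c):
+ *
+ *	struct {
+ *		struct nvkm_msgqueue_hdr hdr;
+ *		u8 cmd_type;
+ *	} cmd = {
+ *		.hdr.unit_id = 0x07,     // unit the command is addressed to
+ *		.hdr.size = sizeof(cmd), // total size, header included
+ *		.cmd_type = 0x0,
+ *	};
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *
+ *	nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ *			   NULL, &done, true);
+ *	wait_for_completion(&done);
+ *
+ * nvkm_msgqueue_post() fills in hdr.seq_id and hdr.ctrl_flags itself, so
+ * callers only provide the unit, size and command payload.
+ */
+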
+enum msgqueue_msg_priority {
+ MSGQUEUE_MSG_PRIORITY_HIGH,
+ MSGQUEUE_MSG_PRIORITY_LOW,
+};
+
+/**
+ * struct nvkm_msgqueue_hdr - header for all commands/messages
+ * @unit_id: id of the firmware unit receiving the command/sending the message
+ * @size: total size of command/message
+ * @ctrl_flags: type of command/message
+ * @seq_id: used to match a message from its corresponding command
+ */
+struct nvkm_msgqueue_hdr {
+ u8 unit_id;
+ u8 size;
+ u8 ctrl_flags;
+ u8 seq_id;
+};
+
+/**
+ * struct nvkm_msgqueue_msg - base message.
+ *
+ * This is just a header and a message (or command) type. Useful when
+ * building command-specific structures.
+ */
+struct nvkm_msgqueue_msg {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 msg_type;
+};
+
+struct nvkm_msgqueue;
+typedef void
+(*nvkm_msgqueue_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
+
+/**
+ * struct nvkm_msgqueue_init_func - msgqueue functions related to initialization
+ *
+ * @gen_cmdline: build the commandline into a pre-allocated buffer
+ * @init_callback: called to process the init message
+ */
+struct nvkm_msgqueue_init_func {
+ void (*gen_cmdline)(struct nvkm_msgqueue *, void *);
+ int (*init_callback)(struct nvkm_msgqueue *, struct nvkm_msgqueue_hdr *);
+};
+
+/**
+ * struct nvkm_msgqueue_acr_func - msgqueue functions related to ACR
+ *
+ * @boot_falcon: build and send the command to reset a given falcon
+ */
+struct nvkm_msgqueue_acr_func {
+ int (*boot_falcon)(struct nvkm_msgqueue *, enum nvkm_secboot_falcon);
+};
+
+struct nvkm_msgqueue_func {
+ const struct nvkm_msgqueue_init_func *init_func;
+ const struct nvkm_msgqueue_acr_func *acr_func;
+ void (*dtor)(struct nvkm_msgqueue *);
+ struct nvkm_msgqueue_queue *(*cmd_queue)(struct nvkm_msgqueue *,
+ enum msgqueue_msg_priority);
+ void (*recv)(struct nvkm_msgqueue *queue);
+};
+
+/**
+ * struct nvkm_msgqueue_queue - information about a command or message queue
+ *
+ * The number of queues is firmware-dependent. All queues must have their
+ * information filled by the init message handler.
+ *
+ * @mutex: to be acquired when the queue is being used
+ * @index: physical queue index
+ * @offset: DMEM offset where this queue begins
+ * @size: size allocated to this queue in DMEM (in bytes)
+ * @position: current write position
+ * @head_reg: address of the HEAD register for this queue
+ * @tail_reg: address of the TAIL register for this queue
+ */
+struct nvkm_msgqueue_queue {
+ struct mutex mutex;
+ u32 index;
+ u32 offset;
+ u32 size;
+ u32 position;
+
+ u32 head_reg;
+ u32 tail_reg;
+};
+
+/**
+ * struct nvkm_msgqueue_seq - keep track of ongoing commands
+ *
+ * Every time a command is sent, a sequence is assigned to it so the
+ * corresponding message can be matched. Upon receiving the message, a callback
+ * can be called and/or a completion signaled.
+ *
+ * @id: sequence ID
+ * @state: current state
+ * @callback: callback to call upon receiving matching message
+ * @completion: completion to signal after callback is called
+ */
+struct nvkm_msgqueue_seq {
+ u16 id;
+ enum {
+ SEQ_STATE_FREE = 0,
+ SEQ_STATE_PENDING,
+ SEQ_STATE_USED,
+ SEQ_STATE_CANCELLED
+ } state;
+ nvkm_msgqueue_callback callback;
+ struct completion *completion;
+};
+
+/*
+ * We can have an arbitrary number of sequences, but realistically we will
+ * probably not use that much simultaneously.
+ */
+#define NVKM_MSGQUEUE_NUM_SEQUENCES 16
+
+/**
+ * struct nvkm_msgqueue - manage a command/message based FW on a falcon
+ *
+ * @falcon: falcon to be managed
+ * @func: implementation of the firmware to use
+ * @init_msg_received: whether the init message has already been received
+ * @init_done: whether all init is complete and commands can be processed
+ * @seq_lock: protects seq and seq_tbl
+ * @seq: sequences to match commands and messages
+ * @seq_tbl: bitmap of sequences currently in use
+ */
+struct nvkm_msgqueue {
+ struct nvkm_falcon *falcon;
+ const struct nvkm_msgqueue_func *func;
+ u32 fw_version;
+ bool init_msg_received;
+ struct completion init_done;
+
+ struct mutex seq_lock;
+ struct nvkm_msgqueue_seq seq[NVKM_MSGQUEUE_NUM_SEQUENCES];
+ unsigned long seq_tbl[BITS_TO_LONGS(NVKM_MSGQUEUE_NUM_SEQUENCES)];
+};
+
+void nvkm_msgqueue_ctor(const struct nvkm_msgqueue_func *, struct nvkm_falcon *,
+ struct nvkm_msgqueue *);
+int nvkm_msgqueue_post(struct nvkm_msgqueue *, enum msgqueue_msg_priority,
+ struct nvkm_msgqueue_hdr *, nvkm_msgqueue_callback,
+ struct completion *, bool);
+void nvkm_msgqueue_process_msgs(struct nvkm_msgqueue *,
+ struct nvkm_msgqueue_queue *);
+
+int msgqueue_0137c63d_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
+int msgqueue_0148cdec_new(struct nvkm_falcon *, struct nvkm_msgqueue **);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
new file mode 100644
index 000000000000..bba91207fb18
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c
@@ -0,0 +1,323 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include "msgqueue.h"
+#include <engine/falcon.h>
+#include <subdev/secboot.h>
+
+/* Queues identifiers */
+enum {
+ /* High Priority Command Queue for Host -> PMU communication */
+ MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ = 0,
+ /* Low Priority Command Queue for Host -> PMU communication */
+ MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ = 1,
+ /* Message queue for PMU -> Host communication */
+ MSGQUEUE_0137C63D_MESSAGE_QUEUE = 4,
+ MSGQUEUE_0137C63D_NUM_QUEUES = 5,
+};
+
+struct msgqueue_0137c63d {
+ struct nvkm_msgqueue base;
+
+ struct nvkm_msgqueue_queue queue[MSGQUEUE_0137C63D_NUM_QUEUES];
+};
+#define msgqueue_0137c63d(q) \
+ container_of(q, struct msgqueue_0137c63d, base)
+
+static struct nvkm_msgqueue_queue *
+msgqueue_0137c63d_cmd_queue(struct nvkm_msgqueue *queue,
+ enum msgqueue_msg_priority priority)
+{
+ struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
+ const struct nvkm_subdev *subdev = priv->base.falcon->owner;
+
+ switch (priority) {
+ case MSGQUEUE_MSG_PRIORITY_HIGH:
+ return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_HPQ];
+ case MSGQUEUE_MSG_PRIORITY_LOW:
+ return &priv->queue[MSGQUEUE_0137C63D_COMMAND_QUEUE_LPQ];
+ default:
+ nvkm_error(subdev, "invalid command queue!\n");
+ return ERR_PTR(-EINVAL);
+ }
+}
+
+static void
+msgqueue_0137c63d_process_msgs(struct nvkm_msgqueue *queue)
+{
+ struct msgqueue_0137c63d *priv = msgqueue_0137c63d(queue);
+ struct nvkm_msgqueue_queue *q_queue =
+ &priv->queue[MSGQUEUE_0137C63D_MESSAGE_QUEUE];
+
+ nvkm_msgqueue_process_msgs(&priv->base, q_queue);
+}
+
+/* Init unit */
+#define MSGQUEUE_0137C63D_UNIT_INIT 0x07
+
+enum {
+ INIT_MSG_INIT = 0x0,
+};
+
+static void
+init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
+{
+ struct {
+ u32 reserved;
+ u32 freq_hz;
+ u32 trace_size;
+ u32 trace_dma_base;
+ u16 trace_dma_base1;
+ u8 trace_dma_offset;
+ u32 trace_dma_idx;
+ bool secure_mode;
+ bool raise_priv_sec;
+ struct {
+ u32 dma_base;
+ u16 dma_base1;
+ u8 dma_offset;
+ u16 fb_size;
+ u8 dma_idx;
+ } gc6_ctx;
+ u8 pad;
+ } *args = buf;
+
+ args->secure_mode = true;
+}
+
+/* forward declaration */
+static int acr_init_wpr(struct nvkm_msgqueue *queue);
+
+static int
+init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
+{
+ struct msgqueue_0137c63d *priv = msgqueue_0137c63d(_queue);
+ struct {
+ struct nvkm_msgqueue_msg base;
+
+ u8 pad;
+ u16 os_debug_entry_point;
+
+ struct {
+ u16 size;
+ u16 offset;
+ u8 index;
+ u8 pad;
+ } queue_info[MSGQUEUE_0137C63D_NUM_QUEUES];
+
+ u16 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+ } *init = (void *)hdr;
+ const struct nvkm_subdev *subdev = _queue->falcon->owner;
+ int i;
+
+ if (init->base.hdr.unit_id != MSGQUEUE_0137C63D_UNIT_INIT) {
+ nvkm_error(subdev, "expected message from init unit\n");
+ return -EINVAL;
+ }
+
+ if (init->base.msg_type != INIT_MSG_INIT) {
+ nvkm_error(subdev, "expected PMU init msg\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MSGQUEUE_0137C63D_NUM_QUEUES; i++) {
+ struct nvkm_msgqueue_queue *queue = &priv->queue[i];
+
+ mutex_init(&queue->mutex);
+
+ queue->index = init->queue_info[i].index;
+ queue->offset = init->queue_info[i].offset;
+ queue->size = init->queue_info[i].size;
+
+ if (i != MSGQUEUE_0137C63D_MESSAGE_QUEUE) {
+ queue->head_reg = 0x4a0 + (queue->index * 4);
+ queue->tail_reg = 0x4b0 + (queue->index * 4);
+ } else {
+ queue->head_reg = 0x4c8;
+ queue->tail_reg = 0x4cc;
+ }
+
+ nvkm_debug(subdev,
+ "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
+ i, queue->index, queue->offset, queue->size);
+ }
+
+ /* Complete initialization by initializing WPR region */
+ return acr_init_wpr(&priv->base);
+}
+
+static const struct nvkm_msgqueue_init_func
+msgqueue_0137c63d_init_func = {
+ .gen_cmdline = init_gen_cmdline,
+ .init_callback = init_callback,
+};
+
+
+
+/* ACR unit */
+#define MSGQUEUE_0137C63D_UNIT_ACR 0x0a
+
+enum {
+ ACR_CMD_INIT_WPR_REGION = 0x00,
+ ACR_CMD_BOOTSTRAP_FALCON = 0x01,
+};
+
+static void
+acr_init_wpr_callback(struct nvkm_msgqueue *queue,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct {
+ struct nvkm_msgqueue_msg base;
+ u32 error_code;
+ } *msg = (void *)hdr;
+ const struct nvkm_subdev *subdev = queue->falcon->owner;
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "ACR WPR init failure: %d\n",
+ msg->error_code);
+ return;
+ }
+
+ nvkm_debug(subdev, "ACR WPR init complete\n");
+ complete_all(&queue->init_done);
+}
+
+static int
+acr_init_wpr(struct nvkm_msgqueue *queue)
+{
+ /*
+ * region_id: region ID in WPR region
+ * wpr_offset: offset in WPR region
+ */
+ struct {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 cmd_type;
+ u32 region_id;
+ u32 wpr_offset;
+ } cmd;
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
+ cmd.hdr.size = sizeof(cmd);
+ cmd.cmd_type = ACR_CMD_INIT_WPR_REGION;
+ cmd.region_id = 0x01;
+ cmd.wpr_offset = 0x00;
+
+ nvkm_msgqueue_post(queue, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ acr_init_wpr_callback, NULL, false);
+
+ return 0;
+}
+
+
+static void
+acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct acr_bootstrap_falcon_msg {
+ struct nvkm_msgqueue_msg base;
+
+ u32 falcon_id;
+ } *msg = (void *)hdr;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ u32 falcon_id = msg->falcon_id;
+
+ if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
+ nvkm_error(subdev, "in bootstrap falcon callback:\n");
+ nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
+ return;
+ }
+ nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
+}
+
+enum {
+ ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
+ ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
+};
+
+static int
+acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
+{
+ DECLARE_COMPLETION_ONSTACK(completed);
+ /*
+ * flags - Flag specifying RESET or no RESET.
+ * falcon id - Falcon id specifying falcon to bootstrap.
+ */
+ struct {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 cmd_type;
+ u32 flags;
+ u32 falcon_id;
+ } cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.unit_id = MSGQUEUE_0137C63D_UNIT_ACR;
+ cmd.hdr.size = sizeof(cmd);
+ cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
+ cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+ cmd.falcon_id = falcon;
+ nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ acr_boot_falcon_callback, &completed, true);
+
+ if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
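[Editor's note] acr_boot_falcon() is the synchronous pattern in miniature: post the command with an on-stack completion, then bound the wait at one second. A hedged caller-side sketch using only the function pointers visible in this file (example_boot_fecs is illustrative; error handling trimmed):

static int example_boot_fecs(struct nvkm_msgqueue *queue)
{
        const struct nvkm_msgqueue_acr_func *acr = queue->func->acr_func;

        if (!acr || !acr->boot_falcon)
                return -ENODEV;
        /* blocks until the falcon ACKs, or -ETIMEDOUT after 1s */
        return acr->boot_falcon(queue, NVKM_SECBOOT_FALCON_FECS);
}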
+
+static const struct nvkm_msgqueue_acr_func
+msgqueue_0137c63d_acr_func = {
+ .boot_falcon = acr_boot_falcon,
+};
+
+static void
+msgqueue_0137c63d_dtor(struct nvkm_msgqueue *queue)
+{
+ kfree(msgqueue_0137c63d(queue));
+}
+
+static const struct nvkm_msgqueue_func
+msgqueue_0137c63d_func = {
+ .init_func = &msgqueue_0137c63d_init_func,
+ .acr_func = &msgqueue_0137c63d_acr_func,
+ .cmd_queue = msgqueue_0137c63d_cmd_queue,
+ .recv = msgqueue_0137c63d_process_msgs,
+ .dtor = msgqueue_0137c63d_dtor,
+};
+
+int
+msgqueue_0137c63d_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+{
+ struct msgqueue_0137c63d *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ *queue = &ret->base;
+
+ nvkm_msgqueue_ctor(&msgqueue_0137c63d_func, falcon, &ret->base);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
new file mode 100644
index 000000000000..ed5d0da4f4e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c
@@ -0,0 +1,263 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "msgqueue.h"
+#include <engine/falcon.h>
+#include <subdev/secboot.h>
+
+/*
+ * This firmware runs on the SEC falcon. It only has one command and one
+ * message queue, and uses a different command line and init message.
+ */
+
+enum {
+ MSGQUEUE_0148CDEC_COMMAND_QUEUE = 0,
+ MSGQUEUE_0148CDEC_MESSAGE_QUEUE = 1,
+ MSGQUEUE_0148CDEC_NUM_QUEUES,
+};
+
+struct msgqueue_0148cdec {
+ struct nvkm_msgqueue base;
+
+ struct nvkm_msgqueue_queue queue[MSGQUEUE_0148CDEC_NUM_QUEUES];
+};
+#define msgqueue_0148cdec(q) \
+ container_of(q, struct msgqueue_0148cdec, base)
+
+static struct nvkm_msgqueue_queue *
+msgqueue_0148cdec_cmd_queue(struct nvkm_msgqueue *queue,
+ enum msgqueue_msg_priority priority)
+{
+ struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
+
+ return &priv->queue[MSGQUEUE_0148CDEC_COMMAND_QUEUE];
+}
+
+static void
+msgqueue_0148cdec_process_msgs(struct nvkm_msgqueue *queue)
+{
+ struct msgqueue_0148cdec *priv = msgqueue_0148cdec(queue);
+ struct nvkm_msgqueue_queue *q_queue =
+ &priv->queue[MSGQUEUE_0148CDEC_MESSAGE_QUEUE];
+
+ nvkm_msgqueue_process_msgs(&priv->base, q_queue);
+}
+
+
+/* Init unit */
+#define MSGQUEUE_0148CDEC_UNIT_INIT 0x01
+
+enum {
+ INIT_MSG_INIT = 0x0,
+};
+
+static void
+init_gen_cmdline(struct nvkm_msgqueue *queue, void *buf)
+{
+ struct {
+ u32 freq_hz;
+ u32 falc_trace_size;
+ u32 falc_trace_dma_base;
+ u32 falc_trace_dma_idx;
+ bool secure_mode;
+ } *args = buf;
+
+ args->secure_mode = false;
+}
+
+static int
+init_callback(struct nvkm_msgqueue *_queue, struct nvkm_msgqueue_hdr *hdr)
+{
+ struct msgqueue_0148cdec *priv = msgqueue_0148cdec(_queue);
+ struct {
+ struct nvkm_msgqueue_msg base;
+
+ u8 num_queues;
+ u16 os_debug_entry_point;
+
+ struct {
+ u32 offset;
+ u16 size;
+ u8 index;
+ u8 id;
+ } queue_info[MSGQUEUE_0148CDEC_NUM_QUEUES];
+
+ u16 sw_managed_area_offset;
+ u16 sw_managed_area_size;
+ } *init = (void *)hdr;
+ const struct nvkm_subdev *subdev = _queue->falcon->owner;
+ int i;
+
+ if (init->base.hdr.unit_id != MSGQUEUE_0148CDEC_UNIT_INIT) {
+ nvkm_error(subdev, "expected message from init unit\n");
+ return -EINVAL;
+ }
+
+ if (init->base.msg_type != INIT_MSG_INIT) {
+ nvkm_error(subdev, "expected SEC init msg\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < MSGQUEUE_0148CDEC_NUM_QUEUES; i++) {
+ u8 id = init->queue_info[i].id;
+ struct nvkm_msgqueue_queue *queue = &priv->queue[id];
+
+ mutex_init(&queue->mutex);
+
+ queue->index = init->queue_info[i].index;
+ queue->offset = init->queue_info[i].offset;
+ queue->size = init->queue_info[i].size;
+
+ if (id == MSGQUEUE_0148CDEC_MESSAGE_QUEUE) {
+ queue->head_reg = 0xa30 + (queue->index * 8);
+ queue->tail_reg = 0xa34 + (queue->index * 8);
+ } else {
+ queue->head_reg = 0xa00 + (queue->index * 8);
+ queue->tail_reg = 0xa04 + (queue->index * 8);
+ }
+
+ nvkm_debug(subdev,
+ "queue %d: index %d, offset 0x%08x, size 0x%08x\n",
+ id, queue->index, queue->offset, queue->size);
+ }
+
+ complete_all(&_queue->init_done);
+
+ return 0;
+}
+
+static const struct nvkm_msgqueue_init_func
+msgqueue_0148cdec_init_func = {
+ .gen_cmdline = init_gen_cmdline,
+ .init_callback = init_callback,
+};
+
+
+
+/* ACR unit */
+#define MSGQUEUE_0148CDEC_UNIT_ACR 0x08
+
+enum {
+ ACR_CMD_BOOTSTRAP_FALCON = 0x00,
+};
+
+static void
+acr_boot_falcon_callback(struct nvkm_msgqueue *priv,
+ struct nvkm_msgqueue_hdr *hdr)
+{
+ struct acr_bootstrap_falcon_msg {
+ struct nvkm_msgqueue_msg base;
+
+ u32 error_code;
+ u32 falcon_id;
+ } *msg = (void *)hdr;
+ const struct nvkm_subdev *subdev = priv->falcon->owner;
+ u32 falcon_id = msg->falcon_id;
+
+ if (msg->error_code) {
+ nvkm_error(subdev, "in bootstrap falcon callback:\n");
+ nvkm_error(subdev, "expected error code 0x%x\n",
+ msg->error_code);
+ return;
+ }
+
+ if (falcon_id >= NVKM_SECBOOT_FALCON_END) {
+ nvkm_error(subdev, "in bootstrap falcon callback:\n");
+ nvkm_error(subdev, "invalid falcon ID 0x%x\n", falcon_id);
+ return;
+ }
+
+ nvkm_debug(subdev, "%s booted\n", nvkm_secboot_falcon_name[falcon_id]);
+}
+
+enum {
+ ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES = 0,
+ ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_NO = 1,
+};
+
+static int
+acr_boot_falcon(struct nvkm_msgqueue *priv, enum nvkm_secboot_falcon falcon)
+{
+ DECLARE_COMPLETION_ONSTACK(completed);
+ /*
+ * flags - Flag specifying RESET or no RESET.
+ * falcon id - Falcon id specifying falcon to bootstrap.
+ */
+ struct {
+ struct nvkm_msgqueue_hdr hdr;
+ u8 cmd_type;
+ u32 flags;
+ u32 falcon_id;
+ } cmd;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.hdr.unit_id = MSGQUEUE_0148CDEC_UNIT_ACR;
+ cmd.hdr.size = sizeof(cmd);
+ cmd.cmd_type = ACR_CMD_BOOTSTRAP_FALCON;
+ cmd.flags = ACR_CMD_BOOTSTRAP_FALCON_FLAGS_RESET_YES;
+ cmd.falcon_id = falcon;
+ nvkm_msgqueue_post(priv, MSGQUEUE_MSG_PRIORITY_HIGH, &cmd.hdr,
+ acr_boot_falcon_callback, &completed, true);
+
+ if (!wait_for_completion_timeout(&completed, msecs_to_jiffies(1000)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+const struct nvkm_msgqueue_acr_func
+msgqueue_0148cdec_acr_func = {
+ .boot_falcon = acr_boot_falcon,
+};
+
+static void
+msgqueue_0148cdec_dtor(struct nvkm_msgqueue *queue)
+{
+ kfree(msgqueue_0148cdec(queue));
+}
+
+const struct nvkm_msgqueue_func
+msgqueue_0148cdec_func = {
+ .init_func = &msgqueue_0148cdec_init_func,
+ .acr_func = &msgqueue_0148cdec_acr_func,
+ .cmd_queue = msgqueue_0148cdec_cmd_queue,
+ .recv = msgqueue_0148cdec_process_msgs,
+ .dtor = msgqueue_0148cdec_dtor,
+};
+
+int
+msgqueue_0148cdec_new(struct nvkm_falcon *falcon, struct nvkm_msgqueue **queue)
+{
+ struct msgqueue_0148cdec *ret;
+
+ ret = kzalloc(sizeof(*ret), GFP_KERNEL);
+ if (!ret)
+ return -ENOMEM;
+
+ *queue = &ret->base;
+
+ nvkm_msgqueue_ctor(&msgqueue_0148cdec_func, falcon, &ret->base);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
index b537f111f39c..669c24028470 100644
--- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
+++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c
@@ -40,8 +40,8 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
for (i = 0; i < size / 4; i++) {
/* write new tag every 256B */
if ((i & 0x3f) == 0)
- nvkm_falcon_wr32(falcon, 0x188, tag++);
- nvkm_falcon_wr32(falcon, 0x184, ((u32 *)data)[i]);
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), ((u32 *)data)[i]);
}
/*
@@ -53,37 +53,98 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start,
/* write new tag every 256B */
if ((i & 0x3f) == 0)
- nvkm_falcon_wr32(falcon, 0x188, tag++);
- nvkm_falcon_wr32(falcon, 0x184, extra & (BIT(rem * 8) - 1));
+ nvkm_falcon_wr32(falcon, 0x188 + (port * 16), tag++);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16),
+ extra & (BIT(rem * 8) - 1));
++i;
}
/* code must be padded to 0x40 words */
for (; i & 0x3f; i++)
- nvkm_falcon_wr32(falcon, 0x184, 0);
+ nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0);
}
static void
+nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start,
+ u32 size, u8 port)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24));
+ for (i = 0; i < size / 4; i++)
+ nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]);
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
+ */
+ if (rem) {
+ u32 extra = ((u32 *)data)[i];
+
+ nvkm_falcon_wr32(falcon, 0xac4 + (port * 8),
+ extra & (BIT(rem * 8) - 1));
+ }
+}
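[Editor's note] The extra & (BIT(rem * 8) - 1) masking used in all of these transfer helpers keeps only the rem valid tail bytes of the final word. A standalone worked example (userspace C, with BIT() expanded by hand):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t extra = 0xddccbbaa; /* last word of the buffer */
        uint32_t rem = 3;            /* only 3 of its 4 bytes are valid */
        uint32_t mask = (1u << (rem * 8)) - 1; /* 0x00ffffff */

        assert((extra & mask) == 0x00ccbbaa); /* garbage byte 0xdd dropped */
        return 0;
}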
+
+static const u32 EMEM_START_ADDR = 0x1000000;
+
+static void
nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start,
u32 size, u8 port)
{
u8 rem = size % 4;
int i;
+ if (start >= EMEM_START_ADDR && falcon->has_emem)
+ return nvkm_falcon_v1_load_emem(falcon, data,
+ start - EMEM_START_ADDR, size,
+ port);
+
size -= rem;
- nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 24));
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24));
for (i = 0; i < size / 4; i++)
- nvkm_falcon_wr32(falcon, 0x1c4, ((u32 *)data)[i]);
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), ((u32 *)data)[i]);
/*
- * If size is not a multiple of 4, mask the last work to ensure garbage
- * does not get read
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get written
*/
if (rem) {
u32 extra = ((u32 *)data)[i];
- nvkm_falcon_wr32(falcon, 0x1c4, extra & (BIT(rem * 8) - 1));
+ nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8),
+ extra & (BIT(rem * 8) - 1));
+ }
+}
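[Editor's note] load_dmem() now routes any address at or above EMEM_START_ADDR to the EMEM helper, rebased to the start of EMEM. The same decision in isolation (standalone C; 0x1000000 mirrors EMEM_START_ADDR above):

#include <stdbool.h>
#include <stdint.h>

#define EMEM_START 0x1000000u

/* Returns true and rebases the address when the access targets EMEM. */
static bool targets_emem(uint32_t start, bool has_emem, uint32_t *rebased)
{
        if (start >= EMEM_START && has_emem) {
                *rebased = start - EMEM_START; /* EMEM offsets start at 0 */
                return true;
        }
        return false;
}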
+
+static void
+nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size,
+ u8 port, void *data)
+{
+ u8 rem = size % 4;
+ int i;
+
+ size -= rem;
+
+ nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25));
+ for (i = 0; i < size / 4; i++)
+ ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+ /*
+ * If size is not a multiple of 4, mask the last word to ensure garbage
+ * does not get read
+ */
+ if (rem) {
+ u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8));
+
+ for (i = size; i < size + rem; i++) {
+ ((u8 *)data)[i] = (u8)(extra & 0xff);
+ extra >>= 8;
+ }
}
}
@@ -94,18 +155,22 @@ nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size,
u8 rem = size % 4;
int i;
+ if (start >= EMEM_START_ADDR && falcon->has_emem)
+ return nvkm_falcon_v1_read_emem(falcon, start - EMEM_START_ADDR,
+ size, port, data);
+
size -= rem;
- nvkm_falcon_wr32(falcon, 0x1c0 + (port * 16), start | (0x1 << 25));
+ nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25));
for (i = 0; i < size / 4; i++)
- ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4);
+ ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
/*
- * If size is not a multiple of 4, mask the last work to ensure garbage
+ * If size is not a multiple of 4, mask the last word to ensure garbage
* does not get read
*/
if (rem) {
- u32 extra = nvkm_falcon_rd32(falcon, 0x1c4);
+ u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8));
for (i = size; i < size + rem; i++) {
((u8 *)data)[i] = (u8)(extra & 0xff);
@@ -118,6 +183,7 @@ static void
nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
{
u32 inst_loc;
+ u32 fbif;
/* disable instance block binding */
if (ctx == NULL) {
@@ -125,19 +191,34 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
return;
}
+ switch (falcon->owner->index) {
+ case NVKM_ENGINE_NVENC0:
+ case NVKM_ENGINE_NVENC1:
+ case NVKM_ENGINE_NVENC2:
+ fbif = 0x800;
+ break;
+ case NVKM_SUBDEV_PMU:
+ fbif = 0xe00;
+ break;
+ default:
+ fbif = 0x600;
+ break;
+ }
+
nvkm_falcon_wr32(falcon, 0x10c, 0x1);
/* setup apertures - virtual */
- nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_UCODE, 0x4);
- nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_VIRT, 0x0);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0);
/* setup apertures - physical */
- nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
- nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
- nvkm_falcon_wr32(falcon, 0xe00 + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5);
+ nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6);
/* Set context */
switch (nvkm_memory_target(ctx->memory)) {
case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break;
+ case NVKM_MEM_TARGET_HOST: inst_loc = 2; break;
case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break;
default:
WARN_ON(1);
@@ -146,9 +227,12 @@ nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *ctx)
/* Enable context */
nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1);
- nvkm_falcon_wr32(falcon, 0x480,
+ nvkm_falcon_wr32(falcon, 0x054,
((ctx->addr >> 12) & 0xfffffff) |
(inst_loc << 28) | (1 << 30));
+
+ nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000);
+ nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8);
}
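[Editor's note] The switch above only selects the FBIF base for the engine; each DMA index then owns one 32-bit aperture register at a 4-byte stride from that base, which is all the address math the wr32 calls perform:

/* Aperture register for a DMA index, relative to the falcon's mmio base. */
static u32 fbif_aperture_reg(u32 fbif, u32 dmaidx)
{
        return fbif + 4 * dmaidx; /* e.g. 0xe00 + 4 * idx on the PMU */
}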
static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 63566ba12fbb..1c5e5ba487a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -20,6 +20,7 @@ nvkm-y += nvkm/subdev/fb/gt215.o
nvkm-y += nvkm/subdev/fb/mcp77.o
nvkm-y += nvkm/subdev/fb/mcp89.o
nvkm-y += nvkm/subdev/fb/gf100.o
+nvkm-y += nvkm/subdev/fb/gf108.o
nvkm-y += nvkm/subdev/fb/gk104.o
nvkm-y += nvkm/subdev/fb/gk20a.o
nvkm-y += nvkm/subdev/fb/gm107.o
@@ -42,8 +43,10 @@ nvkm-y += nvkm/subdev/fb/ramnv50.o
nvkm-y += nvkm/subdev/fb/ramgt215.o
nvkm-y += nvkm/subdev/fb/rammcp77.o
nvkm-y += nvkm/subdev/fb/ramgf100.o
+nvkm-y += nvkm/subdev/fb/ramgf108.o
nvkm-y += nvkm/subdev/fb/ramgk104.o
nvkm-y += nvkm/subdev/fb/ramgm107.o
+nvkm-y += nvkm/subdev/fb/ramgm200.o
nvkm-y += nvkm/subdev/fb/ramgp100.o
nvkm-y += nvkm/subdev/fb/sddr2.o
nvkm-y += nvkm/subdev/fb/sddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
new file mode 100644
index 000000000000..56af84aa333b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf108.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "gf100.h"
+#include "ram.h"
+
+static const struct nvkm_fb_func
+gf108_fb = {
+ .dtor = gf100_fb_dtor,
+ .oneinit = gf100_fb_oneinit,
+ .init = gf100_fb_init,
+ .init_page = gf100_fb_init_page,
+ .intr = gf100_fb_intr,
+ .ram_new = gf108_ram_new,
+ .memtype_valid = gf100_fb_memtype_valid,
+};
+
+int
+gf108_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
+{
+ return gf100_fb_new_(&gf108_fb, device, index, pfb);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index fe5886013ac0..d83da5ddbc1e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -68,7 +68,7 @@ gm200_fb = {
.init = gm200_fb_init,
.init_page = gm200_fb_init_page,
.intr = gf100_fb_intr,
- .ram_new = gm107_ram_new,
+ .ram_new = gm200_ram_new,
.memtype_valid = gf100_fb_memtype_valid,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index b60068b7d8f9..fac7e73c3ddf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -19,13 +19,38 @@ int nv50_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void nv50_ram_put(struct nvkm_ram *, struct nvkm_mem **);
void __nv50_ram_put(struct nvkm_ram *, struct nvkm_mem *);
+int gf100_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+ struct nvkm_ram **);
int gf100_ram_ctor(const struct nvkm_ram_func *, struct nvkm_fb *,
- u32, struct nvkm_ram *);
+ struct nvkm_ram *);
+u32 gf100_ram_probe_fbp(const struct nvkm_ram_func *,
+ struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+ struct nvkm_device *, int, int *);
+u32 gf100_ram_probe_fbpa_amount(struct nvkm_device *, int);
int gf100_ram_get(struct nvkm_ram *, u64, u32, u32, u32, struct nvkm_mem **);
void gf100_ram_put(struct nvkm_ram *, struct nvkm_mem **);
+int gf100_ram_init(struct nvkm_ram *);
+int gf100_ram_calc(struct nvkm_ram *, u32);
+int gf100_ram_prog(struct nvkm_ram *);
+void gf100_ram_tidy(struct nvkm_ram *);
+
+u32 gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+ struct nvkm_device *, int, int *);
+
+int gk104_ram_new_(const struct nvkm_ram_func *, struct nvkm_fb *,
+ struct nvkm_ram **);
+void *gk104_ram_dtor(struct nvkm_ram *);
+int gk104_ram_init(struct nvkm_ram *);
+int gk104_ram_calc(struct nvkm_ram *, u32);
+int gk104_ram_prog(struct nvkm_ram *);
+void gk104_ram_tidy(struct nvkm_ram *);
+
+u32 gm107_ram_probe_fbp(const struct nvkm_ram_func *,
+ struct nvkm_device *, int, int *);
-int gk104_ram_ctor(struct nvkm_fb *, struct nvkm_ram **, u32);
-int gk104_ram_init(struct nvkm_ram *ram);
+u32 gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *, u32,
+ struct nvkm_device *, int, int *);
/* RAM type-specific MR calculation routines */
int nvkm_sddr2_calc(struct nvkm_ram *);
@@ -46,7 +71,9 @@ int nv50_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gt215_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gf108_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
+int gm200_ram_new(struct nvkm_fb *, struct nvkm_ram **);
int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
index 6758da93a3a1..53c32fc694e9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf100.c
@@ -124,7 +124,7 @@ gf100_ram_train(struct gf100_ramfuc *fuc, u32 magic)
}
}
-static int
+int
gf100_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct gf100_ram *ram = gf100_ram(base);
@@ -404,7 +404,7 @@ gf100_ram_calc(struct nvkm_ram *base, u32 freq)
return 0;
}
-static int
+int
gf100_ram_prog(struct nvkm_ram *base)
{
struct gf100_ram *ram = gf100_ram(base);
@@ -413,7 +413,7 @@ gf100_ram_prog(struct nvkm_ram *base)
return 0;
}
-static void
+void
gf100_ram_tidy(struct nvkm_ram *base)
{
struct gf100_ram *ram = gf100_ram(base);
@@ -500,7 +500,7 @@ gf100_ram_get(struct nvkm_ram *ram, u64 size, u32 align, u32 ncmin,
return 0;
}
-static int
+int
gf100_ram_init(struct nvkm_ram *base)
{
static const u8 train0[] = {
@@ -543,77 +543,96 @@ gf100_ram_init(struct nvkm_ram *base)
return 0;
}
-static const struct nvkm_ram_func
-gf100_ram_func = {
- .init = gf100_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
- .calc = gf100_ram_calc,
- .prog = gf100_ram_prog,
- .tidy = gf100_ram_tidy,
-};
+u32
+gf100_ram_probe_fbpa_amount(struct nvkm_device *device, int fbpa)
+{
+ return nvkm_rd32(device, 0x11020c + (fbpa * 0x1000));
+}
+
+u32
+gf100_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+ struct nvkm_device *device, int fbp, int *pltcs)
+{
+ if (!(fbpao & BIT(fbp))) {
+ *pltcs = 1;
+ return func->probe_fbpa_amount(device, fbp);
+ }
+ return 0;
+}
+
+u32
+gf100_ram_probe_fbp(const struct nvkm_ram_func *func,
+ struct nvkm_device *device, int fbp, int *pltcs)
+{
+ u32 fbpao = nvkm_rd32(device, 0x022554);
+ return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}
int
gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
- u32 maskaddr, struct nvkm_ram *ram)
+ struct nvkm_ram *ram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
const u32 rsvd_head = ( 256 * 1024); /* vga memory */
const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 parts = nvkm_rd32(device, 0x022438);
- u32 pmask = nvkm_rd32(device, maskaddr);
- u64 bsize = (u64)nvkm_rd32(device, 0x10f20c) << 20;
- u64 psize, size = 0;
enum nvkm_ram_type type = nvkm_fb_bios_memtype(bios);
- bool uniform = true;
- int ret, i;
-
- nvkm_debug(subdev, "100800: %08x\n", nvkm_rd32(device, 0x100800));
- nvkm_debug(subdev, "parts %08x mask %08x\n", parts, pmask);
-
- /* read amount of vram attached to each memory controller */
- for (i = 0; i < parts; i++) {
- if (pmask & (1 << i))
- continue;
-
- psize = (u64)nvkm_rd32(device, 0x11020c + (i * 0x1000)) << 20;
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
+ u32 fbps = nvkm_rd32(device, 0x022438);
+ u64 total = 0, lcomm = ~0, lower, ubase, usize;
+ int ret, fbp, ltcs, ltcn = 0;
+
+ nvkm_debug(subdev, "%d FBP(s)\n", fbps);
+ for (fbp = 0; fbp < fbps; fbp++) {
+ u32 size = func->probe_fbp(func, device, fbp, &ltcs);
+ if (size) {
+ nvkm_debug(subdev, "FBP %d: %4d MiB, %d LTC(s)\n",
+ fbp, size, ltcs);
+ lcomm = min(lcomm, (u64)(size / ltcs) << 20);
+ total += size << 20;
+ ltcn += ltcs;
+ } else {
+ nvkm_debug(subdev, "FBP %d: disabled\n", fbp);
}
-
- nvkm_debug(subdev, "%d: %d MiB\n", i, (u32)(psize >> 20));
- size += psize;
}
- ret = nvkm_ram_ctor(func, fb, type, size, 0, ram);
+ lower = lcomm * ltcn;
+ ubase = lcomm + func->upper;
+ usize = total - lower;
+
+ nvkm_debug(subdev, "Lower: %4lld MiB @ %010llx\n", lower >> 20, 0ULL);
+ nvkm_debug(subdev, "Upper: %4lld MiB @ %010llx\n", usize >> 20, ubase);
+ nvkm_debug(subdev, "Total: %4lld MiB\n", total >> 20);
+
+ ret = nvkm_ram_ctor(func, fb, type, total, 0, ram);
if (ret)
return ret;
nvkm_mm_fini(&ram->vram);
- /* if all controllers have the same amount attached, there's no holes */
- if (uniform) {
+ /* Some GPUs are in what's known as a "mixed memory" configuration.
+ *
+ * This is either where some FBPs have more memory than the others,
+ * or where LTCs have been disabled on an FBP.
+ */
+ if (lower != total) {
+ /* The common memory amount is addressed normally. */
ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- (size - rsvd_head - rsvd_tail) >>
- NVKM_RAM_MM_SHIFT, 1);
+ (lower - rsvd_head) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
- } else {
- /* otherwise, address lowest common amount from 0GiB */
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- ((bsize * parts) - rsvd_head) >>
- NVKM_RAM_MM_SHIFT, 1);
+
+ /* And the rest is much higher in the physical address
+ * space, and may not be usable for certain operations.
+ */
+ ret = nvkm_mm_init(&ram->vram, ubase >> NVKM_RAM_MM_SHIFT,
+ (usize - rsvd_tail) >> NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- ret = nvkm_mm_init(&ram->vram, (0x0200000000ULL + bsize) >>
- NVKM_RAM_MM_SHIFT,
- (size - (bsize * parts) - rsvd_tail) >>
+ } else {
+ /* GPUs without mixed-memory are a lot nicer... */
+ ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
+ (total - rsvd_head - rsvd_tail) >>
NVKM_RAM_MM_SHIFT, 1);
if (ret)
return ret;
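[Editor's note] A worked example of the split computed above, assuming three FBPs reporting 1024, 1024 and 512 MiB with one LTC each (so lcomm = 512 MiB, ltcn = 3, total = 2560 MiB), checked in standalone C:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t mib = 1ull << 20;
        uint64_t upper = 0x0200000000ull; /* gf100's func->upper (8 GiB) */
        uint64_t lcomm = 512 * mib, total = 2560 * mib;
        int ltcn = 3;

        uint64_t lower = lcomm * ltcn;  /* 1536 MiB addressed from 0 */
        uint64_t ubase = lcomm + upper; /* upper chunk starts here */
        uint64_t usize = total - lower; /* 1024 MiB live up high */

        assert(lower == 1536 * mib && usize == 1024 * mib);
        assert(ubase == 512 * mib + upper);
        return 0;
}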
@@ -624,7 +643,8 @@ gf100_ram_ctor(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
}
int
-gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+gf100_ram_new_(const struct nvkm_ram_func *func,
+ struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_bios *bios = subdev->device->bios;
@@ -635,7 +655,7 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gf100_ram_func, fb, 0x022554, &ram->base);
+ ret = gf100_ram_ctor(func, fb, &ram->base);
if (ret)
return ret;
@@ -711,3 +731,23 @@ gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
ram->fuc.r_0x13d8f4 = ramfuc_reg(0x13d8f4);
return 0;
}
+
+static const struct nvkm_ram_func
+gf100_ram = {
+ .upper = 0x0200000000,
+ .probe_fbp = gf100_ram_probe_fbp,
+ .probe_fbp_amount = gf100_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+ .init = gf100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gf100_ram_calc,
+ .prog = gf100_ram_prog,
+ .tidy = gf100_ram_tidy,
+};
+
+int
+gf100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ return gf100_ram_new_(&gf100_ram, fb, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
new file mode 100644
index 000000000000..985ec64cf369
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgf108.c
@@ -0,0 +1,62 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+u32
+gf108_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+ struct nvkm_device *device, int fbp, int *pltcs)
+{
+ u32 fbpt = nvkm_rd32(device, 0x022438);
+ u32 fbpat = nvkm_rd32(device, 0x02243c);
+ u32 fbpas = fbpat / fbpt;
+ u32 fbpa = fbp * fbpas;
+ u32 size = 0;
+ while (fbpas--) {
+ if (!(fbpao & BIT(fbpa)))
+ size += func->probe_fbpa_amount(device, fbpa);
+ fbpa++;
+ }
+ *pltcs = 1;
+ return size;
+}
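[Editor's note] The fbpas = fbpat / fbpt division gives the number of FBPAs owned by each FBP, and fbp * fbpas the index of its first one; a worked example, assuming 2 FBPs and 4 FBPAs in total:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t fbpt = 2, fbpat = 4;
        uint32_t fbpas = fbpat / fbpt; /* 2 FBPAs per FBP */
        uint32_t fbp = 1;
        uint32_t first = fbp * fbpas;  /* FBP 1 owns FBPAs 2 and 3 */

        assert(fbpas == 2 && first == 2);
        return 0;
}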
+
+static const struct nvkm_ram_func
+gf108_ram = {
+ .upper = 0x0200000000,
+ .probe_fbp = gf100_ram_probe_fbp,
+ .probe_fbp_amount = gf108_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+ .init = gf100_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gf100_ram_calc,
+ .prog = gf100_ram_prog,
+ .tidy = gf100_ram_tidy,
+};
+
+int
+gf108_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ return gf100_ram_new_(&gf108_ram, fb, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
index fb8a1239743d..f6c00791722c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgk104.c
@@ -1108,7 +1108,7 @@ gk104_ram_calc_xits(struct gk104_ram *ram, struct nvkm_ram_data *next)
return ret;
}
-static int
+int
gk104_ram_calc(struct nvkm_ram *base, u32 freq)
{
struct gk104_ram *ram = gk104_ram(base);
@@ -1227,7 +1227,7 @@ gk104_ram_prog_0(struct gk104_ram *ram, u32 freq)
nvkm_mask(device, 0x10f444, mask, data);
}
-static int
+int
gk104_ram_prog(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
@@ -1247,7 +1247,7 @@ gk104_ram_prog(struct nvkm_ram *base)
return (ram->base.next == &ram->base.xition);
}
-static void
+void
gk104_ram_tidy(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
@@ -1509,7 +1509,7 @@ done:
return ret;
}
-static void *
+void *
gk104_ram_dtor(struct nvkm_ram *base)
{
struct gk104_ram *ram = gk104_ram(base);
@@ -1522,31 +1522,14 @@ gk104_ram_dtor(struct nvkm_ram *base)
return ram;
}
-static const struct nvkm_ram_func
-gk104_ram_func = {
- .dtor = gk104_ram_dtor,
- .init = gk104_ram_init,
- .get = gf100_ram_get,
- .put = gf100_ram_put,
- .calc = gk104_ram_calc,
- .prog = gk104_ram_prog,
- .tidy = gk104_ram_tidy,
-};
-
int
-gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
-{
- return gk104_ram_ctor(fb, pram, 0x022554);
-}
-
-int
-gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
+gk104_ram_new_(const struct nvkm_ram_func *func, struct nvkm_fb *fb,
+ struct nvkm_ram **pram)
{
struct nvkm_subdev *subdev = &fb->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
- struct nvkm_gpio *gpio = device->gpio;
- struct dcb_gpio_func func;
+ struct dcb_gpio_func gpio;
struct gk104_ram *ram;
int ret, i;
u8 ramcfg = nvbios_ramcfg_index(subdev);
@@ -1556,7 +1539,7 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
return -ENOMEM;
*pram = &ram->base;
- ret = gf100_ram_ctor(&gk104_ram_func, fb, maskaddr, &ram->base);
+ ret = gf100_ram_ctor(func, fb, &ram->base);
if (ret)
return ret;
@@ -1614,18 +1597,18 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
}
/* lookup memory voltage gpios */
- ret = nvkm_gpio_find(gpio, 0, 0x18, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(device->gpio, 0, 0x18, DCB_GPIO_UNUSED, &gpio);
if (ret == 0) {
- ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (func.line * 0x04));
- ram->fuc.r_funcMV[0] = (func.log[0] ^ 2) << 12;
- ram->fuc.r_funcMV[1] = (func.log[1] ^ 2) << 12;
+ ram->fuc.r_gpioMV = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+ ram->fuc.r_funcMV[0] = (gpio.log[0] ^ 2) << 12;
+ ram->fuc.r_funcMV[1] = (gpio.log[1] ^ 2) << 12;
}
- ret = nvkm_gpio_find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
+ ret = nvkm_gpio_find(device->gpio, 0, 0x2e, DCB_GPIO_UNUSED, &gpio);
if (ret == 0) {
- ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (func.line * 0x04));
- ram->fuc.r_func2E[0] = (func.log[0] ^ 2) << 12;
- ram->fuc.r_func2E[1] = (func.log[1] ^ 2) << 12;
+ ram->fuc.r_gpio2E = ramfuc_reg(0x00d610 + (gpio.line * 0x04));
+ ram->fuc.r_func2E[0] = (gpio.log[0] ^ 2) << 12;
+ ram->fuc.r_func2E[1] = (gpio.log[1] ^ 2) << 12;
}
ram->fuc.r_gpiotrig = ramfuc_reg(0x00d604);
@@ -1717,3 +1700,24 @@ gk104_ram_ctor(struct nvkm_fb *fb, struct nvkm_ram **pram, u32 maskaddr)
ram->fuc.r_0x100750 = ramfuc_reg(0x100750);
return 0;
}
+
+static const struct nvkm_ram_func
+gk104_ram = {
+ .upper = 0x0200000000,
+ .probe_fbp = gf100_ram_probe_fbp,
+ .probe_fbp_amount = gf108_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+ .dtor = gk104_ram_dtor,
+ .init = gk104_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gk104_ram_calc,
+ .prog = gk104_ram_prog,
+ .tidy = gk104_ram_tidy,
+};
+
+int
+gk104_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ return gk104_ram_new_(&gk104_ram, fb, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
index ac862d1d77bd..3f0b56347291 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm107.c
@@ -23,8 +23,31 @@
*/
#include "ram.h"
+u32
+gm107_ram_probe_fbp(const struct nvkm_ram_func *func,
+ struct nvkm_device *device, int fbp, int *pltcs)
+{
+ u32 fbpao = nvkm_rd32(device, 0x021c14);
+ return func->probe_fbp_amount(func, fbpao, device, fbp, pltcs);
+}
+
+static const struct nvkm_ram_func
+gm107_ram = {
+ .upper = 0x1000000000,
+ .probe_fbp = gm107_ram_probe_fbp,
+ .probe_fbp_amount = gf108_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+ .dtor = gk104_ram_dtor,
+ .init = gk104_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gk104_ram_calc,
+ .prog = gk104_ram_prog,
+ .tidy = gk104_ram_tidy,
+};
+
int
gm107_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
- return gk104_ram_ctor(fb, pram, 0x021c14);
+ return gk104_ram_new_(&gm107_ram, fb, pram);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
new file mode 100644
index 000000000000..fd8facf90476
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgm200.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2017 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs <bskeggs@redhat.com>
+ */
+#include "ram.h"
+
+u32
+gm200_ram_probe_fbp_amount(const struct nvkm_ram_func *func, u32 fbpao,
+ struct nvkm_device *device, int fbp, int *pltcs)
+{
+ u32 ltcs = nvkm_rd32(device, 0x022450);
+ u32 fbpas = nvkm_rd32(device, 0x022458);
+ u32 fbpa = fbp * fbpas;
+ u32 size = 0;
+ if (!(nvkm_rd32(device, 0x021d38) & BIT(fbp))) {
+ u32 ltco = nvkm_rd32(device, 0x021d70 + (fbp * 4));
+ u32 ltcm = ~ltco & ((1 << ltcs) - 1);
+
+ while (fbpas--) {
+ if (!(fbpao & (1 << fbpa)))
+ size += func->probe_fbpa_amount(device, fbpa);
+ fbpa++;
+ }
+
+ *pltcs = hweight32(ltcm);
+ }
+ return size;
+}
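[Editor's note] On gm200 the LTC count per FBP comes from inverting the disable mask ltco, clamping it to ltcs bits and counting what survives; a standalone equivalent of that step (hweight32 replaced by a popcount builtin):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t ltcs = 4;          /* LTCs per FBP on this chip */
        uint32_t ltco = 0x00000002; /* LTC 1 fused off */
        uint32_t ltcm = ~ltco & ((1u << ltcs) - 1);

        assert(__builtin_popcount(ltcm) == 3); /* three LTCs remain */
        return 0;
}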
+
+static const struct nvkm_ram_func
+gm200_ram = {
+ .upper = 0x1000000000,
+ .probe_fbp = gm107_ram_probe_fbp,
+ .probe_fbp_amount = gm200_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gf100_ram_probe_fbpa_amount,
+ .dtor = gk104_ram_dtor,
+ .init = gk104_ram_init,
+ .get = gf100_ram_get,
+ .put = gf100_ram_put,
+ .calc = gk104_ram_calc,
+ .prog = gk104_ram_prog,
+ .tidy = gk104_ram_tidy,
+};
+
+int
+gm200_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
+{
+ return gk104_ram_new_(&gm200_ram, fb, pram);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
index 405faabe8dcd..cac70047ad5a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -76,8 +76,18 @@ gp100_ram_init(struct nvkm_ram *ram)
return 0;
}
+static u32
+gp100_ram_probe_fbpa(struct nvkm_device *device, int fbpa)
+{
+ return nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
+}
+
static const struct nvkm_ram_func
-gp100_ram_func = {
+gp100_ram = {
+ .upper = 0x1000000000,
+ .probe_fbp = gm107_ram_probe_fbp,
+ .probe_fbp_amount = gm200_ram_probe_fbp_amount,
+ .probe_fbpa_amount = gp100_ram_probe_fbpa,
.init = gp100_ram_init,
.get = gf100_ram_get,
.put = gf100_ram_put,
@@ -87,60 +97,10 @@ int
gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
{
struct nvkm_ram *ram;
- struct nvkm_subdev *subdev = &fb->subdev;
- struct nvkm_device *device = subdev->device;
- enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
- const u32 rsvd_head = ( 256 * 1024); /* vga memory */
- const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
- u32 fbpa_num = nvkm_rd32(device, 0x02243c), fbpa;
- u32 fbio_opt = nvkm_rd32(device, 0x021c14);
- u64 part, size = 0, comm = ~0ULL;
- bool mixed = false;
- int ret;
-
- nvkm_debug(subdev, "02243c: %08x\n", fbpa_num);
- nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
- for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
- if (!(fbio_opt & (1 << fbpa))) {
- part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
- nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
- part = part << 20;
- if (part != comm) {
- if (comm != ~0ULL)
- mixed = true;
- comm = min(comm, part);
- }
- size = size + part;
- }
- }
-
- ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
- *pram = ram;
- if (ret)
- return ret;
- nvkm_mm_fini(&ram->vram);
+ if (!(ram = *pram = kzalloc(sizeof(*ram), GFP_KERNEL)))
+ return -ENOMEM;
- if (mixed) {
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- ((comm * fbpa_num) - rsvd_head) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
+ return gf100_ram_ctor(&gp100_ram, fb, ram);
- ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
- NVKM_RAM_MM_SHIFT,
- (size - (comm * fbpa_num) - rsvd_tail) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
- } else {
- ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
- (size - rsvd_head - rsvd_tail) >>
- NVKM_RAM_MM_SHIFT, 1);
- if (ret)
- return ret;
- }
-
- return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
index b7b01c3f7037..dd391809fef7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/anx9805.c
@@ -134,7 +134,7 @@ struct anx9805_aux {
static int
anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct anx9805_aux *aux = anx9805_aux(base);
struct anx9805_pad *pad = aux->pad;
@@ -143,7 +143,7 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
u8 buf[16] = {};
u8 tmp;
- AUX_DBG(&aux->base, "%02x %05x %d", type, addr, size);
+ AUX_DBG(&aux->base, "%02x %05x %d", type, addr, *size);
tmp = nvkm_rdi2cr(adap, pad->addr, 0x07) & ~0x04;
nvkm_wri2cr(adap, pad->addr, 0x07, tmp | 0x04);
@@ -152,12 +152,12 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
nvkm_wri2cr(adap, aux->addr, 0xe4, 0x80);
if (!(type & 1)) {
- memcpy(buf, data, size);
+ memcpy(buf, data, *size);
AUX_DBG(&aux->base, "%16ph", buf);
- for (i = 0; i < size; i++)
+ for (i = 0; i < *size; i++)
nvkm_wri2cr(adap, aux->addr, 0xf0 + i, buf[i]);
}
- nvkm_wri2cr(adap, aux->addr, 0xe5, ((size - 1) << 4) | type);
+ nvkm_wri2cr(adap, aux->addr, 0xe5, ((*size - 1) << 4) | type);
nvkm_wri2cr(adap, aux->addr, 0xe6, (addr & 0x000ff) >> 0);
nvkm_wri2cr(adap, aux->addr, 0xe7, (addr & 0x0ff00) >> 8);
nvkm_wri2cr(adap, aux->addr, 0xe8, (addr & 0xf0000) >> 16);
@@ -176,10 +176,10 @@ anx9805_aux_xfer(struct nvkm_i2c_aux *base, bool retry,
}
if (type & 1) {
- for (i = 0; i < size; i++)
+ for (i = 0; i < *size; i++)
buf[i] = nvkm_rdi2cr(adap, aux->addr, 0xf0 + i);
AUX_DBG(&aux->base, "%16ph", buf);
- memcpy(data, buf, size);
+ memcpy(data, buf, *size);
}
ret = 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index 01d5c5a56e2e..d172e42dd228 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -51,7 +51,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, cnt);
+ ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
if (ret < 0) {
nvkm_i2c_aux_release(aux);
return ret;
@@ -115,7 +115,7 @@ nvkm_i2c_aux_acquire(struct nvkm_i2c_aux *aux)
int
nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type,
- u32 addr, u8 *data, u8 size)
+ u32 addr, u8 *data, u8 *size)
{
return aux->func->xfer(aux, retry, type, addr, data, size);
}
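[Editor's note] With size now passed by pointer, a short AUX reply updates the caller's length instead of being silently padded (see the *size = stat & 0x1f assignments in the g94/gm200 hunks below). A hedged caller sketch; type 9 is assumed to be the native AUX read request, and example_dpcd_read is illustrative only:

static int example_dpcd_read(struct nvkm_i2c_aux *aux, u32 addr,
                             u8 *buf, u8 *size)
{
        int ret = nvkm_i2c_aux_xfer(aux, true, 9 /* native read */, addr,
                                    buf, size);
        if (ret < 0)
                return ret;
        return *size; /* may be shorter than what was requested */
}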
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
index fc6b162fa0b1..27a4a39c87f0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
@@ -4,7 +4,7 @@
struct nvkm_i2c_aux_func {
int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
- u32 addr, u8 *data, u8 size);
+ u32 addr, u8 *data, u8 *size);
int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw,
bool enhanced_framing);
};
@@ -15,7 +15,7 @@ int nvkm_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *,
int id, struct nvkm_i2c_aux **);
void nvkm_i2c_aux_del(struct nvkm_i2c_aux **);
int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type,
- u32 addr, u8 *data, u8 size);
+ u32 addr, u8 *data, u8 *size);
int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index b80236a4eeac..ab8cb196c34e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -74,7 +74,7 @@ g94_i2c_aux_init(struct g94_i2c_aux *aux)
static int
g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct g94_i2c_aux *aux = g94_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u32 xbuf[4] = {};
int ret, i;
- AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
ret = g94_i2c_aux_init(aux);
if (ret < 0)
@@ -97,7 +97,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
if (!(type & 1)) {
- memcpy(xbuf, data, size);
+ memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00e4c0 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
ctrl &= ~0x0001f0ff;
ctrl |= type << 12;
- ctrl |= size - 1;
+ ctrl |= *size - 1;
nvkm_wr32(device, 0x00e4e0 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
xbuf[i / 4] = nvkm_rd32(device, 0x00e4d0 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
- memcpy(data, xbuf, size);
+ memcpy(data, xbuf, *size);
+ *size = stat & 0x0000001f;
}
out:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index ed458c7f056b..ee091fa79628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -74,7 +74,7 @@ gm200_i2c_aux_init(struct gm200_i2c_aux *aux)
static int
gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
- u8 type, u32 addr, u8 *data, u8 size)
+ u8 type, u32 addr, u8 *data, u8 *size)
{
struct gm200_i2c_aux *aux = gm200_i2c_aux(obj);
struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
@@ -83,7 +83,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
u32 xbuf[4] = {};
int ret, i;
- AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, size);
+ AUX_TRACE(&aux->base, "%d: %08x %d", type, addr, *size);
ret = gm200_i2c_aux_init(aux);
if (ret < 0)
@@ -97,7 +97,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
}
if (!(type & 1)) {
- memcpy(xbuf, data, size);
+ memcpy(xbuf, data, *size);
for (i = 0; i < 16; i += 4) {
AUX_TRACE(&aux->base, "wr %08x", xbuf[i / 4]);
nvkm_wr32(device, 0x00d930 + base + i, xbuf[i / 4]);
@@ -107,7 +107,7 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
ctrl = nvkm_rd32(device, 0x00d954 + base);
ctrl &= ~0x0001f0ff;
ctrl |= type << 12;
- ctrl |= size - 1;
+ ctrl |= *size - 1;
nvkm_wr32(device, 0x00d950 + base, addr);
/* (maybe) retry transaction a number of times on failure... */
@@ -151,7 +151,8 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
xbuf[i / 4] = nvkm_rd32(device, 0x00d940 + base + i);
AUX_TRACE(&aux->base, "rd %08x", xbuf[i / 4]);
}
- memcpy(data, xbuf, size);
+ memcpy(data, xbuf, *size);
+ *size = stat & 0x0000001f;
}
out:
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
index 2c6b374f1420..d80dbc8f09b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
@@ -30,7 +30,7 @@ gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400));
- nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000);
}
@@ -41,7 +41,7 @@ gf100_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400));
- nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000);
}
@@ -52,7 +52,7 @@ gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0400));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400));
- nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
index c673853f3213..9025ed1bd2a9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
@@ -30,7 +30,7 @@ gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x122120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800));
- nvkm_error(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000);
}
@@ -41,7 +41,7 @@ gk104_ibus_intr_rop(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x124120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800));
- nvkm_error(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000);
}
@@ -52,7 +52,7 @@ gk104_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
u32 addr = nvkm_rd32(device, 0x128120 + (i * 0x0800));
u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800));
u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800));
- nvkm_error(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
+ nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
index a73f690eb4b5..3306f9fe7140 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/base.c
@@ -23,6 +23,7 @@
*/
#include "priv.h"
+#include <core/msgqueue.h>
#include <subdev/timer.h>
void
@@ -85,7 +86,8 @@ nvkm_pmu_reset(struct nvkm_pmu *pmu)
);
/* Reset. */
- pmu->func->reset(pmu);
+ if (pmu->func->reset)
+ pmu->func->reset(pmu);
/* Wait for IMEM/DMEM scrubbing to be complete. */
nvkm_msec(device, 2000,
@@ -113,10 +115,18 @@ nvkm_pmu_init(struct nvkm_subdev *subdev)
return ret;
}
+static int
+nvkm_pmu_oneinit(struct nvkm_subdev *subdev)
+{
+ struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+}
+
static void *
nvkm_pmu_dtor(struct nvkm_subdev *subdev)
{
struct nvkm_pmu *pmu = nvkm_pmu(subdev);
+ nvkm_msgqueue_del(&pmu->queue);
nvkm_falcon_del(&pmu->falcon);
return nvkm_pmu(subdev);
}
@@ -125,6 +135,7 @@ static const struct nvkm_subdev_func
nvkm_pmu = {
.dtor = nvkm_pmu_dtor,
.preinit = nvkm_pmu_preinit,
+ .oneinit = nvkm_pmu_oneinit,
.init = nvkm_pmu_init,
.fini = nvkm_pmu_fini,
.intr = nvkm_pmu_intr,
@@ -138,7 +149,7 @@ nvkm_pmu_ctor(const struct nvkm_pmu_func *func, struct nvkm_device *device,
pmu->func = func;
INIT_WORK(&pmu->recv.work, nvkm_pmu_recv);
init_waitqueue_head(&pmu->recv.wait);
- return nvkm_falcon_v1_new(&pmu->subdev, "PMU", 0x10a000, &pmu->falcon);
+ return 0;
}
int
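The pmu/base.c hunks above move falcon allocation out of the constructor into a new oneinit hook, make the reset callback optional, and tear the message queue down before the falcon. A sketch of the callback ordering this relies on, assuming the usual nvkm subdev lifecycle:

	/* Sketch of the nvkm_subdev_func hook order assumed here (not verbatim):
	 *
	 *   nvkm_pmu_ctor()     - no falcon allocated any more; returns 0
	 *   nvkm_pmu_oneinit()  - runs exactly once; creates pmu->falcon
	 *   nvkm_pmu_init()     - runs on every init/resume
	 *   nvkm_pmu_dtor()     - deletes pmu->queue first, then pmu->falcon
	 */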
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
index 0b8a1cc4a0ee..44bef22bce52 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gm20b.c
@@ -20,15 +20,36 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <engine/falcon.h>
+#include <core/msgqueue.h>
#include "priv.h"
+static void
+gm20b_pmu_recv(struct nvkm_pmu *pmu)
+{
+ if (!pmu->queue) {
+ nvkm_warn(&pmu->subdev,
+ "recv function called while no firmware set!\n");
+ return;
+ }
+
+ nvkm_msgqueue_recv(pmu->queue);
+}
+
static const struct nvkm_pmu_func
gm20b_pmu = {
- .reset = gt215_pmu_reset,
+ .intr = gt215_pmu_intr,
+ .recv = gm20b_pmu_recv,
};
int
gm20b_pmu_new(struct nvkm_device *device, int index, struct nvkm_pmu **ppmu)
{
- return nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+ int ret;
+
+ ret = nvkm_pmu_new_(&gm20b_pmu, device, index, ppmu);
+ if (ret)
+ return ret;
+
+ return 0;
}
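gm20b now exposes intr/recv hooks instead of a reset hook, with incoming messages parsed by the firmware's queue. On such PMU-managed systems, falcon reboots go through that queue rather than through engine registers; a hedged sketch of the request path, using names from the ACR hunks further below:

	/* Ask the boot falcon's LS firmware to reboot another falcon. */
	struct nvkm_msgqueue *queue = device->pmu->queue;
	int ret = nvkm_msgqueue_acr_boot_falcon(queue, NVKM_SECBOOT_FALCON_FECS);
	if (ret)
		nvkm_error(subdev, "cannot boot FECS falcon\n");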
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
index 5076d1500f47..ac7f50ae53c6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/Kbuild
@@ -1,7 +1,13 @@
nvkm-y += nvkm/subdev/secboot/base.o
+nvkm-y += nvkm/subdev/secboot/hs_ucode.o
nvkm-y += nvkm/subdev/secboot/ls_ucode_gr.o
+nvkm-y += nvkm/subdev/secboot/ls_ucode_msgqueue.o
nvkm-y += nvkm/subdev/secboot/acr.o
nvkm-y += nvkm/subdev/secboot/acr_r352.o
nvkm-y += nvkm/subdev/secboot/acr_r361.o
+nvkm-y += nvkm/subdev/secboot/acr_r364.o
+nvkm-y += nvkm/subdev/secboot/acr_r367.o
+nvkm-y += nvkm/subdev/secboot/acr_r375.o
nvkm-y += nvkm/subdev/secboot/gm200.o
nvkm-y += nvkm/subdev/secboot/gm20b.o
+nvkm-y += nvkm/subdev/secboot/gp102.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
index 97795b342b6f..93d804652d44 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr.h
@@ -37,12 +37,10 @@ struct nvkm_acr_func {
void (*dtor)(struct nvkm_acr *);
int (*oneinit)(struct nvkm_acr *, struct nvkm_secboot *);
int (*fini)(struct nvkm_acr *, struct nvkm_secboot *, bool);
- int (*load)(struct nvkm_acr *, struct nvkm_secboot *,
+ int (*load)(struct nvkm_acr *, struct nvkm_falcon *,
struct nvkm_gpuobj *, u64);
int (*reset)(struct nvkm_acr *, struct nvkm_secboot *,
enum nvkm_secboot_falcon);
- int (*start)(struct nvkm_acr *, struct nvkm_secboot *,
- enum nvkm_secboot_falcon);
};
/**
@@ -50,7 +48,7 @@ struct nvkm_acr_func {
*
* @boot_falcon: ID of the falcon that will perform secure boot
* @managed_falcons: bitfield of falcons managed by this ACR
- * @start_address: virtual start address of the HS bootloader
+ * @optional_falcons: bitfield of falcons we can live without
*/
struct nvkm_acr {
const struct nvkm_acr_func *func;
@@ -58,12 +56,15 @@ struct nvkm_acr {
enum nvkm_secboot_falcon boot_falcon;
unsigned long managed_falcons;
- u32 start_address;
+ unsigned long optional_falcons;
};
void *nvkm_acr_load_firmware(const struct nvkm_subdev *, const char *, size_t);
struct nvkm_acr *acr_r352_new(unsigned long);
struct nvkm_acr *acr_r361_new(unsigned long);
+struct nvkm_acr *acr_r364_new(unsigned long);
+struct nvkm_acr *acr_r367_new(enum nvkm_secboot_falcon, unsigned long);
+struct nvkm_acr *acr_r375_new(enum nvkm_secboot_falcon, unsigned long);
#endif
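acr.h now exposes one constructor per supported ACR revision, plus an optional_falcons bitfield for firmware that may legitimately be absent. An illustrative sketch of how a chip-specific secboot file might use it (the falcon selection here is an example, not taken from a particular chip file):

	struct nvkm_acr *acr;

	/* r367/r375 also take the boot falcon as their first argument. */
	acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
			   BIT(NVKM_SECBOOT_FALCON_FECS) |
			   BIT(NVKM_SECBOOT_FALCON_GPCCS));
	if (IS_ERR(acr))
		return PTR_ERR(acr);

	/* Falcons listed here are skipped quietly if their firmware is missing. */
	acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_GPCCS);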
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
index 1aa37ea18580..993a38eb3ed5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.c
@@ -21,35 +21,16 @@
*/
#include "acr_r352.h"
+#include "hs_ucode.h"
#include <core/gpuobj.h>
#include <core/firmware.h>
#include <engine/falcon.h>
-
-/**
- * struct hsf_fw_header - HS firmware descriptor
- * @sig_dbg_offset: offset of the debug signature
- * @sig_dbg_size: size of the debug signature
- * @sig_prod_offset: offset of the production signature
- * @sig_prod_size: size of the production signature
- * @patch_loc: offset of the offset (sic) of where the signature is
- * @patch_sig: offset of the offset (sic) to add to sig_*_offset
- * @hdr_offset: offset of the load header (see struct hs_load_header)
- * @hdr_size: size of above header
- *
- * This structure is embedded in the HS firmware image at
- * hs_bin_hdr.header_offset.
- */
-struct hsf_fw_header {
- u32 sig_dbg_offset;
- u32 sig_dbg_size;
- u32 sig_prod_offset;
- u32 sig_prod_size;
- u32 patch_loc;
- u32 patch_sig;
- u32 hdr_offset;
- u32 hdr_size;
-};
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <subdev/pmu.h>
+#include <core/msgqueue.h>
+#include <engine/sec2.h>
/**
* struct acr_r352_flcn_bl_desc - DMEM bootloader descriptor
@@ -95,15 +76,14 @@ struct acr_r352_flcn_bl_desc {
*/
static void
acr_r352_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *_img, u64 wpr_addr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
struct acr_r352_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &_img->ucode_desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
- base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = (base + pdesc->app_resident_code_offset) >> 8;
addr_data = (base + pdesc->app_resident_data_offset) >> 8;
@@ -167,6 +147,96 @@ struct hsflcn_acr_desc {
*/
/**
+ * struct acr_r352_lsf_lsb_header - LS firmware header
+ * @signature: signature to verify the firmware against
+ * @ucode_off: offset of the ucode blob in the WPR region. The ucode
+ * blob contains the bootloader, code and data of the
+ * LS falcon
+ * @ucode_size: size of the ucode blob, including bootloader
+ * @data_size: size of the ucode blob data
+ * @bl_code_size: size of the bootloader code
+ * @bl_imem_off: offset in imem of the bootloader
+ * @bl_data_off: offset of the bootloader data in WPR region
+ * @bl_data_size: size of the bootloader data
+ * @app_code_off: offset of the app code relative to ucode_off
+ * @app_code_size: size of the app code
+ * @app_data_off: offset of the app data relative to ucode_off
+ * @app_data_size: size of the app data
+ * @flags: flags for the secure bootloader
+ *
+ * This structure is written into the WPR region for each managed falcon. Each
+ * instance is referenced by the lsb_offset member of the corresponding
+ * lsf_wpr_header.
+ */
+struct acr_r352_lsf_lsb_header {
+ /**
+ * LS falcon signatures
+ * @prd_keys: signature to use in production mode
+	 * @dbg_keys: signature to use in debug mode
+	 * @b_prd_present: whether the production key is present
+	 * @b_dbg_present: whether the debug key is present
+ * @falcon_id: ID of the falcon the ucode applies to
+ */
+ struct {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ } signature;
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+};
+
+/**
+ * struct acr_r352_lsf_wpr_header - LS blob WPR Header
+ * @falcon_id: LS falcon ID
+ * @lsb_offset: offset of the lsb_lsf_header in the WPR region
+ * @bootstrap_owner: secure falcon responsible for bootstrapping the LS falcon
+ * @lazy_bootstrap: skip bootstrapping by ACR
+ * @status: bootstrapping status
+ *
+ * An array of these is written at the beginning of the WPR region, one for
+ * each managed falcon. The array is terminated by an instance whose
+ * falcon_id is LSF_FALCON_ID_INVALID.
+ */
+struct acr_r352_lsf_wpr_header {
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 status;
+#define LSF_IMAGE_STATUS_NONE 0
+#define LSF_IMAGE_STATUS_COPY 1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
+};
+
+/**
+ * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
+ */
+struct ls_ucode_img_r352 {
+ struct ls_ucode_img base;
+
+ struct acr_r352_lsf_wpr_header wpr_header;
+ struct acr_r352_lsf_lsb_header lsb_header;
+};
+#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
+
+/**
* ls_ucode_img_load() - create a lsf_ucode_img and load it
*/
struct ls_ucode_img *
@@ -255,7 +325,7 @@ acr_r352_ls_img_fill_headers(struct acr_r352 *acr,
* image size
*/
offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
- lhdr->ucode_off = offset;
+ _img->ucode_off = lhdr->ucode_off = offset;
offset += _img->ucode_size;
/*
@@ -341,7 +411,7 @@ acr_r352_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
*/
int
acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
- struct nvkm_gpuobj *wpr_blob, u32 wpr_addr)
+ struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
{
struct ls_ucode_img *_img;
u32 pos = 0;
@@ -381,8 +451,8 @@ acr_r352_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
return 0;
}
-/* Both size and address of WPR need to be 128K-aligned */
-#define WPR_ALIGNMENT 0x20000
+/* Both size and address of WPR need to be 256K-aligned */
+#define WPR_ALIGNMENT 0x40000
/**
* acr_r352_prepare_ls_blob() - prepare the LS blob
*
@@ -399,7 +469,7 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
struct ls_ucode_img *img, *t;
unsigned long managed_falcons = acr->base.managed_falcons;
int managed_count = 0;
- u32 image_wpr_size;
+ u32 image_wpr_size, ls_blob_size;
int falcon_id;
int ret;
@@ -411,6 +481,12 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
img = acr->func->ls_ucode_img_load(acr, falcon_id);
if (IS_ERR(img)) {
+ if (acr->base.optional_falcons & BIT(falcon_id)) {
+ managed_falcons &= ~BIT(falcon_id);
+ nvkm_info(subdev, "skipping %s falcon...\n",
+ nvkm_secboot_falcon_name[falcon_id]);
+ continue;
+ }
ret = PTR_ERR(img);
goto cleanup;
}
@@ -419,6 +495,24 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
managed_count++;
}
+ /* Commit the actual list of falcons we will manage from now on */
+ acr->base.managed_falcons = managed_falcons;
+
+ /*
+	 * If the boot falcon has its own firmware, let it manage the
+	 * bootstrapping of the other falcons.
+ */
+ if (acr->func->ls_func[acr->base.boot_falcon] &&
+ (managed_falcons & BIT(acr->base.boot_falcon))) {
+ for_each_set_bit(falcon_id, &managed_falcons,
+ NVKM_SECBOOT_FALCON_END) {
+ if (falcon_id == acr->base.boot_falcon)
+ continue;
+
+ acr->lazy_bootstrap |= BIT(falcon_id);
+ }
+ }
+
/*
* Fill the WPR and LSF headers with the right offsets and compute
* required WPR size
@@ -426,8 +520,17 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
image_wpr_size = acr->func->ls_fill_headers(acr, &imgs);
image_wpr_size = ALIGN(image_wpr_size, WPR_ALIGNMENT);
+ ls_blob_size = image_wpr_size;
+
+ /*
+ * If we need a shadow area, allocate twice the size and use the
+ * upper half as WPR
+ */
+ if (wpr_size == 0 && acr->func->shadow_blob)
+ ls_blob_size *= 2;
+
/* Allocate GPU object that will contain the WPR region */
- ret = nvkm_gpuobj_new(subdev->device, image_wpr_size, WPR_ALIGNMENT,
+ ret = nvkm_gpuobj_new(subdev->device, ls_blob_size, WPR_ALIGNMENT,
false, NULL, &acr->ls_blob);
if (ret)
goto cleanup;
@@ -438,6 +541,9 @@ acr_r352_prepare_ls_blob(struct acr_r352 *acr, u64 wpr_addr, u32 wpr_size)
/* If WPR address and size are not fixed, set them to fit the LS blob */
if (wpr_size == 0) {
wpr_addr = acr->ls_blob->addr;
+ if (acr->func->shadow_blob)
+ wpr_addr += acr->ls_blob->size / 2;
+
wpr_size = image_wpr_size;
/*
* But if the WPR region is set by the bootloader, it is illegal for
@@ -469,41 +575,17 @@ cleanup:
-/**
- * acr_r352_hsf_patch_signature() - patch HS blob with correct signature
- */
-static void
-acr_r352_hsf_patch_signature(struct nvkm_secboot *sb, void *acr_image)
-{
- struct fw_bin_header *hsbin_hdr = acr_image;
- struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
- void *hs_data = acr_image + hsbin_hdr->data_offset;
- void *sig;
- u32 sig_size;
-
- /* Falcon in debug or production mode? */
- if (sb->boot_falcon->debug) {
- sig = acr_image + fw_hdr->sig_dbg_offset;
- sig_size = fw_hdr->sig_dbg_size;
- } else {
- sig = acr_image + fw_hdr->sig_prod_offset;
- sig_size = fw_hdr->sig_prod_size;
- }
-
- /* Patch signature */
- memcpy(hs_data + fw_hdr->patch_loc, sig + fw_hdr->patch_sig, sig_size);
-}
-
-static void
+void
acr_r352_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
- struct hsflcn_acr_desc *desc)
+ void *_desc)
{
+ struct hsflcn_acr_desc *desc = _desc;
struct nvkm_gpuobj *ls_blob = acr->ls_blob;
/* WPR region information if WPR is not fixed */
if (sb->wpr_size == 0) {
- u32 wpr_start = ls_blob->addr;
- u32 wpr_end = wpr_start + ls_blob->size;
+ u64 wpr_start = ls_blob->addr;
+ u64 wpr_end = wpr_start + ls_blob->size;
desc->wpr_region_id = 1;
desc->regions.no_regions = 2;
@@ -533,8 +615,8 @@ acr_r352_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->code_dma_base = lower_32_bits(addr_code);
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hdr->app[0].sec_code_off;
- bl_desc->sec_code_size = hdr->app[0].sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->data_dma_base = lower_32_bits(addr_data);
bl_desc->data_size = hdr->data_size;
@@ -562,7 +644,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
void *acr_data;
int ret;
- acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
+ acr_image = hs_ucode_load_blob(subdev, sb->boot_falcon, fw);
if (IS_ERR(acr_image))
return PTR_ERR(acr_image);
@@ -571,15 +653,12 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
load_hdr = acr_image + fw_hdr->hdr_offset;
acr_data = acr_image + hsbin_hdr->data_offset;
- /* Patch signature */
- acr_r352_hsf_patch_signature(sb, acr_image);
-
/* Patch descriptor with WPR information? */
if (patch) {
struct hsflcn_acr_desc *desc;
desc = acr_data + load_hdr->data_dma_base;
- acr_r352_fixup_hs_desc(acr, sb, desc);
+ acr->func->fixup_hs_desc(acr, sb, desc);
}
if (load_hdr->num_apps > ACR_R352_MAX_APPS) {
@@ -589,7 +668,7 @@ acr_r352_prepare_hs_blob(struct acr_r352 *acr, struct nvkm_secboot *sb,
goto cleanup;
}
memcpy(load_header, load_hdr, sizeof(*load_header) +
- (sizeof(load_hdr->app[0]) * load_hdr->num_apps));
+ (sizeof(load_hdr->apps[0]) * 2 * load_hdr->num_apps));
/* Create ACR blob and copy HS data to it */
ret = nvkm_gpuobj_new(subdev->device, ALIGN(hsbin_hdr->data_size, 256),
@@ -607,30 +686,6 @@ cleanup:
return ret;
}
-static int
-acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
-{
- const struct nvkm_subdev *subdev = acr->base.subdev;
- struct fw_bin_header *hdr;
- struct fw_bl_desc *hsbl_desc;
-
- acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
- if (IS_ERR(acr->hsbl_blob)) {
- int ret = PTR_ERR(acr->hsbl_blob);
-
- acr->hsbl_blob = NULL;
- return ret;
- }
-
- hdr = acr->hsbl_blob;
- hsbl_desc = acr->hsbl_blob + hdr->header_offset;
-
- /* virtual start address for boot vector */
- acr->base.start_address = hsbl_desc->start_tag << 8;
-
- return 0;
-}
-
/**
* acr_r352_load_blobs - load blobs common to all ACR V1 versions.
*
@@ -641,6 +696,7 @@ acr_r352_prepare_hsbl_blob(struct acr_r352 *acr)
int
acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
+ struct nvkm_subdev *subdev = &sb->subdev;
int ret;
/* Firmware already loaded? */
@@ -672,9 +728,24 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
/* Load the HS firmware bootloader */
if (!acr->hsbl_blob) {
- ret = acr_r352_prepare_hsbl_blob(acr);
- if (ret)
+ acr->hsbl_blob = nvkm_acr_load_firmware(subdev, "acr/bl", 0);
+ if (IS_ERR(acr->hsbl_blob)) {
+ ret = PTR_ERR(acr->hsbl_blob);
+ acr->hsbl_blob = NULL;
return ret;
+ }
+
+ if (acr->base.boot_falcon != NVKM_SECBOOT_FALCON_PMU) {
+ acr->hsbl_unload_blob = nvkm_acr_load_firmware(subdev,
+ "acr/unload_bl", 0);
+ if (IS_ERR(acr->hsbl_unload_blob)) {
+ ret = PTR_ERR(acr->hsbl_unload_blob);
+ acr->hsbl_unload_blob = NULL;
+ return ret;
+ }
+ } else {
+ acr->hsbl_unload_blob = acr->hsbl_blob;
+ }
}
acr->firmware_ok = true;
@@ -684,35 +755,42 @@ acr_r352_load_blobs(struct acr_r352 *acr, struct nvkm_secboot *sb)
}
/**
- * acr_r352_load() - prepare HS falcon to run the specified blob, mapped
- * at GPU address offset.
+ * acr_r352_load() - prepare HS falcon to run the specified blob
+ *
+ * Returns the start address to use, or a negative error value.
*/
static int
-acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+acr_r352_load(struct nvkm_acr *_acr, struct nvkm_falcon *falcon,
struct nvkm_gpuobj *blob, u64 offset)
{
struct acr_r352 *acr = acr_r352(_acr);
- struct nvkm_falcon *falcon = sb->boot_falcon;
- struct fw_bin_header *hdr = acr->hsbl_blob;
- struct fw_bl_desc *hsbl_desc = acr->hsbl_blob + hdr->header_offset;
- void *blob_data = acr->hsbl_blob + hdr->data_offset;
- void *hsbl_code = blob_data + hsbl_desc->code_off;
- void *hsbl_data = blob_data + hsbl_desc->data_off;
- u32 code_size = ALIGN(hsbl_desc->code_size, 256);
- const struct hsf_load_header *load_hdr;
const u32 bl_desc_size = acr->func->hs_bl_desc_size;
+ const struct hsf_load_header *load_hdr;
+ struct fw_bin_header *bl_hdr;
+ struct fw_bl_desc *hsbl_desc;
+ void *bl, *blob_data, *hsbl_code, *hsbl_data;
+ u32 code_size;
u8 bl_desc[bl_desc_size];
/* Find the bootloader descriptor for our blob and copy it */
if (blob == acr->load_blob) {
load_hdr = &acr->load_bl_header;
+ bl = acr->hsbl_blob;
} else if (blob == acr->unload_blob) {
load_hdr = &acr->unload_bl_header;
+ bl = acr->hsbl_unload_blob;
} else {
nvkm_error(_acr->subdev, "invalid secure boot blob!\n");
return -EINVAL;
}
+ bl_hdr = bl;
+ hsbl_desc = bl + bl_hdr->header_offset;
+ blob_data = bl + bl_hdr->data_offset;
+ hsbl_code = blob_data + hsbl_desc->code_off;
+ hsbl_data = blob_data + hsbl_desc->data_off;
+ code_size = ALIGN(hsbl_desc->code_size, 256);
+
/*
* Copy HS bootloader data
*/
@@ -732,23 +810,32 @@ acr_r352_load(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
nvkm_falcon_load_dmem(falcon, bl_desc, hsbl_desc->dmem_load_off,
bl_desc_size, 0);
- return 0;
+ return hsbl_desc->start_tag << 8;
}
static int
acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
+ struct nvkm_subdev *subdev = &sb->subdev;
int i;
/* Run the unload blob to unprotect the WPR region */
if (acr->unload_blob && sb->wpr_set) {
int ret;
- nvkm_debug(&sb->subdev, "running HS unload blob\n");
- ret = sb->func->run_blob(sb, acr->unload_blob);
- if (ret)
+ nvkm_debug(subdev, "running HS unload blob\n");
+ ret = sb->func->run_blob(sb, acr->unload_blob, sb->halt_falcon);
+ if (ret < 0)
return ret;
- nvkm_debug(&sb->subdev, "HS unload blob completed\n");
+ /*
+		 * The unload blob returns this error code; it is not a failure
+		 * and matches the expected behavior on RM as well.
+ */
+ if (ret && ret != 0x1d) {
+ nvkm_error(subdev, "HS unload failed, ret 0x%08x", ret);
+ return -EINVAL;
+ }
+ nvkm_debug(subdev, "HS unload blob completed\n");
}
for (i = 0; i < NVKM_SECBOOT_FALCON_END; i++)
@@ -759,9 +846,44 @@ acr_r352_shutdown(struct acr_r352 *acr, struct nvkm_secboot *sb)
return 0;
}
+/**
+ * Check whether the WPR region has indeed been set by the ACR firmware and
+ * matches where it should be.
+ */
+static bool
+acr_r352_wpr_is_set(const struct acr_r352 *acr, const struct nvkm_secboot *sb)
+{
+ const struct nvkm_subdev *subdev = &sb->subdev;
+ const struct nvkm_device *device = subdev->device;
+ u64 wpr_lo, wpr_hi;
+ u64 wpr_range_lo, wpr_range_hi;
+
+ nvkm_wr32(device, 0x100cd4, 0x2);
+ wpr_lo = (nvkm_rd32(device, 0x100cd4) & ~0xff);
+ wpr_lo <<= 8;
+ nvkm_wr32(device, 0x100cd4, 0x3);
+ wpr_hi = (nvkm_rd32(device, 0x100cd4) & ~0xff);
+ wpr_hi <<= 8;
+
+ if (sb->wpr_size != 0) {
+ wpr_range_lo = sb->wpr_addr;
+ wpr_range_hi = wpr_range_lo + sb->wpr_size;
+ } else {
+ wpr_range_lo = acr->ls_blob->addr;
+ wpr_range_hi = wpr_range_lo + acr->ls_blob->size;
+ }
+
+ return (wpr_lo >= wpr_range_lo && wpr_lo < wpr_range_hi &&
+ wpr_hi > wpr_range_lo && wpr_hi <= wpr_range_hi);
+}
+
static int
acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
{
+ const struct nvkm_subdev *subdev = &sb->subdev;
+ unsigned long managed_falcons = acr->base.managed_falcons;
+ u32 reg;
+ int falcon_id;
int ret;
if (sb->wpr_set)
@@ -772,40 +894,95 @@ acr_r352_bootstrap(struct acr_r352 *acr, struct nvkm_secboot *sb)
if (ret)
return ret;
- nvkm_debug(&sb->subdev, "running HS load blob\n");
- ret = sb->func->run_blob(sb, acr->load_blob);
+ nvkm_debug(subdev, "running HS load blob\n");
+ ret = sb->func->run_blob(sb, acr->load_blob, sb->boot_falcon);
/* clear halt interrupt */
nvkm_falcon_clear_interrupt(sb->boot_falcon, 0x10);
- if (ret)
+ sb->wpr_set = acr_r352_wpr_is_set(acr, sb);
+ if (ret < 0) {
return ret;
- nvkm_debug(&sb->subdev, "HS load blob completed\n");
+ } else if (ret > 0) {
+ nvkm_error(subdev, "HS load failed, ret 0x%08x", ret);
+ return -EINVAL;
+ }
+ nvkm_debug(subdev, "HS load blob completed\n");
+ /* WPR must be set at this point */
+ if (!sb->wpr_set) {
+ nvkm_error(subdev, "ACR blob completed but WPR not set!\n");
+ return -EINVAL;
+ }
+
+ /* Run LS firmwares post_run hooks */
+ for_each_set_bit(falcon_id, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
+ const struct acr_r352_ls_func *func =
+ acr->func->ls_func[falcon_id];
+
+ if (func->post_run)
+ func->post_run(&acr->base, sb);
+ }
+
+ /* Re-start ourselves if we are managed */
+ if (!nvkm_secboot_is_managed(sb, acr->base.boot_falcon))
+ return 0;
+
+ /* Enable interrupts */
+ nvkm_falcon_wr32(sb->boot_falcon, 0x10, 0xff);
+ nvkm_mc_intr_mask(subdev->device, sb->boot_falcon->owner->index, true);
+
+ /* Start LS firmware on boot falcon */
+ nvkm_falcon_start(sb->boot_falcon);
+
+ /*
+	 * There is a bug where the LS firmware sometimes needs to be started
+	 * twice (this happens only on SEC). Detect and work around that
+	 * condition.
+ *
+ * Once started, the falcon will end up in STOPPED condition (bit 5)
+ * if successful, or in HALT condition (bit 4) if not.
+ */
+ nvkm_msec(subdev->device, 1,
+ if ((reg = nvkm_rd32(subdev->device,
+ sb->boot_falcon->addr + 0x100)
+ & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_debug(subdev, "applying workaround for start bug...");
+ nvkm_falcon_start(sb->boot_falcon);
+ nvkm_msec(subdev->device, 1,
+ if ((reg = nvkm_rd32(subdev->device,
+ sb->boot_falcon->addr + 0x100)
+ & 0x30) != 0)
+ break;
+ );
+ if (reg & BIT(4)) {
+ nvkm_error(subdev, "%s failed to start\n",
+ nvkm_secboot_falcon_name[acr->base.boot_falcon]);
+ return -EINVAL;
+ }
+ }
- sb->wpr_set = true;
+ nvkm_debug(subdev, "%s started\n",
+ nvkm_secboot_falcon_name[acr->base.boot_falcon]);
return 0;
}
-/*
- * acr_r352_reset() - execute secure boot from the prepared state
+/**
+ * acr_r352_reset_nopmu - dummy reset method when no PMU firmware is loaded
*
- * Load the HS bootloader and ask the falcon to run it. This will in turn
- * load the HS firmware and run it, so once the falcon stops all the managed
- * falcons should have their LS firmware loaded and be ready to run.
+ * Reset is done by re-executing secure boot from scratch, with lazy bootstrap
+ * disabled. This has the effect of making all managed falcons ready-to-run.
*/
static int
-acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+acr_r352_reset_nopmu(struct acr_r352 *acr, struct nvkm_secboot *sb,
+ enum nvkm_secboot_falcon falcon)
{
- struct acr_r352 *acr = acr_r352(_acr);
int ret;
/*
- * Dummy GM200 implementation: perform secure boot each time we are
- * called on FECS. Since only FECS and GPCCS are managed and started
- * together, this ought to be safe.
- *
- * Once we have proper PMU firmware and support, this will be changed
- * to a proper call to the PMU method.
+ * Perform secure boot each time we are called on FECS. Since only FECS
+ * and GPCCS are managed and started together, this ought to be safe.
*/
if (falcon != NVKM_SECBOOT_FALCON_FECS)
goto end;
@@ -814,7 +991,7 @@ acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
if (ret)
return ret;
- acr_r352_bootstrap(acr, sb);
+ ret = acr_r352_bootstrap(acr, sb);
if (ret)
return ret;
@@ -823,28 +1000,57 @@ end:
return 0;
}
+/*
+ * acr_r352_reset() - execute secure boot from the prepared state
+ *
+ * Load the HS bootloader and ask the falcon to run it. This will in turn
+ * load the HS firmware and run it, so once the falcon stops all the managed
+ * falcons should have their LS firmware loaded and be ready to run.
+ */
static int
-acr_r352_start(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
- enum nvkm_secboot_falcon falcon)
+acr_r352_reset(struct nvkm_acr *_acr, struct nvkm_secboot *sb,
+ enum nvkm_secboot_falcon falcon)
{
struct acr_r352 *acr = acr_r352(_acr);
- const struct nvkm_subdev *subdev = &sb->subdev;
- int base;
+ struct nvkm_msgqueue *queue;
+ const char *fname = nvkm_secboot_falcon_name[falcon];
+ bool wpr_already_set = sb->wpr_set;
+ int ret;
- switch (falcon) {
- case NVKM_SECBOOT_FALCON_FECS:
- base = 0x409000;
+ /* Make sure secure boot is performed */
+ ret = acr_r352_bootstrap(acr, sb);
+ if (ret)
+ return ret;
+
+ /* No PMU interface? */
+ if (!nvkm_secboot_is_managed(sb, _acr->boot_falcon)) {
+ /* Redo secure boot entirely if it was already done */
+ if (wpr_already_set)
+ return acr_r352_reset_nopmu(acr, sb, falcon);
+	/* Else return the result of the initial invocation */
+ else
+ return ret;
+ }
+
+ switch (_acr->boot_falcon) {
+ case NVKM_SECBOOT_FALCON_PMU:
+ queue = sb->subdev.device->pmu->queue;
break;
- case NVKM_SECBOOT_FALCON_GPCCS:
- base = 0x41a000;
+ case NVKM_SECBOOT_FALCON_SEC2:
+ queue = sb->subdev.device->sec2->queue;
break;
default:
- nvkm_error(subdev, "cannot start unhandled falcon!\n");
return -EINVAL;
}
- nvkm_wr32(subdev->device, base + 0x130, 0x00000002);
- acr->falcon_state[falcon] = RUNNING;
+ /* Otherwise just ask the LS firmware to reset the falcon */
+ nvkm_debug(&sb->subdev, "resetting %s falcon\n", fname);
+ ret = nvkm_msgqueue_acr_boot_falcon(queue, falcon);
+ if (ret) {
+ nvkm_error(&sb->subdev, "cannot boot %s falcon\n", fname);
+ return ret;
+ }
+ nvkm_debug(&sb->subdev, "falcon %s reset\n", fname);
return 0;
}
@@ -864,6 +1070,8 @@ acr_r352_dtor(struct nvkm_acr *_acr)
nvkm_gpuobj_del(&acr->unload_blob);
+ if (_acr->boot_falcon != NVKM_SECBOOT_FALCON_PMU)
+ kfree(acr->hsbl_unload_blob);
kfree(acr->hsbl_blob);
nvkm_gpuobj_del(&acr->load_blob);
nvkm_gpuobj_del(&acr->ls_blob);
@@ -887,8 +1095,88 @@ acr_r352_ls_gpccs_func = {
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
+
+
+/**
+ * struct acr_r352_pmu_bl_desc - PMU DMEM bootloader descriptor
+ * @dma_idx: DMA context to be used by BL while loading code/data
+ * @code_dma_base: 256B-aligned Physical FB Address where code is located
+ * @code_size_total: total size of the code part in the ucode
+ * @code_size_to_load: size of the code part to load in PMU IMEM.
+ * @code_entry_point: entry point in the code.
+ * @data_dma_base: Physical FB address where data part of ucode is located
+ * @data_size: Total size of the data portion.
+ * @overlay_dma_base: Physical FB address for resident code present in ucode
+ * @argc: Total number of args
+ * @argv: offset where args are copied into PMU's DMEM.
+ *
+ * Structure used by the PMU bootloader to load the rest of the code
+ */
+struct acr_r352_pmu_bl_desc {
+ u32 dma_idx;
+ u32 code_dma_base;
+ u32 code_size_total;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+ u16 code_dma_base1;
+ u16 data_dma_base1;
+ u16 overlay_dma_base1;
+};
+
+/**
+ * acr_r352_generate_pmu_bl_desc() - populate a DMEM BL descriptor for PMU LS image
+ */
+static void
+acr_r352_generate_pmu_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
+ struct acr_r352_pmu_bl_desc *desc = _desc;
+ u64 base;
+ u64 addr_code;
+ u64 addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = (base + pdesc->app_resident_code_offset) >> 8;
+ addr_data = (base + pdesc->app_resident_data_offset) >> 8;
+ addr_args = pmu->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->dma_idx = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = lower_32_bits(addr_code);
+ desc->code_dma_base1 = upper_32_bits(addr_code);
+ desc->code_size_total = pdesc->app_size;
+ desc->code_size_to_load = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = lower_32_bits(addr_data);
+ desc->data_dma_base1 = upper_32_bits(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->overlay_dma_base = lower_32_bits(addr_code);
+ desc->overlay_dma_base1 = upper_32_bits(addr_code);
+ desc->argc = 1;
+ desc->argv = addr_args;
+}
+
+static const struct acr_r352_ls_func
+acr_r352_ls_pmu_func = {
+ .load = acr_ls_ucode_load_pmu,
+ .generate_bl_desc = acr_r352_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r352_pmu_bl_desc),
+ .post_run = acr_ls_pmu_post_run,
+};
+
const struct acr_r352_func
acr_r352_func = {
+ .fixup_hs_desc = acr_r352_fixup_hs_desc,
.generate_hs_bl_desc = acr_r352_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r352_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
@@ -897,6 +1185,7 @@ acr_r352_func = {
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r352_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r352_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_PMU] = &acr_r352_ls_pmu_func,
},
};
@@ -906,7 +1195,6 @@ acr_r352_base_func = {
.fini = acr_r352_fini,
.load = acr_r352_load,
.reset = acr_r352_reset,
- .start = acr_r352_start,
};
struct nvkm_acr *
@@ -915,6 +1203,13 @@ acr_r352_new_(const struct acr_r352_func *func,
unsigned long managed_falcons)
{
struct acr_r352 *acr;
+ int i;
+
+ /* Check that all requested falcons are supported */
+ for_each_set_bit(i, &managed_falcons, NVKM_SECBOOT_FALCON_END) {
+ if (!func->ls_func[i])
+ return ERR_PTR(-ENOTSUPP);
+ }
acr = kzalloc(sizeof(*acr), GFP_KERNEL);
if (!acr)
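When the WPR region is not fixed by the bootloader and the ACR revision declares shadow_blob, the LS blob above is allocated at twice the WPR size and its upper half becomes the WPR region proper. A worked sketch of that layout, assuming image_wpr_size came out to 0x40000 after 256K alignment:

	/* ls_blob_size = 2 * image_wpr_size = 0x80000
	 *
	 *   ls_blob->addr + 0x00000 .. 0x3ffff : shadow copy (host-written)
	 *   ls_blob->addr + 0x40000 .. 0x7ffff : WPR region proper
	 */
	u64 wpr_addr = acr->ls_blob->addr + acr->ls_blob->size / 2;
	u32 wpr_size = image_wpr_size;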
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
index ad5923b0fd3c..6e88520566c9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r352.h
@@ -24,131 +24,27 @@
#include "acr.h"
#include "ls_ucode.h"
+#include "hs_ucode.h"
struct ls_ucode_img;
#define ACR_R352_MAX_APPS 8
-/*
- *
- * LS blob structures
- *
- */
-
-/**
- * struct acr_r352_lsf_lsb_header - LS firmware header
- * @signature: signature to verify the firmware against
- * @ucode_off: offset of the ucode blob in the WPR region. The ucode
- * blob contains the bootloader, code and data of the
- * LS falcon
- * @ucode_size: size of the ucode blob, including bootloader
- * @data_size: size of the ucode blob data
- * @bl_code_size: size of the bootloader code
- * @bl_imem_off: offset in imem of the bootloader
- * @bl_data_off: offset of the bootloader data in WPR region
- * @bl_data_size: size of the bootloader data
- * @app_code_off: offset of the app code relative to ucode_off
- * @app_code_size: size of the app code
- * @app_data_off: offset of the app data relative to ucode_off
- * @app_data_size: size of the app data
- * @flags: flags for the secure bootloader
- *
- * This structure is written into the WPR region for each managed falcon. Each
- * instance is referenced by the lsb_offset member of the corresponding
- * lsf_wpr_header.
- */
-struct acr_r352_lsf_lsb_header {
- /**
- * LS falcon signatures
- * @prd_keys: signature to use in production mode
- * @dgb_keys: signature to use in debug mode
- * @b_prd_present: whether the production key is present
- * @b_dgb_present: whether the debug key is present
- * @falcon_id: ID of the falcon the ucode applies to
- */
- struct {
- u8 prd_keys[2][16];
- u8 dbg_keys[2][16];
- u32 b_prd_present;
- u32 b_dbg_present;
- u32 falcon_id;
- } signature;
- u32 ucode_off;
- u32 ucode_size;
- u32 data_size;
- u32 bl_code_size;
- u32 bl_imem_off;
- u32 bl_data_off;
- u32 bl_data_size;
- u32 app_code_off;
- u32 app_code_size;
- u32 app_data_off;
- u32 app_data_size;
- u32 flags;
#define LSF_FLAG_LOAD_CODE_AT_0 1
#define LSF_FLAG_DMACTL_REQ_CTX 4
#define LSF_FLAG_FORCE_PRIV_LOAD 8
-};
-
-/**
- * struct acr_r352_lsf_wpr_header - LS blob WPR Header
- * @falcon_id: LS falcon ID
- * @lsb_offset: offset of the lsb_lsf_header in the WPR region
- * @bootstrap_owner: secure falcon reponsible for bootstrapping the LS falcon
- * @lazy_bootstrap: skip bootstrapping by ACR
- * @status: bootstrapping status
- *
- * An array of these is written at the beginning of the WPR region, one for
- * each managed falcon. The array is terminated by an instance which falcon_id
- * is LSF_FALCON_ID_INVALID.
- */
-struct acr_r352_lsf_wpr_header {
- u32 falcon_id;
- u32 lsb_offset;
- u32 bootstrap_owner;
- u32 lazy_bootstrap;
- u32 status;
-#define LSF_IMAGE_STATUS_NONE 0
-#define LSF_IMAGE_STATUS_COPY 1
-#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
-#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
-#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
-#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
-#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
-};
-
-/**
- * struct ls_ucode_img_r352 - ucode image augmented with r352 headers
- */
-struct ls_ucode_img_r352 {
- struct ls_ucode_img base;
-
- struct acr_r352_lsf_wpr_header wpr_header;
- struct acr_r352_lsf_lsb_header lsb_header;
-};
-#define ls_ucode_img_r352(i) container_of(i, struct ls_ucode_img_r352, base)
-
-
-/*
- * HS blob structures
- */
-struct hsf_load_header_app {
- u32 sec_code_off;
- u32 sec_code_size;
-};
+static inline u32
+hsf_load_header_app_off(const struct hsf_load_header *hdr, u32 app)
+{
+ return hdr->apps[app];
+}
-/**
- * struct hsf_load_header - HS firmware load header
- */
-struct hsf_load_header {
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 data_dma_base;
- u32 data_size;
- u32 num_apps;
- struct hsf_load_header_app app[0];
-};
+static inline u32
+hsf_load_header_app_size(const struct hsf_load_header *hdr, u32 app)
+{
+ return hdr->apps[hdr->num_apps + app];
+}
/**
* struct acr_r352_ls_func - manages a single LS firmware
@@ -157,6 +53,7 @@ struct hsf_load_header {
* @generate_bl_desc: function called on a block of bl_desc_size to generate the
* proper bootloader descriptor for this LS firmware
* @bl_desc_size: size of the bootloader descriptor
+ * @post_run: hook called right after the ACR is executed
* @lhdr_flags: LS flags
*/
struct acr_r352_ls_func {
@@ -164,6 +61,7 @@ struct acr_r352_ls_func {
void (*generate_bl_desc)(const struct nvkm_acr *,
const struct ls_ucode_img *, u64, void *);
u32 bl_desc_size;
+ void (*post_run)(const struct nvkm_acr *, const struct nvkm_secboot *);
u32 lhdr_flags;
};
@@ -179,13 +77,15 @@ struct acr_r352;
struct acr_r352_func {
void (*generate_hs_bl_desc)(const struct hsf_load_header *, void *,
u64);
+ void (*fixup_hs_desc)(struct acr_r352 *, struct nvkm_secboot *, void *);
u32 hs_bl_desc_size;
+ bool shadow_blob;
struct ls_ucode_img *(*ls_ucode_img_load)(const struct acr_r352 *,
enum nvkm_secboot_falcon);
int (*ls_fill_headers)(struct acr_r352 *, struct list_head *);
int (*ls_write_wpr)(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u32);
+ struct nvkm_gpuobj *, u64);
const struct acr_r352_ls_func *ls_func[NVKM_SECBOOT_FALCON_END];
};
@@ -204,19 +104,22 @@ struct acr_r352 {
struct nvkm_gpuobj *load_blob;
struct {
struct hsf_load_header load_bl_header;
- struct hsf_load_header_app __load_apps[ACR_R352_MAX_APPS];
+ u32 __load_apps[ACR_R352_MAX_APPS * 2];
};
/* HS FW - unlock WPR region (dGPU only) */
struct nvkm_gpuobj *unload_blob;
struct {
struct hsf_load_header unload_bl_header;
- struct hsf_load_header_app __unload_apps[ACR_R352_MAX_APPS];
+ u32 __unload_apps[ACR_R352_MAX_APPS * 2];
};
/* HS bootloader */
void *hsbl_blob;
+ /* HS bootloader for unload blob, if using a different falcon */
+ void *hsbl_unload_blob;
+
/* LS FWs, to be loaded by the HS ACR */
struct nvkm_gpuobj *ls_blob;
@@ -245,6 +148,8 @@ struct ls_ucode_img *acr_r352_ls_ucode_img_load(const struct acr_r352 *,
enum nvkm_secboot_falcon);
int acr_r352_ls_fill_headers(struct acr_r352 *, struct list_head *);
int acr_r352_ls_write_wpr(struct acr_r352 *, struct list_head *,
- struct nvkm_gpuobj *, u32);
+ struct nvkm_gpuobj *, u64);
+
+void acr_r352_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
#endif
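The fixed struct hsf_load_header_app array is gone: apps[] is now a flat u32 blob carrying all offsets first and then all sizes, which is why acr_r352_prepare_hs_blob copies 2 * num_apps words and the per-ACR storage doubled to ACR_R352_MAX_APPS * 2. A sketch of the layout the new inline accessors assume:

	/* apps[] layout for num_apps == N:
	 *   apps[0] .. apps[N-1]  : sec_code_off for apps 0..N-1
	 *   apps[N] .. apps[2N-1] : sec_code_size for apps 0..N-1
	 */
	u32 off  = hsf_load_header_app_off(hdr, 0);	/* apps[0]            */
	u32 size = hsf_load_header_app_size(hdr, 0);	/* apps[num_apps + 0] */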
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
index f0aff1d98474..14b36ef93628 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.c
@@ -20,58 +20,23 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "acr_r352.h"
+#include "acr_r361.h"
#include <engine/falcon.h>
-
-/**
- * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
- * @signature: 16B signature for secure code. 0s if no secure code
- * @ctx_dma: DMA context to be used by BL while loading code/data
- * @code_dma_base: 256B-aligned Physical FB Address where code is located
- * (falcon's $xcbase register)
- * @non_sec_code_off: offset from code_dma_base where the non-secure code is
- * located. The offset must be multiple of 256 to help perf
- * @non_sec_code_size: the size of the nonSecure code part.
- * @sec_code_off: offset from code_dma_base where the secure code is
- * located. The offset must be multiple of 256 to help perf
- * @sec_code_size: offset from code_dma_base where the secure code is
- * located. The offset must be multiple of 256 to help perf
- * @code_entry_point: code entry point which will be invoked by BL after
- * code is loaded.
- * @data_dma_base: 256B aligned Physical FB Address where data is located.
- * (falcon's $xdbase register)
- * @data_size: size of data block. Should be multiple of 256B
- *
- * Structure used by the bootloader to load the rest of the code. This has
- * to be filled by host and copied into DMEM at offset provided in the
- * hsflcn_bl_desc.bl_desc_dmem_load_off.
- */
-struct acr_r361_flcn_bl_desc {
- u32 reserved[4];
- u32 signature[4];
- u32 ctx_dma;
- struct flcn_u64 code_dma_base;
- u32 non_sec_code_off;
- u32 non_sec_code_size;
- u32 sec_code_off;
- u32 sec_code_size;
- u32 code_entry_point;
- struct flcn_u64 data_dma_base;
- u32 data_size;
-};
+#include <core/msgqueue.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
static void
acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
- const struct ls_ucode_img *_img, u64 wpr_addr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
void *_desc)
{
- struct ls_ucode_img_r352 *img = ls_ucode_img_r352(_img);
struct acr_r361_flcn_bl_desc *desc = _desc;
- const struct ls_ucode_img_desc *pdesc = &img->base.ucode_desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
u64 base, addr_code, addr_data;
- base = wpr_addr + img->lsb_header.ucode_off + pdesc->app_start_offset;
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
addr_code = base + pdesc->app_resident_code_offset;
addr_data = base + pdesc->app_resident_data_offset;
@@ -84,7 +49,7 @@ acr_r361_generate_flcn_bl_desc(const struct nvkm_acr *acr,
desc->data_size = pdesc->app_resident_data_size;
}
-static void
+void
acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
u64 offset)
{
@@ -94,8 +59,8 @@ acr_r361_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
bl_desc->code_dma_base = u64_to_flcn64(offset);
bl_desc->non_sec_code_off = hdr->non_sec_code_off;
bl_desc->non_sec_code_size = hdr->non_sec_code_size;
- bl_desc->sec_code_off = hdr->app[0].sec_code_off;
- bl_desc->sec_code_size = hdr->app[0].sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
bl_desc->code_entry_point = 0;
bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
bl_desc->data_size = hdr->data_size;
@@ -117,8 +82,100 @@ acr_r361_ls_gpccs_func = {
.lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
};
+struct acr_r361_pmu_bl_desc {
+ u32 reserved;
+ u32 dma_idx;
+ struct flcn_u64 code_dma_base;
+ u32 total_code_size;
+ u32 code_size_to_load;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+ struct flcn_u64 overlay_dma_base;
+ u32 argc;
+ u32 argv;
+};
+
+static void
+acr_r361_generate_pmu_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
+ struct acr_r361_pmu_bl_desc *desc = _desc;
+ u64 base, addr_code, addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+ addr_args = pmu->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->dma_idx = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->total_code_size = pdesc->app_size;
+ desc->code_size_to_load = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->overlay_dma_base = u64_to_flcn64(addr_code);
+ desc->argc = 1;
+ desc->argv = addr_args;
+}
+
+const struct acr_r352_ls_func
+acr_r361_ls_pmu_func = {
+ .load = acr_ls_ucode_load_pmu,
+ .generate_bl_desc = acr_r361_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+ .post_run = acr_ls_pmu_post_run,
+};
+
+static void
+acr_r361_generate_sec2_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_sec2 *sec = acr->subdev->device->sec2;
+ struct acr_r361_pmu_bl_desc *desc = _desc;
+ u64 base, addr_code, addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ /* For some reason we should not add app_resident_code_offset here */
+ addr_code = base;
+ addr_data = base + pdesc->app_resident_data_offset;
+ addr_args = sec->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->dma_idx = FALCON_SEC2_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->total_code_size = pdesc->app_size;
+ desc->code_size_to_load = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->overlay_dma_base = u64_to_flcn64(addr_code);
+ desc->argc = 1;
+ /* args are stored at the beginning of EMEM */
+ desc->argv = 0x01000000;
+}
+
+const struct acr_r352_ls_func
+acr_r361_ls_sec2_func = {
+ .load = acr_ls_ucode_load_sec2,
+ .generate_bl_desc = acr_r361_generate_sec2_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r361_pmu_bl_desc),
+ .post_run = acr_ls_sec2_post_run,
+};
+
+
const struct acr_r352_func
acr_r361_func = {
+ .fixup_hs_desc = acr_r352_fixup_hs_desc,
.generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
.hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
.ls_ucode_img_load = acr_r352_ls_ucode_img_load,
@@ -127,6 +184,8 @@ acr_r361_func = {
.ls_func = {
[NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
[NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
},
};
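The r361 descriptors carry DMA addresses as struct flcn_u64 rather than shifted u32 values, so buffers above 4 GB survive intact. A hedged sketch of the shape of that type and its conversion helper, which these hunks assume from the nvkm falcon headers:

	/* Assumed definition (lives in the falcon headers, shown for context). */
	struct flcn_u64 {
		u32 lo;
		u32 hi;
	};

	static inline struct flcn_u64
	u64_to_flcn64(u64 u)
	{
		struct flcn_u64 ret;

		ret.hi = upper_32_bits(u);
		ret.lo = lower_32_bits(u);
		return ret;
	}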
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
new file mode 100644
index 000000000000..f9f978daadb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r361.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_ACR_R361_H__
+#define __NVKM_SECBOOT_ACR_R361_H__
+
+#include "acr_r352.h"
+
+/**
+ * struct acr_r361_flcn_bl_desc - DMEM bootloader descriptor
+ * @signature: 16B signature for secure code. 0s if no secure code
+ * @ctx_dma: DMA context to be used by BL while loading code/data
+ * @code_dma_base: 256B-aligned Physical FB Address where code is located
+ * (falcon's $xcbase register)
+ * @non_sec_code_off: offset from code_dma_base where the non-secure code is
+ * located. The offset must be multiple of 256 to help perf
+ * @non_sec_code_size: the size of the nonSecure code part.
+ * @sec_code_off: offset from code_dma_base where the secure code is
+ * located. The offset must be multiple of 256 to help perf
+ * @sec_code_size: offset from code_dma_base where the secure code is
+ * located. The offset must be multiple of 256 to help perf
+ * @code_entry_point: code entry point which will be invoked by BL after
+ * code is loaded.
+ * @data_dma_base: 256B aligned Physical FB Address where data is located.
+ * (falcon's $xdbase register)
+ * @data_size: size of data block. Should be multiple of 256B
+ *
+ * Structure used by the bootloader to load the rest of the code. This has
+ * to be filled by host and copied into DMEM at offset provided in the
+ * hsflcn_bl_desc.bl_desc_dmem_load_off.
+ */
+struct acr_r361_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+};
+
+void acr_r361_generate_hs_bl_desc(const struct hsf_load_header *, void *, u64);
+
+extern const struct acr_r352_ls_func acr_r361_ls_fecs_func;
+extern const struct acr_r352_ls_func acr_r361_ls_gpccs_func;
+extern const struct acr_r352_ls_func acr_r361_ls_pmu_func;
+extern const struct acr_r352_ls_func acr_r361_ls_sec2_func;
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
new file mode 100644
index 000000000000..30cf04109991
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r364.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r361.h"
+
+#include <core/gpuobj.h>
+
+/*
+ * r364 ACR: hsflcn_desc structure has changed to introduce the shadow_mem
+ * parameter.
+ */
+
+struct acr_r364_hsflcn_desc {
+ union {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ } ucode_reserved_space;
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_memory_range;
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ u32 shadow_mem_start_addr;
+ } region_props[2];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+static void
+acr_r364_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
+ void *_desc)
+{
+ struct acr_r364_hsflcn_desc *desc = _desc;
+ struct nvkm_gpuobj *ls_blob = acr->ls_blob;
+
+ /* WPR region information if WPR is not fixed */
+ if (sb->wpr_size == 0) {
+ u64 wpr_start = ls_blob->addr;
+ u64 wpr_end = ls_blob->addr + ls_blob->size;
+
+ if (acr->func->shadow_blob)
+ wpr_start += ls_blob->size / 2;
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ if (acr->func->shadow_blob)
+ desc->regions.region_props[0].shadow_mem_start_addr =
+ ls_blob->addr >> 8;
+ else
+ desc->regions.region_props[0].shadow_mem_start_addr = 0;
+ } else {
+ desc->ucode_blob_base = ls_blob->addr;
+ desc->ucode_blob_size = ls_blob->size;
+ }
+}
+
+const struct acr_r352_func
+acr_r364_func = {
+ .fixup_hs_desc = acr_r364_fixup_hs_desc,
+ .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ .ls_ucode_img_load = acr_r352_ls_ucode_img_load,
+ .ls_fill_headers = acr_r352_ls_fill_headers,
+ .ls_write_wpr = acr_r352_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
+ },
+};
+
+
+struct nvkm_acr *
+acr_r364_new(unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r364_func, NVKM_SECBOOT_FALCON_PMU,
+ managed_falcons);
+}
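The r364 region descriptors store addresses in 256-byte units, hence the right shift by 8 before each write. A worked example for the non-fixed-WPR path above, assuming a shadow blob at 0x1_0000_0000 of size 0x80000:

	/* wpr_start = 0x100040000 >> 8 -> start_addr            = 0x1000400
	 * wpr_end   = 0x100080000 >> 8 -> end_addr              = 0x1000800
	 * ls_blob->addr >> 8           -> shadow_mem_start_addr = 0x1000000
	 */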
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
new file mode 100644
index 000000000000..f860713642f1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.c
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r367.h"
+#include "acr_r361.h"
+
+#include <core/gpuobj.h>
+
+/*
+ * r367 ACR: new LS signature format requires a rewrite of LS firmware and
+ * blob creation functions. Also the hsflcn_desc layout has changed slightly.
+ */
+
+#define LSF_LSB_DEPMAP_SIZE 11
+
+/**
+ * struct acr_r367_lsf_lsb_header - LS firmware header
+ *
+ * See also struct acr_r352_lsf_lsb_header for documentation.
+ */
+struct acr_r367_lsf_lsb_header {
+ /**
+ * LS falcon signatures
+ * @prd_keys: signature to use in production mode
+	 * @dbg_keys: signature to use in debug mode
+	 * @b_prd_present: whether the production key is present
+	 * @b_dbg_present: whether the debug key is present
+ * @falcon_id: ID of the falcon the ucode applies to
+ */
+ struct {
+ u8 prd_keys[2][16];
+ u8 dbg_keys[2][16];
+ u32 b_prd_present;
+ u32 b_dbg_present;
+ u32 falcon_id;
+ u32 supports_versioning;
+ u32 version;
+ u32 depmap_count;
+ u8 depmap[LSF_LSB_DEPMAP_SIZE * 2 * 4];
+ u8 kdf[16];
+ } signature;
+ u32 ucode_off;
+ u32 ucode_size;
+ u32 data_size;
+ u32 bl_code_size;
+ u32 bl_imem_off;
+ u32 bl_data_off;
+ u32 bl_data_size;
+ u32 app_code_off;
+ u32 app_code_size;
+ u32 app_data_off;
+ u32 app_data_size;
+ u32 flags;
+};
+
+/**
+ * struct acr_r367_lsf_wpr_header - LS blob WPR Header
+ *
+ * See also struct acr_r352_lsf_wpr_header for documentation.
+ */
+struct acr_r367_lsf_wpr_header {
+ u32 falcon_id;
+ u32 lsb_offset;
+ u32 bootstrap_owner;
+ u32 lazy_bootstrap;
+ u32 bin_version;
+ u32 status;
+#define LSF_IMAGE_STATUS_NONE 0
+#define LSF_IMAGE_STATUS_COPY 1
+#define LSF_IMAGE_STATUS_VALIDATION_CODE_FAILED 2
+#define LSF_IMAGE_STATUS_VALIDATION_DATA_FAILED 3
+#define LSF_IMAGE_STATUS_VALIDATION_DONE 4
+#define LSF_IMAGE_STATUS_VALIDATION_SKIPPED 5
+#define LSF_IMAGE_STATUS_BOOTSTRAP_READY 6
+#define LSF_IMAGE_STATUS_REVOCATION_CHECK_FAILED 7
+};
+
+/**
+ * struct ls_ucode_img_r367 - ucode image augmented with r367 headers
+ */
+struct ls_ucode_img_r367 {
+ struct ls_ucode_img base;
+
+ struct acr_r367_lsf_wpr_header wpr_header;
+ struct acr_r367_lsf_lsb_header lsb_header;
+};
+#define ls_ucode_img_r367(i) container_of(i, struct ls_ucode_img_r367, base)
+
+struct ls_ucode_img *
+acr_r367_ls_ucode_img_load(const struct acr_r352 *acr,
+ enum nvkm_secboot_falcon falcon_id)
+{
+ const struct nvkm_subdev *subdev = acr->base.subdev;
+ struct ls_ucode_img_r367 *img;
+ int ret;
+
+ img = kzalloc(sizeof(*img), GFP_KERNEL);
+ if (!img)
+ return ERR_PTR(-ENOMEM);
+
+ img->base.falcon_id = falcon_id;
+
+ ret = acr->func->ls_func[falcon_id]->load(subdev, &img->base);
+ if (ret) {
+ kfree(img->base.ucode_data);
+ kfree(img->base.sig);
+ kfree(img);
+ return ERR_PTR(ret);
+ }
+
+ /* Check that the signature size matches our expectations... */
+ if (img->base.sig_size != sizeof(img->lsb_header.signature)) {
+ nvkm_error(subdev, "invalid signature size for %s falcon!\n",
+ nvkm_secboot_falcon_name[falcon_id]);
+ kfree(img->base.ucode_data);
+ kfree(img->base.sig);
+ kfree(img);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Copy signature to the right place */
+ memcpy(&img->lsb_header.signature, img->base.sig, img->base.sig_size);
+
+ /* Not needed? The signature should already have the right value. */
+ img->lsb_header.signature.falcon_id = falcon_id;
+
+ return &img->base;
+}
+
+#define LSF_LSB_HEADER_ALIGN 256
+#define LSF_BL_DATA_ALIGN 256
+#define LSF_BL_DATA_SIZE_ALIGN 256
+#define LSF_BL_CODE_SIZE_ALIGN 256
+#define LSF_UCODE_DATA_ALIGN 4096
+
+static u32
+acr_r367_ls_img_fill_headers(struct acr_r352 *acr,
+ struct ls_ucode_img_r367 *img, u32 offset)
+{
+ struct ls_ucode_img *_img = &img->base;
+ struct acr_r367_lsf_wpr_header *whdr = &img->wpr_header;
+ struct acr_r367_lsf_lsb_header *lhdr = &img->lsb_header;
+ struct ls_ucode_img_desc *desc = &_img->ucode_desc;
+ const struct acr_r352_ls_func *func =
+ acr->func->ls_func[_img->falcon_id];
+
+ /* Fill WPR header */
+ whdr->falcon_id = _img->falcon_id;
+ whdr->bootstrap_owner = acr->base.boot_falcon;
+ whdr->bin_version = lhdr->signature.version;
+ whdr->status = LSF_IMAGE_STATUS_COPY;
+
+ /* Skip bootstrapping falcons started by someone other than the ACR */
+ if (acr->lazy_bootstrap & BIT(_img->falcon_id))
+ whdr->lazy_bootstrap = 1;
+
+ /* Align, save off, and include the LSB header size */
+ offset = ALIGN(offset, LSF_LSB_HEADER_ALIGN);
+ whdr->lsb_offset = offset;
+ offset += sizeof(*lhdr);
+
+ /*
+ * Align, save off, and include the original (static) ucode
+ * image size
+ */
+ offset = ALIGN(offset, LSF_UCODE_DATA_ALIGN);
+ _img->ucode_off = lhdr->ucode_off = offset;
+ offset += _img->ucode_size;
+
+ /*
+ * For falcons that use a boot loader (BL), we append a loader
+ * desc structure on the end of the ucode image and consider
+ * this the boot loader data. The host will then copy the loader
+ * desc args to this space within the WPR region (before locking
+ * down) and the HS bin will then copy them to DMEM 0 for the
+ * loader.
+ */
+ lhdr->bl_code_size = ALIGN(desc->bootloader_size,
+ LSF_BL_CODE_SIZE_ALIGN);
+ lhdr->ucode_size = ALIGN(desc->app_resident_data_offset,
+ LSF_BL_CODE_SIZE_ALIGN) + lhdr->bl_code_size;
+ lhdr->data_size = ALIGN(desc->app_size, LSF_BL_CODE_SIZE_ALIGN) +
+ lhdr->bl_code_size - lhdr->ucode_size;
+ /*
+ * Though the BL is located at offset 0 of the image, its VA is
+ * different to make sure that it doesn't collide with the actual
+ * OS VA range.
+ */
+ lhdr->bl_imem_off = desc->bootloader_imem_offset;
+ lhdr->app_code_off = desc->app_start_offset +
+ desc->app_resident_code_offset;
+ lhdr->app_code_size = desc->app_resident_code_size;
+ lhdr->app_data_off = desc->app_start_offset +
+ desc->app_resident_data_offset;
+ lhdr->app_data_size = desc->app_resident_data_size;
+
+ lhdr->flags = func->lhdr_flags;
+ if (_img->falcon_id == acr->base.boot_falcon)
+ lhdr->flags |= LSF_FLAG_DMACTL_REQ_CTX;
+
+ /* Align and save off BL descriptor size */
+ lhdr->bl_data_size = ALIGN(func->bl_desc_size, LSF_BL_DATA_SIZE_ALIGN);
+
+ /*
+ * Align, save off, and include the additional BL data
+ */
+ offset = ALIGN(offset, LSF_BL_DATA_ALIGN);
+ lhdr->bl_data_off = offset;
+ offset += lhdr->bl_data_size;
+
+ return offset;
+}
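+
+/*
+ * For illustration, the per-image layout produced by
+ * acr_r367_ls_img_fill_headers() above, given an incoming offset just past
+ * the WPR header array:
+ *
+ *   ALIGN(offset, LSF_LSB_HEADER_ALIGN)  -> LSB header
+ *   ALIGN(offset, LSF_UCODE_DATA_ALIGN)  -> ucode image (code + data)
+ *   ALIGN(offset, LSF_BL_DATA_ALIGN)     -> generated BL descriptor data
+ *
+ * The returned offset is the first free byte after the BL data and is fed
+ * back in for the next managed falcon.
+ */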
+
+int
+acr_r367_ls_fill_headers(struct acr_r352 *acr, struct list_head *imgs)
+{
+ struct ls_ucode_img_r367 *img;
+ struct list_head *l;
+ u32 count = 0;
+ u32 offset;
+
+ /* Count the number of images to manage */
+ list_for_each(l, imgs)
+ count++;
+
+ /*
+ * Start with an array of WPR headers at the base of the WPR.
+ * The expectation here is that the secure falcon will do a single DMA
+ * read of this array and cache it internally, so it is OK to pack these.
+ * We also add 1 to the falcon count to leave room for the terminator
+ * that marks the end of the array.
+ */
+ offset = sizeof(img->wpr_header) * (count + 1);
+
+ /*
+ * Walk the managed falcons, accounting for the LSB structs
+ * as well as the ucode images.
+ */
+ list_for_each_entry(img, imgs, base.node) {
+ offset = acr_r367_ls_img_fill_headers(acr, img, offset);
+ }
+
+ return offset;
+}
+
+int
+acr_r367_ls_write_wpr(struct acr_r352 *acr, struct list_head *imgs,
+ struct nvkm_gpuobj *wpr_blob, u64 wpr_addr)
+{
+ struct ls_ucode_img *_img;
+ u32 pos = 0;
+
+ nvkm_kmap(wpr_blob);
+
+ list_for_each_entry(_img, imgs, node) {
+ struct ls_ucode_img_r367 *img = ls_ucode_img_r367(_img);
+ const struct acr_r352_ls_func *ls_func =
+ acr->func->ls_func[_img->falcon_id];
+ u8 gdesc[ls_func->bl_desc_size];
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, pos, &img->wpr_header,
+ sizeof(img->wpr_header));
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->wpr_header.lsb_offset,
+ &img->lsb_header, sizeof(img->lsb_header));
+
+ /* Generate and write BL descriptor */
+ memset(gdesc, 0, ls_func->bl_desc_size);
+ ls_func->generate_bl_desc(&acr->base, _img, wpr_addr, gdesc);
+
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.bl_data_off,
+ gdesc, ls_func->bl_desc_size);
+
+ /* Copy ucode */
+ nvkm_gpuobj_memcpy_to(wpr_blob, img->lsb_header.ucode_off,
+ _img->ucode_data, _img->ucode_size);
+
+ pos += sizeof(img->wpr_header);
+ }
+
+ nvkm_wo32(wpr_blob, pos, NVKM_SECBOOT_FALCON_INVALID);
+
+ nvkm_done(wpr_blob);
+
+ return 0;
+}
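+
+/*
+ * Resulting WPR blob layout, sketched for illustration:
+ *
+ *   +---------------------------+ 0
+ *   | WPR headers [0..count]    | (last entry: falcon_id = INVALID)
+ *   +---------------------------+ wpr_header.lsb_offset
+ *   | LSB header                |
+ *   +---------------------------+ lsb_header.ucode_off
+ *   | ucode code + data         |
+ *   +---------------------------+ lsb_header.bl_data_off
+ *   | generated BL descriptor   |
+ *   +---------------------------+
+ *
+ * The last three blocks repeat for each managed falcon.
+ */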
+
+struct acr_r367_hsflcn_desc {
+ u8 reserved_dmem[0x200];
+ u32 signatures[4];
+ u32 wpr_region_id;
+ u32 wpr_offset;
+ u32 mmu_memory_range;
+#define FLCN_ACR_MAX_REGIONS 2
+ struct {
+ u32 no_regions;
+ struct {
+ u32 start_addr;
+ u32 end_addr;
+ u32 region_id;
+ u32 read_mask;
+ u32 write_mask;
+ u32 client_mask;
+ u32 shadow_mem_start_addr;
+ } region_props[FLCN_ACR_MAX_REGIONS];
+ } regions;
+ u32 ucode_blob_size;
+ u64 ucode_blob_base __aligned(8);
+ struct {
+ u32 vpr_enabled;
+ u32 vpr_start;
+ u32 vpr_end;
+ u32 hdcp_policies;
+ } vpr_desc;
+};
+
+void
+acr_r367_fixup_hs_desc(struct acr_r352 *acr, struct nvkm_secboot *sb,
+ void *_desc)
+{
+ struct acr_r367_hsflcn_desc *desc = _desc;
+ struct nvkm_gpuobj *ls_blob = acr->ls_blob;
+
+ /* WPR region information if WPR is not fixed */
+ if (sb->wpr_size == 0) {
+ u64 wpr_start = ls_blob->addr;
+ u64 wpr_end = ls_blob->addr + ls_blob->size;
+
+ if (acr->func->shadow_blob)
+ wpr_start += ls_blob->size / 2;
+
+ desc->wpr_region_id = 1;
+ desc->regions.no_regions = 2;
+ desc->regions.region_props[0].start_addr = wpr_start >> 8;
+ desc->regions.region_props[0].end_addr = wpr_end >> 8;
+ desc->regions.region_props[0].region_id = 1;
+ desc->regions.region_props[0].read_mask = 0xf;
+ desc->regions.region_props[0].write_mask = 0xc;
+ desc->regions.region_props[0].client_mask = 0x2;
+ if (acr->func->shadow_blob)
+ desc->regions.region_props[0].shadow_mem_start_addr =
+ ls_blob->addr >> 8;
+ else
+ desc->regions.region_props[0].shadow_mem_start_addr = 0;
+ } else {
+ desc->ucode_blob_base = ls_blob->addr;
+ desc->ucode_blob_size = ls_blob->size;
+ }
+}
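+
+/*
+ * Worked example for the non-fixed WPR case above, with hypothetical
+ * numbers: for a 0x40000-byte ls_blob at address 0x1000000 with
+ * shadow_blob set, the shadow copy occupies the first half (0x1000000)
+ * while the WPR proper covers the second half (0x1020000..0x1040000).
+ * All addresses are programmed in 256-byte units, hence the >> 8 shifts.
+ */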
+
+const struct acr_r352_func
+acr_r367_func = {
+ .fixup_hs_desc = acr_r367_fixup_hs_desc,
+ .generate_hs_bl_desc = acr_r361_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r361_flcn_bl_desc),
+ .shadow_blob = true,
+ .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
+ .ls_fill_headers = acr_r367_ls_fill_headers,
+ .ls_write_wpr = acr_r367_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r361_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r361_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_PMU] = &acr_r361_ls_pmu_func,
+ [NVKM_SECBOOT_FALCON_SEC2] = &acr_r361_ls_sec2_func,
+ },
+};
+
+struct nvkm_acr *
+acr_r367_new(enum nvkm_secboot_falcon boot_falcon,
+ unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r367_func, boot_falcon, managed_falcons);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
new file mode 100644
index 000000000000..ec6a71ca36be
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r367.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_ACR_R367_H__
+#define __NVKM_SECBOOT_ACR_R367_H__
+
+#include "acr_r352.h"
+
+void acr_r367_fixup_hs_desc(struct acr_r352 *, struct nvkm_secboot *, void *);
+
+struct ls_ucode_img *acr_r367_ls_ucode_img_load(const struct acr_r352 *,
+ enum nvkm_secboot_falcon);
+int acr_r367_ls_fill_headers(struct acr_r352 *, struct list_head *);
+int acr_r367_ls_write_wpr(struct acr_r352 *, struct list_head *,
+ struct nvkm_gpuobj *, u64);
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
new file mode 100644
index 000000000000..ddb795bb007b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/acr_r375.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr_r367.h"
+
+#include <engine/falcon.h>
+#include <core/msgqueue.h>
+#include <subdev/pmu.h>
+
+/*
+ * r375 ACR: similar to r367, but with a unified bootloader descriptor
+ * structure for GR and PMU falcons.
+ */
+
+/* Same as acr_r361_flcn_bl_desc, plus argc/argv */
+struct acr_r375_flcn_bl_desc {
+ u32 reserved[4];
+ u32 signature[4];
+ u32 ctx_dma;
+ struct flcn_u64 code_dma_base;
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 sec_code_off;
+ u32 sec_code_size;
+ u32 code_entry_point;
+ struct flcn_u64 data_dma_base;
+ u32 data_size;
+ u32 argc;
+ u32 argv;
+};
+
+static void
+acr_r375_generate_flcn_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ struct acr_r375_flcn_bl_desc *desc = _desc;
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ u64 base, addr_code, addr_data;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+}
+
+static void
+acr_r375_generate_hs_bl_desc(const struct hsf_load_header *hdr, void *_bl_desc,
+ u64 offset)
+{
+ struct acr_r375_flcn_bl_desc *bl_desc = _bl_desc;
+
+ bl_desc->ctx_dma = FALCON_DMAIDX_VIRT;
+ bl_desc->non_sec_code_off = hdr->non_sec_code_off;
+ bl_desc->non_sec_code_size = hdr->non_sec_code_size;
+ bl_desc->sec_code_off = hsf_load_header_app_off(hdr, 0);
+ bl_desc->sec_code_size = hsf_load_header_app_size(hdr, 0);
+ bl_desc->code_entry_point = 0;
+ bl_desc->code_dma_base = u64_to_flcn64(offset);
+ bl_desc->data_dma_base = u64_to_flcn64(offset + hdr->data_dma_base);
+ bl_desc->data_size = hdr->data_size;
+}
+
+const struct acr_r352_ls_func
+acr_r375_ls_fecs_func = {
+ .load = acr_ls_ucode_load_fecs,
+ .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+};
+
+const struct acr_r352_ls_func
+acr_r375_ls_gpccs_func = {
+ .load = acr_ls_ucode_load_gpccs,
+ .generate_bl_desc = acr_r375_generate_flcn_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ /* GPCCS will be loaded using PRI */
+ .lhdr_flags = LSF_FLAG_FORCE_PRIV_LOAD,
+};
+
+
+static void
+acr_r375_generate_pmu_bl_desc(const struct nvkm_acr *acr,
+ const struct ls_ucode_img *img, u64 wpr_addr,
+ void *_desc)
+{
+ const struct ls_ucode_img_desc *pdesc = &img->ucode_desc;
+ const struct nvkm_pmu *pmu = acr->subdev->device->pmu;
+ struct acr_r375_flcn_bl_desc *desc = _desc;
+ u64 base, addr_code, addr_data;
+ u32 addr_args;
+
+ base = wpr_addr + img->ucode_off + pdesc->app_start_offset;
+ addr_code = base + pdesc->app_resident_code_offset;
+ addr_data = base + pdesc->app_resident_data_offset;
+ addr_args = pmu->falcon->data.limit;
+ addr_args -= NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ desc->ctx_dma = FALCON_DMAIDX_UCODE;
+ desc->code_dma_base = u64_to_flcn64(addr_code);
+ desc->non_sec_code_off = pdesc->app_resident_code_offset;
+ desc->non_sec_code_size = pdesc->app_resident_code_size;
+ desc->code_entry_point = pdesc->app_imem_entry;
+ desc->data_dma_base = u64_to_flcn64(addr_data);
+ desc->data_size = pdesc->app_resident_data_size;
+ desc->argc = 1;
+ desc->argv = addr_args;
+}
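+
+/*
+ * Sketch of the argc/argv handling above: the command line is placed at
+ * the very top of the PMU's data segment,
+ *
+ *   addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
+ *
+ * and acr_ls_pmu_post_run() later computes the same offset to write the
+ * message queue command line there once the falcon is booted.
+ */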
+
+const struct acr_r352_ls_func
+acr_r375_ls_pmu_func = {
+ .load = acr_ls_ucode_load_pmu,
+ .generate_bl_desc = acr_r375_generate_pmu_bl_desc,
+ .bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .post_run = acr_ls_pmu_post_run,
+};
+
+
+const struct acr_r352_func
+acr_r375_func = {
+ .fixup_hs_desc = acr_r367_fixup_hs_desc,
+ .generate_hs_bl_desc = acr_r375_generate_hs_bl_desc,
+ .hs_bl_desc_size = sizeof(struct acr_r375_flcn_bl_desc),
+ .shadow_blob = true,
+ .ls_ucode_img_load = acr_r367_ls_ucode_img_load,
+ .ls_fill_headers = acr_r367_ls_fill_headers,
+ .ls_write_wpr = acr_r367_ls_write_wpr,
+ .ls_func = {
+ [NVKM_SECBOOT_FALCON_FECS] = &acr_r375_ls_fecs_func,
+ [NVKM_SECBOOT_FALCON_GPCCS] = &acr_r375_ls_gpccs_func,
+ [NVKM_SECBOOT_FALCON_PMU] = &acr_r375_ls_pmu_func,
+ },
+};
+
+struct nvkm_acr *
+acr_r375_new(enum nvkm_secboot_falcon boot_falcon,
+ unsigned long managed_falcons)
+{
+ return acr_r352_new_(&acr_r375_func, boot_falcon, managed_falcons);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 27c9dfffb9a6..5c11e8c50964 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -87,6 +87,7 @@
#include <subdev/mc.h>
#include <subdev/timer.h>
#include <subdev/pmu.h>
+#include <engine/sec2.h>
const char *
nvkm_secboot_falcon_name[] = {
@@ -94,6 +95,7 @@ nvkm_secboot_falcon_name[] = {
[NVKM_SECBOOT_FALCON_RESERVED] = "<reserved>",
[NVKM_SECBOOT_FALCON_FECS] = "FECS",
[NVKM_SECBOOT_FALCON_GPCCS] = "GPCCS",
+ [NVKM_SECBOOT_FALCON_SEC2] = "SEC2",
[NVKM_SECBOOT_FALCON_END] = "<invalid>",
};
/**
@@ -131,13 +133,20 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
switch (sb->acr->boot_falcon) {
case NVKM_SECBOOT_FALCON_PMU:
- sb->boot_falcon = subdev->device->pmu->falcon;
+ sb->halt_falcon = sb->boot_falcon = subdev->device->pmu->falcon;
+ break;
+ case NVKM_SECBOOT_FALCON_SEC2:
+ /* we must keep SEC2 alive forever since ACR will run on it */
+ nvkm_engine_ref(&subdev->device->sec2->engine);
+ sb->boot_falcon = subdev->device->sec2->falcon;
+ sb->halt_falcon = subdev->device->pmu->falcon;
break;
default:
nvkm_error(subdev, "Unmanaged boot falcon %s!\n",
nvkm_secboot_falcon_name[sb->acr->boot_falcon]);
return -EINVAL;
}
+ nvkm_debug(subdev, "using %s falcon for ACR\n", sb->boot_falcon->name);
/* Call chip-specific init function */
if (sb->func->oneinit)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index 813c4eb0b25f..73ca1203281d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -34,12 +34,13 @@
*
*/
int
-gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
+gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
+ struct nvkm_falcon *falcon)
{
struct gm200_secboot *gsb = gm200_secboot(sb);
struct nvkm_subdev *subdev = &gsb->base.subdev;
- struct nvkm_falcon *falcon = gsb->base.boot_falcon;
struct nvkm_vma vma;
+ u32 start_address;
int ret;
ret = nvkm_falcon_get(falcon, subdev);
@@ -60,10 +61,12 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
nvkm_falcon_bind_context(falcon, gsb->inst);
/* Load the HS bootloader into the falcon's IMEM/DMEM */
- ret = sb->acr->func->load(sb->acr, &gsb->base, blob, vma.offset);
- if (ret)
+ ret = sb->acr->func->load(sb->acr, falcon, blob, vma.offset);
+ if (ret < 0)
goto end;
+ start_address = ret;
+
/* Disable interrupts as we will poll for the HALT bit */
nvkm_mc_intr_mask(sb->subdev.device, falcon->owner->index, false);
@@ -71,19 +74,17 @@ gm200_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob)
nvkm_falcon_wr32(falcon, 0x040, 0xdeada5a5);
/* Start the HS bootloader */
- nvkm_falcon_set_start_addr(falcon, sb->acr->start_address);
+ nvkm_falcon_set_start_addr(falcon, start_address);
nvkm_falcon_start(falcon);
ret = nvkm_falcon_wait_for_halt(falcon, 100);
if (ret)
goto end;
- /* If mailbox register contains an error code, then ACR has failed */
+ /*
+ * The mailbox register contains the (positive) error code; return it
+ * to the caller.
+ */
ret = nvkm_falcon_rd32(falcon, 0x040);
- if (ret) {
- nvkm_error(subdev, "ACR boot failed, ret 0x%08x", ret);
- ret = -EINVAL;
- goto end;
- }
end:
/* Reenable interrupts */
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
index 45adf1a3bc20..6dc9fc384f24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.h
@@ -38,6 +38,7 @@ struct gm200_secboot {
int gm200_secboot_oneinit(struct nvkm_secboot *);
int gm200_secboot_fini(struct nvkm_secboot *, bool);
void *gm200_secboot_dtor(struct nvkm_secboot *);
-int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *);
+int gm200_secboot_run_blob(struct nvkm_secboot *, struct nvkm_gpuobj *,
+ struct nvkm_falcon *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 6707b8edc086..29e6f73dfd7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -107,9 +107,12 @@ gm20b_secboot_new(struct nvkm_device *device, int index,
struct gm200_secboot *gsb;
struct nvkm_acr *acr;
- acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS));
+ acr = acr_r352_new(BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_PMU));
if (IS_ERR(acr))
return PTR_ERR(acr);
+ /* Support the initial GM20B firmware release without PMU */
+ acr->optional_falcons = BIT(NVKM_SECBOOT_FALCON_PMU);
gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
if (!gsb) {
@@ -137,3 +140,6 @@ MODULE_FIRMWARE("nvidia/gm20b/gr/sw_ctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/gm20b/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/desc.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/image.bin");
+MODULE_FIRMWARE("nvidia/gm20b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
new file mode 100644
index 000000000000..f3b3c66349d2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -0,0 +1,252 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "acr.h"
+#include "gm200.h"
+
+#include "ls_ucode.h"
+#include "hs_ucode.h"
+#include <subdev/mc.h>
+#include <subdev/timer.h>
+#include <engine/falcon.h>
+#include <engine/nvdec.h>
+
+static bool
+gp102_secboot_scrub_required(struct nvkm_secboot *sb)
+{
+ struct nvkm_subdev *subdev = &sb->subdev;
+ struct nvkm_device *device = subdev->device;
+ u32 reg;
+
+ nvkm_wr32(device, 0x100cd0, 0x2);
+ reg = nvkm_rd32(device, 0x100cd0);
+
+ return (reg & BIT(4));
+}
+
+static int
+gp102_run_secure_scrub(struct nvkm_secboot *sb)
+{
+ struct nvkm_subdev *subdev = &sb->subdev;
+ struct nvkm_device *device = subdev->device;
+ struct nvkm_engine *engine;
+ struct nvkm_falcon *falcon;
+ void *scrub_image;
+ struct fw_bin_header *hsbin_hdr;
+ struct hsf_fw_header *fw_hdr;
+ struct hsf_load_header *lhdr;
+ void *scrub_data;
+ int ret;
+
+ nvkm_debug(subdev, "running VPR scrubber binary on NVDEC...\n");
+
+ engine = nvkm_engine_ref(&device->nvdec->engine);
+ if (IS_ERR(engine))
+ return PTR_ERR(engine);
+ falcon = device->nvdec->falcon;
+
+ nvkm_falcon_get(falcon, &sb->subdev);
+
+ scrub_image = hs_ucode_load_blob(subdev, falcon, "nvdec/scrubber");
+ if (IS_ERR(scrub_image)) {
+ ret = PTR_ERR(scrub_image);
+ goto end;
+ }
+
+ nvkm_falcon_reset(falcon);
+ nvkm_falcon_bind_context(falcon, NULL);
+
+ hsbin_hdr = scrub_image;
+ fw_hdr = scrub_image + hsbin_hdr->header_offset;
+ lhdr = scrub_image + fw_hdr->hdr_offset;
+ scrub_data = scrub_image + hsbin_hdr->data_offset;
+
+ nvkm_falcon_load_imem(falcon, scrub_data, lhdr->non_sec_code_off,
+ lhdr->non_sec_code_size,
+ lhdr->non_sec_code_off >> 8, 0, false);
+ nvkm_falcon_load_imem(falcon, scrub_data + lhdr->apps[0],
+ ALIGN(lhdr->apps[0], 0x100),
+ lhdr->apps[1],
+ lhdr->apps[0] >> 8, 0, true);
+ nvkm_falcon_load_dmem(falcon, scrub_data + lhdr->data_dma_base, 0,
+ lhdr->data_size, 0);
+
+ kfree(scrub_image);
+
+ nvkm_falcon_set_start_addr(falcon, 0x0);
+ nvkm_falcon_start(falcon);
+
+ ret = nvkm_falcon_wait_for_halt(falcon, 500);
+ if (ret < 0) {
+ nvkm_error(subdev, "failed to run VPR scrubber binary!\n");
+ ret = -ETIMEDOUT;
+ goto end;
+ }
+
+ /* put NVDEC back in a clean state; without a reset it will remain in HS mode */
+ nvkm_falcon_reset(falcon);
+
+ if (gp102_secboot_scrub_required(sb)) {
+ nvkm_error(subdev, "VPR scrubber binary failed!\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ nvkm_debug(subdev, "VPR scrub successfully completed\n");
+
+end:
+ nvkm_falcon_put(falcon, &sb->subdev);
+ nvkm_engine_unref(&engine);
+ return ret;
+}
+
+static int
+gp102_secboot_run_blob(struct nvkm_secboot *sb, struct nvkm_gpuobj *blob,
+ struct nvkm_falcon *falcon)
+{
+ int ret;
+
+ /* make sure the VPR region is unlocked */
+ if (gp102_secboot_scrub_required(sb)) {
+ ret = gp102_run_secure_scrub(sb);
+ if (ret)
+ return ret;
+ }
+
+ return gm200_secboot_run_blob(sb, blob, falcon);
+}
+
+static const struct nvkm_secboot_func
+gp102_secboot = {
+ .dtor = gm200_secboot_dtor,
+ .oneinit = gm200_secboot_oneinit,
+ .fini = gm200_secboot_fini,
+ .run_blob = gp102_secboot_run_blob,
+};
+
+int
+gp102_secboot_new(struct nvkm_device *device, int index,
+ struct nvkm_secboot **psb)
+{
+ int ret;
+ struct gm200_secboot *gsb;
+ struct nvkm_acr *acr;
+
+ acr = acr_r367_new(NVKM_SECBOOT_FALCON_SEC2,
+ BIT(NVKM_SECBOOT_FALCON_FECS) |
+ BIT(NVKM_SECBOOT_FALCON_GPCCS) |
+ BIT(NVKM_SECBOOT_FALCON_SEC2));
+ if (IS_ERR(acr))
+ return PTR_ERR(acr);
+
+ gsb = kzalloc(sizeof(*gsb), GFP_KERNEL);
+ if (!gsb) {
+ *psb = NULL;
+ return -ENOMEM;
+ }
+ *psb = &gsb->base;
+
+ ret = nvkm_secboot_ctor(&gp102_secboot, acr, device, index, &gsb->base);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+MODULE_FIRMWARE("nvidia/gp102/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp102/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp104/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp106/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
+MODULE_FIRMWARE("nvidia/gp107/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
new file mode 100644
index 000000000000..6b33182ddc2f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "hs_ucode.h"
+#include "ls_ucode.h"
+#include "acr.h"
+
+#include <engine/falcon.h>
+
+/**
+ * hs_ucode_patch_signature() - patch the HS blob with the correct signature
+ * for the specified falcon.
+ */
+static void
+hs_ucode_patch_signature(const struct nvkm_falcon *falcon, void *acr_image,
+ bool new_format)
+{
+ struct fw_bin_header *hsbin_hdr = acr_image;
+ struct hsf_fw_header *fw_hdr = acr_image + hsbin_hdr->header_offset;
+ void *hs_data = acr_image + hsbin_hdr->data_offset;
+ void *sig;
+ u32 sig_size;
+ u32 patch_loc, patch_sig;
+
+ /*
+ * I had the brilliant idea to "improve" the binary format by
+ * removing this useless indirection. However, to make NVIDIA files
+ * directly compatible, let's support both formats.
+ */
+ if (new_format) {
+ patch_loc = fw_hdr->patch_loc;
+ patch_sig = fw_hdr->patch_sig;
+ } else {
+ patch_loc = *(u32 *)(acr_image + fw_hdr->patch_loc);
+ patch_sig = *(u32 *)(acr_image + fw_hdr->patch_sig);
+ }
+
+ /* Falcon in debug or production mode? */
+ if (falcon->debug) {
+ sig = acr_image + fw_hdr->sig_dbg_offset;
+ sig_size = fw_hdr->sig_dbg_size;
+ } else {
+ sig = acr_image + fw_hdr->sig_prod_offset;
+ sig_size = fw_hdr->sig_prod_size;
+ }
+
+ /* Patch signature */
+ memcpy(hs_data + patch_loc, sig + patch_sig, sig_size);
+}
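+
+/*
+ * Illustrative walk-through with hypothetical offsets: for a debug-fused
+ * falcon and new_format == true, with fw_hdr->patch_loc == 0x600 and
+ * fw_hdr->patch_sig == 0, the sig_dbg_size bytes at
+ * acr_image + fw_hdr->sig_dbg_offset are copied over the HS data at
+ * hs_data + 0x600, yielding a debug-signed image.
+ */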
+
+void *
+hs_ucode_load_blob(struct nvkm_subdev *subdev, const struct nvkm_falcon *falcon,
+ const char *fw)
+{
+ void *acr_image;
+ bool new_format;
+
+ acr_image = nvkm_acr_load_firmware(subdev, fw, 0);
+ if (IS_ERR(acr_image))
+ return acr_image;
+
+ /* detect the format to determine how the signature should be patched */
+ switch (((u32 *)acr_image)[0]) {
+ case 0x3b1d14f0:
+ new_format = true;
+ break;
+ case 0x000010de:
+ new_format = false;
+ break;
+ default:
+ nvkm_error(subdev, "unknown header for HS blob %s\n", fw);
+ kfree(acr_image);
+ return ERR_PTR(-EINVAL);
+ }
+
+ hs_ucode_patch_signature(falcon, acr_image, new_format);
+
+ return acr_image;
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
new file mode 100644
index 000000000000..d8cfc6f7752a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/hs_ucode.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NVKM_SECBOOT_HS_UCODE_H__
+#define __NVKM_SECBOOT_HS_UCODE_H__
+
+#include <core/os.h>
+#include <core/subdev.h>
+
+struct nvkm_falcon;
+
+/**
+ * struct hsf_fw_header - HS firmware descriptor
+ * @sig_dbg_offset: offset of the debug signature
+ * @sig_dbg_size: size of the debug signature
+ * @sig_prod_offset: offset of the production signature
+ * @sig_prod_size: size of the production signature
+ * @patch_loc: offset of the offset (sic) of where the signature is
+ * @patch_sig: offset of the offset (sic) to add to sig_*_offset
+ * @hdr_offset: offset of the load header (see struct hs_load_header)
+ * @hdr_size: size of above header
+ *
+ * This structure is embedded in the HS firmware image at
+ * hs_bin_hdr.header_offset.
+ */
+struct hsf_fw_header {
+ u32 sig_dbg_offset;
+ u32 sig_dbg_size;
+ u32 sig_prod_offset;
+ u32 sig_prod_size;
+ u32 patch_loc;
+ u32 patch_sig;
+ u32 hdr_offset;
+ u32 hdr_size;
+};
+
+/**
+ * struct hsf_load_header - HS firmware load header
+ */
+struct hsf_load_header {
+ u32 non_sec_code_off;
+ u32 non_sec_code_size;
+ u32 data_dma_base;
+ u32 data_size;
+ u32 num_apps;
+ /*
+ * Organized as follows:
+ * - app0_code_off
+ * - app1_code_off
+ * - ...
+ * - appn_code_off
+ * - app0_code_size
+ * - app1_code_size
+ * - ...
+ * - appn_code_size
+ */
+ u32 apps[0];
+};
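+
+/*
+ * Minimal sketch of the apps[] accessors implied by the layout above
+ * (callers such as the HS BL descriptor generators use helpers of this
+ * shape; the canonical definitions live elsewhere in the secboot code):
+ *
+ *   #define hsf_load_header_app_off(hdr, app) ((hdr)->apps[(app)])
+ *   #define hsf_load_header_app_size(hdr, app) \
+ *           ((hdr)->apps[(hdr)->num_apps + (app)])
+ */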
+
+void *hs_ucode_load_blob(struct nvkm_subdev *, const struct nvkm_falcon *,
+ const char *);
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
index 00886cee57eb..4ff9138a2a83 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode.h
@@ -27,6 +27,7 @@
#include <core/subdev.h>
#include <subdev/secboot.h>
+struct nvkm_acr;
/**
* struct ls_ucode_img_desc - descriptor of firmware image
@@ -83,6 +84,7 @@ struct ls_ucode_img_desc {
* @ucode_desc: loaded or generated map of ucode_data
* @ucode_data: firmware payload (code and data)
* @ucode_size: size in bytes of data in ucode_data
+ * @ucode_off: offset of the ucode image within the WPR region
* @sig: signature for this firmware
 * @sig_size: size of the signature in bytes
*
@@ -97,6 +99,7 @@ struct ls_ucode_img {
struct ls_ucode_img_desc ucode_desc;
u8 *ucode_data;
u32 ucode_size;
+ u32 ucode_off;
u8 *sig;
u32 sig_size;
@@ -146,6 +149,9 @@ struct fw_bl_desc {
int acr_ls_ucode_load_fecs(const struct nvkm_subdev *, struct ls_ucode_img *);
int acr_ls_ucode_load_gpccs(const struct nvkm_subdev *, struct ls_ucode_img *);
-
+int acr_ls_ucode_load_pmu(const struct nvkm_subdev *, struct ls_ucode_img *);
+void acr_ls_pmu_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
+int acr_ls_ucode_load_sec2(const struct nvkm_subdev *, struct ls_ucode_img *);
+void acr_ls_sec2_post_run(const struct nvkm_acr *, const struct nvkm_secboot *);
#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
new file mode 100644
index 000000000000..ef0b298b70d7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/ls_ucode_msgqueue.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#include "ls_ucode.h"
+#include "acr.h"
+
+#include <core/firmware.h>
+#include <core/msgqueue.h>
+#include <subdev/pmu.h>
+#include <engine/sec2.h>
+
+/**
+ * acr_ls_ucode_load_msgqueue - load and prepare a ucode img for a msgqueue fw
+ *
+ * Load the LS microcode, descriptor, and signature, and pack them into a
+ * single blob.
+ */
+static int
+acr_ls_ucode_load_msgqueue(const struct nvkm_subdev *subdev, const char *name,
+ struct ls_ucode_img *img)
+{
+ const struct firmware *image, *desc, *sig;
+ char f[64];
+ int ret;
+
+ snprintf(f, sizeof(f), "%s/image", name);
+ ret = nvkm_firmware_get(subdev->device, f, &image);
+ if (ret)
+ return ret;
+ img->ucode_data = kmemdup(image->data, image->size, GFP_KERNEL);
+ nvkm_firmware_put(image);
+ if (!img->ucode_data)
+ return -ENOMEM;
+
+ snprintf(f, sizeof(f), "%s/desc", name);
+ ret = nvkm_firmware_get(subdev->device, f, &desc);
+ if (ret)
+ return ret;
+ memcpy(&img->ucode_desc, desc->data, sizeof(img->ucode_desc));
+ img->ucode_size = ALIGN(img->ucode_desc.app_start_offset +
+ img->ucode_desc.app_size, 256);
+ nvkm_firmware_put(desc);
+
+ snprintf(f, sizeof(f), "%s/sig", name);
+ ret = nvkm_firmware_get(subdev->device, f, &sig);
+ if (ret)
+ return ret;
+ img->sig_size = sig->size;
+ img->sig = kmemdup(sig->data, sig->size, GFP_KERNEL);
+ nvkm_firmware_put(sig);
+ if (!img->sig)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+acr_ls_msgqueue_post_run(struct nvkm_msgqueue *queue,
+ struct nvkm_falcon *falcon, u32 addr_args)
+{
+ u32 cmdline_size = NVKM_MSGQUEUE_CMDLINE_SIZE;
+ u8 buf[cmdline_size];
+
+ memset(buf, 0, cmdline_size);
+ nvkm_msgqueue_write_cmdline(queue, buf);
+ nvkm_falcon_load_dmem(falcon, buf, addr_args, cmdline_size, 0);
+ /* rearm the queue so it will wait for the init message */
+ nvkm_msgqueue_reinit(queue);
+}
+
+int
+acr_ls_ucode_load_pmu(const struct nvkm_subdev *subdev,
+ struct ls_ucode_img *img)
+{
+ struct nvkm_pmu *pmu = subdev->device->pmu;
+ int ret;
+
+ ret = acr_ls_ucode_load_msgqueue(subdev, "pmu", img);
+ if (ret)
+ return ret;
+
+ /* Allocate the PMU queue corresponding to the FW version */
+ ret = nvkm_msgqueue_new(img->ucode_desc.app_version, pmu->falcon,
+ &pmu->queue);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
+{
+ struct nvkm_device *device = sb->subdev.device;
+ struct nvkm_pmu *pmu = device->pmu;
+ u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE;
+
+ acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args);
+}
+
+int
+acr_ls_ucode_load_sec2(const struct nvkm_subdev *subdev,
+ struct ls_ucode_img *img)
+{
+ struct nvkm_sec2 *sec = subdev->device->sec2;
+ int ret;
+
+ ret = acr_ls_ucode_load_msgqueue(subdev, "sec2", img);
+ if (ret)
+ return ret;
+
+ /* Allocate the SEC2 queue corresponding to the FW version */
+ ret = nvkm_msgqueue_new(img->ucode_desc.app_version, sec->falcon,
+ &sec->queue);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+acr_ls_sec2_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb)
+{
+ struct nvkm_device *device = sb->subdev.device;
+ struct nvkm_sec2 *sec = device->sec2;
+ /* on SEC2, arguments are always at the beginning of EMEM */
+ u32 addr_args = 0x01000000;
+
+ acr_ls_msgqueue_post_run(sec->queue, sec->falcon, addr_args);
+}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index 936a65f5658c..885e919a8720 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -30,11 +30,10 @@ struct nvkm_secboot_func {
int (*oneinit)(struct nvkm_secboot *);
int (*fini)(struct nvkm_secboot *, bool suspend);
void *(*dtor)(struct nvkm_secboot *);
- int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *);
+ int (*run_blob)(struct nvkm_secboot *, struct nvkm_gpuobj *,
+ struct nvkm_falcon *);
};
-extern const char *nvkm_secboot_falcon_name[];
-
int nvkm_secboot_ctor(const struct nvkm_secboot_func *, struct nvkm_acr *,
struct nvkm_device *, int, struct nvkm_secboot *);
int nvkm_secboot_falcon_reset(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index efac3402f9dd..fea4957291da 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -82,7 +82,7 @@ gk104_top_oneinit(struct nvkm_top *top)
case 0x0000000a: A_(MSVLD ); break;
case 0x0000000b: A_(MSENC ); break;
case 0x0000000c: A_(VIC ); break;
- case 0x0000000d: A_(SEC ); break;
+ case 0x0000000d: A_(SEC2 ); break;
case 0x0000000e: B_(NVENC ); break;
case 0x0000000f: A_(NVENC1); break;
case 0x00000010: A_(NVDEC ); break;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index af267c35d813..ee5883f59be5 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer,
struct drm_gem_object *obj = buffer->priv;
int ret = 0;
- if (WARN_ON(!obj->filp))
- return -EINVAL;
-
ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index d12b8978142f..72e1588580a1 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2984,6 +2984,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
}
+ } else if (rdev->family == CHIP_OLAND) {
+ if ((rdev->pdev->device == 0x6604) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x066F)) {
+ max_sclk = 75000;
+ }
}
if (rps->vce_active) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index b5bfbe50bd87..b0ff304ce3dc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -32,6 +32,10 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
{
const struct drm_display_mode *mode = &crtc->crtc.state->adjusted_mode;
struct rcar_du_device *rcdu = crtc->group->dev;
+ struct vsp1_du_lif_config cfg = {
+ .width = mode->hdisplay,
+ .height = mode->vdisplay,
+ };
struct rcar_du_plane_state state = {
.state = {
.crtc = &crtc->crtc,
@@ -66,12 +70,12 @@ void rcar_du_vsp_enable(struct rcar_du_crtc *crtc)
*/
crtc->group->need_restart = true;
- vsp1_du_setup_lif(crtc->vsp->vsp, mode->hdisplay, mode->vdisplay);
+ vsp1_du_setup_lif(crtc->vsp->vsp, &cfg);
}
void rcar_du_vsp_disable(struct rcar_du_crtc *crtc)
{
- vsp1_du_setup_lif(crtc->vsp->vsp, 0, 0);
+ vsp1_du_setup_lif(crtc->vsp->vsp, NULL);
}
void rcar_du_vsp_atomic_begin(struct rcar_du_crtc *crtc)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 93505bcfdf4b..c92faa8f7560 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
+ unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
mutex_lock(&tilcdc_crtc->enable_lock);
@@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc)
tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG,
LCDC_PALETTE_LOAD_MODE(DATA_ONLY),
LCDC_PALETTE_LOAD_MODE_MASK);
+
+ /* There is no real chance of a race here, as the timestamp
+ * is taken before the raster DMA is started. The spinlock is
+ * taken to provide a memory barrier after taking the timestamp
+ * and to avoid a context switch between taking the timestamp
+ * and enabling the raster.
+ */
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ tilcdc_crtc->last_vblank = ktime_get();
tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE);
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
drm_crtc_vblank_on(crtc);
@@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown)
}
drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
- tilcdc_crtc->last_vblank = 0;
tilcdc_crtc->enabled = false;
mutex_unlock(&tilcdc_crtc->enable_lock);
@@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
@@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
drm_framebuffer_reference(fb);
crtc->primary->fb = fb;
+ tilcdc_crtc->event = event;
- spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ mutex_lock(&tilcdc_crtc->enable_lock);
- if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) {
+ if (tilcdc_crtc->enabled) {
+ unsigned long flags;
ktime_t next_vblank;
s64 tdiff;
- next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
- 1000000 / crtc->hwmode.vrefresh);
+ spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags);
+ next_vblank = ktime_add_us(tilcdc_crtc->last_vblank,
+ 1000000 / crtc->hwmode.vrefresh);
tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));
if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US)
tilcdc_crtc->next_fb = fb;
- }
-
- if (tilcdc_crtc->next_fb != fb)
- set_scanout(crtc, fb);
+ else
+ set_scanout(crtc, fb);
- tilcdc_crtc->event = event;
+ spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ }
- spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
+ mutex_unlock(&tilcdc_crtc->enable_lock);
return 0;
}
@@ -1047,5 +1058,5 @@ int tilcdc_crtc_create(struct drm_device *dev)
fail:
tilcdc_crtc_destroy(crtc);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/gpu/ipu-v3/Makefile b/drivers/gpu/ipu-v3/Makefile
index 5f961416c4ee..1ab9bceee755 100644
--- a/drivers/gpu/ipu-v3/Makefile
+++ b/drivers/gpu/ipu-v3/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_IMX_IPUV3_CORE) += imx-ipu-v3.o
imx-ipu-v3-objs := ipu-common.o ipu-cpmem.o ipu-csi.o ipu-dc.o ipu-di.o \
ipu-dp.o ipu-dmfc.o ipu-ic.o ipu-image-convert.o \
- ipu-smfc.o ipu-vdi.o
+ ipu-pre.o ipu-prg.o ipu-smfc.o ipu-vdi.o
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 8368e6f766ee..7aefccec31b1 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -51,15 +51,17 @@ int ipu_get_num(struct ipu_soc *ipu)
}
EXPORT_SYMBOL_GPL(ipu_get_num);
-void ipu_srm_dp_sync_update(struct ipu_soc *ipu)
+void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync)
{
u32 val;
val = ipu_cm_read(ipu, IPU_SRM_PRI2);
- val |= 0x8;
+ val &= ~DP_S_SRM_MODE_MASK;
+ val |= sync ? DP_S_SRM_MODE_NEXT_FRAME :
+ DP_S_SRM_MODE_NOW;
ipu_cm_write(ipu, val, IPU_SRM_PRI2);
}
-EXPORT_SYMBOL_GPL(ipu_srm_dp_sync_update);
+EXPORT_SYMBOL_GPL(ipu_srm_dp_update);
enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
{
@@ -81,6 +83,12 @@ enum ipu_color_space ipu_drm_fourcc_to_colorspace(u32 drm_fourcc)
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
return IPUV3_COLORSPACE_RGB;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
@@ -931,6 +939,7 @@ static const struct of_device_id imx_ipu_dt_ids[] = {
{ .compatible = "fsl,imx51-ipu", .data = &ipu_type_imx51, },
{ .compatible = "fsl,imx53-ipu", .data = &ipu_type_imx53, },
{ .compatible = "fsl,imx6q-ipu", .data = &ipu_type_imx6q, },
+ { .compatible = "fsl,imx6qp-ipu", .data = &ipu_type_imx6q, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ipu_dt_ids);
@@ -1390,11 +1399,19 @@ static int ipu_probe(struct platform_device *pdev)
if (!ipu)
return -ENODEV;
+ ipu->id = of_alias_get_id(np, "ipu");
+
+ if (of_device_is_compatible(np, "fsl,imx6qp-ipu")) {
+ ipu->prg_priv = ipu_prg_lookup_by_phandle(&pdev->dev,
+ "fsl,prg", ipu->id);
+ if (!ipu->prg_priv)
+ return -EPROBE_DEFER;
+ }
+
for (i = 0; i < 64; i++)
ipu->channel[i].ipu = ipu;
ipu->devtype = devtype;
ipu->ipu_type = devtype->type;
- ipu->id = of_alias_get_id(np, "ipu");
spin_lock_init(&ipu->lock);
mutex_init(&ipu->channel_lock);
@@ -1520,7 +1537,23 @@ static struct platform_driver imx_ipu_driver = {
.remove = ipu_remove,
};
-module_platform_driver(imx_ipu_driver);
+static struct platform_driver * const drivers[] = {
+ &ipu_pre_drv,
+ &ipu_prg_drv,
+ &imx_ipu_driver,
+};
+
+static int __init imx_ipu_init(void)
+{
+ return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_init(imx_ipu_init);
+
+static void __exit imx_ipu_exit(void)
+{
+ platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
+}
+module_exit(imx_ipu_exit);
MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index 4b2b67113d92..114160dfc3ad 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -537,6 +537,43 @@ static const struct ipu_rgb def_bgra_16 = {
#define UV2_OFFSET(pix, x, y) ((pix->width * pix->height) + \
(pix->width * y) + (x))
+#define NUM_ALPHA_CHANNELS 7
+
+/* See Table 37-12, "Alpha channels mapping". */
+static int ipu_channel_albm(int ch_num)
+{
+ switch (ch_num) {
+ case IPUV3_CHANNEL_G_MEM_IC_PRP_VF: return 0;
+ case IPUV3_CHANNEL_G_MEM_IC_PP: return 1;
+ case IPUV3_CHANNEL_MEM_FG_SYNC: return 2;
+ case IPUV3_CHANNEL_MEM_FG_ASYNC: return 3;
+ case IPUV3_CHANNEL_MEM_BG_SYNC: return 4;
+ case IPUV3_CHANNEL_MEM_BG_ASYNC: return 5;
+ case IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB: return 6;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ipu_cpmem_set_separate_alpha(struct ipuv3_channel *ch)
+{
+ struct ipu_soc *ipu = ch->ipu;
+ int albm;
+ u32 val;
+
+ albm = ipu_channel_albm(ch->num);
+ if (albm < 0)
+ return;
+
+ ipu_ch_param_write_field(ch, IPU_FIELD_ALU, 1);
+ ipu_ch_param_write_field(ch, IPU_FIELD_ALBM, albm);
+ ipu_ch_param_write_field(ch, IPU_FIELD_CRE, 1);
+
+ val = ipu_idmac_read(ipu, IDMAC_SEP_ALPHA);
+ val |= BIT(ch->num);
+ ipu_idmac_write(ipu, val, IDMAC_SEP_ALPHA);
+}
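+
+/*
+ * For illustration: ipu_cpmem_set_fmt() below enables this path for the
+ * DRM_FORMAT_*_A8 variants, setting IPU_FIELD_WID3 = 7 (presumably the
+ * alpha width minus one, i.e. an 8-bit alpha plane) before calling
+ * ipu_cpmem_set_separate_alpha() to flag the channel in IDMAC_SEP_ALPHA.
+ */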
+
int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
{
switch (drm_fourcc) {
@@ -599,22 +636,28 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
break;
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBX8888_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgbx_32);
break;
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRX8888_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgrx_32);
break;
case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_BGR888_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgr_24);
break;
case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_RGB888_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgb_24);
break;
case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_RGB565_A8:
ipu_cpmem_set_format_rgb(ch, &def_rgb_16);
break;
case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_BGR565_A8:
ipu_cpmem_set_format_rgb(ch, &def_bgr_16);
break;
case DRM_FORMAT_ARGB1555:
@@ -636,6 +679,20 @@ int ipu_cpmem_set_fmt(struct ipuv3_channel *ch, u32 drm_fourcc)
return -EINVAL;
}
+ switch (drm_fourcc) {
+ case DRM_FORMAT_RGB565_A8:
+ case DRM_FORMAT_BGR565_A8:
+ case DRM_FORMAT_RGB888_A8:
+ case DRM_FORMAT_BGR888_A8:
+ case DRM_FORMAT_RGBX8888_A8:
+ case DRM_FORMAT_BGRX8888_A8:
+ ipu_ch_param_write_field(ch, IPU_FIELD_WID3, 7);
+ ipu_cpmem_set_separate_alpha(ch);
+ break;
+ default:
+ break;
+ }
+
return 0;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_fmt);
@@ -644,6 +701,7 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
{
struct v4l2_pix_format *pix = &image->pix;
int offset, u_offset, v_offset;
+ int ret = 0;
pr_debug("%s: resolution: %dx%d stride: %d\n",
__func__, pix->width, pix->height,
@@ -719,14 +777,30 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
offset = image->rect.left * 3 +
image->rect.top * pix->bytesperline;
break;
+ case V4L2_PIX_FMT_SBGGR8:
+ case V4L2_PIX_FMT_SGBRG8:
+ case V4L2_PIX_FMT_SGRBG8:
+ case V4L2_PIX_FMT_SRGGB8:
+ offset = image->rect.left + image->rect.top * pix->bytesperline;
+ break;
+ case V4L2_PIX_FMT_SBGGR16:
+ case V4L2_PIX_FMT_SGBRG16:
+ case V4L2_PIX_FMT_SGRBG16:
+ case V4L2_PIX_FMT_SRGGB16:
+ offset = image->rect.left * 2 +
+ image->rect.top * pix->bytesperline;
+ break;
default:
- return -EINVAL;
+ /* This should not happen */
+ WARN_ON(1);
+ offset = 0;
+ ret = -EINVAL;
}
ipu_cpmem_set_buffer(ch, 0, image->phys0 + offset);
ipu_cpmem_set_buffer(ch, 1, image->phys1 + offset);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(ipu_cpmem_set_image);
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 659475c1e44a..7a4b8362dda8 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -112,8 +112,6 @@ struct ipu_dc_priv {
struct ipu_dc channels[IPU_DC_NUM_CHANNELS];
struct mutex mutex;
struct completion comp;
- int dc_irq;
- int dp_irq;
int use_count;
};
@@ -262,47 +260,13 @@ void ipu_dc_enable_channel(struct ipu_dc *dc)
}
EXPORT_SYMBOL_GPL(ipu_dc_enable_channel);
-static irqreturn_t dc_irq_handler(int irq, void *dev_id)
-{
- struct ipu_dc *dc = dev_id;
- u32 reg;
-
- reg = readl(dc->base + DC_WR_CH_CONF);
- reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
- writel(reg, dc->base + DC_WR_CH_CONF);
-
- /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */
-
- complete(&dc->priv->comp);
- return IRQ_HANDLED;
-}
-
void ipu_dc_disable_channel(struct ipu_dc *dc)
{
- struct ipu_dc_priv *priv = dc->priv;
- int irq;
- unsigned long ret;
u32 val;
- /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */
- if (dc->chno == 1)
- irq = priv->dc_irq;
- else if (dc->chno == 5)
- irq = priv->dp_irq;
- else
- return;
-
- init_completion(&priv->comp);
- enable_irq(irq);
- ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50));
- disable_irq(irq);
- if (ret == 0) {
- dev_warn(priv->dev, "DC stop timeout after 50 ms\n");
-
- val = readl(dc->base + DC_WR_CH_CONF);
- val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
- writel(val, dc->base + DC_WR_CH_CONF);
- }
+ val = readl(dc->base + DC_WR_CH_CONF);
+ val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK;
+ writel(val, dc->base + DC_WR_CH_CONF);
}
EXPORT_SYMBOL_GPL(ipu_dc_disable_channel);
@@ -389,7 +353,7 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
struct ipu_dc_priv *priv;
static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c,
0x78, 0, 0x94, 0xb4};
- int i, ret;
+ int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -410,23 +374,6 @@ int ipu_dc_init(struct ipu_soc *ipu, struct device *dev,
priv->channels[i].base = priv->dc_reg + channel_offsets[i];
}
- priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1);
- if (!priv->dc_irq)
- return -EINVAL;
- ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL,
- &priv->channels[1]);
- if (ret < 0)
- return ret;
- disable_irq(priv->dc_irq);
- priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END);
- if (!priv->dp_irq)
- return -EINVAL;
- ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL,
- &priv->channels[5]);
- if (ret < 0)
- return ret;
- disable_irq(priv->dp_irq);
-
writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) |
DC_WR_CH_CONF_PROG_DI_ID,
priv->channels[1].base + DC_WR_CH_CONF);
diff --git a/drivers/gpu/ipu-v3/ipu-dp.c b/drivers/gpu/ipu-v3/ipu-dp.c
index 98686edbcdbb..9b2b3fa479c4 100644
--- a/drivers/gpu/ipu-v3/ipu-dp.c
+++ b/drivers/gpu/ipu-v3/ipu-dp.c
@@ -112,7 +112,7 @@ int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable,
writel(reg & ~DP_COM_CONF_GWAM, flow->base + DP_COM_CONF);
}
- ipu_srm_dp_sync_update(priv->ipu);
+ ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
@@ -127,7 +127,7 @@ int ipu_dp_set_window_pos(struct ipu_dp *dp, u16 x_pos, u16 y_pos)
writel((x_pos << 16) | y_pos, flow->base + DP_FG_POS);
- ipu_srm_dp_sync_update(priv->ipu);
+ ipu_srm_dp_update(priv->ipu, true);
return 0;
}
@@ -207,7 +207,7 @@ int ipu_dp_setup_channel(struct ipu_dp *dp,
flow->out_cs, DP_COM_CONF_CSC_DEF_FG);
}
- ipu_srm_dp_sync_update(priv->ipu);
+ ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
@@ -247,7 +247,7 @@ int ipu_dp_enable_channel(struct ipu_dp *dp)
reg |= DP_COM_CONF_FG_EN;
writel(reg, flow->base + DP_COM_CONF);
- ipu_srm_dp_sync_update(priv->ipu);
+ ipu_srm_dp_update(priv->ipu, true);
mutex_unlock(&priv->mutex);
@@ -255,7 +255,7 @@ int ipu_dp_enable_channel(struct ipu_dp *dp)
}
EXPORT_SYMBOL_GPL(ipu_dp_enable_channel);
-void ipu_dp_disable_channel(struct ipu_dp *dp)
+void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync)
{
struct ipu_flow *flow = to_flow(dp);
struct ipu_dp_priv *priv = flow->priv;
@@ -275,10 +275,7 @@ void ipu_dp_disable_channel(struct ipu_dp *dp)
writel(reg, flow->base + DP_COM_CONF);
writel(0, flow->base + DP_FG_POS);
- ipu_srm_dp_sync_update(priv->ipu);
-
- if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC))
- ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50);
+ ipu_srm_dp_update(priv->ipu, sync);
mutex_unlock(&priv->mutex);
}
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index 805b6fa7b5f4..524a717ab28e 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -671,7 +671,12 @@ static void init_idmac_channel(struct ipu_image_convert_ctx *ctx,
ipu_ic_task_idma_init(chan->ic, channel, width, height,
burst_size, rot_mode);
- ipu_cpmem_set_axi_id(channel, 1);
+ /*
+ * Setting a non-zero AXI ID collides with the PRG AXI snooping, so
+ * only do this when there is no PRG present.
+ */
+ if (!channel->ipu->prg_priv)
+ ipu_cpmem_set_axi_id(channel, 1);
ipu_idmac_set_double_buffer(channel, ctx->double_buffering);
}
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
new file mode 100644
index 000000000000..c55563379e2e
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2017 Lucas Stach, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <video/imx-ipu-v3.h>
+
+#include "ipu-prv.h"
+
+#define IPU_PRE_MAX_WIDTH 2048
+#define IPU_PRE_NUM_SCANLINES 8
+
+#define IPU_PRE_CTRL 0x000
+#define IPU_PRE_CTRL_SET 0x004
+#define IPU_PRE_CTRL_ENABLE (1 << 0)
+#define IPU_PRE_CTRL_BLOCK_EN (1 << 1)
+#define IPU_PRE_CTRL_BLOCK_16 (1 << 2)
+#define IPU_PRE_CTRL_SDW_UPDATE (1 << 4)
+#define IPU_PRE_CTRL_VFLIP (1 << 5)
+#define IPU_PRE_CTRL_SO (1 << 6)
+#define IPU_PRE_CTRL_INTERLACED_FIELD (1 << 7)
+#define IPU_PRE_CTRL_HANDSHAKE_EN (1 << 8)
+#define IPU_PRE_CTRL_HANDSHAKE_LINE_NUM(v) ((v & 0x3) << 9)
+#define IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN (1 << 11)
+#define IPU_PRE_CTRL_EN_REPEAT (1 << 28)
+#define IPU_PRE_CTRL_TPR_REST_SEL (1 << 29)
+#define IPU_PRE_CTRL_CLKGATE (1 << 30)
+#define IPU_PRE_CTRL_SFTRST (1 << 31)
+
+#define IPU_PRE_CUR_BUF 0x030
+
+#define IPU_PRE_NEXT_BUF 0x040
+
+#define IPU_PRE_TPR_CTRL 0x070
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT(v) ((v & 0xff) << 0)
+#define IPU_PRE_TPR_CTRL_TILE_FORMAT_MASK 0xff
+
+#define IPU_PRE_PREFETCH_ENG_CTRL 0x080
+#define IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN (1 << 0)
+#define IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(v) ((v & 0x7) << 1)
+#define IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
+#define IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(v) ((v & 0x7) << 8)
+#define IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS (1 << 11)
+#define IPU_PRE_PREF_ENG_CTRL_FIELD_INVERSE (1 << 12)
+#define IPU_PRE_PREF_ENG_CTRL_PARTIAL_UV_SWAP (1 << 14)
+#define IPU_PRE_PREF_ENG_CTRL_TPR_COOR_OFFSET_EN (1 << 15)
+
+#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE 0x0a0
+#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(v) ((v & 0xffff) << 0)
+#define IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(v) ((v & 0xffff) << 16)
+
+#define IPU_PRE_PREFETCH_ENG_PITCH 0x0d0
+#define IPU_PRE_PREFETCH_ENG_PITCH_Y(v) ((v & 0xffff) << 0)
+#define IPU_PRE_PREFETCH_ENG_PITCH_UV(v) ((v & 0xffff) << 16)
+
+#define IPU_PRE_STORE_ENG_CTRL 0x110
+#define IPU_PRE_STORE_ENG_CTRL_STORE_EN (1 << 0)
+#define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1)
+#define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4)
+
+#define IPU_PRE_STORE_ENG_SIZE 0x130
+#define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0)
+#define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16)
+
+#define IPU_PRE_STORE_ENG_PITCH 0x140
+#define IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(v) ((v & 0xffff) << 0)
+
+#define IPU_PRE_STORE_ENG_ADDR 0x150
+
+struct ipu_pre {
+ struct list_head list;
+ struct device *dev;
+
+ void __iomem *regs;
+ struct clk *clk_axi;
+ struct gen_pool *iram;
+
+ dma_addr_t buffer_paddr;
+ void *buffer_virt;
+ bool in_use;
+};
+
+static DEFINE_MUTEX(ipu_pre_list_mutex);
+static LIST_HEAD(ipu_pre_list);
+static int available_pres;
+
+int ipu_pre_get_available_count(void)
+{
+ return available_pres;
+}
+
+struct ipu_pre *
+ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
+{
+ struct device_node *pre_node = of_parse_phandle(dev->of_node,
+ name, index);
+ struct ipu_pre *pre;
+
+ mutex_lock(&ipu_pre_list_mutex);
+ list_for_each_entry(pre, &ipu_pre_list, list) {
+ if (pre_node == pre->dev->of_node) {
+ mutex_unlock(&ipu_pre_list_mutex);
+ device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ return pre;
+ }
+ }
+ mutex_unlock(&ipu_pre_list_mutex);
+
+ return NULL;
+}
+
+int ipu_pre_get(struct ipu_pre *pre)
+{
+ u32 val;
+
+ if (pre->in_use)
+ return -EBUSY;
+
+ clk_prepare_enable(pre->clk_axi);
+
+ /* first get the engine out of reset and remove clock gating */
+ writel(0, pre->regs + IPU_PRE_CTRL);
+
+ /* init defaults that should be applied to all streams */
+ val = IPU_PRE_CTRL_HANDSHAKE_ABORT_SKIP_EN |
+ IPU_PRE_CTRL_HANDSHAKE_EN |
+ IPU_PRE_CTRL_TPR_REST_SEL |
+ IPU_PRE_CTRL_BLOCK_16 | IPU_PRE_CTRL_SDW_UPDATE;
+ writel(val, pre->regs + IPU_PRE_CTRL);
+
+ pre->in_use = true;
+ return 0;
+}
+
+void ipu_pre_put(struct ipu_pre *pre)
+{
+ u32 val;
+
+ val = IPU_PRE_CTRL_SFTRST | IPU_PRE_CTRL_CLKGATE;
+ writel(val, pre->regs + IPU_PRE_CTRL);
+
+ clk_disable_unprepare(pre->clk_axi);
+
+ pre->in_use = false;
+}
+
+void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
+ unsigned int height, unsigned int stride, u32 format,
+ unsigned int bufaddr)
+{
+ const struct drm_format_info *info = drm_format_info(format);
+ u32 active_bpp = info->cpp[0] >> 1;
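+ /*
+ * The ACTIVE_BPP register fields appear to encode bytes per pixel as
+ * cpp / 2 (16 bpp -> 1, 32 bpp -> 2); an assumption inferred from the
+ * shift above rather than from the reference manual.
+ */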
+ u32 val;
+
+ writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF);
+ writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+
+ val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) |
+ IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) |
+ IPU_PRE_PREF_ENG_CTRL_RD_NUM_BYTES(4) |
+ IPU_PRE_PREF_ENG_CTRL_SHIFT_BYPASS |
+ IPU_PRE_PREF_ENG_CTRL_PREFETCH_EN;
+ writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_CTRL);
+
+ val = IPU_PRE_PREFETCH_ENG_INPUT_SIZE_WIDTH(width) |
+ IPU_PRE_PREFETCH_ENG_INPUT_SIZE_HEIGHT(height);
+ writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_INPUT_SIZE);
+
+ val = IPU_PRE_PREFETCH_ENG_PITCH_Y(stride);
+ writel(val, pre->regs + IPU_PRE_PREFETCH_ENG_PITCH);
+
+ val = IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(active_bpp) |
+ IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(4) |
+ IPU_PRE_STORE_ENG_CTRL_STORE_EN;
+ writel(val, pre->regs + IPU_PRE_STORE_ENG_CTRL);
+
+ val = IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(width) |
+ IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(height);
+ writel(val, pre->regs + IPU_PRE_STORE_ENG_SIZE);
+
+ val = IPU_PRE_STORE_ENG_PITCH_OUT_PITCH(stride);
+ writel(val, pre->regs + IPU_PRE_STORE_ENG_PITCH);
+
+ writel(pre->buffer_paddr, pre->regs + IPU_PRE_STORE_ENG_ADDR);
+
+ val = readl(pre->regs + IPU_PRE_CTRL);
+ val |= IPU_PRE_CTRL_EN_REPEAT | IPU_PRE_CTRL_ENABLE |
+ IPU_PRE_CTRL_SDW_UPDATE;
+ writel(val, pre->regs + IPU_PRE_CTRL);
+}
+
+void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr)
+{
+ writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF);
+ writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET);
+}
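+
+/*
+ * A minimal usage sketch of the PRE API above (an illustration, assuming a
+ * single double-buffered scanout channel):
+ *
+ *	if (!ipu_pre_get(pre)) {
+ *		ipu_pre_configure(pre, width, height, stride, fmt, eba);
+ *		...
+ *		ipu_pre_update(pre, next_eba);	-- on each page flip
+ *		...
+ *		ipu_pre_put(pre);		-- on channel disable
+ *	}
+ */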
+
+u32 ipu_pre_get_baddr(struct ipu_pre *pre)
+{
+ return (u32)pre->buffer_paddr;
+}
+
+static int ipu_pre_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct ipu_pre *pre;
+
+ pre = devm_kzalloc(dev, sizeof(*pre), GFP_KERNEL);
+ if (!pre)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pre->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pre->regs))
+ return PTR_ERR(pre->regs);
+
+ pre->clk_axi = devm_clk_get(dev, "axi");
+ if (IS_ERR(pre->clk_axi))
+ return PTR_ERR(pre->clk_axi);
+
+ pre->iram = of_gen_pool_get(dev->of_node, "fsl,iram", 0);
+ if (!pre->iram)
+ return -EPROBE_DEFER;
+
+ /*
+ * Allocate the IRAM buffer at maximum size. This could be made dynamic,
+ * but as there is no other user of this IRAM region and all maximum-sized
+ * buffers fit into it, there is no need yet.
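+ * (The maximum is IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4 bytes,
+ * i.e. 2048 * 8 * 4 = 64 KiB.)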
+ */
+ pre->buffer_virt = gen_pool_dma_alloc(pre->iram, IPU_PRE_MAX_WIDTH *
+ IPU_PRE_NUM_SCANLINES * 4,
+ &pre->buffer_paddr);
+ if (!pre->buffer_virt)
+ return -ENOMEM;
+
+ pre->dev = dev;
+ platform_set_drvdata(pdev, pre);
+ mutex_lock(&ipu_pre_list_mutex);
+ list_add(&pre->list, &ipu_pre_list);
+ available_pres++;
+ mutex_unlock(&ipu_pre_list_mutex);
+
+ return 0;
+}
+
+static int ipu_pre_remove(struct platform_device *pdev)
+{
+ struct ipu_pre *pre = platform_get_drvdata(pdev);
+
+ mutex_lock(&ipu_pre_list_mutex);
+ list_del(&pre->list);
+ available_pres--;
+ mutex_unlock(&ipu_pre_list_mutex);
+
+ if (pre->buffer_virt)
+ gen_pool_free(pre->iram, (unsigned long)pre->buffer_virt,
+ IPU_PRE_MAX_WIDTH * IPU_PRE_NUM_SCANLINES * 4);
+ return 0;
+}
+
+static const struct of_device_id ipu_pre_dt_ids[] = {
+ { .compatible = "fsl,imx6qp-pre", },
+ { /* sentinel */ },
+};
+
+struct platform_driver ipu_pre_drv = {
+ .probe = ipu_pre_probe,
+ .remove = ipu_pre_remove,
+ .driver = {
+ .name = "imx-ipu-pre",
+ .of_match_table = ipu_pre_dt_ids,
+ },
+};
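+
+/*
+ * No module_platform_driver() here: ipu_pre_drv is exported through
+ * ipu-prv.h, so it is presumably registered by the IPU core driver
+ * alongside the other IPU sub-drivers.
+ */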
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
new file mode 100644
index 000000000000..caca57febbd6
--- /dev/null
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2016-2017 Lucas Stach, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <drm/drm_fourcc.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <video/imx-ipu-v3.h>
+
+#include "ipu-prv.h"
+
+#define IPU_PRG_CTL 0x00
+#define IPU_PRG_CTL_BYPASS(i) (1 << (0 + i))
+#define IPU_PRG_CTL_SOFT_ARID_MASK 0x3
+#define IPU_PRG_CTL_SOFT_ARID_SHIFT(i) (8 + i * 2)
+#define IPU_PRG_CTL_SOFT_ARID(i, v) ((v & 0x3) << (8 + 2 * i))
+#define IPU_PRG_CTL_SO(i) (1 << (16 + i))
+#define IPU_PRG_CTL_VFLIP(i) (1 << (19 + i))
+#define IPU_PRG_CTL_BLOCK_MODE(i) (1 << (22 + i))
+#define IPU_PRG_CTL_CNT_LOAD_EN(i) (1 << (25 + i))
+#define IPU_PRG_CTL_SOFTRST (1 << 30)
+#define IPU_PRG_CTL_SHADOW_EN (1 << 31)
+
+#define IPU_PRG_STATUS 0x04
+#define IPU_PRG_STATUS_BUFFER0_READY(i) (1 << (0 + i * 2))
+#define IPU_PRG_STATUS_BUFFER1_READY(i) (1 << (1 + i * 2))
+
+#define IPU_PRG_QOS 0x08
+#define IPU_PRG_QOS_ARID_MASK 0xf
+#define IPU_PRG_QOS_ARID_SHIFT(i) (0 + i * 4)
+
+#define IPU_PRG_REG_UPDATE 0x0c
+#define IPU_PRG_REG_UPDATE_REG_UPDATE (1 << 0)
+
+#define IPU_PRG_STRIDE(i) (0x10 + i * 0x4)
+#define IPU_PRG_STRIDE_STRIDE_MASK 0x3fff
+
+#define IPU_PRG_CROP_LINE 0x1c
+
+#define IPU_PRG_THD 0x20
+
+#define IPU_PRG_BADDR(i) (0x24 + i * 0x4)
+
+#define IPU_PRG_OFFSET(i) (0x30 + i * 0x4)
+
+#define IPU_PRG_ILO(i) (0x3c + i * 0x4)
+
+#define IPU_PRG_HEIGHT(i) (0x48 + i * 0x4)
+#define IPU_PRG_HEIGHT_PRE_HEIGHT_MASK 0xfff
+#define IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT 0
+#define IPU_PRG_HEIGHT_IPU_HEIGHT_MASK 0xfff
+#define IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT 16
+
+struct ipu_prg_channel {
+ bool enabled;
+ int used_pre;
+};
+
+struct ipu_prg {
+ struct list_head list;
+ struct device *dev;
+ int id;
+
+ void __iomem *regs;
+ struct clk *clk_ipg, *clk_axi;
+ struct regmap *iomuxc_gpr;
+ struct ipu_pre *pres[3];
+
+ struct ipu_prg_channel chan[3];
+};
+
+static DEFINE_MUTEX(ipu_prg_list_mutex);
+static LIST_HEAD(ipu_prg_list);
+
+struct ipu_prg *
+ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
+{
+ struct device_node *prg_node = of_parse_phandle(dev->of_node,
+ name, 0);
+ struct ipu_prg *prg;
+
+ mutex_lock(&ipu_prg_list_mutex);
+ list_for_each_entry(prg, &ipu_prg_list, list) {
+ if (prg_node == prg->dev->of_node) {
+ mutex_unlock(&ipu_prg_list_mutex);
+ device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
+ prg->id = ipu_id;
+ return prg;
+ }
+ }
+ mutex_unlock(&ipu_prg_list_mutex);
+
+ return NULL;
+}
+
+int ipu_prg_max_active_channels(void)
+{
+ return ipu_pre_get_available_count();
+}
+EXPORT_SYMBOL_GPL(ipu_prg_max_active_channels);
+
+bool ipu_prg_present(struct ipu_soc *ipu)
+{
+ return ipu->prg_priv != NULL;
+}
+EXPORT_SYMBOL_GPL(ipu_prg_present);
+
+bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
+ uint64_t modifier)
+{
+ const struct drm_format_info *info = drm_format_info(format);
+
+ if (info->num_planes != 1)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(ipu_prg_format_supported);
+
+int ipu_prg_enable(struct ipu_soc *ipu)
+{
+ struct ipu_prg *prg = ipu->prg_priv;
+ int ret;
+
+ if (!prg)
+ return 0;
+
+ ret = clk_prepare_enable(prg->clk_axi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_prg_enable);
+
+void ipu_prg_disable(struct ipu_soc *ipu)
+{
+ struct ipu_prg *prg = ipu->prg_priv;
+
+ if (!prg)
+ return;
+
+ clk_disable_unprepare(prg->clk_axi);
+}
+EXPORT_SYMBOL_GPL(ipu_prg_disable);
+
+/*
+ * The channel configuration functions below are not thread safe, as they
+ * must only be called from the atomic commit path in the DRM driver, which
+ * is properly serialized.
+ */
+static int ipu_prg_ipu_to_prg_chan(int ipu_chan)
+{
+ /*
+ * This isn't clearly documented in the RM, but the IPU to PRG channel
+ * assignment is fixed, as the control signals only match up with this
+ * mapping.
+ */
+ switch (ipu_chan) {
+ case IPUV3_CHANNEL_MEM_BG_SYNC:
+ return 0;
+ case IPUV3_CHANNEL_MEM_FG_SYNC:
+ return 1;
+ case IPUV3_CHANNEL_MEM_DC_SYNC:
+ return 2;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ipu_prg_get_pre(struct ipu_prg *prg, int prg_chan)
+{
+ int i, ret;
+
+ /* channel 0 is special as it is hardwired to one of the PREs */
+ if (prg_chan == 0) {
+ ret = ipu_pre_get(prg->pres[0]);
+ if (ret)
+ goto fail;
+ prg->chan[prg_chan].used_pre = 0;
+ return 0;
+ }
+
+ for (i = 1; i < 3; i++) {
+ ret = ipu_pre_get(prg->pres[i]);
+ if (!ret) {
+ u32 val, mux;
+ int shift;
+
+ prg->chan[prg_chan].used_pre = i;
+
+ /* configure the PRE to PRG channel mux */
+ shift = (i == 1) ? 12 : 14;
+ mux = (prg->id << 1) | (prg_chan - 1);
+ regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
+ 0x3 << shift, mux << shift);
+
+ /* check other mux, must not point to same channel */
+ shift = (i == 1) ? 14 : 12;
+ regmap_read(prg->iomuxc_gpr, IOMUXC_GPR5, &val);
+ if (((val >> shift) & 0x3) == mux) {
+ regmap_update_bits(prg->iomuxc_gpr, IOMUXC_GPR5,
+ 0x3 << shift,
+ (mux ^ 0x1) << shift);
+ }
+
+ return 0;
+ }
+ }
+
+fail:
+ dev_err(prg->dev, "could not get PRE for PRG chan %d\n", prg_chan);
+ return ret;
+}
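+
+/*
+ * A sketch of the IOMUXC_GPR5 routing implied above (inferred from the
+ * code, not restated from the reference manual): the second PRE is muxed
+ * through bits 13:12 and the third through bits 15:14, each 2-bit field
+ * holding (prg_id << 1) | (prg_chan - 1).
+ */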
+
+static void ipu_prg_put_pre(struct ipu_prg *prg, int prg_chan)
+{
+ struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+
+ ipu_pre_put(prg->pres[chan->used_pre]);
+ chan->used_pre = -1;
+}
+
+void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
+{
+ int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+ struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+ struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+ u32 val;
+
+ if (!chan->enabled || prg_chan < 0)
+ return;
+
+ clk_prepare_enable(prg->clk_ipg);
+
+ val = readl(prg->regs + IPU_PRG_CTL);
+ val |= IPU_PRG_CTL_BYPASS(prg_chan);
+ writel(val, prg->regs + IPU_PRG_CTL);
+
+ val = IPU_PRG_REG_UPDATE_REG_UPDATE;
+ writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+
+ clk_disable_unprepare(prg->clk_ipg);
+
+ ipu_prg_put_pre(prg, prg_chan);
+
+ chan->enabled = false;
+}
+EXPORT_SYMBOL_GPL(ipu_prg_channel_disable);
+
+int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
+ unsigned int axi_id, unsigned int width,
+ unsigned int height, unsigned int stride,
+ u32 format, unsigned long *eba)
+{
+ int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
+ struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
+ struct ipu_prg_channel *chan = &prg->chan[prg_chan];
+ u32 val;
+ int ret;
+
+ if (prg_chan < 0)
+ return prg_chan;
+
+ if (chan->enabled) {
+ ipu_pre_update(prg->pres[chan->used_pre], *eba);
+ return 0;
+ }
+
+ ret = ipu_prg_get_pre(prg, prg_chan);
+ if (ret)
+ return ret;
+
+ ipu_pre_configure(prg->pres[chan->used_pre],
+ width, height, stride, format, *eba);
+
+ ret = clk_prepare_enable(prg->clk_ipg);
+ if (ret) {
+ ipu_prg_put_pre(prg, prg_chan);
+ return ret;
+ }
+
+ val = (stride - 1) & IPU_PRG_STRIDE_STRIDE_MASK;
+ writel(val, prg->regs + IPU_PRG_STRIDE(prg_chan));
+
+ val = ((height & IPU_PRG_HEIGHT_PRE_HEIGHT_MASK) <<
+ IPU_PRG_HEIGHT_PRE_HEIGHT_SHIFT) |
+ ((height & IPU_PRG_HEIGHT_IPU_HEIGHT_MASK) <<
+ IPU_PRG_HEIGHT_IPU_HEIGHT_SHIFT);
+ writel(val, prg->regs + IPU_PRG_HEIGHT(prg_chan));
+
+ val = ipu_pre_get_baddr(prg->pres[chan->used_pre]);
+ *eba = val;
+ writel(val, prg->regs + IPU_PRG_BADDR(prg_chan));
+
+ val = readl(prg->regs + IPU_PRG_CTL);
+ /* counter load enable */
+ val |= IPU_PRG_CTL_CNT_LOAD_EN(prg_chan);
+ /* config AXI ID */
+ val &= ~(IPU_PRG_CTL_SOFT_ARID_MASK <<
+ IPU_PRG_CTL_SOFT_ARID_SHIFT(prg_chan));
+ val |= IPU_PRG_CTL_SOFT_ARID(prg_chan, axi_id);
+ /* enable channel */
+ val &= ~IPU_PRG_CTL_BYPASS(prg_chan);
+ writel(val, prg->regs + IPU_PRG_CTL);
+
+ val = IPU_PRG_REG_UPDATE_REG_UPDATE;
+ writel(val, prg->regs + IPU_PRG_REG_UPDATE);
+
+ clk_disable_unprepare(prg->clk_ipg);
+
+ chan->enabled = true;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipu_prg_channel_configure);
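+
+/*
+ * Note that on the first enable the caller's EBA is rewritten to the PRE
+ * store buffer (allocated from IRAM), so the IDMAC ends up fetching from
+ * IRAM while the PRE/PRG pair streams the real framebuffer from DRAM.
+ */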
+
+static int ipu_prg_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct ipu_prg *prg;
+ u32 val;
+ int i, ret;
+
+ prg = devm_kzalloc(dev, sizeof(*prg), GFP_KERNEL);
+ if (!prg)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ prg->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(prg->regs))
+ return PTR_ERR(prg->regs);
+
+ prg->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(prg->clk_ipg))
+ return PTR_ERR(prg->clk_ipg);
+
+ prg->clk_axi = devm_clk_get(dev, "axi");
+ if (IS_ERR(prg->clk_axi))
+ return PTR_ERR(prg->clk_axi);
+
+ prg->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(prg->iomuxc_gpr))
+ return PTR_ERR(prg->iomuxc_gpr);
+
+ for (i = 0; i < 3; i++) {
+ prg->pres[i] = ipu_pre_lookup_by_phandle(dev, "fsl,pres", i);
+ if (!prg->pres[i])
+ return -EPROBE_DEFER;
+ }
+
+ ret = clk_prepare_enable(prg->clk_ipg);
+ if (ret)
+ return ret;
+
+ /* init to free running mode */
+ val = readl(prg->regs + IPU_PRG_CTL);
+ val |= IPU_PRG_CTL_SHADOW_EN;
+ writel(val, prg->regs + IPU_PRG_CTL);
+
+ /* disable address threshold */
+ writel(0xffffffff, prg->regs + IPU_PRG_THD);
+
+ clk_disable_unprepare(prg->clk_ipg);
+
+ prg->dev = dev;
+ platform_set_drvdata(pdev, prg);
+ mutex_lock(&ipu_prg_list_mutex);
+ list_add(&prg->list, &ipu_prg_list);
+ mutex_unlock(&ipu_prg_list_mutex);
+
+ return 0;
+}
+
+static int ipu_prg_remove(struct platform_device *pdev)
+{
+ struct ipu_prg *prg = platform_get_drvdata(pdev);
+
+ mutex_lock(&ipu_prg_list_mutex);
+ list_del(&prg->list);
+ mutex_unlock(&ipu_prg_list_mutex);
+
+ return 0;
+}
+
+static const struct of_device_id ipu_prg_dt_ids[] = {
+ { .compatible = "fsl,imx6qp-prg", },
+ { /* sentinel */ },
+};
+
+struct platform_driver ipu_prg_drv = {
+ .probe = ipu_prg_probe,
+ .remove = ipu_prg_remove,
+ .driver = {
+ .name = "imx-ipu-prg",
+ .of_match_table = ipu_prg_dt_ids,
+ },
+};
diff --git a/drivers/gpu/ipu-v3/ipu-prv.h b/drivers/gpu/ipu-v3/ipu-prv.h
index 22e47b68b14a..ca2a223a0d1e 100644
--- a/drivers/gpu/ipu-v3/ipu-prv.h
+++ b/drivers/gpu/ipu-v3/ipu-prv.h
@@ -75,6 +75,11 @@ struct ipu_soc;
#define IPU_INT_CTRL(n) IPU_CM_REG(0x003C + 4 * (n))
#define IPU_INT_STAT(n) IPU_CM_REG(0x0200 + 4 * (n))
+/* SRM_PRI2 */
+#define DP_S_SRM_MODE_MASK (0x3 << 3)
+#define DP_S_SRM_MODE_NOW (0x3 << 3)
+#define DP_S_SRM_MODE_NEXT_FRAME (0x1 << 3)
+
/* FS_PROC_FLOW1 */
#define FS_PRPENC_ROT_SRC_SEL_MASK (0xf << 0)
#define FS_PRPENC_ROT_SRC_SEL_ENC (0x7 << 0)
@@ -168,6 +173,8 @@ struct ipu_ic_priv;
struct ipu_vdi;
struct ipu_image_convert_priv;
struct ipu_smfc_priv;
+struct ipu_pre;
+struct ipu_prg;
struct ipu_devtype;
@@ -202,6 +209,7 @@ struct ipu_soc {
struct ipu_vdi *vdi_priv;
struct ipu_image_convert_priv *image_convert_priv;
struct ipu_smfc_priv *smfc_priv;
+ struct ipu_prg *prg_priv;
};
static inline u32 ipu_idmac_read(struct ipu_soc *ipu, unsigned offset)
@@ -215,7 +223,7 @@ static inline void ipu_idmac_write(struct ipu_soc *ipu, u32 value,
writel(value, ipu->idmac_reg + offset);
}
-void ipu_srm_dp_sync_update(struct ipu_soc *ipu);
+void ipu_srm_dp_update(struct ipu_soc *ipu, bool sync);
int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
@@ -259,4 +267,21 @@ void ipu_cpmem_exit(struct ipu_soc *ipu);
int ipu_smfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base);
void ipu_smfc_exit(struct ipu_soc *ipu);
+struct ipu_pre *ipu_pre_lookup_by_phandle(struct device *dev, const char *name,
+ int index);
+int ipu_pre_get_available_count(void);
+int ipu_pre_get(struct ipu_pre *pre);
+void ipu_pre_put(struct ipu_pre *pre);
+u32 ipu_pre_get_baddr(struct ipu_pre *pre);
+void ipu_pre_configure(struct ipu_pre *pre, unsigned int width,
+ unsigned int height,
+ unsigned int stride, u32 format, unsigned int bufaddr);
+void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr);
+
+struct ipu_prg *ipu_prg_lookup_by_phandle(struct device *dev, const char *name,
+ int ipu_id);
+
+extern struct platform_driver ipu_pre_drv;
+extern struct platform_driver ipu_prg_drv;
+
#endif /* __IPU_PRV_H__ */
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 81a80c82f1bd..bd0d1988feb2 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -543,7 +543,7 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
/*
* In case a device driver's probe() fails (e.g.,
* util_probe() -> vmbus_open() returns -ENOMEM) and the device is
- * rescinded later (e.g., we dynamically disble an Integrated Service
+ * rescinded later (e.g., we dynamically disable an Integrated Service
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
* here we should skip most of the below cleanup work.
*/
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 0652281662a8..78792b4d6437 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
u8 *tmp_buf;
int len = 0;
int xfersz = brcmstb_i2c_get_xfersz(dev);
+ u32 cond, cond_per_msg;
if (dev->is_suspended)
return -EBUSY;
@@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
- brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
+ cond = ~COND_START_STOP;
else
- brcmstb_set_i2c_start_stop(dev,
- COND_RESTART | COND_NOSTOP);
+ cond = COND_RESTART | COND_NOSTOP;
+
+ brcmstb_set_i2c_start_stop(dev, cond);
/* Send slave address */
if (!(pmsg->flags & I2C_M_NOSTART)) {
@@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
}
}
+ cond_per_msg = cond;
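+
+ /*
+ * cond_per_msg tracks the start/stop condition for each chunk of the
+ * current message: only the last chunk of the last message may issue
+ * STOP, and every chunk after the first gets NOSTART | NOSTOP.
+ */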
+
/* Perform data transfer */
while (len) {
bytes_to_xfer = min(len, xfersz);
- if (len <= xfersz && i == (num - 1))
- brcmstb_set_i2c_start_stop(dev,
- ~(COND_START_STOP));
+ if (len <= xfersz) {
+ if (i == (num - 1))
+ cond_per_msg = cond_per_msg &
+ ~(COND_RESTART | COND_NOSTOP);
+ else
+ cond_per_msg = cond;
+ } else {
+ cond_per_msg = (cond_per_msg & ~COND_RESTART) |
+ COND_NOSTOP;
+ }
+
+ brcmstb_set_i2c_start_stop(dev, cond_per_msg);
rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
bytes_to_xfer, pmsg);
@@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
len -= bytes_to_xfer;
tmp_buf += bytes_to_xfer;
+
+ cond_per_msg = COND_NOSTART | COND_NOSTOP;
}
}
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
index 1590ad0a8081..1749a0f5a9fa 100644
--- a/drivers/i2c/busses/i2c-designware-baytrail.c
+++ b/drivers/i2c/busses/i2c-designware-baytrail.c
@@ -16,6 +16,7 @@
#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/pm_qos.h>
#include <asm/iosf_mbi.h>
@@ -23,19 +24,29 @@
#define SEMAPHORE_TIMEOUT 100
#define PUNIT_SEMAPHORE 0x7
+#define PUNIT_SEMAPHORE_CHT 0x10e
#define PUNIT_SEMAPHORE_BIT BIT(0)
#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
static unsigned long acquired;
-static int get_sem(struct device *dev, u32 *sem)
+static u32 get_sem_addr(struct dw_i2c_dev *dev)
{
+ if (dev->flags & MODEL_CHERRYTRAIL)
+ return PUNIT_SEMAPHORE_CHT;
+ else
+ return PUNIT_SEMAPHORE;
+}
+
+static int get_sem(struct dw_i2c_dev *dev, u32 *sem)
+{
+ u32 addr = get_sem_addr(dev);
u32 data;
int ret;
- ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data);
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &data);
if (ret) {
- dev_err(dev, "iosf failed to read punit semaphore\n");
+ dev_err(dev->dev, "iosf failed to read punit semaphore\n");
return ret;
}
@@ -44,22 +55,22 @@ static int get_sem(struct device *dev, u32 *sem)
return 0;
}
-static void reset_semaphore(struct device *dev)
+static void reset_semaphore(struct dw_i2c_dev *dev)
{
- u32 data;
+ if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, get_sem_addr(dev),
+ 0, PUNIT_SEMAPHORE_BIT))
+ dev_err(dev->dev, "iosf failed to reset punit semaphore during write\n");
- if (iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &data)) {
- dev_err(dev, "iosf failed to reset punit semaphore during read\n");
- return;
- }
+ pm_qos_update_request(&dev->pm_qos, PM_QOS_DEFAULT_VALUE);
- data &= ~PUNIT_SEMAPHORE_BIT;
- if (iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, data))
- dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+ iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END,
+ NULL);
+ iosf_mbi_punit_release();
}
static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
{
+ u32 addr = get_sem_addr(dev);
u32 sem = PUNIT_SEMAPHORE_ACQUIRE;
int ret;
unsigned long start, end;
@@ -72,18 +83,29 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
if (!dev->release_lock)
return 0;
+ iosf_mbi_punit_acquire();
+ iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN,
+ NULL);
+
+ /*
+ * Prevent the CPU from entering C6 or C7 state: entering these states
+ * requires the punit to talk to the pmic, and if this happens while
+ * we're holding the semaphore, the SoC hangs.
+ */
+ pm_qos_update_request(&dev->pm_qos, 0);
+
/* host driver writes to side band semaphore register */
- ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, PUNIT_SEMAPHORE, sem);
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, addr, sem);
if (ret) {
dev_err(dev->dev, "iosf punit semaphore request failed\n");
- return ret;
+ goto out;
}
/* host driver waits for bit 0 to be set in semaphore register */
start = jiffies;
end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
do {
- ret = get_sem(dev->dev, &sem);
+ ret = get_sem(dev, &sem);
if (!ret && sem) {
acquired = jiffies;
dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
@@ -95,9 +117,10 @@ static int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
} while (time_before(jiffies, end));
dev_err(dev->dev, "punit semaphore timed out, resetting\n");
- reset_semaphore(dev->dev);
+out:
+ reset_semaphore(dev);
- ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, PUNIT_SEMAPHORE, &sem);
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, addr, &sem);
if (ret)
dev_err(dev->dev, "iosf failed to read punit semaphore\n");
else
@@ -116,12 +139,12 @@ static void baytrail_i2c_release(struct dw_i2c_dev *dev)
if (!dev->acquire_lock)
return;
- reset_semaphore(dev->dev);
+ reset_semaphore(dev);
dev_dbg(dev->dev, "punit semaphore held for %ums\n",
jiffies_to_msecs(jiffies - acquired));
}
-int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
{
acpi_status status;
unsigned long long shared_host = 0;
@@ -138,15 +161,25 @@ int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
if (ACPI_FAILURE(status))
return 0;
- if (shared_host) {
- dev_info(dev->dev, "I2C bus managed by PUNIT\n");
- dev->acquire_lock = baytrail_i2c_acquire;
- dev->release_lock = baytrail_i2c_release;
- dev->pm_runtime_disabled = true;
- }
+ if (!shared_host)
+ return 0;
if (!iosf_mbi_available())
return -EPROBE_DEFER;
+ dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+ dev->acquire_lock = baytrail_i2c_acquire;
+ dev->release_lock = baytrail_i2c_release;
+ dev->pm_runtime_disabled = true;
+
+ pm_qos_add_request(&dev->pm_qos, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
return 0;
}
+
+void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
+{
+ if (dev->acquire_lock)
+ pm_qos_remove_request(&dev->pm_qos);
+}
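+
+/*
+ * The pm_qos request removed here pairs with the pm_qos_add_request() in
+ * i2c_dw_probe_lock_support() above; a set acquire_lock callback doubles
+ * as the "request was registered" flag.
+ */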
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 7a3faa551cf8..15a534818d4f 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -177,13 +177,13 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
{
u32 value;
- if (dev->accessor_flags & ACCESS_16BIT)
+ if (dev->flags & ACCESS_16BIT)
value = readw_relaxed(dev->base + offset) |
(readw_relaxed(dev->base + offset + 2) << 16);
else
value = readl_relaxed(dev->base + offset);
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
return swab32(value);
else
return value;
@@ -191,10 +191,10 @@ static u32 dw_readl(struct dw_i2c_dev *dev, int offset)
static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
{
- if (dev->accessor_flags & ACCESS_SWAP)
+ if (dev->flags & ACCESS_SWAP)
b = swab32(b);
- if (dev->accessor_flags & ACCESS_16BIT) {
+ if (dev->flags & ACCESS_16BIT) {
writew_relaxed((u16)b, dev->base + offset);
writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
} else {
@@ -339,10 +339,10 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
reg = dw_readl(dev, DW_IC_COMP_TYPE);
if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
/* Configure register endianess access */
- dev->accessor_flags |= ACCESS_SWAP;
+ dev->flags |= ACCESS_SWAP;
} else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
/* Configure register access mode 16bit */
- dev->accessor_flags |= ACCESS_16BIT;
+ dev->flags |= ACCESS_16BIT;
} else if (reg != DW_IC_COMP_TYPE_VALUE) {
dev_err(dev->dev, "Unknown Synopsys component type: "
"0x%08x\n", reg);
@@ -924,7 +924,7 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
tx_aborted:
if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
- else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* workaround to trigger pending interrupt */
stat = dw_readl(dev, DW_IC_INTR_MASK);
i2c_dw_disable_int(dev);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index c1db3a5a340f..846ea57f85af 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -23,6 +23,7 @@
*/
#include <linux/i2c.h>
+#include <linux/pm_qos.h>
#define DW_IC_DEFAULT_FUNCTIONALITY (I2C_FUNC_I2C | \
I2C_FUNC_SMBUS_BYTE | \
@@ -75,6 +76,7 @@
* @fp_lcnt: fast plus LCNT value
* @hs_hcnt: high speed HCNT value
* @hs_lcnt: high speed LCNT value
+ * @pm_qos: pm_qos_request used while holding a hardware lock on the bus
* @acquire_lock: function to acquire a hardware lock on the bus
* @release_lock: function to release a hardware lock on the bus
* @pm_runtime_disabled: true if pm runtime is disabled
@@ -88,6 +90,7 @@ struct dw_i2c_dev {
void __iomem *base;
struct completion cmd_complete;
struct clk *clk;
+ struct reset_control *rst;
u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev);
struct dw_pci_controller *controller;
int cmd_err;
@@ -103,7 +106,7 @@ struct dw_i2c_dev {
unsigned int status;
u32 abort_source;
int irq;
- u32 accessor_flags;
+ u32 flags;
struct i2c_adapter adapter;
u32 functionality;
u32 master_cfg;
@@ -122,6 +125,7 @@ struct dw_i2c_dev {
u16 fp_lcnt;
u16 hs_hcnt;
u16 hs_lcnt;
+ struct pm_qos_request pm_qos;
int (*acquire_lock)(struct dw_i2c_dev *dev);
void (*release_lock)(struct dw_i2c_dev *dev);
bool pm_runtime_disabled;
@@ -131,6 +135,8 @@ struct dw_i2c_dev {
#define ACCESS_16BIT 0x00000002
#define ACCESS_INTR_MASK 0x00000004
+#define MODEL_CHERRYTRAIL 0x00000100
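+/* Model flags sit above the ACCESS_* bits so both can share dev->flags. */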
+
extern int i2c_dw_init(struct dw_i2c_dev *dev);
extern void i2c_dw_disable(struct dw_i2c_dev *dev);
extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
@@ -138,7 +144,9 @@ extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
extern int i2c_dw_probe(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
-extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev);
+extern void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev);
#else
-static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0; }
+static inline void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) {}
#endif
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index d6423cfac588..ed485b69b449 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -45,6 +45,7 @@ enum dw_pci_ctl_id_t {
medfield,
merrifield,
baytrail,
+ cherrytrail,
haswell,
};
@@ -63,6 +64,7 @@ struct dw_pci_controller {
u32 rx_fifo_depth;
u32 clk_khz;
u32 functionality;
+ u32 flags;
struct dw_scl_sda_cfg *scl_sda_cfg;
int (*setup)(struct pci_dev *pdev, struct dw_pci_controller *c);
};
@@ -170,6 +172,15 @@ static struct dw_pci_controller dw_pci_controllers[] = {
.functionality = I2C_FUNC_10BIT_ADDR,
.scl_sda_cfg = &hsw_config,
},
+ [cherrytrail] = {
+ .bus_num = -1,
+ .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
+ .tx_fifo_depth = 32,
+ .rx_fifo_depth = 32,
+ .functionality = I2C_FUNC_10BIT_ADDR,
+ .flags = MODEL_CHERRYTRAIL,
+ .scl_sda_cfg = &byt_config,
+ },
};
#ifdef CONFIG_PM
@@ -237,6 +248,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
dev->base = pcim_iomap_table(pdev)[0];
dev->dev = &pdev->dev;
dev->irq = pdev->irq;
+ dev->flags |= controller->flags;
if (controller->setup) {
r = controller->setup(pdev, controller);
@@ -317,13 +329,13 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x9c61), haswell },
{ PCI_VDEVICE(INTEL, 0x9c62), haswell },
/* Braswell / Cherrytrail */
- { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C5), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C6), baytrail },
- { PCI_VDEVICE(INTEL, 0x22C7), baytrail },
+ { PCI_VDEVICE(INTEL, 0x22C1), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C2), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C3), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C4), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C5), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C6), cherrytrail },
+ { PCI_VDEVICE(INTEL, 0x22C7), cherrytrail },
{ 0,}
};
MODULE_DEVICE_TABLE(pci, i2_designware_pci_ids);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 6ce431323125..d8665098dce9 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/io.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/platform_data/i2c-designware.h>
@@ -112,7 +113,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
if (id && id->driver_data)
- dev->accessor_flags |= (u32)id->driver_data;
+ dev->flags |= (u32)id->driver_data;
return 0;
}
@@ -123,7 +124,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT3432", 0 },
{ "INT3433", 0 },
{ "80860F41", 0 },
- { "808622C1", 0 },
+ { "808622C1", MODEL_CHERRYTRAIL },
{ "AMD0010", ACCESS_INTR_MASK },
{ "AMDI0010", ACCESS_INTR_MASK },
{ "AMDI0510", 0 },
@@ -199,6 +200,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
dev->irq = irq;
platform_set_drvdata(pdev, dev);
+ dev->rst = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(dev->rst)) {
+ if (PTR_ERR(dev->rst) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ } else {
+ reset_control_deassert(dev->rst);
+ }
+
if (pdata) {
dev->clk_freq = pdata->i2c_scl_freq;
} else {
@@ -235,12 +244,13 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
&& dev->clk_freq != 1000000 && dev->clk_freq != 3400000) {
dev_err(&pdev->dev,
"Only 100kHz, 400kHz, 1MHz and 3.4MHz supported");
- return -EINVAL;
+ r = -EINVAL;
+ goto exit_reset;
}
- r = i2c_dw_eval_lock_support(dev);
+ r = i2c_dw_probe_lock_support(dev);
if (r)
- return r;
+ goto exit_reset;
dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY;
@@ -286,10 +296,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
r = i2c_dw_probe(dev);
- if (r && !dev->pm_runtime_disabled)
- pm_runtime_disable(&pdev->dev);
+ if (r)
+ goto exit_probe;
return r;
+
+exit_probe:
+ if (!dev->pm_runtime_disabled)
+ pm_runtime_disable(&pdev->dev);
+exit_reset:
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
+ return r;
}
static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -306,6 +324,10 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
if (!dev->pm_runtime_disabled)
pm_runtime_disable(&pdev->dev);
+ if (!IS_ERR_OR_NULL(dev->rst))
+ reset_control_assert(dev->rst);
+
+ i2c_dw_remove_lock_support(dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index cbd93ce0661f..736a82472101 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -457,7 +457,6 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
int_status = readl(i2c->regs + HSI2C_INT_STATUS);
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
- trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
/* handle interrupt related to the transfer status */
if (i2c->variant->hw == HSI2C_EXYNOS7) {
@@ -482,11 +481,13 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
goto stop;
}
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if ((trans_status & HSI2C_MASTER_ST_MASK) == HSI2C_MASTER_ST_LOSE) {
i2c->state = -EAGAIN;
goto stop;
}
} else if (int_status & HSI2C_INT_I2C) {
+ trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if (trans_status & HSI2C_NO_DEV_ACK) {
dev_dbg(i2c->dev, "No ACK from device\n");
i2c->state = -ENXIO;
diff --git a/drivers/i2c/busses/i2c-meson.c b/drivers/i2c/busses/i2c-meson.c
index 2aa61bbbd307..73b97c71a484 100644
--- a/drivers/i2c/busses/i2c-meson.c
+++ b/drivers/i2c/busses/i2c-meson.c
@@ -175,7 +175,7 @@ static void meson_i2c_put_data(struct meson_i2c *i2c, char *buf, int len)
wdata1 |= *buf++ << ((i - 4) * 8);
writel(wdata0, i2c->regs + REG_TOK_WDATA0);
- writel(wdata0, i2c->regs + REG_TOK_WDATA1);
+ writel(wdata1, i2c->regs + REG_TOK_WDATA1);
dev_dbg(i2c->dev, "%s: data %08x %08x len %d\n", __func__,
wdata0, wdata1, len);
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 4a7d9bc2142b..45d61714c81b 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -172,14 +172,6 @@ static const struct i2c_adapter_quirks mt6577_i2c_quirks = {
.max_comb_2nd_msg_len = 31,
};
-static const struct i2c_adapter_quirks mt8173_i2c_quirks = {
- .max_num_msgs = 65535,
- .max_write_len = 65535,
- .max_read_len = 65535,
- .max_comb_1st_msg_len = 65535,
- .max_comb_2nd_msg_len = 65535,
-};
-
static const struct mtk_i2c_compatible mt6577_compat = {
.quirks = &mt6577_i2c_quirks,
.pmic_i2c = 0,
@@ -199,7 +191,6 @@ static const struct mtk_i2c_compatible mt6589_compat = {
};
static const struct mtk_i2c_compatible mt8173_compat = {
- .quirks = &mt8173_i2c_quirks,
.pmic_i2c = 0,
.dcm = 1,
.auto_restart = 1,
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 8f11d347b3ec..c811af4c8d81 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
}
if (riic->is_last || riic->err) {
- riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
+ riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
+ } else {
+ /* Transfer is complete, but do not send STOP */
+ riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
+ complete(&riic->msg_done);
}
return IRQ_HANDLED;
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 83768e85a919..2178266bca79 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -429,6 +429,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
while (muxc->num_adapters) {
struct i2c_adapter *adap = muxc->adapter[--muxc->num_adapters];
struct i2c_mux_priv *priv = adap->algo_data;
+ struct device_node *np = adap->dev.of_node;
muxc->adapter[muxc->num_adapters] = NULL;
@@ -438,6 +439,7 @@ void i2c_mux_del_adapters(struct i2c_mux_core *muxc)
sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
+ of_node_put(np);
kfree(priv);
}
}
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
index 1eef56a89b1f..f96601268f71 100644
--- a/drivers/irqchip/irq-crossbar.c
+++ b/drivers/irqchip/irq-crossbar.c
@@ -198,7 +198,8 @@ static const struct irq_domain_ops crossbar_domain_ops = {
static int __init crossbar_of_init(struct device_node *node)
{
- int i, size, max = 0, reserved = 0, entry;
+ u32 max = 0, entry, reg_size;
+ int i, size, reserved = 0;
const __be32 *irqsr;
int ret = -ENOMEM;
@@ -275,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
if (!cb->register_offsets)
goto err_irq_map;
- of_property_read_u32(node, "ti,reg-size", &size);
+ of_property_read_u32(node, "ti,reg-size", &reg_size);
- switch (size) {
+ switch (reg_size) {
case 1:
cb->write = crossbar_writeb;
break;
@@ -303,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
continue;
cb->register_offsets[i] = reserved;
- reserved += size;
+ reserved += reg_size;
}
of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 23201004fd7a..f77f840d2b5f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1601,6 +1601,14 @@ static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
}
+static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+{
+ struct its_node *its = data;
+
+ /* On QDF2400, the size of the ITE is 16 bytes */
+ its->ite_size = 16;
+}
+
static const struct gic_quirk its_quirks[] = {
#ifdef CONFIG_CAVIUM_ERRATUM_22375
{
@@ -1618,6 +1626,14 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_cavium_23144,
},
#endif
+#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
+ {
+ .desc = "ITS: QDF2400 erratum 0065",
+ .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_qdf2400_e0065,
+ },
+#endif
{
}
};
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 11e13c56126f..2da3ff650e1d 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/isdn/hisax/st5481_b.c b/drivers/isdn/hisax/st5481_b.c
index 409849165838..f64a36007800 100644
--- a/drivers/isdn/hisax/st5481_b.c
+++ b/drivers/isdn/hisax/st5481_b.c
@@ -239,7 +239,7 @@ static void st5481B_mode(struct st5481_bcs *bcs, int mode)
}
}
} else {
- // Disble B channel interrupts
+ // Disable B channel interrupts
st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);
// Disable B channel FIFOs
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 3f041b187033..f757cef293f8 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
* To get all the fields, copy all archdata
*/
dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
+ dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
#endif /* CONFIG_PCI */
#ifdef DEBUG
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index a126919ed102..5d13930f0f22 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -4,7 +4,6 @@
#include <linux/blkdev.h>
#include <linux/errno.h>
-#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f4ffd1eb8f44..dfb75979e455 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
struct dm_offload *o = container_of(cb, struct dm_offload, cb);
struct bio_list list;
struct bio *bio;
+ int i;
INIT_LIST_HEAD(&o->cb.list);
if (unlikely(!current->bio_list))
return;
- list = *current->bio_list;
- bio_list_init(current->bio_list);
-
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set) {
- bio_list_add(current->bio_list, bio);
- continue;
+ for (i = 0; i < 2; i++) {
+ list = current->bio_list[i];
+ bio_list_init(&current->bio_list[i]);
+
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(&current->bio_list[i], bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
}
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
}
}
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 2b13117fb918..321ecac23027 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
bm_lockres->flags |= DLM_LKF_NOQUEUE;
ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
if (ret == -EAGAIN) {
- memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
s = read_resync_info(mddev, bm_lockres);
if (s) {
pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
lockres_free(cinfo->bitmap_lockres);
unlock_all_bitmaps(mddev);
dlm_release_lockspace(cinfo->lockspace, 2);
+ kfree(cinfo);
return 0;
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 548d1b8014f8..f6ae1d67bcd0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
}
EXPORT_SYMBOL(md_flush_request);
-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
- struct mddev *mddev = cb->data;
- md_wakeup_thread(mddev->thread);
- kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
static inline struct mddev *mddev_get(struct mddev *mddev)
{
atomic_inc(&mddev->active);
@@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
do {
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
@@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
@@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
mddev->layout = info->layout;
mddev->chunk_sectors = info->chunk_size >> 9;
- mddev->max_disks = MD_SB_DISKS;
-
if (mddev->persistent) {
- mddev->flags = 0;
- mddev->sb_flags = 0;
+ mddev->max_disks = MD_SB_DISKS;
+ mddev->flags = 0;
+ mddev->sb_flags = 0;
}
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
@@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
return -ENOSPC;
}
rv = mddev->pers->resize(mddev, num_sectors);
- if (!rv)
- revalidate_disk(mddev->gendisk);
+ if (!rv) {
+ if (mddev->queue) {
+ set_capacity(mddev->gendisk, mddev->array_sectors);
+ revalidate_disk(mddev->gendisk);
+ }
+ }
return rv;
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b8859cbf84b6..dde8ecb760c8 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev);
-extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
-static inline int mddev_check_plugged(struct mddev *mddev)
-{
- return !!blk_check_plugged(md_unplug, mddev,
- sizeof(struct blk_plug_cb));
-}
static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fbc2d7851b49..a34f58772022 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
static void freeze_array(struct r1conf *conf, int extra)
{
/* Stop sync I/O and normal I/O and wait for everything to
- * go quite.
+ * go quiet.
* This is called in two situations:
* 1) management command handlers (reshape, remove disk, quiesce).
* 2) one normal I/O request failed.
@@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
- if (bio_data_dir(split) == READ)
+ if (bio_data_dir(split) == READ) {
raid1_read_request(mddev, split);
- else
+
+ /*
+ * If a bio is split, the first part of the bio will
+ * pass the barrier but the bio is queued in
+ * current->bio_list (see generic_make_request). If
+ * there is a raise_barrier() called here, the second
+ * part of the bio can't pass the barrier. But since
+ * the first part isn't dispatched to the underlying
+ * disks yet, the barrier is never released, hence
+ * raise_barrier will always wait. We have a deadlock.
+ * Note, this only happens in the read path. For the
+ * write path, the first part of the bio is dispatched
+ * in a schedule() call (because of blk plug) or
+ * offloaded to raid1d.
+ * Returning from the function immediately can change
+ * the bio order queued in bio_list and avoid the
+ * deadlock.
+ */
+ if (split != bio) {
+ generic_make_request(bio);
+ break;
+ }
+ } else
raid1_write_request(mddev, split);
} while (split != bio);
}
@@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 063c43d83b72..e89a8d78a9ed 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
!conf->barrier ||
(atomic_read(&conf->nr_pending) &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1]))),
conf->resync_lock);
conf->nr_waiting--;
if (!conf->nr_waiting)
@@ -1477,11 +1478,24 @@ retry_write:
mbio->bi_bdev = (void*)rdev;
atomic_inc(&r10_bio->remaining);
+
+ cb = blk_check_plugged(raid10_unplug, mddev,
+ sizeof(*plug));
+ if (cb)
+ plug = container_of(cb, struct raid10_plug_cb,
+ cb);
+ else
+ plug = NULL;
spin_lock_irqsave(&conf->device_lock, flags);
- bio_list_add(&conf->pending_bio_list, mbio);
- conf->pending_count++;
+ if (plug) {
+ bio_list_add(&plug->pending, mbio);
+ plug->pending_cnt++;
+ } else {
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ }
spin_unlock_irqrestore(&conf->device_lock, flags);
- if (!mddev_check_plugged(mddev))
+ if (!plug)
md_wakeup_thread(mddev->thread);
}
}
@@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}
+ /*
+ * If a bio is split, the first part of the bio will pass
+ * the barrier, but the bio is queued in current->bio_list
+ * (see generic_make_request). If there is a raise_barrier()
+ * called here, the second part of the bio can't pass the
+ * barrier. But since the first part of the bio isn't
+ * dispatched to the underlying disks yet, the barrier is
+ * never released, hence raise_barrier will always wait. We
+ * have a deadlock.
+ * Note, this only happens in the read path. For the write
+ * path, the first part of the bio is dispatched in a
+ * schedule() call (because of the blk plug) or offloaded to
+ * raid10d.
+ * Returning from the function immediately can change the
+ * order of bios queued in bio_list and avoid the deadlock.
+ */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);
/* In case raid10d snuck in to freeze_array */
@@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, size);
- if (mddev->queue) {
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
- }
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > oldsize) {
mddev->recovery_cp = oldsize;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4fb09b3fcb41..ed5cd705b985 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
(test_bit(R5_Wantdrain, &dev->flags) ||
test_bit(R5_InJournal, &dev->flags))) ||
(srctype == SYNDROME_SRC_WRITTEN &&
- dev->written)) {
+ (dev->written ||
+ test_bit(R5_InJournal, &dev->flags)))) {
if (test_bit(R5_InJournal, &dev->flags))
srcs[slot] = sh->dev[i].orig_page;
else
@@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
return ret;
}
md_set_array_sectors(mddev, newsize);
- set_capacity(mddev->gendisk, mddev->array_sectors);
- revalidate_disk(mddev->gendisk);
if (sectors > mddev->dev_sectors &&
mddev->recovery_cp > mddev->dev_sectors) {
mddev->recovery_cp = mddev->dev_sectors;
diff --git a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
index 7a681d8202c7..4442e478db72 100644
--- a/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
+++ b/drivers/media/dvb-frontends/drx39xyj/drx_driver.h
@@ -256,8 +256,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* The actual DAP implementation may be restricted to only one of the modes.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the mode defined below.
-*
+* overrides or cannot handle the mode defined below.
*/
#ifndef DRXDAP_SINGLE_MASTER
#define DRXDAP_SINGLE_MASTER 1
@@ -272,7 +271,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* This maximum size may be restricted by the actual DAP implementation.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
+* overrides or cannot handle the chunksize defined below.
*
* Beware that the DAP uses DRXDAP_MAX_WCHUNKSIZE to create a temporary data
* buffer. Do not undefine or choose too large, unless your system is able to
@@ -292,8 +291,7 @@ int drxbsp_tuner_default_i2c_write_read(struct tuner_instance *tuner,
*
* This maximum size may be restricted by the actual DAP implementation.
* A compiler warning or error will be generated if the DAP implementation
-* overides or cannot handle the chunksize defined below.
-*
+* overrides or cannot handle the chunksize defined below.
*/
#ifndef DRXDAP_MAX_RCHUNKSIZE
#define DRXDAP_MAX_RCHUNKSIZE 60
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index b4b583f7137a..b4c0f10fc3b0 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -54,12 +54,11 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
/**
* vsp1_du_setup_lif - Setup the output part of the VSP pipeline
* @dev: the VSP device
- * @width: output frame width in pixels
- * @height: output frame height in pixels
+ * @cfg: the LIF configuration
*
- * Configure the output part of VSP DRM pipeline for the given frame @width and
- * @height. This sets up formats on the BRU source pad, the WPF0 sink and source
- * pads, and the LIF sink pad.
+ * Configure the output part of VSP DRM pipeline for the given frame @cfg.width
+ * and @cfg.height. This sets up formats on the BRU source pad, the WPF0 sink
+ * and source pads, and the LIF sink pad.
*
* As the media bus code on the BRU source pad is conditioned by the
* configuration of the BRU sink 0 pad, we also set up the formats on all BRU
@@ -69,8 +68,7 @@ EXPORT_SYMBOL_GPL(vsp1_du_init);
*
* Return 0 on success or a negative error code on failure.
*/
-int vsp1_du_setup_lif(struct device *dev, unsigned int width,
- unsigned int height)
+int vsp1_du_setup_lif(struct device *dev, const struct vsp1_du_lif_config *cfg)
{
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
struct vsp1_pipeline *pipe = &vsp1->drm->pipe;
@@ -79,11 +77,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
unsigned int i;
int ret;
- dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
- __func__, width, height);
-
- if (width == 0 || height == 0) {
- /* Zero width or height means the CRTC is being disabled, stop
+ if (!cfg) {
+ /* NULL configuration means the CRTC is being disabled, stop
* the pipeline and turn the light off.
*/
ret = vsp1_pipeline_stop(pipe);
@@ -108,6 +103,9 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
return 0;
}
+ dev_dbg(vsp1->dev, "%s: configuring LIF with format %ux%u\n",
+ __func__, cfg->width, cfg->height);
+
/* Configure the format at the BRU sinks and propagate it through the
* pipeline.
*/
@@ -117,8 +115,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
for (i = 0; i < bru->entity.source_pad; ++i) {
format.pad = i;
- format.format.width = width;
- format.format.height = height;
+ format.format.width = cfg->width;
+ format.format.height = cfg->height;
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
@@ -133,8 +131,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
}
format.pad = bru->entity.source_pad;
- format.format.width = width;
- format.format.height = height;
+ format.format.width = cfg->width;
+ format.format.height = cfg->height;
format.format.code = MEDIA_BUS_FMT_ARGB8888_1X32;
format.format.field = V4L2_FIELD_NONE;
@@ -180,7 +178,8 @@ int vsp1_du_setup_lif(struct device *dev, unsigned int width,
/* Verify that the format at the output of the pipeline matches the
* requested frame size and media bus code.
*/
- if (format.format.width != width || format.format.height != height ||
+ if (format.format.width != cfg->width ||
+ format.format.height != cfg->height ||
format.format.code != MEDIA_BUS_FMT_ARGB8888_1X32) {
dev_dbg(vsp1->dev, "%s: format mismatch\n", __func__);
return -EPIPE;
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 393dccaabdd0..1688893a65bb 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -436,6 +436,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
return -ERESTARTSYS;
ir = irctls[iminor(inode)];
+ mutex_unlock(&lirc_dev_lock);
+
if (!ir) {
retval = -ENODEV;
goto error;
@@ -476,8 +478,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
}
error:
- mutex_unlock(&lirc_dev_lock);
-
nonseekable_open(inode, file);
return retval;
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index b109f8246b96..ec4b25bd2ec2 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -176,12 +176,13 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
{
u8 tolerance, config;
struct nvt_dev *nvt = dev->priv;
+ unsigned long flags;
int i;
/* hardcode the tolerance to 10% */
tolerance = DIV_ROUND_UP(count, 10);
- spin_lock(&nvt->lock);
+ spin_lock_irqsave(&nvt->lock, flags);
nvt_clear_cir_wake_fifo(nvt);
nvt_cir_wake_reg_write(nvt, count, CIR_WAKE_FIFO_CMP_DEEP);
@@ -203,7 +204,7 @@ static void nvt_write_wakeup_codes(struct rc_dev *dev,
nvt_cir_wake_reg_write(nvt, config, CIR_WAKE_IRCON);
- spin_unlock(&nvt->lock);
+ spin_unlock_irqrestore(&nvt->lock, flags);
}
static ssize_t wakeup_data_show(struct device *dev,
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 2424946740e6..d84533699668 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1663,6 +1663,7 @@ static int rc_setup_rx_device(struct rc_dev *dev)
{
int rc;
struct rc_map *rc_map;
+ u64 rc_type;
if (!dev->map_name)
return -EINVAL;
@@ -1677,15 +1678,18 @@ static int rc_setup_rx_device(struct rc_dev *dev)
if (rc)
return rc;
- if (dev->change_protocol) {
- u64 rc_type = (1ll << rc_map->rc_type);
+ rc_type = BIT_ULL(rc_map->rc_type);
+ if (dev->change_protocol) {
rc = dev->change_protocol(dev, &rc_type);
if (rc < 0)
goto out_table;
dev->enabled_protocols = rc_type;
}
+ if (dev->driver_type == RC_DRIVER_IR_RAW)
+ ir_raw_load_modules(&rc_type);
+
set_bit(EV_KEY, dev->input_dev->evbit);
set_bit(EV_REP, dev->input_dev->evbit);
set_bit(EV_MSC, dev->input_dev->evbit);
@@ -1777,12 +1781,6 @@ int rc_register_device(struct rc_dev *dev)
dev->input_name ?: "Unspecified device", path ?: "N/A");
kfree(path);
- if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
- rc = rc_setup_rx_device(dev);
- if (rc)
- goto out_dev;
- }
-
if (dev->driver_type == RC_DRIVER_IR_RAW ||
dev->driver_type == RC_DRIVER_IR_RAW_TX) {
if (!raw_init) {
@@ -1791,7 +1789,13 @@ int rc_register_device(struct rc_dev *dev)
}
rc = ir_raw_event_register(dev);
if (rc < 0)
- goto out_rx;
+ goto out_dev;
+ }
+
+ if (dev->driver_type != RC_DRIVER_IR_RAW_TX) {
+ rc = rc_setup_rx_device(dev);
+ if (rc)
+ goto out_raw;
}
/* Allow the RC sysfs nodes to be accessible */
@@ -1803,8 +1807,8 @@ int rc_register_device(struct rc_dev *dev)
return 0;
-out_rx:
- rc_free_rx_device(dev);
+out_raw:
+ ir_raw_event_unregister(dev);
out_dev:
device_del(&dev->dev);
out_unlock:
diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
index 923fb2299553..41b54e40176c 100644
--- a/drivers/media/rc/serial_ir.c
+++ b/drivers/media/rc/serial_ir.c
@@ -487,10 +487,69 @@ static void serial_ir_timeout(unsigned long arg)
ir_raw_event_handle(serial_ir.rcdev);
}
+/* Needed by serial_ir_probe() */
+static int serial_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
+ unsigned int count);
+static int serial_ir_tx_duty_cycle(struct rc_dev *dev, u32 cycle);
+static int serial_ir_tx_carrier(struct rc_dev *dev, u32 carrier);
+static int serial_ir_open(struct rc_dev *rcdev);
+static void serial_ir_close(struct rc_dev *rcdev);
+
static int serial_ir_probe(struct platform_device *dev)
{
+ struct rc_dev *rcdev;
int i, nlow, nhigh, result;
+ rcdev = devm_rc_allocate_device(&dev->dev, RC_DRIVER_IR_RAW);
+ if (!rcdev)
+ return -ENOMEM;
+
+ if (hardware[type].send_pulse && hardware[type].send_space)
+ rcdev->tx_ir = serial_ir_tx;
+ if (hardware[type].set_send_carrier)
+ rcdev->s_tx_carrier = serial_ir_tx_carrier;
+ if (hardware[type].set_duty_cycle)
+ rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
+
+ switch (type) {
+ case IR_HOMEBREW:
+ rcdev->input_name = "Serial IR type home-brew";
+ break;
+ case IR_IRDEO:
+ rcdev->input_name = "Serial IR type IRdeo";
+ break;
+ case IR_IRDEO_REMOTE:
+ rcdev->input_name = "Serial IR type IRdeo remote";
+ break;
+ case IR_ANIMAX:
+ rcdev->input_name = "Serial IR type AnimaX";
+ break;
+ case IR_IGOR:
+ rcdev->input_name = "Serial IR type IgorPlug";
+ break;
+ }
+
+ rcdev->input_phys = KBUILD_MODNAME "/input0";
+ rcdev->input_id.bustype = BUS_HOST;
+ rcdev->input_id.vendor = 0x0001;
+ rcdev->input_id.product = 0x0001;
+ rcdev->input_id.version = 0x0100;
+ rcdev->open = serial_ir_open;
+ rcdev->close = serial_ir_close;
+ rcdev->dev.parent = &serial_ir.pdev->dev;
+ rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
+ rcdev->driver_name = KBUILD_MODNAME;
+ rcdev->map_name = RC_MAP_RC6_MCE;
+ rcdev->min_timeout = 1;
+ rcdev->timeout = IR_DEFAULT_TIMEOUT;
+ rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
+ rcdev->rx_resolution = 250000;
+
+ serial_ir.rcdev = rcdev;
+
+ setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
+ (unsigned long)&serial_ir);
+
result = devm_request_irq(&dev->dev, irq, serial_ir_irq_handler,
share_irq ? IRQF_SHARED : 0,
KBUILD_MODNAME, &hardware);
@@ -516,9 +575,6 @@ static int serial_ir_probe(struct platform_device *dev)
return -EBUSY;
}
- setup_timer(&serial_ir.timeout_timer, serial_ir_timeout,
- (unsigned long)&serial_ir);
-
result = hardware_init_port();
if (result < 0)
return result;
@@ -552,7 +608,8 @@ static int serial_ir_probe(struct platform_device *dev)
sense ? "low" : "high");
dev_dbg(&dev->dev, "Interrupt %d, port %04x obtained\n", irq, io);
- return 0;
+
+ return devm_rc_register_device(&dev->dev, rcdev);
}
static int serial_ir_open(struct rc_dev *rcdev)
@@ -723,7 +780,6 @@ static void serial_ir_exit(void)
static int __init serial_ir_init_module(void)
{
- struct rc_dev *rcdev;
int result;
switch (type) {
@@ -754,63 +810,9 @@ static int __init serial_ir_init_module(void)
sense = !!sense;
result = serial_ir_init();
- if (result)
- return result;
-
- rcdev = devm_rc_allocate_device(&serial_ir.pdev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev) {
- result = -ENOMEM;
- goto serial_cleanup;
- }
-
- if (hardware[type].send_pulse && hardware[type].send_space)
- rcdev->tx_ir = serial_ir_tx;
- if (hardware[type].set_send_carrier)
- rcdev->s_tx_carrier = serial_ir_tx_carrier;
- if (hardware[type].set_duty_cycle)
- rcdev->s_tx_duty_cycle = serial_ir_tx_duty_cycle;
-
- switch (type) {
- case IR_HOMEBREW:
- rcdev->input_name = "Serial IR type home-brew";
- break;
- case IR_IRDEO:
- rcdev->input_name = "Serial IR type IRdeo";
- break;
- case IR_IRDEO_REMOTE:
- rcdev->input_name = "Serial IR type IRdeo remote";
- break;
- case IR_ANIMAX:
- rcdev->input_name = "Serial IR type AnimaX";
- break;
- case IR_IGOR:
- rcdev->input_name = "Serial IR type IgorPlug";
- break;
- }
-
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->open = serial_ir_open;
- rcdev->close = serial_ir_close;
- rcdev->dev.parent = &serial_ir.pdev->dev;
- rcdev->allowed_protocols = RC_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->min_timeout = 1;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT;
- rcdev->rx_resolution = 250000;
-
- serial_ir.rcdev = rcdev;
-
- result = rc_register_device(rcdev);
-
if (!result)
return 0;
-serial_cleanup:
+
serial_ir_exit();
return result;
}
@@ -818,7 +820,6 @@ serial_cleanup:
static void __exit serial_ir_exit_module(void)
{
del_timer_sync(&serial_ir.timeout_timer);
- rc_unregister_device(serial_ir.rcdev);
serial_ir_exit();
}
diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
index 6ca502d834b4..4f42d57f81d9 100644
--- a/drivers/media/usb/dvb-usb/dw2102.c
+++ b/drivers/media/usb/dvb-usb/dw2102.c
@@ -68,6 +68,7 @@
struct dw2102_state {
u8 initialized;
u8 last_lock;
+ u8 data[MAX_XFER_SIZE + 4];
struct i2c_client *i2c_client_demod;
struct i2c_client *i2c_client_tuner;
@@ -661,62 +662,72 @@ static int su3000_i2c_transfer(struct i2c_adapter *adap, struct i2c_msg msg[],
int num)
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
- u8 obuf[0x40], ibuf[0x40];
+ struct dw2102_state *state;
if (!d)
return -ENODEV;
+
+ state = d->priv;
+
if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;
+ if (mutex_lock_interruptible(&d->data_mutex) < 0) {
+ mutex_unlock(&d->i2c_mutex);
+ return -EAGAIN;
+ }
switch (num) {
case 1:
switch (msg[0].addr) {
case SU3000_STREAM_CTRL:
- obuf[0] = msg[0].buf[0] + 0x36;
- obuf[1] = 3;
- obuf[2] = 0;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 0, 0) < 0)
+ state->data[0] = msg[0].buf[0] + 0x36;
+ state->data[1] = 3;
+ state->data[2] = 0;
+ if (dvb_usb_generic_rw(d, state->data, 3,
+ state->data, 0, 0) < 0)
err("i2c transfer failed.");
break;
case DW2102_RC_QUERY:
- obuf[0] = 0x10;
- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 2, 0) < 0)
+ state->data[0] = 0x10;
+ if (dvb_usb_generic_rw(d, state->data, 1,
+ state->data, 2, 0) < 0)
err("i2c transfer failed.");
- msg[0].buf[1] = ibuf[0];
- msg[0].buf[0] = ibuf[1];
+ msg[0].buf[1] = state->data[0];
+ msg[0].buf[0] = state->data[1];
break;
default:
/* always i2c write*/
- obuf[0] = 0x08;
- obuf[1] = msg[0].addr;
- obuf[2] = msg[0].len;
+ state->data[0] = 0x08;
+ state->data[1] = msg[0].addr;
+ state->data[2] = msg[0].len;
- memcpy(&obuf[3], msg[0].buf, msg[0].len);
+ memcpy(&state->data[3], msg[0].buf, msg[0].len);
- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 3,
- ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 3,
+ state->data, 1, 0) < 0)
err("i2c transfer failed.");
}
break;
case 2:
/* always i2c read */
- obuf[0] = 0x09;
- obuf[1] = msg[0].len;
- obuf[2] = msg[1].len;
- obuf[3] = msg[0].addr;
- memcpy(&obuf[4], msg[0].buf, msg[0].len);
-
- if (dvb_usb_generic_rw(d, obuf, msg[0].len + 4,
- ibuf, msg[1].len + 1, 0) < 0)
+ state->data[0] = 0x09;
+ state->data[1] = msg[0].len;
+ state->data[2] = msg[1].len;
+ state->data[3] = msg[0].addr;
+ memcpy(&state->data[4], msg[0].buf, msg[0].len);
+
+ if (dvb_usb_generic_rw(d, state->data, msg[0].len + 4,
+ state->data, msg[1].len + 1, 0) < 0)
err("i2c transfer failed.");
- memcpy(msg[1].buf, &ibuf[1], msg[1].len);
+ memcpy(msg[1].buf, &state->data[1], msg[1].len);
break;
default:
warn("more than 2 i2c messages at a time is not handled yet.");
break;
}
+ mutex_unlock(&d->data_mutex);
mutex_unlock(&d->i2c_mutex);
return num;
}
@@ -844,17 +855,23 @@ static int su3000_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff)
static int su3000_power_ctrl(struct dvb_usb_device *d, int i)
{
struct dw2102_state *state = (struct dw2102_state *)d->priv;
- u8 obuf[] = {0xde, 0};
+ int ret = 0;
info("%s: %d, initialized %d", __func__, i, state->initialized);
if (i && !state->initialized) {
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xde;
+ state->data[1] = 0;
+
state->initialized = 1;
/* reset board */
- return dvb_usb_generic_rw(d, obuf, 2, NULL, 0, 0);
+ ret = dvb_usb_generic_rw(d, state->data, 2, NULL, 0, 0);
+ mutex_unlock(&d->data_mutex);
}
- return 0;
+ return ret;
}
static int su3000_read_mac_address(struct dvb_usb_device *d, u8 mac[6])
@@ -1309,49 +1326,57 @@ static int prof_7500_frontend_attach(struct dvb_usb_adapter *d)
return 0;
}
-static int su3000_frontend_attach(struct dvb_usb_adapter *d)
+static int su3000_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[3] = { 0xe, 0x80, 0 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x02;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x02;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
- &d->dev->i2c_adap);
- if (d->fe_adap[0].fe == NULL)
+ mutex_unlock(&d->data_mutex);
+
+ adap->fe_adap[0].fe = dvb_attach(ds3000_attach, &su3000_ds3000_config,
+ &d->i2c_adap);
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
&dw2104_ts2020_config,
- &d->dev->i2c_adap)) {
+ &d->i2c_adap)) {
info("Attached DS3000/TS2020!");
return 0;
}
@@ -1360,47 +1385,55 @@ static int su3000_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
-static int t220_frontend_attach(struct dvb_usb_adapter *d)
+static int t220_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[3] = { 0xe, 0x87, 0 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ state->data[0] = 0xe;
+ state->data[1] = 0x87;
+ state->data[2] = 0x0;
+
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x86;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x86;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x80;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(50);
- obuf[0] = 0xe;
- obuf[1] = 0x80;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d->dev, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
- &d->dev->i2c_adap, NULL);
- if (d->fe_adap[0].fe != NULL) {
- if (dvb_attach(tda18271_attach, d->fe_adap[0].fe, 0x60,
- &d->dev->i2c_adap, &tda18271_config)) {
+ mutex_unlock(&d->data_mutex);
+
+ adap->fe_adap[0].fe = dvb_attach(cxd2820r_attach, &cxd2820r_config,
+ &d->i2c_adap, NULL);
+ if (adap->fe_adap[0].fe != NULL) {
+ if (dvb_attach(tda18271_attach, adap->fe_adap[0].fe, 0x60,
+ &d->i2c_adap, &tda18271_config)) {
info("Attached TDA18271HD/CXD2820R!");
return 0;
}
@@ -1410,23 +1443,30 @@ static int t220_frontend_attach(struct dvb_usb_adapter *d)
return -EIO;
}
-static int m88rs2000_frontend_attach(struct dvb_usb_adapter *d)
+static int m88rs2000_frontend_attach(struct dvb_usb_adapter *adap)
{
- u8 obuf[] = { 0x51 };
- u8 ibuf[] = { 0 };
+ struct dvb_usb_device *d = adap->dev;
+ struct dw2102_state *state = d->priv;
+
+ mutex_lock(&d->data_mutex);
- if (dvb_usb_generic_rw(d->dev, obuf, 1, ibuf, 1, 0) < 0)
+ state->data[0] = 0x51;
+
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
- d->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &s421_m88rs2000_config,
- &d->dev->i2c_adap);
+ mutex_unlock(&d->data_mutex);
- if (d->fe_adap[0].fe == NULL)
+ adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach,
+ &s421_m88rs2000_config,
+ &d->i2c_adap);
+
+ if (adap->fe_adap[0].fe == NULL)
return -EIO;
- if (dvb_attach(ts2020_attach, d->fe_adap[0].fe,
+ if (dvb_attach(ts2020_attach, adap->fe_adap[0].fe,
&dw2104_ts2020_config,
- &d->dev->i2c_adap)) {
+ &d->i2c_adap)) {
info("Attached RS2000/TS2020!");
return 0;
}
@@ -1439,44 +1479,50 @@ static int tt_s2_4600_frontend_attach(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap->dev;
struct dw2102_state *state = d->priv;
- u8 obuf[3] = { 0xe, 0x80, 0 };
- u8 ibuf[] = { 0 };
struct i2c_adapter *i2c_adapter;
struct i2c_client *client;
struct i2c_board_info board_info;
struct m88ds3103_platform_data m88ds3103_pdata = {};
struct ts2020_config ts2020_config = {};
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ mutex_lock(&d->data_mutex);
+
+ state->data[0] = 0xe;
+ state->data[1] = 0x80;
+ state->data[2] = 0x0;
+
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x02;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x02;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
msleep(300);
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 0;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 0;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0xe;
- obuf[1] = 0x83;
- obuf[2] = 1;
+ state->data[0] = 0xe;
+ state->data[1] = 0x83;
+ state->data[2] = 1;
- if (dvb_usb_generic_rw(d, obuf, 3, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0) < 0)
err("command 0x0e transfer failed.");
- obuf[0] = 0x51;
+ state->data[0] = 0x51;
- if (dvb_usb_generic_rw(d, obuf, 1, ibuf, 1, 0) < 0)
+ if (dvb_usb_generic_rw(d, state->data, 1, state->data, 1, 0) < 0)
err("command 0x51 transfer failed.");
+ mutex_unlock(&d->data_mutex);
+
/* attach demod */
m88ds3103_pdata.clk = 27000000;
m88ds3103_pdata.i2c_wr_max = 33;
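The repeated state->data sequences in the dw2102.c hunks all follow one pattern: the command buffer lives in the device state (an on-stack array is not suitable for USB transfers, which may be DMA-mapped), and every user serializes on d->data_mutex. A sketch of how that boilerplate could be expressed once, with a hypothetical my_send_cmd() helper:

static int my_send_cmd(struct dvb_usb_device *d, u8 cmd, u8 arg0, u8 arg1)
{
	struct dw2102_state *state = d->priv;
	int ret;

	/* serialize access to the shared, DMA-capable buffer */
	mutex_lock(&d->data_mutex);

	state->data[0] = cmd;
	state->data[1] = arg0;
	state->data[2] = arg1;

	ret = dvb_usb_generic_rw(d, state->data, 3, state->data, 1, 0);

	mutex_unlock(&d->data_mutex);
	return ret;
}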
diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 6fb773dbcd0c..93be82fc338a 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -219,15 +219,20 @@ static int atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr,
int write, unsigned long *paddr, int *pageshift)
{
pgd_t *pgdp;
- pmd_t *pmdp;
+ p4d_t *p4dp;
pud_t *pudp;
+ pmd_t *pmdp;
pte_t pte;
pgdp = pgd_offset(vma->vm_mm, vaddr);
if (unlikely(pgd_none(*pgdp)))
goto err;
- pudp = pud_offset(pgdp, vaddr);
+ p4dp = p4d_offset(pgdp, vaddr);
+ if (unlikely(p4d_none(*p4dp)))
+ goto err;
+
+ pudp = pud_offset(p4dp, vaddr);
if (unlikely(pud_none(*pudp)))
goto err;
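The grufault.c hunk adds the p4d level introduced for 5-level page tables; on kernels configured with fewer levels, p4d_offset() folds back to the pgd entry, so the same walk serves both layouts. A sketch of the full descent, assuming a hypothetical walk_to_pte() helper:

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long vaddr)
{
	pgd_t *pgd = pgd_offset(mm, vaddr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, vaddr);	/* no-op fold on <5-level configs */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, vaddr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, vaddr);
}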
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 1ae872bfc3ba..747645c74134 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -186,7 +186,7 @@ static inline int write_enable(struct spi_nor *nor)
}
/*
- * Send write disble instruction to the chip.
+ * Send write disable instruction to the chip.
*/
static inline int write_disable(struct spi_nor *nor)
{
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 248f60d171a5..ffea9859f5a7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
processed = xgbe_rx_poll(channel, budget);
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
if (pdata->channel_irq_mode)
xgbe_enable_rx_tx_int(pdata, channel);
@@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
} while ((processed < budget) && (processed != last_processed));
/* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
+ if ((processed < budget) && napi_complete_done(napi, processed)) {
/* Enable Tx and Rx interrupts */
xgbe_enable_rx_tx_ints(pdata);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 581de71a958a..4c6c882c6a1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
if (!((1U << i) & self->msix_entry_mask))
continue;
- free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
if (pdev->msix_enabled)
irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
+ free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
self->msix_entry_mask &= ~(1U << i);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d8d06fdfc42b..ac76fc251d26 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
- /* VF with OLD Hypervisor or old PF do not support filtering */
if (IS_PF(bp)) {
if (chip_is_e1x)
bp->accept_any_vlan = true;
else
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#ifdef CONFIG_BNX2X_SRIOV
- } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-#endif
}
+ /* For VF we'll know whether to enable VLAN filtering after
+ * getting a response to CHANNEL_TLV_ACQUIRE from PF.
+ */
dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
dev->features |= NETIF_F_HIGHDMA;
@@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
if (!netif_running(bp->dev)) {
DP(BNX2X_MSG_PTP,
"PTP adjfreq called while the interface is down\n");
- return -EFAULT;
+ return -ENETDOWN;
}
if (ppb < 0) {
@@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP adjtime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
timecounter_adjtime(&bp->timecounter, delta);
@@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP gettime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timecounter_read(&bp->timecounter);
DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
@@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
u64 ns;
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_PTP,
+ "PTP settime called while the interface is down\n");
+ return -ENETDOWN;
+ }
+
ns = timespec64_to_ns(ts);
DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
@@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
goto init_one_freemem;
+
+#ifdef CONFIG_BNX2X_SRIOV
+ /* VF with OLD Hypervisor or old PF do not support filtering */
+ if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+#endif
}
/* Enable SRIOV if capability found in configuration space */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fad22adbbb9..bdfd53b46bc5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
/* Add/Remove the filter */
rc = bnx2x_config_vlan_mac(bp, &ramrod);
- if (rc && rc != -EEXIST) {
+ if (rc == -EEXIST)
+ return 0;
+ if (rc) {
BNX2X_ERR("Failed to %s %s\n",
filter->add ? "add" : "delete",
(filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
@@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
return rc;
}
+ filter->applied = true;
+
return 0;
}
@@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* Rollback if needed */
if (i != filters->count) {
BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
- i, filters->count + 1);
+ i, filters->count);
while (--i >= 0) {
+ if (!filters->filters[i].applied)
+ continue;
filters->filters[i].add = !filters->filters[i].add;
bnx2x_vf_mac_vlan_config(bp, vf, qid,
&filters->filters[i],
@@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
continue;
}
- DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "add addresses for vf %d\n", vf->abs_vfid);
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
@@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
cpu_to_le32(U64_LO(q_stats_addr));
- DP(BNX2X_MSG_IOV,
- "added address %x %x for vf %d queue %d client %d\n",
- cur_query_entry->address.hi,
- cur_query_entry->address.lo, cur_query_entry->funcID,
- j, cur_query_entry->index);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "added address %x %x for vf %d queue %d client %d\n",
+ cur_query_entry->address.hi,
+ cur_query_entry->address.lo,
+ cur_query_entry->funcID,
+ j, cur_query_entry->index);
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 7a6d406f4c11..888d0b6632e8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
(BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
bool add;
+ bool applied;
u8 *mac;
u16 vid;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index bfae300cf25f..76a4668c50fe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- int rc, i = 0;
+ int rc = 0, i = 0;
struct netdev_hw_addr *ha;
if (bp->state != BNX2X_STATE_OPEN) {
@@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
/* Get Rx mode requested */
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
+ /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
+ if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
+ DP(NETIF_MSG_IFUP,
+ "VF supports not more than %d multicast MAC addresses\n",
+ PFVF_MAX_MULTICAST_PER_VF);
+ rc = -EINVAL;
+ goto out;
+ }
+
netdev_for_each_mc_addr(ha, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
bnx2x_mc_addr(ha));
@@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
i++;
}
- /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
- * addresses tops
- */
- if (i >= PFVF_MAX_MULTICAST_PER_VF) {
- DP(NETIF_MSG_IFUP,
- "VF supports not more than %d multicast MAC addresses\n",
- PFVF_MAX_MULTICAST_PER_VF);
- return -EINVAL;
- }
-
req->n_multicast = i;
req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
req->vf_qid = 0;
@@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
- return 0;
+ return rc;
}
/* request pf to add a vlan for the vf */
@@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
goto op_err;
}
+ /* build vlan list */
+ fl = NULL;
+
+ rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
+ VFPF_VLAN_FILTER);
+ if (rc)
+ goto op_err;
+
+ if (fl) {
+ /* set vlan list */
+ rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
+ msg->vf_qid,
+ false);
+ if (rc)
+ goto op_err;
+ }
+
}
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 235733e91c79..32de4589d16a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
+ if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
+ bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
+
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
@@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
}
- link_info->support_auto_speeds =
- le16_to_cpu(resp->supported_speeds_auto_mode);
+ if (resp->supported_speeds_auto_mode)
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
hwrm_phy_qcaps_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
+ int rc;
+
+ if (!silent)
+ bnxt_ulp_stop(bp);
bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
+ rc = bnxt_open_nic(bp, false, false);
+ if (!silent && !rc)
+ bnxt_ulp_start(bp);
}
}
@@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc)
+ goto init_err_pci_clean;
+
bnxt_hwrm_fw_set_time(bp);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
@@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_pci_clean;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc)
- goto init_err_pci_clean;
-
rc = bnxt_init_int_mode(bp);
if (rc)
goto init_err_pci_clean;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index faf26a2f726b..c7a5b84a5cb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -993,6 +993,7 @@ struct bnxt {
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_RX_PAGE_MODE 0x40000
+ #define BNXT_FLAG_FW_LLDP_AGENT 0x80000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index fdf2d8caf7bf..03532061d211 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
return;
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
- if (BNXT_PF(bp))
+ if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
else
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index f92896835d2a..69015fa50f20 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1,7 +1,7 @@
/*
* Broadcom GENET (Gigabit Ethernet) controller driver
*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
+static int bcmgenet_begin(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn on the clock */
+ return clk_prepare_enable(priv->clk);
+}
+
+static void bcmgenet_complete(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ /* Turn off the clock */
+ clk_disable_unprepare(priv->clk);
+}
+
static int bcmgenet_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
@@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
/* Misc UniMAC counters */
STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
- UMAC_RBUF_OVFL_CNT),
- STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ UMAC_RBUF_OVFL_CNT_V1),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
+ UMAC_RBUF_ERR_CNT_V1),
STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
@@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
}
}
+static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
+{
+ u16 new_offset;
+ u32 val;
+
+ switch (offset) {
+ case UMAC_RBUF_OVFL_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_OVFL_CNT_V2;
+ else
+ new_offset = RBUF_OVFL_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ case UMAC_RBUF_ERR_CNT_V1:
+ if (GENET_IS_V2(priv))
+ new_offset = RBUF_ERR_CNT_V2;
+ else
+ new_offset = RBUF_ERR_CNT_V3PLUS;
+
+ val = bcmgenet_rbuf_readl(priv, new_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_rbuf_writel(priv, 0, new_offset);
+ break;
+ default:
+ val = bcmgenet_umac_readl(priv, offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, offset);
+ break;
+ }
+
+ return val;
+}
+
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
int i, j = 0;
@@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
case BCMGENET_STAT_NETDEV:
case BCMGENET_STAT_SOFT:
continue;
- case BCMGENET_STAT_MIB_RX:
- case BCMGENET_STAT_MIB_TX:
case BCMGENET_STAT_RUNT:
- if (s->type != BCMGENET_STAT_MIB_RX)
- offset = BCMGENET_STAT_OFFSET;
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_TX:
+ offset += BCMGENET_STAT_OFFSET;
+ /* fall through */
+ case BCMGENET_STAT_MIB_RX:
val = bcmgenet_umac_readl(priv,
UMAC_MIB_START + j + offset);
+ offset = 0; /* Reset Offset */
break;
case BCMGENET_STAT_MISC:
- val = bcmgenet_umac_readl(priv, s->reg_offset);
- /* clear if overflowed */
- if (val == ~0)
- bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ if (GENET_IS_V1(priv)) {
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0,
+ s->reg_offset);
+ } else {
+ val = bcmgenet_update_stat_misc(priv,
+ s->reg_offset);
+ }
break;
}
@@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
+ .begin = bcmgenet_begin,
+ .complete = bcmgenet_complete,
.get_strings = bcmgenet_get_strings,
.get_sset_count = bcmgenet_get_sset_count,
.get_ethtool_stats = bcmgenet_get_ethtool_stats,
@@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
- struct netdev_queue *txq;
unsigned int pkts_compl = 0;
unsigned int bytes_compl = 0;
unsigned int c_index;
@@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
- txq = netdev_get_tx_queue(dev, ring->queue);
- netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
-
- if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- if (netif_tx_queue_stopped(txq))
- netif_tx_wake_queue(txq);
- }
+ netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
+ pkts_compl, bytes_compl);
return pkts_compl;
}
@@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
struct bcmgenet_tx_ring *ring =
container_of(napi, struct bcmgenet_tx_ring, napi);
unsigned int work_done = 0;
+ struct netdev_queue *txq;
+ unsigned long flags;
- work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ spin_lock_irqsave(&ring->lock, flags);
+ work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
+ txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
+ netif_tx_wake_queue(txq);
+ }
+ spin_unlock_irqrestore(&ring->lock, flags);
if (work_done == 0) {
napi_complete(napi);
@@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
+ unsigned long flags;
+ unsigned int status;
struct bcmgenet_priv *priv = container_of(
work, struct bcmgenet_priv, bcmgenet_irq_work);
netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
- if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
- priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+ spin_lock_irqsave(&priv->lock, flags);
+ status = priv->irq0_stat;
+ priv->irq0_stat = 0;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (status & UMAC_IRQ_MPD_R) {
netif_dbg(priv, wol, priv->dev,
"magic packet detected, waking up\n");
bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
}
/* Link UP/DOWN event */
- if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
+ if (status & UMAC_IRQ_LINK_EVENT)
phy_mac_interrupt(priv->phydev,
- !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
- priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
- }
+ !!(status & UMAC_IRQ_LINK_UP));
}
/* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
- unsigned int index;
+ unsigned int index, status;
- /* Save irq status for bottom-half processing. */
- priv->irq1_stat =
- bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ "%s: IRQ=0x%x\n", __func__, status);
/* Check Rx priority queue interrupts */
for (index = 0; index < priv->hw_params->rx_queues; index++) {
- if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
+ if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
continue;
rx_ring = &priv->rx_rings[index];
@@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
/* Check Tx priority queue interrupts */
for (index = 0; index < priv->hw_params->tx_queues; index++) {
- if (!(priv->irq1_stat & BIT(index)))
+ if (!(status & BIT(index)))
continue;
tx_ring = &priv->tx_rings[index];
@@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
struct bcmgenet_priv *priv = dev_id;
struct bcmgenet_rx_ring *rx_ring;
struct bcmgenet_tx_ring *tx_ring;
+ unsigned int status;
+ unsigned long flags;
- /* Save irq status for bottom-half processing. */
- priv->irq0_stat =
- bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ /* Read irq status */
+ status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
/* clear interrupts */
- bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
netif_dbg(priv, intr, priv->dev,
- "IRQ=0x%x\n", priv->irq0_stat);
+ "IRQ=0x%x\n", status);
- if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
+ if (status & UMAC_IRQ_RXDMA_DONE) {
rx_ring = &priv->rx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&rx_ring->napi))) {
@@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
+ if (status & UMAC_IRQ_TXDMA_DONE) {
tx_ring = &priv->tx_rings[DESC_INDEX];
if (likely(napi_schedule_prep(&tx_ring->napi))) {
@@ -2561,22 +2634,23 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
}
}
- if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
- UMAC_IRQ_PHY_DET_F |
- UMAC_IRQ_LINK_EVENT |
- UMAC_IRQ_HFB_SM |
- UMAC_IRQ_HFB_MM |
- UMAC_IRQ_MPD_R)) {
- /* all other interested interrupts handled in bottom half */
- schedule_work(&priv->bcmgenet_irq_work);
- }
-
if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
- priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
wake_up(&priv->wq);
}
+ /* all other interested interrupts handled in bottom half */
+ status &= (UMAC_IRQ_LINK_EVENT |
+ UMAC_IRQ_MPD_R);
+ if (status) {
+ /* Save irq status for bottom-half processing. */
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->irq0_stat |= status;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
return IRQ_HANDLED;
}
@@ -2801,6 +2875,8 @@ err_irq0:
err_fini_dma:
bcmgenet_fini_dma(priv);
err_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
@@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
*/
gphy_rev = reg & 0xffff;
+ /* This is reserved so should require special treatment */
+ if (gphy_rev == 0 || gphy_rev == 0x01ff) {
+ pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
+ return;
+ }
+
/* This is the good old scheme, just GPHY major, no minor nor patch */
if ((gphy_rev & 0xf0) != 0)
priv->gphy_rev = gphy_rev << 8;
@@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
else if ((gphy_rev & 0xff00) != 0)
priv->gphy_rev = gphy_rev;
- /* This is reserved so should require special treatment */
- else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
- pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
- return;
- }
-
#ifdef CONFIG_PHYS_ADDR_T_64BIT
if (!(params->flags & GENET_HAS_40BITS))
pr_warn("GENET does not support 40-bits PA\n");
@@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
const void *macaddr;
struct resource *r;
int err = -EIO;
+ const char *phy_mode_str;
/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
@@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
goto err;
}
+ spin_lock_init(&priv->lock);
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
@@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
priv->clk_eee = NULL;
}
+ /* If this is an internal GPHY, power it on now, before UniMAC is
+ * brought out of reset as absolutely no UniMAC activity is allowed
+ */
+ if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
+ !strcasecmp(phy_mode_str, "internal"))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
err = reset_umac(priv);
if (err)
goto err_clk_disable;
@@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
return 0;
out_clk_disable:
+ if (priv->internal_phy)
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
clk_disable_unprepare(priv->clk);
return ret;
}
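The bcmgenet changes replace the racy read-modify-write of priv->irq0_stat with a spinlock-protected handoff between the ISR and the work item: the top half accumulates status bits under the lock, and the bottom half atomically snapshots and clears them before acting. The shape of that pattern, with hypothetical my_* names and a read_and_clear_hw_status() placeholder for the INTRL2 register accesses:

static irqreturn_t my_isr(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;
	unsigned long flags;
	unsigned int status = read_and_clear_hw_status(priv);	/* hypothetical */

	if (status) {
		/* accumulate; the work item may not have run yet */
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->irq_work);
	}
	return IRQ_HANDLED;
}

static void my_irq_task(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv, irq_work);
	unsigned long flags;
	unsigned int status;

	/* snapshot and clear atomically so no event is lost */
	spin_lock_irqsave(&priv->lock, flags);
	status = priv->irq_stat;
	priv->irq_stat = 0;
	spin_unlock_irqrestore(&priv->lock, flags);

	if (status & MY_IRQ_LINK_EVENT)		/* hypothetical event bit */
		handle_link_event(priv, status);	/* hypothetical */
}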
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 1e2dc34d331a..db7f289d65ae 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Broadcom Corporation
+ * Copyright (c) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
#define MDIO_REG_SHIFT 16
#define MDIO_REG_MASK 0x1F
-#define UMAC_RBUF_OVFL_CNT 0x61C
+#define UMAC_RBUF_OVFL_CNT_V1 0x61C
+#define RBUF_OVFL_CNT_V2 0x80
+#define RBUF_OVFL_CNT_V3PLUS 0x94
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
@@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {
#define UMAC_MPD_PW_MS 0x624
#define UMAC_MPD_PW_LS 0x628
-#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_RBUF_ERR_CNT_V1 0x634
+#define RBUF_ERR_CNT_V2 0x84
+#define RBUF_ERR_CNT_V3PLUS 0x98
#define UMAC_MDF_ERR_CNT 0x638
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
@@ -619,11 +623,13 @@ struct bcmgenet_priv {
struct work_struct bcmgenet_irq_work;
int irq0;
int irq1;
- unsigned int irq0_stat;
- unsigned int irq1_stat;
int wol_irq;
bool wol_irq_disabled;
+ /* shared status */
+ spinlock_t lock;
+ unsigned int irq0_stat;
+
/* HW descriptors/checksum variables */
bool desc_64b_en;
bool desc_rxchk_en;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index be9c0e3f5ade..92f46b1375c3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -152,7 +152,7 @@ struct octnic_gather {
*/
struct octeon_sg_entry *sg;
- u64 sg_dma_ptr;
+ dma_addr_t sg_dma_ptr;
};
struct handshake {
@@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg) {
- dma_unmap_single(&lio->oct_dev->
- pci_dev->dev,
- g->sg_dma_ptr,
- g->sg_size,
- DMA_TO_DEVICE);
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- }
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
- kfree((void *)lio->glist);
- kfree((void *)lio->glist_lock);
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
+ kfree(lio->glist);
+ lio->glist = NULL;
}
/**
@@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
GFP_KERNEL);
if (!lio->glist) {
- kfree((void *)lio->glist_lock);
- return 1;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store the virtual and dma base addresses of
+ * the per-glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(oct,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc_node(sizeof(*g), GFP_KERNEL,
numa_node);
@@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc_node(g->sg_size + 8,
- GFP_KERNEL, numa_node);
- if (!g->sg)
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
-
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
- g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev,
- g->sg_dma_ptr)) {
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
list_add_tail(&g->list, &lio->glist[iq]);
@@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
- g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
- g->sg_size, DMA_TO_DEVICE);
dptr = g->sg_dma_ptr;
if (OCTEON_CN23XX_PF(oct))
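
The conversion above replaces per-gather-list kmalloc()/dma_map_single() pairs with one dma_alloc_coherent() region per input queue, carved into fixed-size 8-byte-aligned entries; that is also why the dma_sync_single_for_*() calls in the fast paths can simply be dropped. A sketch of the address arithmetic, using the field names introduced by the patch (the helper itself is hypothetical):

/* Sketch: entry j of queue i lives at the same offset in both the
 * virtual and the DMA view of the queue's coherent region, so the
 * gather list needs no per-packet mapping or syncing.
 */
static void lio_glist_entry_addrs(struct lio *lio, int i, int j,
                                  struct octnic_gather *g)
{
        u32 off = j * lio->glist_entry_size;

        g->sg = (struct octeon_sg_entry *)
                ((u8 *)lio->glists_virt_base[i] + off);
        g->sg_dma_ptr = lio->glists_dma_base[i] + off;
}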
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index 9d5e03502c76..7b83be4ce1fe 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -108,6 +108,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ dma_addr_t sg_dma_ptr;
};
struct octeon_device_priv {
@@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
struct octnic_gather *g;
int i;
+ kfree(lio->glist_lock);
+ lio->glist_lock = NULL;
+
if (!lio->glist)
return;
@@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
do {
g = (struct octnic_gather *)
list_delete_head(&lio->glist[i]);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
+ if (g)
kfree(g);
- }
} while (g);
+
+ if (lio->glists_virt_base && lio->glists_virt_base[i]) {
+ lio_dma_free(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ lio->glists_virt_base[i],
+ lio->glists_dma_base[i]);
+ }
}
+ kfree(lio->glists_virt_base);
+ lio->glists_virt_base = NULL;
+
+ kfree(lio->glists_dma_base);
+ lio->glists_dma_base = NULL;
+
kfree(lio->glist);
- kfree(lio->glist_lock);
+ lio->glist = NULL;
}
/**
@@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
lio->glist_lock =
kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
if (!lio->glist_lock)
- return 1;
+ return -ENOMEM;
lio->glist =
kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
if (!lio->glist) {
kfree(lio->glist_lock);
- return 1;
+ lio->glist_lock = NULL;
+ return -ENOMEM;
+ }
+
+ lio->glist_entry_size =
+ ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+
+ /* allocate memory to store the virtual and DMA base addresses of
+ * the per-glist consistent memory
+ */
+ lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
+ GFP_KERNEL);
+ lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
+ GFP_KERNEL);
+
+ if (!lio->glists_virt_base || !lio->glists_dma_base) {
+ delete_glists(lio);
+ return -ENOMEM;
}
for (i = 0; i < num_iqs; i++) {
@@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)
INIT_LIST_HEAD(&lio->glist[i]);
+ lio->glists_virt_base[i] =
+ lio_dma_alloc(lio->oct_dev,
+ lio->glist_entry_size * lio->tx_qsize,
+ &lio->glists_dma_base[i]);
+
+ if (!lio->glists_virt_base[i]) {
+ delete_glists(lio);
+ return -ENOMEM;
+ }
+
for (j = 0; j < lio->tx_qsize; j++) {
g = kzalloc(sizeof(*g), GFP_KERNEL);
if (!g)
break;
- g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
- OCT_SG_ENTRY_SIZE);
+ g->sg = lio->glists_virt_base[i] +
+ (j * lio->glist_entry_size);
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
- }
+ g->sg_dma_ptr = lio->glists_dma_base[i] +
+ (j * lio->glist_entry_size);
- /* The gather component should be aligned on 64-bit
- * boundary
- */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
- }
list_add_tail(&g->list, &lio->glist[i]);
}
if (j != lio->tx_qsize) {
delete_glists(lio);
- return 1;
+ return -ENOMEM;
}
}
@@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
-
iq = skb_iq(lio, skb);
spin_lock(&lio->glist_lock[iq]);
@@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
i++;
}
- dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- for (j = 1; j <= frags; j++) {
- frag = &skb_shinfo(skb)->frags[j - 1];
- dma_unmap_page(&oct->pci_dev->dev,
- g->sg[j >> 2].ptr[j & 3],
- frag->size, DMA_TO_DEVICE);
- }
- return NETDEV_TX_BUSY;
- }
+ dptr = g->sg_dma_ptr;
ndata.cmd.cmd3.dptr = dptr;
finfo->dptr = dptr;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index b3dc2e9651a8..d29ebc531151 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -71,17 +71,17 @@
#define CN23XX_MAX_RINGS_PER_VF 8
#define CN23XX_MAX_INPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_IQ_DESCRIPTORS 2048
+#define CN23XX_MAX_IQ_DESCRIPTORS 512
#define CN23XX_DB_MIN 1
#define CN23XX_DB_MAX 8
#define CN23XX_DB_TIMEOUT 1
#define CN23XX_MAX_OUTPUT_QUEUES CN23XX_MAX_RINGS_PER_PF
-#define CN23XX_MAX_OQ_DESCRIPTORS 2048
+#define CN23XX_MAX_OQ_DESCRIPTORS 512
#define CN23XX_OQ_BUF_SIZE 1536
#define CN23XX_OQ_PKTSPER_INTR 128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
-#define CN23XX_OQ_REFIL_THRESHOLD 128
+#define CN23XX_OQ_REFIL_THRESHOLD 16
#define CN23XX_OQ_INTR_PKT 64
#define CN23XX_OQ_INTR_TIME 100
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0be87d119a97..79f809479af6 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
recv_buffer_destroy(droq->recv_buf_list[i].buffer,
pg_info);
- if (droq->desc_ring && droq->desc_ring[i].info_ptr)
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
droq->recv_buf_list[i].buffer = NULL;
}
@@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
vfree(droq->recv_buf_list);
if (droq->info_base_addr)
- cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
- droq->info_alloc_size,
- droq->info_base_addr,
- droq->info_list_dma);
+ lio_free_info_buffer(oct, droq);
if (droq->desc_ring)
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
droq->max_count);
- droq->info_list =
- cnnic_numa_alloc_aligned_dma((droq->max_count *
- OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- numa_node);
+ droq->info_list = lio_alloc_info_buffer(oct, droq);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index e62074090681..6982c0af5ecc 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -325,10 +325,10 @@ struct octeon_droq {
size_t desc_ring_dma;
/** Info ptr list are allocated at this virtual address. */
- size_t info_base_addr;
+ void *info_base_addr;
/** DMA mapped address of the info list */
- size_t info_list_dma;
+ dma_addr_t info_list_dma;
/** Allocated size of info list. */
u32 info_alloc_size;
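
The type changes in this hunk are not cosmetic: on a 32-bit kernel with 64-bit DMA addressing, dma_addr_t is a u64 while size_t is 32 bits, so keeping a DMA handle in a size_t silently truncates its upper half. A sketch of the guard the old fields were implicitly relying on (illustrative only):

static inline void droq_addr_width_check(void)
{
        /* Illustrative only: the old size_t fields implicitly assumed
         * this, and it fails on 32-bit builds with a 64-bit dma_addr_t.
         */
        BUILD_BUG_ON(sizeof(size_t) < sizeof(dma_addr_t));
}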
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index aa36e9ae7676..bed9ef17bc26 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -140,48 +140,6 @@ err_release_region:
return 1;
}
-static inline void *
-cnnic_numa_alloc_aligned_dma(u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- int numa_node)
-{
- int retries = 0;
- void *ptr = NULL;
-
-#define OCTEON_MAX_ALLOC_RETRIES 1
- do {
- struct page *page = NULL;
-
- page = alloc_pages_node(numa_node,
- GFP_KERNEL,
- get_order(size));
- if (!page)
- page = alloc_pages(GFP_KERNEL,
- get_order(size));
- ptr = (void *)page_address(page);
- if ((unsigned long)ptr & 0x07) {
- __free_pages(page, get_order(size));
- ptr = NULL;
- /* Increment the size required if the first
- * attempt failed.
- */
- if (!retries)
- size += 7;
- }
- retries++;
- } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);
-
- *alloc_size = size;
- *orig_ptr = (unsigned long)ptr;
- if ((unsigned long)ptr & 0x07)
- ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
- return ptr;
-}
-
-#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
- free_pages(orig_ptr, get_order(size))
-
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index 6bb89419006e..eef2a1e8a7e3 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -62,6 +62,9 @@ struct lio {
/** Array of gather component linked lists */
struct list_head *glist;
+ void **glists_virt_base;
+ dma_addr_t *glists_dma_base;
+ u32 glist_entry_size;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
+static inline void *
+lio_alloc_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ void *virt_ptr;
+
+ virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
+ &droq->info_list_dma);
+ if (virt_ptr) {
+ droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
+ droq->info_base_addr = virt_ptr;
+ }
+
+ return virt_ptr;
+}
+
+static inline void lio_free_info_buffer(struct octeon_device *oct,
+ struct octeon_droq *droq)
+{
+ lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
+ droq->info_list_dma);
+}
+
static inline
void *get_rbd(struct sk_buff *skb)
{
@@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
- dma_addr_t dma_addr;
- struct octeon_device *oct = droq->oct_dev;
-
- dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
- OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
-
- WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
-
- return (u64)dma_addr;
-}
-
-static inline void
-lio_unmap_ring_info(struct pci_dev *pci_dev,
- u64 info_ptr, u32 size)
-{
- dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+ return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
}
static inline u64
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c7153562..2269ff562d95 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
#define MAX_QUEUES_PER_QSET 8
struct queue_set *qs;
struct nicvf_cq_poll *napi[8];
+ void *iommu_domain;
u8 vf_id;
u8 sqs_id;
bool sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24bcfd4..24017588f531 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
+#include <linux/iommu.h>
#include "nic_reg.h"
#include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
/* Get actual TSO descriptors and free them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+ hdr->subdesc_cnt);
}
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf *snic = nic;
int err = 0;
int rq_idx;
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
if (err && !cqe_rx->rb_cnt)
return;
- skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ skb = nicvf_get_rcv_skb(snic, cqe_rx);
if (!skb) {
netdev_dbg(nic->netdev, "Packet not received\n");
return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ /* Get iommu domain for iova to physical addr conversion */
+ nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
if (sdevid == 0xA134)
nic->t88 = true;
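
Caching the IOMMU domain at probe time is what lets the data path translate descriptor IOVAs back to physical addresses later on; without an attached domain, DMA addresses are identity-mapped and the lookup becomes a pass-through. A sketch of the page-recovery step this enables — the queue code further below adds nicvf_iova_to_phys() for exactly this (the helper name here is hypothetical):

/* Sketch: recover the struct page backing a receive buffer whose
 * descriptor holds an IOVA; returns NULL if there is no translation.
 */
static struct page *nicvf_desc_to_page(struct nicvf *nic, u64 buf_addr)
{
        phys_addr_t phys = buf_addr;

        if (nic->iommu_domain)
                phys = iommu_iova_to_phys(nic->iommu_domain, buf_addr);

        return phys ? virt_to_page(phys_to_virt(phys)) : NULL;
}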
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390be3b12..f13289f0d238 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
+#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>
@@ -18,6 +19,16 @@
#include "q_struct.h"
#include "nicvf_queues.h"
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+ /* Translation is installed only when IOMMU is present */
+ if (nic->iommu_domain)
+ return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+ return dma_addr;
+}
+
static void nicvf_get_page(struct nicvf *nic)
{
if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
- int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
+ int order = NICVF_PAGE_ORDER;
/* Check if request can be accommodated in previously allocated page */
if (nic->rb_page &&
@@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
}
nicvf_get_page(nic);
- nic->rb_page = NULL;
/* Allocate a new page */
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
+ order);
if (!nic->rb_page) {
- nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
- order);
- if (!nic->rb_page) {
- this_cpu_inc(nic->pnicvf->drv_stats->
- rcv_buffer_alloc_failures);
- return -ENOMEM;
- }
- nic->rb_page_offset = 0;
+ this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
+ return -ENOMEM;
}
-
+ nic->rb_page_offset = 0;
ret:
- *rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+ /* HW will ensure data coherency, CPU sync not required */
+ *rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
+ nic->rb_page_offset, buf_len,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC));
+ if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
+ if (!nic->rb_page_offset)
+ __free_pages(nic->rb_page, order);
+ nic->rb_page = NULL;
+ return -ENOMEM;
+ }
nic->rb_page_offset += buf_len;
return 0;
@@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->dma_size = buf_size;
rbdr->enable = true;
rbdr->thresh = RBDR_THRESH;
+ rbdr->head = 0;
+ rbdr->tail = 0;
nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
&rbuf);
- if (err)
+ if (err) {
+ /* Set tail so the already allocated and mapped buffers get freed */
+ rbdr->tail = idx - 1;
return err;
+ }
desc = GET_RBDR_DESC(rbdr, idx);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
}
nicvf_get_page(nic);
@@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
- u64 buf_addr;
+ u64 buf_addr, phys_addr;
struct rbdr_entry_t *desc;
if (!rbdr)
@@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
head = rbdr->head;
tail = rbdr->tail;
- /* Free SKBs */
+ /* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
- /* Free SKB of tail desc */
+ /* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
- buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
- put_page(virt_to_page(phys_to_virt(buf_addr)));
+ buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+ phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+ dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (phys_addr)
+ put_page(virt_to_page(phys_to_virt(phys_addr)));
/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +279,7 @@ refill:
break;
desc = GET_RBDR_DESC(rbdr, tail);
- desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
refill_rb_cnt--;
new_rb++;
}
@@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
return 0;
}
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt)
+{
+ u8 idx;
+ struct sq_gather_subdesc *gather;
+
+ /* Unmap DMA mapped skb data buffers */
+ for (idx = 0; idx < subdesc_cnt; idx++) {
+ hdr_sqe++;
+ hdr_sqe &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
+ gather->size, DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+}
+
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
+ struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
if (!sq)
return;
@@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
- if (skb)
- dev_kfree_skb_any(skb);
+ if (!skb)
+ goto next;
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and unmap them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+ tso_sqe->subdesc_cnt);
+ } else {
+ nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+ hdr->subdesc_cnt);
+ }
+ dev_kfree_skb_any(skb);
+next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
@@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);
if (!nic->sqs_mode && (qidx == 0)) {
- /* Enable checking L3/L4 length and TCP/UDP checksums */
+ /* Enable checking L3/L4 length and TCP/UDP checksums
+ * Also allow IPv6 pkts with zero UDP checksum.
+ */
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
- (BIT(24) | BIT(23) | BIT(21)));
+ (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
}
@@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
return qentry;
}
+/* Roll back to the previous tail pointer when descriptors are not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+ int qentry, int desc_cnt)
+{
+ sq->tail = qentry;
+ atomic_add(desc_cnt, &sq->free_cnt);
+}
+
/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
@@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num)
{
int i, size;
- int subdesc_cnt, tso_sqe = 0;
+ int subdesc_cnt, hdr_sqe = 0;
int qentry;
+ u64 dma_addr;
subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
- tso_sqe = qentry;
+ hdr_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
- nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+ /* HW will ensure data coherency, CPU sync not required */
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
+ offset_in_page(skb->data), size,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
/* Check for scattered buffer */
if (!skb_is_nonlinear(skb))
@@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
- nicvf_sq_add_gather_subdesc(sq, qentry, size,
- virt_to_phys(
- skb_frag_address(frag)));
+ dma_addr = dma_map_page_attrs(&nic->pdev->dev,
+ skb_frag_page(frag),
+ frag->page_offset, size,
+ DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
+ /* Free the entire chain of mapped buffers;
+ * here 'i' = frags mapped so far + the skb->data mapping above
+ */
+ nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+ nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+ return 0;
+ }
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
}
doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
- nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
}
nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
+ u64 phys_addr;
rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
else
rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
- netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
- __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
+ phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+ if (!phys_addr) {
+ if (skb)
+ dev_kfree_skb_any(skb);
+ return NULL;
+ }
+
if (!frag) {
/* First fragment */
+ dma_unmap_page_attrs(&nic->pdev->dev,
+ *rb_ptrs - cqe_rx->align_pad,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
skb = nicvf_rb_ptr_to_skb(nic,
- *rb_ptrs - cqe_rx->align_pad,
+ phys_addr - cqe_rx->align_pad,
payload_len);
if (!skb)
return NULL;
@@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
- page = virt_to_page(phys_to_virt(*rb_ptrs));
- offset = phys_to_virt(*rb_ptrs) - page_address(page);
+ dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
+ RCV_FRAG_LEN, DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ page = virt_to_page(phys_to_virt(phys_addr));
+ offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset, payload_len, RCV_FRAG_LEN);
}
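
One pattern in the transmit changes above is worth spelling out: each gather sub-descriptor is now DMA-mapped individually, so a mapping failure mid-SKB has to both unmap everything mapped so far (nicvf_unmap_sndq_buffers() over the first entries) and hand the reserved descriptors back via nicvf_rollback_sq_desc(). Compressed into a hypothetical helper:

/* Sketch (names from the patch): map one fragment for the send queue
 * and unwind completely on failure -- unmap the 'done' entries and
 * return every reserved descriptor to the free pool.
 */
static int nicvf_map_one_frag(struct nicvf *nic, struct snd_queue *sq,
                              struct page *page, u32 off, u32 size,
                              int hdr_sqe, int done, int qentry,
                              int subdesc_cnt)
{
        dma_addr_t dma = dma_map_page_attrs(&nic->pdev->dev, page, off,
                                            size, DMA_TO_DEVICE,
                                            DMA_ATTR_SKIP_CPU_SYNC);

        if (dma_mapping_error(&nic->pdev->dev, dma)) {
                nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, done);
                nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
                return -ENOMEM;
        }
        nicvf_sq_add_gather_subdesc(sq, qentry, size, dma);
        return 0;
}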
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 5cb84da99a2d..10cb4b84625b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -87,7 +87,7 @@
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
-#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
+#define DMA_BUFFER_LEN 1536 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
@@ -301,6 +301,8 @@ struct queue_set {
#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+ int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf730bb..64a1095e4d14 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
return 1;
}
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+ u16 sdevid;
+
+ if (max_bgx_per_node)
+ return;
+
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ switch (sdevid) {
+ case PCI_SUBSYS_DEVID_81XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN81XX;
+ break;
+ case PCI_SUBSYS_DEVID_83XX_BGX:
+ max_bgx_per_node = MAX_BGX_PER_CN83XX;
+ break;
+ case PCI_SUBSYS_DEVID_88XX_BGX:
+ default:
+ max_bgx_per_node = MAX_BGX_PER_CN88XX;
+ break;
+ }
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+ int idx = (node * max_bgx_per_node) + bgx_idx;
+
+ return bgx_vnic[idx];
+}
+
/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
int i;
unsigned map = 0;
- for (i = 0; i < MAX_BGX_PER_NODE; i++) {
- if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+ for (i = 0; i < max_bgx_per_node; i++) {
+ if (bgx_vnic[(node * max_bgx_per_node) + i])
map |= (1 << i);
}
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac_count;
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
- struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;
- bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+ bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;
@@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
dev_info(dev, "%s: 40G_KR4\n", (char *)str);
break;
case BGX_MODE_QSGMII:
- if ((lmacid == 0) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
- return;
- if ((lmacid == 2) &&
- (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
- return;
dev_info(dev, "%s: QSGMII\n", (char *)str);
break;
case BGX_MODE_RGMII:
@@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}
+ set_max_bgx_per_node(pdev);
+
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
bgx->bgx_id = (pci_resource_start(pdev,
PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
- bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+ bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
} else {
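
The substance of this change is the array stride: bgx_vnic[] is laid out node-major, so the stride has to be the per-node BGX count of the actual silicon rather than a compile-time maximum. Worked example: on CN81XX (3 BGX per node, counting the RGX), node 1 / bgx 0 is bgx_vnic[3]; the old MAX_BGX_PER_NODE stride of 4 would have read bgx_vnic[4], and the PFC helpers' MAX_BGX_PER_CN88XX stride of 2 would have read bgx_vnic[2]. Restated as a sketch:

/* Sketch of the corrected lookup (this is what get_bgx() computes). */
static struct bgx *bgx_lookup(int node, int bgx_idx)
{
        return bgx_vnic[node * max_bgx_per_node + bgx_idx];
}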
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189429bb..c5080f2cead5 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
#define MAX_BGX_PER_CN88XX 2
#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
#define MAX_BGX_PER_CN83XX 4
-#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 275c2e2349ad..c44036d5761a 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
- int res;
-
dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
@@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct device_node *phy_handle;
- int res = 0;
+ int res = 1;
phy_handle = of_parse_phandle(np, "phy-handle", 0);
@@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
int res = emac_dt_phy_probe(dev);
- mutex_unlock(&emac_phy_map_lock);
- if (!res)
+ switch (res) {
+ case 1:
+ /* No phy-handle property configured.
+ * Continue with the existing phy probe
+ * and setup code.
+ */
+ break;
+
+ case 0:
+ mutex_unlock(&emac_phy_map_lock);
goto init_phy;
- dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
- res);
- return res;
+ default:
+ mutex_unlock(&emac_phy_map_lock);
+ dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
+ res);
+ return res;
+ }
}
if (dev->phy_address != 0xffffffff)
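
The key to this hunk is the new return convention for emac_dt_phy_probe(): 1 now means "no phy-handle property, fall through to the legacy probe path", 0 means the DT PHY attached, and a negative value is a hard error. A minimal sketch of the convention (hypothetical function; of_node_put() and the actual attach elided):

/* Sketch of the tri-state return convention adopted above. */
static int dt_phy_probe_sketch(struct device_node *np)
{
        struct device_node *phy_handle;

        phy_handle = of_parse_phandle(np, "phy-handle", 0);
        if (!phy_handle)
                return 1;       /* not configured: take the legacy path */

        /* ... parse and attach the DT PHY here ... */
        return 0;               /* 0 = attached, <0 = hard error */
}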
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9198e6bd5160..5f11b4dc95d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
init_rx_pool(adapter, &adapter->rx_pool[i],
- IBMVNIC_BUFFS_PER_POOL, i,
+ adapter->req_rx_add_entries_per_subcrq, i,
be64_to_cpu(size_array[i]), 1);
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
dev_err(dev, "Couldn't alloc rx pool\n");
@@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
tx_pool->tx_buff =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
if (!tx_pool->tx_buff)
goto tx_pool_alloc_failed;
if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- adapter->max_tx_entries_per_subcrq *
+ adapter->req_tx_entries_per_subcrq *
adapter->req_mtu))
goto tx_ltb_alloc_failed;
tx_pool->free_map =
- kcalloc(adapter->max_tx_entries_per_subcrq,
+ kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map)
goto tx_fm_alloc_failed;
- for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
+ for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;
tx_pool->consumer_index = 0;
@@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
+ struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
@@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int ret = 0;
tx_pool = &adapter->tx_pool[queue_num];
+ tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
@@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
@@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (tx_pool->consumer_index == 0)
tx_pool->consumer_index =
- adapter->max_tx_entries_per_subcrq - 1;
+ adapter->req_tx_entries_per_subcrq - 1;
else
tx_pool->consumer_index--;
@@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_BUSY;
goto out;
}
+
+ atomic_inc(&tx_scrq->used);
+
+ if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
+ netdev_info(netdev, "Stopping queue %d\n", queue_num);
+ netif_stop_subqueue(netdev, queue_num);
+ }
+
tx_packets++;
tx_bytes += skb->len;
txq->trans_start = jiffies;
@@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
+ atomic_set(&scrq->used, 0);
scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);
@@ -1355,14 +1366,28 @@ restart_loop:
DMA_TO_DEVICE);
}
- if (txbuff->last_frag)
+ if (txbuff->last_frag) {
+ atomic_dec(&scrq->used);
+
+ if (atomic_read(&scrq->used) <=
+ (adapter->req_tx_entries_per_subcrq / 2) &&
+ netif_subqueue_stopped(adapter->netdev,
+ txbuff->skb)) {
+ netif_wake_subqueue(adapter->netdev,
+ scrq->pool_index);
+ netdev_dbg(adapter->netdev,
+ "Started queue %d\n",
+ scrq->pool_index);
+ }
+
dev_kfree_skb_any(txbuff->skb);
+ }
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
(adapter->tx_pool[pool].producer_index + 1) %
- adapter->max_tx_entries_per_subcrq;
+ adapter->req_tx_entries_per_subcrq;
}
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
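
The atomic used counter added to each sub-CRQ implements standard transmit backpressure: stop the subqueue when the ring fills, wake it once completions drain it to half. Reduced to its skeleton below — helper names are hypothetical, thresholds match the patch:

/* Transmit side, after queueing one descriptor set. */
static void tx_used_inc_sketch(struct ibmvnic_adapter *adapter,
                               struct ibmvnic_sub_crq_queue *scrq,
                               struct net_device *netdev, int queue_num)
{
        atomic_inc(&scrq->used);
        if (atomic_read(&scrq->used) >= adapter->req_tx_entries_per_subcrq)
                netif_stop_subqueue(netdev, queue_num);
}

/* Completion side, after freeing one skb's descriptors. */
static void tx_used_dec_sketch(struct ibmvnic_adapter *adapter,
                               struct ibmvnic_sub_crq_queue *scrq)
{
        atomic_dec(&scrq->used);
        if (atomic_read(&scrq->used) <=
            adapter->req_tx_entries_per_subcrq / 2 &&
            __netif_subqueue_stopped(adapter->netdev, scrq->pool_index))
                netif_wake_subqueue(adapter->netdev, scrq->pool_index);
}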
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 422824f1f42a..1993b42666f7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
+ atomic_t used;
};
struct ibmvnic_long_term_buff {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index ddb4ca4ff930..117170014e88 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -14,6 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK
default n
---help---
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0523ed47f597..8fa23f6a1f67 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ if (mode & DCB_CAP_DCBX_LLD_MANAGED)
+ return 1;
+
if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
@@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
}
- if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
+ if (!(mode & DCB_CAP_DCBX_HOST))
return 1;
- if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
- !(mode & DCB_CAP_DCBX_HOST))
+ if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 31e3cb7ee5fe..5621dcfda4f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct iphdr *iph;
/* We are only going to peek, no need to clone the SKB */
- if (skb->protocol != htons(ETH_P_IP))
- goto out;
-
if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;
@@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);
- lbtp->pt.type = htons(ETH_P_ALL);
+ lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 44406a5ec15d..79481f4cf264 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -48,9 +48,14 @@
#include "eswitch.h"
#include "vxlan.h"
+enum {
+ MLX5E_TC_FLOW_ESWITCH = BIT(0),
+};
+
struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
+ u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
@@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
}
- if (esw && esw->mode == SRIOV_OFFLOADS) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
@@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}
static int parse_cls_flower(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
@@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
err = __parse_cls_flower(priv, spec, f, &min_inline);
- if (!err && esw->mode == SRIOV_OFFLOADS &&
+ if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
rep->vport != FDB_UPLINK_VPORT) {
if (min_inline > esw->offloads.inline_mode) {
netdev_warn(priv->netdev,
@@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
- int err = 0;
- bool fdb_flow = false;
+ int err, attr_size = 0;
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ u8 flow_flags = 0;
- if (esw && esw->mode == SRIOV_OFFLOADS)
- fdb_flow = true;
-
- if (fdb_flow)
- flow = kzalloc(sizeof(*flow) +
- sizeof(struct mlx5_esw_flow_attr),
- GFP_KERNEL);
- else
- flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ flow_flags = MLX5E_TC_FLOW_ESWITCH;
+ attr_size = sizeof(struct mlx5_esw_flow_attr);
+ }
+ flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
err = -ENOMEM;
@@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}
flow->cookie = f->cookie;
+ flow->flags = flow_flags;
- err = parse_cls_flower(priv, spec, f);
+ err = parse_cls_flower(priv, flow, spec, f);
if (err < 0)
goto err_free;
- if (fdb_flow) {
+ if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 2478516a61e2..ded27bb9a3b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct list_head *prev = ft->node.children.prev;
+ struct list_head *prev = &ft->node.children;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c4242a4e8130..e2bd600d19de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto clean_load;
+ pci_save_state(pdev);
return 0;
clean_load:
@@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state and drain the health wq */
+ /* In case of kernel call drain the health wq */
if (state) {
- pci_save_state(pdev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
@@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0899e2d310e2..d9616daf8a70 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)
@@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
-#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
+#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
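
Both record-count reductions follow from the same constraint: the registers' num_rec field is 8 bits wide, so the largest count expressible on the wire is 255, and sizing the payload for 256 records allowed one record the device could never be told about. As a guard one might add (the field-width constant is an assumption):

#define SPVM_NUM_REC_BITS 8     /* assumed width of the num_rec field */

static inline void spvm_rec_count_check(void)
{
        BUILD_BUG_ON(MLXSW_REG_SPVM_REC_MAX_COUNT >
                     (1 << SPVM_NUM_REC_BITS) - 1);
}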
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 22ab42925377..ae6cccc666e4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
- if (WARN_ON(IS_ERR(ruleset)))
+ if (IS_ERR(ruleset))
return;
rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
- if (!WARN_ON(!rule)) {
+ if (rule) {
mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index d42d03df751a..7e3a6fed3da6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ u32 align = elems_per_page * DQ_RANGE_ALIGN;
- p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ p_conn->cid_count = roundup(p_conn->cid_count, align);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index e2a081ceaf52..e518f914eab1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* size/capacity fields are of a u32 type.
*/
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
- chain_size > 0x10000) ||
- (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
- chain_size > 0x100000000ULL)) {
+ chain_size > ((u32)U16_MAX + 1)) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
DP_NOTICE(cdev,
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
chain_size);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index 3a44d6b395fa..098766f7fe88 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+ p_init->ooo_enable = p_params->ooo_enable;
+ p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+ p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+ struct qed_iscsi_conn *p_conn)
+{
+ qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+ qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct tcp_upload_params),
+ p_conn->tcp_upload_params_virt_addr,
+ p_conn->tcp_upload_params_phys_addr);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(struct scsi_terminate_extra_params),
+ p_conn->queue_cnts_virt_addr,
+ p_conn->queue_cnts_phys_addr);
+ kfree(p_conn);
+}
+
struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
+ struct qed_iscsi_conn *p_conn = NULL;
+
+ while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+ p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+ struct qed_iscsi_conn, list_entry);
+ if (p_conn) {
+ list_del(&p_conn->list_entry);
+ qed_iscsi_free_connection(p_hwfn, p_conn);
+ }
+ }
+
kfree(p_iscsi_info);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 9a0b9af10a57..0d3cef409c96 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* If need to reuse or there's no replacement buffer, repost this */
if (rc)
goto out_post;
+ dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+ cdev->ll2->rx_size, DMA_FROM_DEVICE);
skb = build_skb(buffer->data, 0);
if (!skb) {
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
- unsigned long lock_flags,
+ unsigned long *p_lock_flags,
bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+ spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
- spin_lock_irqsave(&p_rx->lock, lock_flags);
+ spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
return 0;
}
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
- cqe, flags, b_last_cqe);
+ cqe, &flags,
+ b_last_cqe);
break;
default:
rc = -EIO;
@@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_conn ll2_info;
+ struct qed_ll2_conn ll2_info = { 0 };
int rc;
ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
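
The switch from passing lock_flags by value to passing a pointer matters because the completion handler drops and re-takes the Rx lock around the callback: spin_lock_irqsave() writes the saved IRQ state into its flags argument, so re-locking through a by-value copy leaves the caller's variable stale and the eventual spin_unlock_irqrestore() restores the wrong state. The shape of the fix, as a standalone sketch:

/* Sketch: when a callee drops and re-takes an irqsave lock, the flags
 * must travel by pointer so the caller's copy stays current.
 */
static void complete_one_sketch(spinlock_t *lock, unsigned long *p_flags)
{
        spin_unlock_irqrestore(lock, *p_flags);
        /* ... invoke the completion callback without the lock held ... */
        spin_lock_irqsave(lock, *p_flags);
}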
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index 7d731c6cb892..378afce58b3f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;
+ p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
return p_ooo_info;
no_history_mem:
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 6d31f92ef2b6..84ac50f92c9c 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -1162,8 +1162,8 @@ struct ob_mac_tso_iocb_rsp {
struct ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
u8 flags1;
-#define IB_MAC_IOCB_RSP_OI 0x01 /* Overide intr delay */
-#define IB_MAC_IOCB_RSP_I 0x02 /* Disble Intr Generation */
+#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
+#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 65077c77082a..91e9bd7159ab 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
* Ethtool support
*/
static int
-smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
-
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_gset(&lp->mii, cmd);
+ ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- cmd->supported = SUPPORTED_10baseT_Half |
+ u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;
if (lp->ctl_rspeed == 10)
- ethtool_cmd_speed_set(cmd, SPEED_10);
+ cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
- ethtool_cmd_speed_set(cmd, SPEED_100);
+ cmd->base.speed = SPEED_100;
+
+ cmd->base.autoneg = AUTONEG_DISABLE;
+ cmd->base.port = 0;
+ cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
+ DUPLEX_FULL : DUPLEX_HALF;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->port = 0;
- cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
ret = 0;
}
@@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}
static int
-smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+smc_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;
if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
- ret = mii_ethtool_sset(&lp->mii, cmd);
+ ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
- if (cmd->autoneg != AUTONEG_DISABLE ||
- cmd->speed != SPEED_10 ||
- (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
- (cmd->port != PORT_TP && cmd->port != PORT_AUI))
+ if (cmd->base.autoneg != AUTONEG_DISABLE ||
+ cmd->base.speed != SPEED_10 ||
+ (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL) ||
+ (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->port;
- lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+// lp->port = cmd->base.port;
+ lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
// if (netif_running(dev))
// smc_set_port(dev);
@@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,
static const struct ethtool_ops smc_ethtool_ops = {
- .get_settings = smc_ethtool_getsettings,
- .set_settings = smc_ethtool_setsettings,
.get_drvinfo = smc_ethtool_getdrvinfo,
.get_msglevel = smc_ethtool_getmsglevel,
@@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
.get_eeprom_len = smc_ethtool_geteeprom_len,
.get_eeprom = smc_ethtool_geteeprom,
.set_eeprom = smc_ethtool_seteeprom,
+ .get_link_ksettings = smc_ethtool_get_link_ksettings,
+ .set_link_ksettings = smc_ethtool_set_link_ksettings,
};
static const struct net_device_ops smc_netdev_ops = {
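
This is the standard migration from the legacy get_settings/set_settings ethtool hooks to the link_ksettings API, where the flat SUPPORTED_* u32 becomes a link-mode bitmap. For fixed-PHY paths like the one above, the bridging step is a single helper call; a minimal sketch (hypothetical function name, same bits as the driver uses):

/* Sketch: translate the legacy SUPPORTED_* bitmask into the
 * link_ksettings bitmap representation.
 */
static void fill_fixed_link_modes(struct ethtool_link_ksettings *cmd)
{
        u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
                        SUPPORTED_TP | SUPPORTED_AUI;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        cmd->base.autoneg = AUTONEG_DISABLE;
}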
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d3e73ac158ae..f9f3dba7a588 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -700,6 +700,8 @@ struct net_device_context {
u32 tx_checksum_mask;
+ u32 tx_send_table[VRSS_SEND_TAB_SIZE];
+
/* Ethtool settings */
u8 duplex;
u32 speed;
@@ -757,7 +759,6 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
- u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d35ebd993b38..4c1d8cca247b 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
- struct netvsc_device *nvscdev;
struct net_device *ndev = hv_get_drvdata(hdev);
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
int i;
u32 count, *tab;
- nvscdev = get_outbound_net_device(hdev);
- if (!nvscdev)
- return;
-
count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);
for (i = 0; i < count; i++)
- nvscdev->send_table[i] = tab[i];
+ net_device_ctx->tx_send_table[i] = tab[i];
}
static void netvsc_send_vf(struct net_device_context *net_device_ctx,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index bc05c895d958..5ede87f30463 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
+ unsigned int num_tx_queues = ndev->real_num_tx_queues;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);
- if (q_idx < 0 || skb->ooo_okay ||
- q_idx >= ndev->real_num_tx_queues) {
+ if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;
- new_idx = nvsc_dev->send_table[hash]
- % nvsc_dev->num_chn;
+ new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;
if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
q_idx = new_idx;
}
- if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
- q_idx = 0;
-
return q_idx;
}
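The three hyperv hunks move the VRSS send indirection table from the netvsc_device, which can be torn down and re-created, into the persistent net_device_context, apparently so netvsc_select_queue() never dereferences a device object that may disappear underneath it. The lookup itself, with names taken from the hunk:

	/* Hash the flow into the indirection table, then fold the table
	 * entry onto the currently active transmit queues.
	 */
	u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
	int new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;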
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f9d0fa315a47..272b051a0199 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}
-static void marvell_remove(struct phy_device *phydev)
-{
-#ifdef CONFIG_HWMON
-
- struct marvell_priv *priv = phydev->priv;
-
- if (priv && priv->hwmon_dev)
- hwmon_device_unregister(priv->hwmon_dev);
-#endif
-}
-
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
- .remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
- .remove = &marvell_remove,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,
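The marvell.c hunks delete the driver's .remove callback together with its manual hwmon_device_unregister(). Presumably the hwmon device is registered through the devm interface, in which case the device core already releases it on unbind and the explicit unregister was redundant, or even a double release. The devm pattern, as a sketch with a hypothetical chip_info:

	priv->hwmon_dev = devm_hwmon_device_register_with_info(dev, name,
							       phydev,
							       &chip_info,
							       NULL);
	if (IS_ERR(priv->hwmon_dev))
		return PTR_ERR(priv->hwmon_dev);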
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index daec6555f3b1..5198ccfa347f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
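Switching the Generic PHY driver from genphy_soft_reset to genphy_no_soft_reset means an unknown PHY bound to the generic driver is no longer BMCR-reset at attach time, which could wipe setup done by firmware or a bootloader. genphy_no_soft_reset is just a no-op .soft_reset implementation, roughly:

	static inline int genphy_no_soft_reset(struct phy_device *phydev)
	{
		return 0;
	}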
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
index 93ffedfa2994..1e2d4f1179da 100644
--- a/drivers/net/phy/spi_ks8995.c
+++ b/drivers/net/phy/spi_ks8995.c
@@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;
- ks->regs_attr.size = ks->chip->regs_size;
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
+ ks->regs_attr.size = ks->chip->regs_size;
err = ks8995_reset(ks);
if (err)
return err;
+ sysfs_attr_init(&ks->regs_attr.attr);
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
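Two fixes in one ks8995_probe() hunk: the template memcpy() used to run after the per-chip size assignment, silently overwriting regs_attr.size with the template's value, so the copy now comes first; and the added sysfs_attr_init() gives the dynamically allocated attribute its lockdep class, which sysfs requires. The corrected order, restated:

	memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
	ks->regs_attr.size = ks->chip->regs_size;	/* survives the copy */
	sysfs_attr_init(&ks->regs_attr.attr);		/* lockdep key init */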
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4a24b5d15f5a..1b52520715ae 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
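Since the core gained min_mtu/max_mtu range checking, ether_setup() caps max_mtu at the standard 1500-byte Ethernet payload; a virtual aggregation device like team must raise the cap explicitly or jumbo-frame MTUs are rejected:

	dev->max_mtu = ETH_MAX_MTU;	/* allow MTUs beyond 1500 bytes */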
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index dc1b1dd9157c..34cc3c590aa5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
+ struct tun_struct *tun = netdev_priv(dev);
+ int i;
+
netif_tx_start_all_queues(dev);
+
+ for (i = 0; i < tun->numqueues; i++) {
+ struct tun_file *tfile;
+
+ tfile = rtnl_dereference(tun->tfiles[i]);
+ tfile->socket.sk->sk_write_space(tfile->socket.sk);
+ }
+
return 0;
}
@@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;
- if (sock_writeable(sk) ||
- (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
- sock_writeable(sk)))
+ if (tun->dev->flags & IFF_UP &&
+ (sock_writeable(sk) ||
+ (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
+ sock_writeable(sk))))
mask |= POLLOUT | POLLWRNORM;
if (tun->dev->reg_state != NETREG_REGISTERED)
@@ -2570,7 +2582,6 @@ static int __init tun_init(void)
int ret = 0;
pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
- pr_info("%s\n", DRV_COPYRIGHT);
ret = rtnl_link_register(&tun_link_ops);
if (ret) {
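Two related tun fixes: tun_net_open() now kicks sk_write_space() on every queue so writers blocked while the device was down are woken once it comes up, and tun_chr_poll() only reports writability while IFF_UP is set, keeping poll() and the wakeup consistent. The added poll gating, in isolation:

	if (tun->dev->flags & IFF_UP &&
	    (sock_writeable(sk) ||
	     (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	      sock_writeable(sk))))
		mask |= POLLOUT | POLLWRNORM;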
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 22379da63400..fea687f35b5a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
- dstats->tx_bytes += skb->len;
+ dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
this_cpu_inc(dev->dstats->tx_drps);
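vrf_xmit() illustrates a classic transmit-path rule: is_ip_tx_frame() consumes the skb, so reading skb->len afterwards for byte accounting is a use-after-free, and the fix captures the length up front. The general pattern (do_transmit and foo_xmit are hypothetical stand-ins):

	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		int len = skb->len;	/* capture before the skb is consumed */
		netdev_tx_t ret = do_transmit(skb, dev);

		if (ret == NET_XMIT_SUCCESS)
			dev->stats.tx_bytes += len;	/* never touches skb */
		return ret;
	}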
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e375560cc74e..bdb6ae16d4a8 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}
+static int __vxlan_dev_create(struct net *net, struct net_device *dev,
+ struct vxlan_config *conf)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ int err;
+
+ err = vxlan_dev_configure(net, dev, conf, false);
+ if (err)
+ return err;
+
+ dev->ethtool_ops = &vxlan_ethtool_ops;
+
+ /* create an fdb entry for a valid default destination */
+ if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+ err = vxlan_fdb_create(vxlan, all_zeros_mac,
+ &vxlan->default_dst.remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
+ NLM_F_EXCL | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_vni,
+ vxlan->default_dst.remote_ifindex,
+ NTF_SELF);
+ if (err)
+ return err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
+ return err;
+ }
+
+ list_add(&vxlan->next, &vn->vxlan_list);
+ return 0;
+}
+
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink)
@@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
- struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config conf;
int err;
@@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;
- err = vxlan_dev_configure(src_net, dev, &conf, false);
- if (err)
- return err;
-
- dev->ethtool_ops = &vxlan_ethtool_ops;
-
- /* create an fdb entry for a valid default destination */
- if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
- err = vxlan_fdb_create(vxlan, all_zeros_mac,
- &vxlan->default_dst.remote_ip,
- NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_EXCL | NLM_F_CREATE,
- vxlan->cfg.dst_port,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_vni,
- vxlan->default_dst.remote_ifindex,
- NTF_SELF);
- if (err)
- return err;
- }
-
- err = register_netdevice(dev);
- if (err) {
- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
- return err;
- }
-
- list_add(&vxlan->next, &vn->vxlan_list);
-
- return 0;
+ return __vxlan_dev_create(src_net, dev, &conf);
}
static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = vxlan_dev_configure(net, dev, conf, false);
+ err = __vxlan_dev_create(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index a5045b5279d7..6742ae605660 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
- iowrite16be(bd_status, &bd->status);
iowrite16be(skb->len, &bd->length);
+ iowrite16be(bd_status, &bd->status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
@@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
struct sk_buff *skb;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
- u32 bd_status;
+ u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
int i;
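Both fsl_ucc_hdlc changes are correctness fixes around the buffer-descriptor ring. On transmit, the status word carries the ownership bit (T_R_S) that hands the descriptor to the controller, so every other field must be written before it:

	iowrite16be(skb->len, &bd->length);	/* details first            */
	iowrite16be(bd_status, &bd->status);	/* ownership handed over last */

On receive, bd_status becomes u16 to match the 16-bit descriptor status field that the driver reads with the 16-bit accessors.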
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index e7f5910a6519..f8eb66ef2944 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);
+ if (iface->cur_altsetting->desc.bNumEndpoints < 4)
+ return -ENODEV;
+
if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");
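The i2400m probe check is the standard defence against malformed or malicious USB descriptors: validate bNumEndpoints before the driver indexes the endpoints it expects, and bail out otherwise.

	if (iface->cur_altsetting->desc.bNumEndpoints < 4)
		return -ENODEV;	/* device does not expose the endpoints we use */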
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 829b26cd4549..8397f6c92451 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;
BUG_ON(skb->dev != dev);
- /* Drop the packet if queues are not set up */
+ /* Drop the packet if queues are not set up.
+ * This handler should be called inside an RCU read section
+ * so we don't need to enter it here explicitly.
+ */
+ num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;
@@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
+ unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;
- spin_lock(&vif->lock);
- if (vif->queues == NULL)
- goto out;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < vif->num_queues; ++index) {
+ for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
tx_packets += queue->stats.tx_packets;
}
-out:
- spin_unlock(&vif->lock);
+ rcu_read_unlock();
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
@@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
- unsigned int num_queues = vif->num_queues;
+ unsigned int num_queues;
int i;
unsigned int queue_index;
+ rcu_read_lock();
+ num_queues = READ_ONCE(vif->num_queues);
+
for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
}
data[i] = accum;
}
+
+ rcu_read_unlock();
}
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index f9bcf4a665bc..602d408fa25e 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
/* Disable the vif from queue 0's kthread */
- if (vif->queues)
+ if (vif->num_queues)
xenvif_kick_thread(&vif->queues[0]);
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index d2d7cd9145b1..a56d3eab35dd 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
struct xenvif *vif = be->vif;
if (vif) {
+ unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
- struct xenvif_queue *queues;
xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
- for (queue_index = 0;
- queue_index < vif->num_queues;
- ++queue_index)
- xenvif_deinit_queue(&vif->queues[queue_index]);
- spin_lock(&vif->lock);
- queues = vif->queues;
+ /* At this point some of the handlers may still be active
+ * so we need to have additional synchronization here.
+ */
vif->num_queues = 0;
- vif->queues = NULL;
- spin_unlock(&vif->lock);
+ synchronize_net();
- vfree(queues);
+ for (queue_index = 0; queue_index < num_queues; ++queue_index)
+ xenvif_deinit_queue(&vif->queues[queue_index]);
+
+ vfree(vif->queues);
+ vif->queues = NULL;
xenvif_disconnect_ctrl(vif);
}
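Taken together, the xen-netback hunks replace vif->lock protection of the queue array with RCU. Readers sample the queue count once and work within an RCU read section (xenvif_start_xmit runs in one implicitly, the stats and ethtool paths take it explicitly); teardown publishes num_queues = 0 and then waits out all readers before freeing. The two halves, as a sketch:

	/* reader */
	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);
	for (i = 0; i < num_queues; i++)
		accumulate(&vif->queues[i]);	/* hypothetical per-queue work */
	rcu_read_unlock();

	/* writer (teardown) */
	vif->num_queues = 0;
	synchronize_net();	/* waits for every outstanding RCU reader */
	vfree(vif->queues);
	vif->queues = NULL;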
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c
index 993b650ef275..44f774c12fb2 100644
--- a/drivers/pci/dwc/pci-exynos.c
+++ b/drivers/pci/dwc/pci-exynos.c
@@ -132,10 +132,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
struct device *dev = pci->dev;
struct resource *res;
- /* If using the PHY framework, doesn't need to get other resource */
- if (ep->using_phy)
- return 0;
-
ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL);
if (!ep->mem_res)
return -ENOMEM;
@@ -145,6 +141,10 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
if (IS_ERR(ep->mem_res->elbi_base))
return PTR_ERR(ep->mem_res->elbi_base);
+ /* If using the PHY framework, we don't need the remaining resources */
+ if (ep->using_phy)
+ return 0;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ep->mem_res->phy_base))
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 973472c23d89..1dfa10cc566b 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -478,7 +478,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- struct pci_dev *child, *parent = link->pdev;
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
struct aspm_register_info upreg, dwreg;
@@ -491,9 +491,7 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get upstream/downstream components' register state */
pcie_get_aspm_reg(parent, &upreg);
- child = pci_function_0(linkbus);
pcie_get_aspm_reg(child, &dwreg);
- link->downstream = child;
/*
* If ASPM not supported, don't mess with the clocks and link,
@@ -800,6 +798,7 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
+ link->downstream = pci_function_0(pdev->subordinate);
/*
* Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f754453fe754..673683660b5c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2174,6 +2174,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
quirk_blacklist_vpd);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_QLOGIC, 0x2261, quirk_blacklist_vpd);
/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index f8e9e1c2b2f6..c978be5eb9eb 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -422,6 +422,20 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in
return 0;
}
+static int msm_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msm_pinctrl *pctrl = gpiochip_get_data(chip);
+ const struct msm_pingroup *g;
+ u32 val;
+
+ g = &pctrl->soc->groups[offset];
+
+ val = readl(pctrl->regs + g->ctl_reg);
+
+ /* 0 = output, 1 = input */
+ return val & BIT(g->oe_bit) ? 0 : 1;
+}
+
static int msm_gpio_get(struct gpio_chip *chip, unsigned offset)
{
const struct msm_pingroup *g;
@@ -510,6 +524,7 @@ static void msm_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
static struct gpio_chip msm_gpio_template = {
.direction_input = msm_gpio_direction_input,
.direction_output = msm_gpio_direction_output,
+ .get_direction = msm_gpio_get_direction,
.get = msm_gpio_get,
.set = msm_gpio_set,
.request = gpiochip_generic_request,
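msm_gpio_get_direction() follows the gpiolib convention: return 0 for an output, 1 for an input, derived here from the output-enable bit of the pin's ctl register.

	/* OE bit set => pad drives the line => output (0) */
	return val & BIT(g->oe_bit) ? 0 : 1;

With .get_direction wired up, gpiolib can report accurate initial directions to consumers and in debugfs instead of guessing.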
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
index 77a0236ee781..83f8864fa76a 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld11.c
@@ -390,22 +390,22 @@ static const struct pinctrl_pin_desc uniphier_ld11_pins[] = {
UNIPHIER_PINCTRL_PIN(140, "AO1D0", 140,
140, UNIPHIER_PIN_DRV_1BIT,
140, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(141, "TCON0", 141,
+ UNIPHIER_PINCTRL_PIN(141, "AO1D1", 141,
141, UNIPHIER_PIN_DRV_1BIT,
141, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(142, "TCON1", 142,
+ UNIPHIER_PINCTRL_PIN(142, "AO1D2", 142,
142, UNIPHIER_PIN_DRV_1BIT,
142, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(143, "TCON2", 143,
+ UNIPHIER_PINCTRL_PIN(143, "XIRQ9", 143,
143, UNIPHIER_PIN_DRV_1BIT,
143, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(144, "TCON3", 144,
+ UNIPHIER_PINCTRL_PIN(144, "XIRQ10", 144,
144, UNIPHIER_PIN_DRV_1BIT,
144, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(145, "TCON4", 145,
+ UNIPHIER_PINCTRL_PIN(145, "XIRQ11", 145,
145, UNIPHIER_PIN_DRV_1BIT,
145, UNIPHIER_PIN_PULL_DOWN),
- UNIPHIER_PINCTRL_PIN(146, "TCON5", 146,
+ UNIPHIER_PINCTRL_PIN(146, "XIRQ13", 146,
146, UNIPHIER_PIN_DRV_1BIT,
146, UNIPHIER_PIN_PULL_DOWN),
UNIPHIER_PINCTRL_PIN(147, "PWMA", 147,
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 5be4783e40d4..dea98ffb6f60 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
};
-static struct quirk_entry quirk_no_rfkill = {
- .no_rfkill = true,
-};
-
-static struct quirk_entry quirk_no_rfkill_wapf4 = {
- .wapf = 4,
- .no_rfkill = true,
-};
-
static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
};
@@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
},
- .driver_data = &quirk_no_rfkill_wapf4,
+ .driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
},
- .driver_data = &quirk_no_rfkill_wapf4,
+ .driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@@ -369,42 +360,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. X555UB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. N552VW",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. U303LB",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
- .ident = "ASUSTeK COMPUTER INC. Z550MA",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
- },
- .driver_data = &quirk_no_rfkill,
- },
- {
- .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. UX303UB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 43cb680adbb4..8fe5890bf539 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
+static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
+
struct bios_args {
u32 arg0;
u32 arg1;
@@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
return 0;
}
+static bool ashs_present(void)
+{
+ int i = 0;
+ while (ashs_ids[i]) {
+ if (acpi_dev_found(ashs_ids[i++]))
+ return true;
+ }
+ return false;
+}
+
/*
* WMI Driver
*/
@@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_leds;
- if (!asus->driver->quirks->no_rfkill) {
+ asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
+ if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
+ asus->driver->wlan_ctrl_by_user = 1;
+
+ if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
err = asus_wmi_rfkill_init(asus);
if (err)
goto fail_rfkill;
@@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_debugfs;
- asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
- if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
- asus->driver->wlan_ctrl_by_user = 1;
-
return 0;
fail_debugfs:
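The asus-wmi change replaces the per-model no_rfkill DMI quirks (removed in the asus-nb-wmi.c hunks above) with runtime detection: rfkill registration is skipped only when the WMI device state reports wireless as user-controlled and an ASHS ACPI device is present, which is why the devstate query had to move ahead of the rfkill decision. The detection loop reads a little more naturally as a for loop; a sketch equivalent to the added ashs_present():

	static bool ashs_present(void)
	{
		int i;

		for (i = 0; ashs_ids[i]; i++)
			if (acpi_dev_found(ashs_ids[i]))
				return true;
		return false;
	}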
diff --git a/drivers/platform/x86/asus-wmi.h b/drivers/platform/x86/asus-wmi.h
index fdff626c3b51..c9589d9342bb 100644
--- a/drivers/platform/x86/asus-wmi.h
+++ b/drivers/platform/x86/asus-wmi.h
@@ -39,7 +39,6 @@ struct key_entry;
struct asus_wmi;
struct quirk_entry {
- bool no_rfkill;
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 2b218b1d13e5..e12cc3504d48 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -78,18 +78,18 @@
#define FUJITSU_LCD_N_LEVELS 8
-#define ACPI_FUJITSU_CLASS "fujitsu"
-#define ACPI_FUJITSU_HID "FUJ02B1"
-#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
-#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"
-#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3"
-#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
-#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
+#define ACPI_FUJITSU_CLASS "fujitsu"
+#define ACPI_FUJITSU_BL_HID "FUJ02B1"
+#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
+#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
+#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
+#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
+#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"
#define ACPI_FUJITSU_NOTIFY_CODE1 0x80
/* FUNC interface - command values */
-#define FUNC_RFKILL 0x1000
+#define FUNC_FLAGS 0x1000
#define FUNC_LEDS 0x1001
#define FUNC_BUTTONS 0x1002
#define FUNC_BACKLIGHT 0x1004
@@ -97,6 +97,11 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
+/* FUNC interface - status flags */
+#define FLAG_RFKILL 0x020
+#define FLAG_LID 0x100
+#define FLAG_DOCK 0x200
+
#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
@@ -136,7 +141,7 @@
#endif
/* Device controlling the backlight and associated keys */
-struct fujitsu_t {
+struct fujitsu_bl {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@@ -150,12 +155,12 @@ struct fujitsu_t {
unsigned int brightness_level;
};
-static struct fujitsu_t *fujitsu;
+static struct fujitsu_bl *fujitsu_bl;
static int use_alt_lcd_levels = -1;
static int disable_brightness_adjust = -1;
-/* Device used to access other hotkeys on the laptop */
-struct fujitsu_hotkey_t {
+/* Device used to access hotkeys and other features on the laptop */
+struct fujitsu_laptop {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
struct platform_device *pf_device;
struct kfifo fifo;
spinlock_t fifo_lock;
- int rfkill_supported;
- int rfkill_state;
+ int flags_supported;
+ int flags_state;
int logolamp_registered;
int kblamps_registered;
int radio_led_registered;
int eco_led_registered;
};
-static struct fujitsu_hotkey_t *fujitsu_hotkey;
-
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
+static struct fujitsu_laptop *fujitsu_laptop;
#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
@@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
static u32 dbg_level = 0x03;
#endif
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);
-
/* Fujitsu ACPI interface function */
static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
unsigned long long value;
acpi_handle handle = NULL;
- status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
+ status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR,
"FUNC interface is not present\n");
@@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
else
- return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+ return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
}
static int eco_led_set(struct led_classdev *cdev,
@@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
- if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
brightness = LED_FULL;
return brightness;
@@ -373,10 +374,10 @@ static int set_lcd_level(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
level);
- if (level < 0 || level >= fujitsu->max_brightness)
+ if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;
- status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
return -ENODEV;
@@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
level);
- if (level < 0 || level >= fujitsu->max_brightness)
+ if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;
- status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
+ status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
return -ENODEV;
@@ -421,19 +422,19 @@ static int get_lcd_level(void)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");
- status =
- acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
+ &state);
if (ACPI_FAILURE(status))
return 0;
- fujitsu->brightness_level = state & 0x0fffffff;
+ fujitsu_bl->brightness_level = state & 0x0fffffff;
if (state & 0x80000000)
- fujitsu->brightness_changed = 1;
+ fujitsu_bl->brightness_changed = 1;
else
- fujitsu->brightness_changed = 0;
+ fujitsu_bl->brightness_changed = 0;
- return fujitsu->brightness_level;
+ return fujitsu_bl->brightness_level;
}
static int get_max_brightness(void)
@@ -443,14 +444,14 @@ static int get_max_brightness(void)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");
- status =
- acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
+ status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
+ &state);
if (ACPI_FAILURE(status))
return -1;
- fujitsu->max_brightness = state;
+ fujitsu_bl->max_brightness = state;
- return fujitsu->max_brightness;
+ return fujitsu_bl->max_brightness;
}
/* Backlight device stuff */
@@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
return ret;
}
-static const struct backlight_ops fujitsubl_ops = {
+static const struct backlight_ops fujitsu_bl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
@@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,
int ret;
- ret = fujitsu->brightness_changed;
+ ret = fujitsu_bl->brightness_changed;
if (ret < 0)
return ret;
@@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
int level, ret;
if (sscanf(buf, "%i", &level) != 1
- || (level < 0 || level >= fujitsu->max_brightness))
+ || (level < 0 || level >= fujitsu_bl->max_brightness))
return -EINVAL;
if (use_alt_lcd_levels)
@@ -567,9 +568,9 @@ static ssize_t
show_lid_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x100))
+ if (!(fujitsu_laptop->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x100)
+ if (fujitsu_laptop->flags_state & FLAG_LID)
return sprintf(buf, "open\n");
else
return sprintf(buf, "closed\n");
@@ -579,9 +580,9 @@ static ssize_t
show_dock_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x200))
+ if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x200)
+ if (fujitsu_laptop->flags_state & FLAG_DOCK)
return sprintf(buf, "docked\n");
else
return sprintf(buf, "undocked\n");
@@ -591,9 +592,9 @@ static ssize_t
show_radios_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (!(fujitsu_hotkey->rfkill_supported & 0x20))
+ if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
- if (fujitsu_hotkey->rfkill_state & 0x20)
+ if (fujitsu_laptop->flags_state & FLAG_RFKILL)
return sprintf(buf, "on\n");
else
return sprintf(buf, "killed\n");
@@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);
-static struct attribute *fujitsupf_attributes[] = {
+static struct attribute *fujitsu_pf_attributes[] = {
&dev_attr_brightness_changed.attr,
&dev_attr_max_brightness.attr,
&dev_attr_lcd_level.attr,
@@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
NULL
};
-static struct attribute_group fujitsupf_attribute_group = {
- .attrs = fujitsupf_attributes
+static struct attribute_group fujitsu_pf_attribute_group = {
+ .attrs = fujitsu_pf_attributes
};
-static struct platform_driver fujitsupf_driver = {
+static struct platform_driver fujitsu_pf_driver = {
.driver = {
.name = "fujitsu-laptop",
}
@@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
static void __init dmi_check_cb_common(const struct dmi_system_id *id)
{
pr_info("Identified laptop model '%s'\n", id->ident);
- if (use_alt_lcd_levels == -1) {
- if (acpi_has_method(NULL,
- "\\_SB.PCI0.LPCB.FJEX.SBL2"))
- use_alt_lcd_levels = 1;
- else
- use_alt_lcd_levels = 0;
- vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
- "%i\n", use_alt_lcd_levels);
- }
}
static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}
static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
- fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
+ fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
+ fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}
static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
- fujitsu->keycode1 = KEY_HELP; /* "Support" */
- fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
- fujitsu->keycode4 = KEY_WWW; /* "Internet" */
+ fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
+ fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
+ fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
return 1;
}
@@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {
/* ACPI device for LCD brightness control */
-static int acpi_fujitsu_add(struct acpi_device *device)
+static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
int state = 0;
struct input_dev *input;
@@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (!device)
return -EINVAL;
- fujitsu->acpi_handle = device->handle;
- sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
+ fujitsu_bl->acpi_handle = device->handle;
+ sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu;
+ device->driver_data = fujitsu_bl;
- fujitsu->input = input = input_allocate_device();
+ fujitsu_bl->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_stop;
}
- snprintf(fujitsu->phys, sizeof(fujitsu->phys),
+ snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
"%s/video/input0", acpi_device_hid(device));
input->name = acpi_device_name(device);
- input->phys = fujitsu->phys;
+ input->phys = fujitsu_bl->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
@@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
+ error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- fujitsu->dev = device;
+ fujitsu_bl->dev = device;
if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
pr_err("_INI Method failed\n");
}
+ if (use_alt_lcd_levels == -1) {
+ if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
+ use_alt_lcd_levels = 1;
+ else
+ use_alt_lcd_levels = 0;
+ vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
+ use_alt_lcd_levels);
+ }
+
/* do config (detect defaults) */
use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
use_alt_lcd_levels, disable_brightness_adjust);
if (get_max_brightness() <= 0)
- fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
+ fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();
return 0;
@@ -772,38 +773,38 @@ err_stop:
return error;
}
-static int acpi_fujitsu_remove(struct acpi_device *device)
+static int acpi_fujitsu_bl_remove(struct acpi_device *device)
{
- struct fujitsu_t *fujitsu = acpi_driver_data(device);
- struct input_dev *input = fujitsu->input;
+ struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
+ struct input_dev *input = fujitsu_bl->input;
input_unregister_device(input);
- fujitsu->acpi_handle = NULL;
+ fujitsu_bl->acpi_handle = NULL;
return 0;
}
/* Brightness notify */
-static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
int oldb, newb;
- input = fujitsu->input;
+ input = fujitsu_bl->input;
switch (event) {
case ACPI_FUJITSU_NOTIFY_CODE1:
keycode = 0;
- oldb = fujitsu->brightness_level;
+ oldb = fujitsu_bl->brightness_level;
get_lcd_level();
- newb = fujitsu->brightness_level;
+ newb = fujitsu_bl->brightness_level;
vdbg_printk(FUJLAPTOP_DBG_TRACE,
"brightness button event [%i -> %i (%i)]\n",
- oldb, newb, fujitsu->brightness_changed);
+ oldb, newb, fujitsu_bl->brightness_changed);
if (oldb < newb) {
if (disable_brightness_adjust != 1) {
@@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
/* ACPI device for hotkey handling */
-static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
+static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
int result = 0;
int state = 0;
@@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (!device)
return -EINVAL;
- fujitsu_hotkey->acpi_handle = device->handle;
+ fujitsu_laptop->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s",
- ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
+ ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
- device->driver_data = fujitsu_hotkey;
+ device->driver_data = fujitsu_laptop;
/* kfifo */
- spin_lock_init(&fujitsu_hotkey->fifo_lock);
- error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
+ spin_lock_init(&fujitsu_laptop->fifo_lock);
+ error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
pr_err("kfifo_alloc failed\n");
goto err_stop;
}
- fujitsu_hotkey->input = input = input_allocate_device();
+ fujitsu_laptop->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_fifo;
}
- snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
+ snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
"%s/video/input0", acpi_device_hid(device));
input->name = acpi_device_name(device);
- input->phys = fujitsu_hotkey->phys;
+ input->phys = fujitsu_laptop->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
set_bit(EV_KEY, input->evbit);
- set_bit(fujitsu->keycode1, input->keybit);
- set_bit(fujitsu->keycode2, input->keybit);
- set_bit(fujitsu->keycode3, input->keybit);
- set_bit(fujitsu->keycode4, input->keybit);
- set_bit(fujitsu->keycode5, input->keybit);
+ set_bit(fujitsu_bl->keycode1, input->keybit);
+ set_bit(fujitsu_bl->keycode2, input->keybit);
+ set_bit(fujitsu_bl->keycode3, input->keybit);
+ set_bit(fujitsu_bl->keycode4, input->keybit);
+ set_bit(fujitsu_bl->keycode5, input->keybit);
set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);
@@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;
- error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
+ error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");
- fujitsu_hotkey->dev = device;
+ fujitsu_laptop->dev = device;
if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
; /* No action, result is discarded */
vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);
- fujitsu_hotkey->rfkill_supported =
- call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0);
+ fujitsu_laptop->flags_supported =
+ call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);
/* Make sure our bitmask of supported functions is cleared if the
RFKILL function block is not implemented, like on the S7020. */
- if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD)
- fujitsu_hotkey->rfkill_supported = 0;
+ if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
+ fujitsu_laptop->flags_supported = 0;
- if (fujitsu_hotkey->rfkill_supported)
- fujitsu_hotkey->rfkill_state =
- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+ if (fujitsu_laptop->flags_supported)
+ fujitsu_laptop->flags_state =
+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&logolamp_led);
if (result == 0) {
- fujitsu_hotkey->logolamp_registered = 1;
+ fujitsu_laptop->logolamp_registered = 1;
} else {
pr_err("Could not register LED handler for logo lamp, error %i\n",
result);
@@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
(call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&kblamps_led);
if (result == 0) {
- fujitsu_hotkey->kblamps_registered = 1;
+ fujitsu_laptop->kblamps_registered = 1;
} else {
pr_err("Could not register LED handler for keyboard lamps, error %i\n",
result);
@@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
* that an RF LED is present.
*/
if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&radio_led);
if (result == 0) {
- fujitsu_hotkey->radio_led_registered = 1;
+ fujitsu_laptop->radio_led_registered = 1;
} else {
pr_err("Could not register LED handler for radio LED, error %i\n",
result);
@@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
*/
if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
(call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
- result = led_classdev_register(&fujitsu->pf_device->dev,
+ result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&eco_led);
if (result == 0) {
- fujitsu_hotkey->eco_led_registered = 1;
+ fujitsu_laptop->eco_led_registered = 1;
} else {
pr_err("Could not register LED handler for eco LED, error %i\n",
result);
@@ -1002,47 +1003,47 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_free_fifo:
- kfifo_free(&fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_laptop->fifo);
err_stop:
return error;
}
-static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
+static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
- struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
- struct input_dev *input = fujitsu_hotkey->input;
+ struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
+ struct input_dev *input = fujitsu_laptop->input;
#if IS_ENABLED(CONFIG_LEDS_CLASS)
- if (fujitsu_hotkey->logolamp_registered)
+ if (fujitsu_laptop->logolamp_registered)
led_classdev_unregister(&logolamp_led);
- if (fujitsu_hotkey->kblamps_registered)
+ if (fujitsu_laptop->kblamps_registered)
led_classdev_unregister(&kblamps_led);
- if (fujitsu_hotkey->radio_led_registered)
+ if (fujitsu_laptop->radio_led_registered)
led_classdev_unregister(&radio_led);
- if (fujitsu_hotkey->eco_led_registered)
+ if (fujitsu_laptop->eco_led_registered)
led_classdev_unregister(&eco_led);
#endif
input_unregister_device(input);
- kfifo_free(&fujitsu_hotkey->fifo);
+ kfifo_free(&fujitsu_laptop->fifo);
- fujitsu_hotkey->acpi_handle = NULL;
+ fujitsu_laptop->acpi_handle = NULL;
return 0;
}
-static void acpi_fujitsu_hotkey_press(int keycode)
+static void acpi_fujitsu_laptop_press(int keycode)
{
- struct input_dev *input = fujitsu_hotkey->input;
+ struct input_dev *input = fujitsu_laptop->input;
int status;
- status = kfifo_in_locked(&fujitsu_hotkey->fifo,
+ status = kfifo_in_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode, sizeof(keycode),
- &fujitsu_hotkey->fifo_lock);
+ &fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Could not push keycode [0x%x]\n", keycode);
@@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
"Push keycode into ringbuffer [%d]\n", keycode);
}
-static void acpi_fujitsu_hotkey_release(void)
+static void acpi_fujitsu_laptop_release(void)
{
- struct input_dev *input = fujitsu_hotkey->input;
+ struct input_dev *input = fujitsu_laptop->input;
int keycode, status;
while (true) {
- status = kfifo_out_locked(&fujitsu_hotkey->fifo,
+ status = kfifo_out_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode,
sizeof(keycode),
- &fujitsu_hotkey->fifo_lock);
+ &fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode))
return;
input_report_key(input, keycode, 0);
@@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
}
}
-static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
+static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
unsigned int irb = 1;
int i;
- input = fujitsu_hotkey->input;
+ input = fujitsu_laptop->input;
if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
keycode = KEY_UNKNOWN;
@@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
return;
}
- if (fujitsu_hotkey->rfkill_supported)
- fujitsu_hotkey->rfkill_state =
- call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
+ if (fujitsu_laptop->flags_supported)
+ fujitsu_laptop->flags_state =
+ call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);
i = 0;
while ((irb =
@@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
switch (irb & 0x4ff) {
case KEY1_CODE:
- keycode = fujitsu->keycode1;
+ keycode = fujitsu_bl->keycode1;
break;
case KEY2_CODE:
- keycode = fujitsu->keycode2;
+ keycode = fujitsu_bl->keycode2;
break;
case KEY3_CODE:
- keycode = fujitsu->keycode3;
+ keycode = fujitsu_bl->keycode3;
break;
case KEY4_CODE:
- keycode = fujitsu->keycode4;
+ keycode = fujitsu_bl->keycode4;
break;
case KEY5_CODE:
- keycode = fujitsu->keycode5;
+ keycode = fujitsu_bl->keycode5;
break;
case 0:
keycode = 0;
@@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}
if (keycode > 0)
- acpi_fujitsu_hotkey_press(keycode);
+ acpi_fujitsu_laptop_press(keycode);
else if (keycode == 0)
- acpi_fujitsu_hotkey_release();
+ acpi_fujitsu_laptop_release();
}
/* On some models (first seen on the Skylake-based Lifebook
* E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
- * handled in software; its state is queried using FUNC_RFKILL
+ * handled in software; its state is queried using FUNC_FLAGS
*/
- if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
- (call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
+ if ((fujitsu_laptop->flags_supported & BIT(26)) &&
+ (call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
keycode = KEY_TOUCHPAD_TOGGLE;
input_report_key(input, keycode, 1);
input_sync(input);
@@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
/* Initialization */
-static const struct acpi_device_id fujitsu_device_ids[] = {
- {ACPI_FUJITSU_HID, 0},
+static const struct acpi_device_id fujitsu_bl_device_ids[] = {
+ {ACPI_FUJITSU_BL_HID, 0},
{"", 0},
};
-static struct acpi_driver acpi_fujitsu_driver = {
- .name = ACPI_FUJITSU_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_bl_driver = {
+ .name = ACPI_FUJITSU_BL_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
- .ids = fujitsu_device_ids,
+ .ids = fujitsu_bl_device_ids,
.ops = {
- .add = acpi_fujitsu_add,
- .remove = acpi_fujitsu_remove,
- .notify = acpi_fujitsu_notify,
+ .add = acpi_fujitsu_bl_add,
+ .remove = acpi_fujitsu_bl_remove,
+ .notify = acpi_fujitsu_bl_notify,
},
};
-static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
- {ACPI_FUJITSU_HOTKEY_HID, 0},
+static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0},
};
-static struct acpi_driver acpi_fujitsu_hotkey_driver = {
- .name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
+static struct acpi_driver acpi_fujitsu_laptop_driver = {
+ .name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
- .ids = fujitsu_hotkey_device_ids,
+ .ids = fujitsu_laptop_device_ids,
.ops = {
- .add = acpi_fujitsu_hotkey_add,
- .remove = acpi_fujitsu_hotkey_remove,
- .notify = acpi_fujitsu_hotkey_notify,
+ .add = acpi_fujitsu_laptop_add,
+ .remove = acpi_fujitsu_laptop_remove,
+ .notify = acpi_fujitsu_laptop_notify,
},
};
static const struct acpi_device_id fujitsu_ids[] __used = {
- {ACPI_FUJITSU_HID, 0},
- {ACPI_FUJITSU_HOTKEY_HID, 0},
+ {ACPI_FUJITSU_BL_HID, 0},
+ {ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
static int __init fujitsu_init(void)
{
- int ret, result, max_brightness;
+ int ret, max_brightness;
if (acpi_disabled)
return -ENODEV;
- fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
- if (!fujitsu)
+ fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
+ if (!fujitsu_bl)
return -ENOMEM;
- fujitsu->keycode1 = KEY_PROG1;
- fujitsu->keycode2 = KEY_PROG2;
- fujitsu->keycode3 = KEY_PROG3;
- fujitsu->keycode4 = KEY_PROG4;
- fujitsu->keycode5 = KEY_RFKILL;
+ fujitsu_bl->keycode1 = KEY_PROG1;
+ fujitsu_bl->keycode2 = KEY_PROG2;
+ fujitsu_bl->keycode3 = KEY_PROG3;
+ fujitsu_bl->keycode4 = KEY_PROG4;
+ fujitsu_bl->keycode5 = KEY_RFKILL;
dmi_check_system(fujitsu_dmi_table);
- result = acpi_bus_register_driver(&acpi_fujitsu_driver);
- if (result < 0) {
- ret = -ENODEV;
+ ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
+ if (ret)
goto fail_acpi;
- }
/* Register platform stuff */
- fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
- if (!fujitsu->pf_device) {
+ fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
+ if (!fujitsu_bl->pf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
}
- ret = platform_device_add(fujitsu->pf_device);
+ ret = platform_device_add(fujitsu_bl->pf_device);
if (ret)
goto fail_platform_device1;
ret =
- sysfs_create_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
if (ret)
goto fail_platform_device2;
@@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
- max_brightness = fujitsu->max_brightness;
+ max_brightness = fujitsu_bl->max_brightness;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness - 1;
- fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
- NULL, NULL,
- &fujitsubl_ops,
- &props);
- if (IS_ERR(fujitsu->bl_device)) {
- ret = PTR_ERR(fujitsu->bl_device);
- fujitsu->bl_device = NULL;
+ fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
+ NULL, NULL,
+ &fujitsu_bl_ops,
+ &props);
+ if (IS_ERR(fujitsu_bl->bl_device)) {
+ ret = PTR_ERR(fujitsu_bl->bl_device);
+ fujitsu_bl->bl_device = NULL;
goto fail_sysfs_group;
}
- fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
+ fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
}
- ret = platform_driver_register(&fujitsupf_driver);
+ ret = platform_driver_register(&fujitsu_pf_driver);
if (ret)
goto fail_backlight;
- /* Register hotkey driver */
+ /* Register laptop driver */
- fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
- if (!fujitsu_hotkey) {
+ fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
+ if (!fujitsu_laptop) {
ret = -ENOMEM;
- goto fail_hotkey;
+ goto fail_laptop;
}
- result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
- if (result < 0) {
- ret = -ENODEV;
- goto fail_hotkey1;
- }
+ ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
+ if (ret)
+ goto fail_laptop1;
/* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
- fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
+ fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
else
- fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
+ fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
}
pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
return 0;
-fail_hotkey1:
- kfree(fujitsu_hotkey);
-fail_hotkey:
- platform_driver_unregister(&fujitsupf_driver);
+fail_laptop1:
+ kfree(fujitsu_laptop);
+fail_laptop:
+ platform_driver_unregister(&fujitsu_pf_driver);
fail_backlight:
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu_bl->bl_device);
fail_sysfs_group:
- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
fail_platform_device2:
- platform_device_del(fujitsu->pf_device);
+ platform_device_del(fujitsu_bl->pf_device);
fail_platform_device1:
- platform_device_put(fujitsu->pf_device);
+ platform_device_put(fujitsu_bl->pf_device);
fail_platform_driver:
- acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
fail_acpi:
- kfree(fujitsu);
+ kfree(fujitsu_bl);
return ret;
}
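The renamed error labels keep the standard kernel unwind shape: each failure point jumps to the label that undoes, in reverse order, everything registered before it. A minimal sketch of the idiom, with hypothetical step_a()/step_b()/undo_a() helpers:

/* Sketch of the goto-unwind idiom fujitsu_init() follows above; */
/* step_a(), step_b() and undo_a() are hypothetical helpers.     */
static int my_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto fail_a;
	ret = step_b();
	if (ret)
		goto fail_b;
	return 0;

fail_b:
	undo_a();	/* tear down in reverse order of setup */
fail_a:
	return ret;
}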
static void __exit fujitsu_cleanup(void)
{
- acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);
- kfree(fujitsu_hotkey);
+ kfree(fujitsu_laptop);
- platform_driver_unregister(&fujitsupf_driver);
+ platform_driver_unregister(&fujitsu_pf_driver);
- backlight_device_unregister(fujitsu->bl_device);
+ backlight_device_unregister(fujitsu_bl->bl_device);
- sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
- &fujitsupf_attribute_group);
+ sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
+ &fujitsu_pf_attribute_group);
- platform_device_unregister(fujitsu->pf_device);
+ platform_device_unregister(fujitsu_bl->pf_device);
- acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+ acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
- kfree(fujitsu);
+ kfree(fujitsu_bl);
pr_info("driver unloaded\n");
}
@@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
MODULE_DESCRIPTION("Fujitsu laptop extras support");
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
-MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
-MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 230043c1c90f..4bf55b5d78be 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1241,19 +1241,32 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
- depends on NVME_FC && NVME_TARGET_FC
select CRC_T10DIF
- help
+ ---help---
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
config SCSI_LPFC_DEBUG_FS
bool "Emulex LightPulse Fibre Channel debugfs Support"
depends on SCSI_LPFC && DEBUG_FS
- help
+ ---help---
This makes debugging information from the lpfc driver
available via the debugfs filesystem.
+config LPFC_NVME_INITIATOR
+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
+ depends on SCSI_LPFC && NVME_FC
+ ---help---
+ This enables NVME Initiator support in the Emulex lpfc driver.
+
+config LPFC_NVME_TARGET
+ bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
+ depends on SCSI_LPFC && NVME_TARGET_FC
+ ---help---
+ This enables NVME Target support in the Emulex lpfc driver.
+ Target mode must still be enabled on a per adapter
+ basis by module parameters.
+
config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on (EISA || MCA) && SCSI
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 2e5338dec621..7b0410e0f569 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -468,7 +468,7 @@ err_out:
return -1;
err_blink:
- return (status > 16) & 0xFF;
+ return (status >> 16) & 0xFF;
}
static inline u32 aac_get_vector(struct aac_dev *dev)
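The one-character aacraid fix above turns a relational comparison into a right shift; the difference is easy to see in a standalone sketch (the status value below is made up):

/* Standalone sketch of the aacraid fix above; the status value is made up. */
#include <stdio.h>

int main(void)
{
	unsigned int status = 0x00AB0000;

	/* Old code: (status > 16) evaluates to 0 or 1, so the blink
	 * code held in bits 23:16 was never actually returned. */
	printf("buggy: 0x%02x\n", (status > 16) & 0xFF);
	/* Fixed: shift bits 23:16 down, then mask to one byte. */
	printf("fixed: 0x%02x\n", (status >> 16) & 0xFF);
	return 0;
}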
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 109e2c99e6c1..95d8f25cbcca 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -6278,7 +6278,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
* does not disable its parity logic prior to
* the start of the reset. This may cause a
* parity error to be detected and thus a
- * spurious SERR or PERR assertion. Disble
+ * spurious SERR or PERR assertion. Disable
* PERR and SERR responses during the CHIPRST.
*/
mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 07c08ce68d70..894b1e3ebd56 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+ pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1536,19 +1548,24 @@ check_mgmt:
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ check_mgmt:
* we need to check the mgmt queue for nops that need to
* be sent to avoid starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1578,12 +1596,15 @@ check_mgmt:
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
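Every libiscsi hunk above follows the same pattern: each list_add_tail()/list_del_init() on task->running is now bracketed by the new per-connection taskqueuelock, and the lock is dropped again before calling out to the transport. A minimal sketch of that pattern, using only struct members this patch touches (not actual driver code):

/* Sketch of the taskqueuelock pattern introduced above (not driver code). */
static void queue_cmd_task(struct iscsi_conn *conn, struct iscsi_task *task)
{
	spin_lock_bh(&conn->taskqueuelock);	/* protects task->running */
	list_add_tail(&task->running, &conn->cmdqueue);
	spin_unlock_bh(&conn->taskqueuelock);	/* drop before queueing work */
	iscsi_conn_queue_work(conn);
}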
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 0bba2e30b4f0..257bbdd0f0b8 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
#define FC_MAX_ADPTMSG 64
#define MAX_HBAEVT 32
+#define MAX_HBAS_NO_RESET 16
/* Number of MSI-X vectors the driver uses */
#define LPFC_MSIX_VECTORS 2
/* lpfc wait event data ready flag */
-#define LPFC_DATA_READY (1<<0)
+#define LPFC_DATA_READY 0 /* bit 0 */
/* queue dump line buffer size */
#define LPFC_LBUF_SZ 128
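The LPFC_DATA_READY change above is not cosmetic: kernel bitops such as set_bit() and test_bit() take a bit number and shift internally, so the old mask value of (1<<0) would have selected the wrong bit wherever the flag is used that way. A standalone sketch of the distinction:

/* Standalone sketch: bitops expect a bit number, not a mask. */
#include <stdio.h>

#define LPFC_DATA_READY	0	/* bit 0, as redefined above */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << LPFC_DATA_READY;	/* what set_bit() does internally */
	printf("%d\n", !!(flags & (1UL << LPFC_DATA_READY)));	/* test_bit() */
	return 0;
}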
@@ -692,6 +693,7 @@ struct lpfc_hba {
* capability
*/
#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT 0x100000
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 5c783ef7f260..5c3be3e6f5e2 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
lpfc_poll_show, lpfc_poll_store);
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
LPFC_ATTR(sli_mode, 0, 0, 3,
"SLI mode selector:"
" 0 - auto (SLI-3 if supported),"
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
phba->cfg_fcp_imax = (uint32_t)val;
- for (i = 0; i < phba->io_channel_irqs; i++)
+
+ for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, i);
return strlen(buf);
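The lpfc_no_hba_reset array added earlier in this file relies on module_param_array(), which both parses a comma-separated list from the command line and records how many entries the user supplied. A minimal sketch of the same plumbing, with hypothetical names:

/* Sketch of module_param_array() plumbing (hypothetical module code). */
#include <linux/module.h>

static unsigned long wwpns[16];
static int wwpn_cnt;	/* set to the number of values the user passed */
module_param_array(wwpns, ulong, &wwpn_cnt, 0444);
MODULE_PARM_DESC(wwpns, "comma-separated list of WWPNs");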
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 843dd73004da..54e6ac42fbcd 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_wcqe_complete *abts_cmpl);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index c22bb3f887e1..d3e9af983015 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
"FC4 x%08x, Data: x%08x x%08x\n",
ndlp, did, ndlp->nlp_fc4_type,
FC_TYPE_FCP, FC_TYPE_NVME);
+ ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
}
- ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
lpfc_issue_els_prli(vport, ndlp, 0);
} else
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9f4798e9d938..913eed822cb8 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_cq;
goto pass_check;
}
- /* NVME LS complete queue */
- if (phba->sli4_hba.nvmels_cq &&
- phba->sli4_hba.nvmels_cq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_cq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_cq;
- goto pass_check;
- }
/* FCP complete queue */
if (phba->sli4_hba.fcp_cq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.nvmels_wq;
goto pass_check;
}
- /* NVME LS work queue */
- if (phba->sli4_hba.nvmels_wq &&
- phba->sli4_hba.nvmels_wq->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
- phba->sli4_hba.nvmels_wq, index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.nvmels_wq;
- goto pass_check;
- }
/* FCP work queue */
if (phba->sli4_hba.fcp_wq) {
for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2d26440e6f2f..d9c61d030034 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
static uint32_t
lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
- struct lpfc_hba *phba)
+ struct lpfc_vport *vport)
{
desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
- memcpy(desc->port_names.wwnn, phba->wwnn,
+ memcpy(desc->port_names.wwnn, &vport->fc_nodename,
sizeof(desc->port_names.wwnn));
- memcpy(desc->port_names.wwpn, phba->wwpn,
+ memcpy(desc->port_names.wwpn, &vport->fc_portname,
sizeof(desc->port_names.wwpn));
desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
(len + pcmd), &rdp_context->link_stat);
len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
- (len + pcmd), phba);
+ (len + pcmd), vport);
len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
(len + pcmd), vport, ndlp);
len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -8371,11 +8371,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- if (vport->port_type == LPFC_PHYSICAL_PORT
- && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
- lpfc_issue_init_vfi(vport);
- else
+ if (mb->mbxStatus == MBX_NOT_FINISHED)
+ break;
+ if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+ !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_issue_init_vfi(vport);
+ else
+ lpfc_initial_flogi(vport);
+ } else {
lpfc_initial_fdisc(vport);
+ }
break;
}
} else {
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 194a14d5f8a9..180b072beef6 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
ndlp->nlp_state, ndlp->nlp_rpi);
}
- if (!(vport->load_flag & FC_UNLOADING) &&
- !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+ if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
!(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
(ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
lpfc_handle_rrq_active(phba);
if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
lpfc_sli4_fcp_xri_abort_event_proc(phba);
+ if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+ lpfc_sli4_nvme_xri_abort_event_proc(phba);
if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
lpfc_sli4_els_xri_abort_event_proc(phba);
if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
uint32_t boot_flag, addr_mode;
uint16_t fcf_index, next_fcf_index;
struct lpfc_fcf_rec *fcf_rec = NULL;
- uint16_t vlan_id;
+ uint16_t vlan_id = LPFC_FCOE_NULL_VID;
bool select_new_fcf;
int rc;
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rdata = rport->dd_data;
/* break the link before dropping the ref */
ndlp->rport = NULL;
- if (rdata && rdata->pnode == ndlp)
- lpfc_nlp_put(ndlp);
- rdata->pnode = NULL;
+ if (rdata) {
+ if (rdata->pnode == ndlp)
+ lpfc_nlp_put(ndlp);
+ rdata->pnode = NULL;
+ }
/* drop reference for earlier registration */
put_device(&rport->dev);
}
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
- init_timer(&ndlp->nlp_delayfunc);
- ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
- ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+ setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+ (unsigned long)ndlp);
ndlp->nlp_DID = did;
ndlp->vport = vport;
ndlp->phba = vport->phba;
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
pring = qp->pring;
if (!pring)
continue;
- spin_lock_irq(&pring->ring_lock);
+ spin_lock(&pring->ring_lock);
__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
- spin_unlock_irq(&pring->ring_lock);
+ spin_unlock(&pring->ring_lock);
}
spin_unlock_irq(&phba->hbalock);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index cfdb068a3bfc..15277705cb6b 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
uint32_t phase;
uint32_t delay_multi;
};
-#define LPFC_MAX_EQ_DELAY 8
+#define LPFC_MAX_EQ_DELAY_EQID_CNT 8
struct sgl_page_pairs {
uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
union {
struct {
uint32_t num_eq;
- struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+ struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
} request;
struct {
uint32_t word0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 0ee429d773f3..2697d49da4d7 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3555,6 +3555,44 @@ out_free_mem:
return rc;
}
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+ uint64_t wwn;
+ int rc;
+ LPFC_MBOXQ_t *mboxq;
+ MAILBOX_t *mb;
+
+
+ mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mboxq)
+ return (uint64_t)-1;
+
+ /* First get WWN of HBA instance */
+ lpfc_read_nv(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6019 Mailbox failed , mbxCmd x%x "
+ "READ_NV, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return (uint64_t) -1;
+ }
+ mb = &mboxq->u.mb;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+ /* wwn is WWPN of HBA instance */
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ return be64_to_cpu(wwn);
+ else
+ return (((wwn & 0xffffffff00000000) >> 32) |
+ ((wwn & 0x00000000ffffffff) << 32));
+
+}
+
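lpfc_get_wwpn() above returns the mailbox-read name byte-swapped via be64_to_cpu() on SLI-4, and with its two 32-bit halves exchanged on SLI-3; the half swap is worth seeing on its own (the WWPN below is made up):

/* Standalone sketch of the SLI-3 half swap in lpfc_get_wwpn(); made-up WWPN. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t wwn = 0x2000000011223344ULL;	/* hypothetical WWPN */
	uint64_t swapped = ((wwn & 0xffffffff00000000ULL) >> 32) |
			   ((wwn & 0x00000000ffffffffULL) << 32);

	printf("%016llx -> %016llx\n",
	       (unsigned long long)wwn, (unsigned long long)swapped);
	return 0;
}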
/**
* lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
int error = 0;
+ int i;
+ uint64_t wwn;
+ bool use_no_reset_hba = false;
+
+ wwn = lpfc_get_wwpn(phba);
+
+ for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+ if (wwn == lpfc_no_hba_reset[i]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6020 Setting use_no_reset port=%llx\n",
+ wwn);
+ use_no_reset_hba = true;
+ break;
+ }
+ }
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
if (dev != &phba->pcidev->dev) {
shost = scsi_host_alloc(&lpfc_vport_template,
sizeof(struct lpfc_vport));
} else {
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (!use_no_reset_hba)
shost = scsi_host_alloc(&lpfc_template,
sizeof(struct lpfc_vport));
else
- shost = scsi_host_alloc(&lpfc_template_s3,
+ shost = scsi_host_alloc(&lpfc_template_no_hr,
sizeof(struct lpfc_vport));
}
} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
INIT_LIST_HEAD(&vport->rcv_buffer_list);
spin_lock_init(&vport->work_port_lock);
- init_timer(&vport->fc_disctmo);
- vport->fc_disctmo.function = lpfc_disc_timeout;
- vport->fc_disctmo.data = (unsigned long)vport;
+ setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+ (unsigned long)vport);
- init_timer(&vport->els_tmofunc);
- vport->els_tmofunc.function = lpfc_els_timeout;
- vport->els_tmofunc.data = (unsigned long)vport;
+ setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+ (unsigned long)vport);
- init_timer(&vport->delayed_disc_tmo);
- vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
- vport->delayed_disc_tmo.data = (unsigned long)vport;
+ setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+ (unsigned long)vport);
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->luns);
/* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
+ setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
/* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+ (unsigned long)phba);
/* EA polling mode timer */
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
+ setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+ (unsigned long)phba);
/* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
return 0;
}
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
*/
/* FCP polling mode timer */
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
+ setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+ (unsigned long)phba);
/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the configured values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/
- init_timer(&phba->rrq_tmr);
- phba->rrq_tmr.function = lpfc_rrq_timeout;
- phba->rrq_tmr.data = (unsigned long)phba;
+ setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
/* FCF rediscover timer */
- init_timer(&phba->fcf.redisc_wait);
- phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
- phba->fcf.redisc_wait.data = (unsigned long)phba;
+ setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+ (unsigned long)phba);
/*
* Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the host templates with the updated values. */
lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+ lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* Initialize the Abort nvme buffer list used by driver */
spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ /* Fast-path XRI aborted CQ Event work queue list */
+ INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
}
/* This abort list is used by the worker thread */
@@ -8712,12 +8756,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
}
- /*
- * Configure EQ delay multipier for interrupt coalescing using
- * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
- */
- for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+ for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
lpfc_modify_hba_eq_delay(phba, qidx);
+
return 0;
out_destroy:
@@ -8973,6 +9014,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
/* Pending ELS XRI abort events */
list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
&cqelist);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Pending NVME XRI abort events */
+ list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ &cqelist);
+ }
/* Pending async events */
list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
&cqelist);
@@ -10400,12 +10446,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
fc_remove_host(shost);
scsi_remove_host(shost);
- /* Perform ndlp cleanup on the physical port. The nvme and nvmet
- * localports are destroyed after to cleanup all transport memory.
- */
lpfc_cleanup(vport);
- lpfc_nvmet_destroy_targetport(phba);
- lpfc_nvme_destroy_localport(vport);
/*
* Bring down the SLI Layer. This step disable all interrupts,
@@ -12018,6 +12059,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index c61d8d692ede..5986c7957199 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
}
dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
- dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
if (!dma_buf->iocbq) {
kfree(dma_buf->context);
pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
"2621 Ran out of nvmet iocb/WQEs\n");
return NULL;
}
+ dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
nvmewqe = dma_buf->iocbq;
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
/* Initialize WQE */
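The lpfc_mem.c hunk above is a classic use-before-check fix: the iocb_flag assignment dereferenced dma_buf->iocbq one line before the !dma_buf->iocbq test could reject it. The bug class in miniature, with hypothetical names:

/* Sketch of the use-before-check bug class fixed above (hypothetical names). */
	p = alloc_thing();
	p->flag = 1;	/* BUG: dereferences p before the NULL check below */
	if (!p)
		return NULL;
	/* fix: perform the assignment here, once p is known non-NULL */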
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 609a908ea9db..0a4c19081409 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
- bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+ bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
* Embed the payload in the last half of the WQE
* WQE words 16-30 get the NVME CMD IU payload
*
- * WQE Word 16 is already setup with flags
- * WQE words 17-19 get payload Words 2-4
+ * WQE words 16-19 get payload Words 1-4
* WQE words 20-21 get payload Words 6-7
* WQE words 22-29 get payload Words 16-23
*/
- wptr = &wqe->words[17]; /* WQE ptr */
+ wptr = &wqe->words[16]; /* WQE ptr */
dptr = (uint32_t *)nCmd->cmdaddr; /* payload ptr */
- dptr += 2; /* Skip Words 0-1 in payload */
+ dptr++; /* Skip Word 0 in payload */
+ *wptr++ = *dptr++; /* Word 1 */
*wptr++ = *dptr++; /* Word 2 */
*wptr++ = *dptr++; /* Word 3 */
*wptr++ = *dptr++; /* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_WRITE_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
phba->fc4NvmeOutputRequests++;
} else {
/* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_READ;
-
phba->fc4NvmeInputRequests++;
}
} else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
/* Word 11 */
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
- /* Word 16 */
- wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
phba->fc4NvmeControlRequests++;
}
/*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
pnvme_fcreq->private = (void *)lpfc_ncmd;
lpfc_ncmd->nvmeCmd = pnvme_fcreq;
lpfc_ncmd->nrport = rport;
+ lpfc_ncmd->ndlp = ndlp;
lpfc_ncmd->start_time = jiffies;
lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
"sid: x%x did: x%x oxid: x%x\n",
ret, vport->fc_myDID, ndlp->nlp_DID,
lpfc_ncmd->cur_iocbq.sli4_xritag);
- ret = -EINVAL;
+ ret = -EBUSY;
goto out_free_nvme_buf;
}
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
pdma_phys_sgl1, cur_xritag);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
struct lpfc_nvme_buf, list);
if (status) {
/* failure, put on abort nvme list */
- lpfc_ncmd->exch_busy = 1;
+ lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
- lpfc_ncmd->exch_busy = 0;
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
unsigned long iflag = 0;
lpfc_ncmd->nonsg_phys = 0;
- if (lpfc_ncmd->exch_busy) {
+ if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
lpfc_ncmd->nvmeCmd = NULL;
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
int
lpfc_nvme_create_localport(struct lpfc_vport *vport)
{
+ int ret = 0;
struct lpfc_hba *phba = vport->phba;
struct nvme_fc_port_info nfcp_info;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
- int len, ret = 0;
+ int len;
/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
/* localport is allocated from the stack, but the registration
* call allocates heap memory as well as the private area.
*/
+#ifdef CONFIG_LPFC_NVME_INITIATOR
ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
&vport->phba->pcidev->dev, &localport);
+#else
+ ret = -ENOMEM;
+#endif
if (!ret) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
"6005 Successfully registered local "
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
lport->vport = vport;
INIT_LIST_HEAD(&lport->rport_list);
vport->nvmei_support = 1;
+ len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+ vport->phba->total_nvme_bufs += len;
}
- len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
- vport->phba->total_nvme_bufs += len;
return ret;
}
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
void
lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
"6011 Destroying NVME localport %p\n",
localport);
-
list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
/* The last node ref has to get released now before the rport
* private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"6008 rport fail destroy %x\n", ret);
wait_for_completion_timeout(&rport->rport_unreg_done, 5);
}
+
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
"Failed, status x%x\n",
ret);
}
+#endif
}
void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
int
lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
int ret = 0;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
ret = nvme_fc_register_remoteport(localport, &rpinfo,
&remote_port);
if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ndlp->nlp_type, ndlp->nlp_DID, ndlp);
}
return ret;
+#else
+ return 0;
+#endif
}
/* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
void
lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
+#ifdef CONFIG_LPFC_NVME_INITIATOR
int ret;
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
@@ -2458,7 +2461,61 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return;
input_err:
+#endif
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6168: State error: lport %p, rport%p FCID x%06x\n",
vport->localport, ndlp->rport, ndlp->nlp_DID);
}
+
+/**
+ * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvme xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVME aborted xri.
+ **/
+void
+lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+ uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
+ struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
+ struct lpfc_nodelist *ndlp;
+ unsigned long iflag = 0;
+ int rrq_empty = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+ spin_lock_irqsave(&phba->hbalock, iflag);
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list,
+ list) {
+ if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
+ list_del(&lpfc_ncmd->list);
+ lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
+ lpfc_ncmd->status = IOSTAT_SUCCESS;
+ spin_unlock(
+ &phba->sli4_hba.abts_nvme_buf_list_lock);
+
+ rrq_empty = list_empty(&phba->active_rrq_list);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+ ndlp = lpfc_ncmd->ndlp;
+ if (ndlp) {
+ lpfc_set_rrq_active(
+ phba, ndlp,
+ lpfc_ncmd->cur_iocbq.sli4_lxritag,
+ rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
+ lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ if (rrq_empty)
+ lpfc_worker_wake_up(phba);
+ return;
+ }
+ }
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ spin_unlock_irqrestore(&phba->hbalock, iflag);
+}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index b2fae5e813f8..1347deb8dd6c 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
struct list_head list;
struct nvmefc_fcp_req *nvmeCmd;
struct lpfc_nvme_rport *nrport;
+ struct lpfc_nodelist *ndlp;
uint32_t timeout;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index c421e1738ee9..b7739a554fe0 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6102 Bad state IO x%x aborted\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6152 FCP Drop IO x%x: Prep\n",
ctxp->oxid);
+ rc = -ENXIO;
goto aerr;
}
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->wqeq->hba_wqidx = 0;
nvmewqeq->context2 = NULL;
nvmewqeq->context3 = NULL;
+ rc = -EBUSY;
aerr:
- return -ENXIO;
+ return rc;
}
static void
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
+#ifdef CONFIG_LPFC_NVME_TARGET
error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
&phba->pcidev->dev,
&phba->targetport);
+#else
+ error = -ENOMEM;
+#endif
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
return 0;
}
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ /* TODO: work in progress */
+}
+
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_tgtport *tgtp;
if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
}
phba->targetport = NULL;
+#endif
}
/**
@@ -764,6 +788,7 @@ static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct hbq_dmabuf *nvmebuf)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ dropit:
atomic_inc(&tgtp->xmt_ls_abort);
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
}
/**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
struct rqb_dmabuf *nvmebuf,
uint64_t isr_timestamp)
{
+#ifdef CONFIG_LPFC_NVME_TARGET
struct lpfc_nvmet_rcv_ctx *ctxp;
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
atomic_inc(&tgtp->rcv_fcp_cmd_drop);
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
- "6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+ "6159 FCP Drop IO x%x: err x%x\n",
ctxp->oxid, rc);
dropit:
lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ dropit:
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
}
+#endif
}
/**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
- bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+ bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
/* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
case NVMET_FCOP_RSP:
/* Words 0 - 2 */
- sgel = &rsp->sg[0];
physaddr = rsp->rspdma;
wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
struct lpfc_nodelist *ndlp;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
- "6067 %s: Entrypoint: sid %x xri %x\n", __func__,
- sid, xri);
+ "6067 Abort: sid %x xri x%x/x%x\n",
+ sid, xri, ctxp->wqeq->sli4_xritag);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
@@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
return 0;
@@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
"6160 Drop ABTS - wrong NDLP state x%x.\n",
- ndlp->nlp_state);
+ (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
/* No failure to an ABTS request. */
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 9d6384af9fce..54fd0c81ceaf 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
.proc_name = LPFC_DRIVER_NAME,
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
+ .eh_timed_out = fc_eh_timed_out,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
.slave_alloc = lpfc_slave_alloc,
.slave_configure = lpfc_slave_configure,
.slave_destroy = lpfc_slave_destroy,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index e43e5e23c24b..1c9fa45df7eb 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
+
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
start_sglq = sglq;
while (!found) {
if (!sglq)
- return NULL;
+ break;
if (ndlp && ndlp->active_rrqs_xri_bitmap &&
test_bit(sglq->sli4_lxritag,
ndlp->active_rrqs_xri_bitmap)) {
@@ -12213,6 +12214,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
}
/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+ struct lpfc_cq_event *cq_event;
+
+ /* First, declare the nvme xri abort event has been handled */
+ spin_lock_irq(&phba->hbalock);
+ phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+ spin_unlock_irq(&phba->hbalock);
+ /* Now, handle all the nvme xri abort events */
+ while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+ /* Get the first event from the head of the event queue */
+ spin_lock_irq(&phba->hbalock);
+ list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+ cq_event, struct lpfc_cq_event, list);
+ spin_unlock_irq(&phba->hbalock);
+ /* Notify aborted XRI for NVME work queue */
+ if (phba->nvmet_support) {
+ lpfc_sli4_nvmet_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ } else {
+ lpfc_sli4_nvme_xri_aborted(phba,
+ &cq_event->cqe.wcqe_axri);
+ }
+ /* Free the event processed back to the free pool */
+ lpfc_sli4_cq_event_release(phba, cq_event);
+ }
+}
+
+/**
* lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
* @phba: pointer to lpfc hba data structure.
*
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflags);
workposted = true;
break;
+ case LPFC_NVME:
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ list_add_tail(&cq_event->list,
+ &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+ /* Set the nvme xri abort event flag */
+ phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ workposted = true;
+ break;
default:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
- "0603 Invalid work queue CQE subtype (x%x)\n",
- cq->subtype);
+ "0603 Invalid CQ subtype %d: "
+ "%08x %08x %08x %08x\n",
+ cq->subtype, wcqe->word0, wcqe->parameter,
+ wcqe->word2, wcqe->word3);
+ lpfc_sli4_cq_event_release(phba, cq_event);
workposted = false;
break;
}
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* @startq: The starting FCP EQ to modify
*
* This function sends a MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ IDs to be
+ * updated in one mailbox command.
*
* The @phba struct is used to send mailbox command to HBA. The @startq
* is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
- if (cnt >= LPFC_MAX_EQ_DELAY)
+ if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
break;
}
eq_delay->u.request.num_eq = cnt;
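The renamed constant makes the contract explicit: one MODIFY_EQ_DELAY mailbox updates at most LPFC_MAX_EQ_DELAY_EQID_CNT (8) event queues, so both issuing loops in this patch (here and in lpfc_fcp_imax_store()) now advance in steps of that count. The batching arithmetic in a standalone sketch:

/* Standalone sketch of the 8-EQs-per-mailbox batching above. */
#include <stdio.h>

#define EQID_PER_CMD	8	/* stands in for LPFC_MAX_EQ_DELAY_EQID_CNT */

int main(void)
{
	int io_channel = 20, qidx, last;

	for (qidx = 0; qidx < io_channel; qidx += EQID_PER_CMD) {
		last = qidx + EQID_PER_CMD - 1;
		if (last > io_channel - 1)
			last = io_channel - 1;
		printf("one mailbox covers EQs %d..%d\n", qidx, last);
	}
	return 0;
}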
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
drq = drqp[idx];
cq = cqp[idx];
- if (hrq->entry_count != drq->entry_count) {
- status = -EINVAL;
- goto out;
- }
-
/* sanity check on queue memory */
if (!hrq || !drq || !cq) {
status = -ENODEV;
goto out;
}
+ if (hrq->entry_count != drq->entry_count) {
+ status = -EINVAL;
+ goto out;
+ }
+
if (idx == 0) {
bf_set(lpfc_mbx_rq_create_num_pages,
&rq_create->u.request,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 91153c9f6d18..710458cf11d6 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
struct list_head sp_asynce_work_queue;
struct list_head sp_fcp_xri_aborted_work_queue;
struct list_head sp_els_xri_aborted_work_queue;
+ struct list_head sp_nvme_xri_aborted_work_queue;
struct list_head sp_unsol_work_queue;
struct lpfc_sli4_link link_state;
struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+ struct sli4_wcqe_xri_aborted *axri);
void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
struct sli4_wcqe_xri_aborted *);
void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 86c6c9b26b82..d4e95e28f4e3 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.10"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 7fe7e6ed595b..8981806fb13f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
extern struct sas_function_template mpt3sas_transport_functions;
extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
/* trigger data externs */
void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 46e866c36c8a..919ba2bb15f1 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
- r = scsi_internal_device_block(sdev);
+ r = scsi_internal_device_block(sdev, false);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
- unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
- /* In case of bogus fw or device, we could end up having
- * unaligned partial completion. We can force alignment here,
- * then scsi-ml does not need to handle this misbehavior.
- */
- sector_sz = scmd->device->sector_size;
- if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
- xfer_cnt % sector_sz)) {
- sdev_printk(KERN_INFO, scmd->device,
- "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
- xfer_cnt, sector_sz);
- xfer_cnt = round_down(xfer_cnt, sector_sz);
- }
-
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h
index 23bd70628a2f..7d173f48a81e 100644
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
#define QEDF_INFO(pdev, level, fmt, ...) \
qedf_dbg_info(pdev, __func__, __LINE__, level, fmt, \
## __VA_ARGS__)
-
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
u32 info, const char *fmt, ...);
/* GRC Dump related defines */
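Dropping the redundant extern is cosmetic; the substantive change is the __printf(m, n) annotation, which tells the compiler that argument m is a printf format consumed by the variadic arguments starting at position n, so mismatches become -Wformat warnings. A standalone sketch of the mechanism (the kernel's __printf macro expands roughly as below):

/* Standalone sketch of what the __printf() annotations above buy. */
#include <stdarg.h>
#include <stdio.h>

#define __printf(a, b) __attribute__((format(printf, a, b)))

__printf(1, 2)
static void dbg(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg("ok %d\n", 42);
	/* dbg("%d\n", "oops");  -- would now draw a -Wformat warning */
	return 0;
}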
diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c
index 868d423380d1..ed58b9104f58 100644
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
case FIP_DT_MAC:
mp = (struct fip_mac_desc *)desc;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
- "fd_mac=%pM.\n", __func__, mp->fd_mac);
+ "fd_mac=%pM\n", mp->fd_mac);
ether_addr_copy(cvl_mac, mp->fd_mac);
break;
case FIP_DT_NAME:
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index ee0dcf9d3aba..46debe5034af 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1342,7 +1342,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
} else {
refcount = kref_read(&io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
- "%d:0:%d:%d xid=0x%0x op=0x%02x "
+ "%d:0:%d:%lld xid=0x%0x op=0x%02x "
"lba=%02x%02x%02x%02x cdb_status=%d "
"fcp_resid=0x%x refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1426,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
sc_cmd->result = result << 16;
refcount = kref_read(&io_req->refcount);
- QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
"sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
"allowed=%d retries=%d refcount=%d.\n",
qedf->lport->host->host_no, sc_cmd->device->id,
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index d9d7a86b5f8b..8e2a160490e6 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
}
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
- "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
- qedf->bdq_pbl_dma);
+ "BDQ PBL addr=0x%p dma=%pad\n",
+ qedf->bdq_pbl, &qedf->bdq_pbl_dma);
/*
* Populate BDQ PBL with physical and virtual address of individual
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 955936274241..59417199bf36 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/module.h>
-int do_not_recover;
+int qedi_do_not_recover;
static struct dentry *qedi_dbg_root;
void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
static ssize_t
qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (!do_not_recover)
- do_not_recover = 1;
+ if (!qedi_do_not_recover)
+ qedi_do_not_recover = 1;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
static ssize_t
qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
{
- if (do_not_recover)
- do_not_recover = 0;
+ if (qedi_do_not_recover)
+ qedi_do_not_recover = 0;
QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
- do_not_recover);
+ qedi_do_not_recover);
return 0;
}
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
if (*ppos)
return 0;
- cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+ cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
cnt = min_t(int, count, cnt - *ppos);
*ppos += cnt;
return cnt;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index c9f0ef4e11b3..2bce3efc66a4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1461,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work)
get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
qedi_conn->iscsi_conn_id);
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
- do_not_recover);
+ qedi_do_not_recover);
goto abort_ret;
}
diff --git a/drivers/scsi/qedi/qedi_gbl.h b/drivers/scsi/qedi/qedi_gbl.h
index 8e488de88ece..63d793f46064 100644
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
#include "qedi_iscsi.h"
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
extern uint qedi_io_tracing;
-extern int do_not_recover;
+
extern struct scsi_host_template qedi_host_template;
extern struct iscsi_transport qedi_iscsi_transport;
extern const struct qed_iscsi_ops *qedi_ops;
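The header change gives every caller a single spelling for the knob: with CONFIG_DEBUG_FS it is a real global defined in qedi_debugfs.c, and without it the macro folds to a constant 0 so each "if (qedi_do_not_recover)" branch compiles away. The pattern in miniature, with a hypothetical name:

/* Sketch of the compile-time stub pattern used above (hypothetical knob). */
#ifdef CONFIG_DEBUG_FS
extern int my_knob;	/* real variable, defined beside the debugfs code */
#else
#define my_knob (0)	/* constant: dead branches are optimized out */
#endif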
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index b9f79d36142d..4cc474364c50 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
return ERR_PTR(ret);
}
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
ret = -ENOMEM;
return ERR_PTR(ret);
}
@@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
struct qedi_endpoint *qedi_ep;
int ret = 0;
- if (do_not_recover)
+ if (qedi_do_not_recover)
return 1;
qedi_ep = ep->dd_data;
@@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
- if (do_not_recover) {
+ if (qedi_do_not_recover) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Do not recover cid=0x%x\n",
qedi_ep->iscsi_cid);
@@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
}
}
- if (do_not_recover)
+ if (qedi_do_not_recover)
goto ep_exit_recover;
switch (qedi_ep->state) {
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 5eda21d903e9..8e3d92807cb8 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
*/
qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
- qedi_setup_int(qedi);
+ rc = qedi_setup_int(qedi);
if (rc)
goto stop_iscsi_func;
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 67c0d5aa3212..de952935b5d2 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -3,6 +3,7 @@ config SCSI_QLA_FC
depends on PCI && SCSI
depends on SCSI_FC_ATTRS
select FW_LOADER
+ select BTREE
---help---
This qla2xxx driver supports all QLogic Fibre Channel
PCI and PCIe host adapters.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index f610103994af..435ff7fd6384 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
- BUG_ON(atomic_read(&vha->vref_count));
-
qla2x00_free_fcports(vha);
mutex_lock(&ha->vport_lock);
@@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l,
vha->gnl.ldma);
- if (vha->qpair->vp_idx == vha->vp_idx) {
+ if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) {
if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS)
ql_log(ql_log_warn, vha, 0x7087,
"Queue Pair delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 21d9fb7fc887..51b4179469d1 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
"%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
ql_dbg(level, vha, id,
"----- -----------------------------------------------\n");
- for (cnt = 0; cnt < size; cnt++, buf++) {
- if (cnt % 16 == 0)
- ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
- printk(" %02x", *buf);
- if (cnt % 16 == 15)
- printk("\n");
+ for (cnt = 0; cnt < size; cnt += 16) {
+ ql_dbg(level, vha, id, "%04x: ", cnt);
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+ buf + cnt, min(16U, size - cnt), false);
}
- if (cnt % 16 != 0)
- printk("\n");
}
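
print_hex_dump() replaces the hand-rolled loop; KERN_CONT makes it continue the line that the preceding ql_dbg() started. Used on its own, the helper can also emit the offset prefix itself. A sketch under that assumption (example_dump is illustrative):

#include <linux/printk.h>
#include <linux/types.h>

/* Sketch: 16 bytes per row, offsets printed by the helper itself. */
static void example_dump(const void *buf, size_t len)
{
	print_hex_dump(KERN_DEBUG, "qla: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}
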
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index e1fc4e66966a..c6bffe929fe7 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
#define ql_dbg_tgt 0x00004000 /* Target mode */
#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
+#define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */
extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
uint32_t, void **);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 625d438e3cce..ae119018dfaa 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include <linux/aer.h>
#include <linux/mutex.h>
+#include <linux/btree.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -395,11 +396,15 @@ struct srb_iocb {
struct completion comp;
} abt;
struct ct_arg ctarg;
+#define MAX_IOCB_MB_REG 28
+#define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t))
struct {
- __le16 in_mb[28]; /* fr fw */
- __le16 out_mb[28]; /* to fw */
+ __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */
+ __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */
void *out, *in;
dma_addr_t out_dma, in_dma;
+ struct completion comp;
+ int rc;
} mbx;
struct {
struct imm_ntfy_from_isp *ntfy;
@@ -437,7 +442,7 @@ typedef struct srb {
uint32_t handle;
uint16_t flags;
uint16_t type;
- char *name;
+ const char *name;
int iocbs;
struct qla_qpair *qpair;
u32 gen1; /* scratch */
@@ -2300,6 +2305,8 @@ typedef struct fc_port {
struct ct_sns_desc ct_desc;
enum discovery_state disc_state;
enum login_state fw_login_state;
+ unsigned long plogi_nack_done_deadline;
+
u32 login_gen, last_login_gen;
u32 rscn_gen, last_rscn_gen;
u32 chip_reset;
@@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx {
uint32_t gold_fw_version;
};
+struct qla_dif_statistics {
+ uint64_t dif_input_bytes;
+ uint64_t dif_output_bytes;
+ uint64_t dif_input_requests;
+ uint64_t dif_output_requests;
+ uint32_t dif_guard_err;
+ uint32_t dif_ref_tag_err;
+ uint32_t dif_app_tag_err;
+};
+
struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
@@ -3118,6 +3135,8 @@ struct qla_statistics {
uint32_t stat_max_pend_cmds;
uint32_t stat_max_qfull_cmds_alloc;
uint32_t stat_max_qfull_cmds_dropped;
+
+ struct qla_dif_statistics qla_dif_stats;
};
struct bidi_statistics {
@@ -3125,6 +3144,16 @@ struct bidi_statistics {
unsigned long long transfer_bytes;
};
+struct qla_tc_param {
+ struct scsi_qla_host *vha;
+ uint32_t blk_sz;
+ uint32_t bufflen;
+ struct scatterlist *sg;
+ struct scatterlist *prot_sg;
+ struct crc_context *ctx;
+ uint8_t *ctx_dsd_alloced;
+};
+
/* Multi queue support */
#define MBC_INITIALIZE_MULTIQ 0x1f
#define QLA_QUE_PAGE 0X1000
@@ -3272,6 +3301,8 @@ struct qlt_hw_data {
uint8_t tgt_node_name[WWN_SIZE];
struct dentry *dfs_tgt_sess;
+ struct dentry *dfs_tgt_port_database;
+
struct list_head q_full_list;
uint32_t num_pend_cmds;
uint32_t num_qfull_cmds_alloc;
@@ -3281,6 +3312,7 @@ struct qlt_hw_data {
spinlock_t sess_lock;
int rspq_vector_cpuid;
spinlock_t atio_lock ____cacheline_aligned;
+ struct btree_head32 host_map;
};
#define MAX_QFULL_CMDS_ALLOC 8192
@@ -3290,6 +3322,10 @@ struct qlt_hw_data {
#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */
+#define QLA_EARLY_LINKUP(_ha) \
+ ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \
+ _ha->flags.fw_started && !_ha->flags.fw_init_done)
+
/*
* Qlogic host adapter specific data structure.
*/
@@ -3339,7 +3375,11 @@ struct qla_hw_data {
uint32_t fawwpn_enabled:1;
uint32_t exlogins_enabled:1;
uint32_t exchoffld_enabled:1;
- /* 35 bits */
+
+ uint32_t lip_ae:1;
+ uint32_t n2n_ae:1;
+ uint32_t fw_started:1;
+ uint32_t fw_init_done:1;
} flags;
/* This spinlock is used to protect "io transactions", you must
@@ -3432,7 +3472,6 @@ struct qla_hw_data {
#define P2P_LOOP 3
uint8_t interrupts_on;
uint32_t isp_abort_cnt;
-
#define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532
#define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432
#define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001
@@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host {
struct list_head vp_fcports; /* list of fcports */
struct list_head work_list;
spinlock_t work_lock;
+ struct work_struct iocb_work;
/* Commonly used flags and state information. */
struct Scsi_Host *host;
@@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host {
/* Count of active session/fcport */
int fcport_count;
wait_queue_head_t fcport_waitQ;
+ wait_queue_head_t vref_waitq;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -4131,14 +4172,17 @@ struct qla2_sgx {
mb(); \
if (__vha->flags.delete_progress) { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
__bail = 1; \
} else { \
__bail = 0; \
} \
} while (0)
-#define QLA_VHA_MARK_NOT_BUSY(__vha) \
+#define QLA_VHA_MARK_NOT_BUSY(__vha) do { \
atomic_dec(&__vha->vref_count); \
+ wake_up(&__vha->vref_waitq); \
+} while (0)
#define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \
atomic_inc(&__qpair->ref_count); \
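
The new vref_waitq turns vport teardown from poll-and-msleep into an event-driven wait: every reference drop wakes the queue, and the deleter sleeps until the count drains or a deadline passes. The pattern in isolation, with illustrative example_* names:

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

struct example_obj {
	atomic_t ref;
	wait_queue_head_t drain_wq;	/* set up with init_waitqueue_head() */
};

/* Sketch: every put wakes a possible waiter so it can re-check. */
static void example_put(struct example_obj *o)
{
	atomic_dec(&o->ref);
	wake_up(&o->drain_wq);
}

/* Sketch: bounded drain; wait_event_timeout() returns 0 on timeout. */
static bool example_wait_for_drain(struct example_obj *o)
{
	return wait_event_timeout(o->drain_wq,
				  !atomic_read(&o->ref), 10 * HZ) != 0;
}
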
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index b48cce696bac..989e17b0758c 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
struct fc_port *sess = NULL;
- struct qla_tgt *tgt= vha->vha_tgt.qla_tgt;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
- seq_printf(s, "%s\n",vha->host_str);
+ seq_printf(s, "%s\n", vha->host_str);
if (tgt) {
- seq_printf(s, "Port ID Port Name Handle\n");
+ seq_puts(s, "Port ID Port Name Handle\n");
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vp_fcports, list)
@@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}
-
static const struct file_operations dfs_tgt_sess_ops = {
.open = qla2x00_dfs_tgt_sess_open,
.read = seq_read,
@@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = {
};
static int
+qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
+{
+ scsi_qla_host_t *vha = s->private;
+ struct qla_hw_data *ha = vha->hw;
+ struct gid_list_info *gid_list;
+ dma_addr_t gid_list_dma;
+ fc_port_t fc_port;
+ char *id_iter;
+ int rc, i;
+ uint16_t entries, loop_id;
+ struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+
+ seq_printf(s, "%s\n", vha->host_str);
+ if (tgt) {
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x705c,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
+
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
+
+ id_iter = (char *)gid_list;
+
+ seq_puts(s, "Port Name Port ID Loop ID\n");
+
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
+
+ fc_port.loop_id = loop_id;
+
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
+ }
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
+ }
+
+ return 0;
+}
+
+static int
+qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
+{
+ scsi_qla_host_t *vha = inode->i_private;
+
+ return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
+}
+
+static const struct file_operations dfs_tgt_port_database_ops = {
+ .open = qla2x00_dfs_tgt_port_database_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
struct scsi_qla_host *vha = s->private;
@@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
seq_printf(s, "num Q full sent = %lld\n",
vha->tgt_counters.num_q_full_sent);
+ /* DIF stats */
+ seq_printf(s, "DIF Inp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_bytes);
+ seq_printf(s, "DIF Outp Bytes = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_bytes);
+ seq_printf(s, "DIF Inp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_input_requests);
+ seq_printf(s, "DIF Outp Req = %lld\n",
+ vha->qla_stats.qla_dif_stats.dif_output_requests);
+ seq_printf(s, "DIF Guard err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_guard_err);
+ seq_printf(s, "DIF Ref tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
+ seq_printf(s, "DIF App tag err = %d\n",
+ vha->qla_stats.qla_dif_stats.dif_app_tag_err);
return 0;
}
@@ -281,6 +367,14 @@ create_nodes:
goto out;
}
+ ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
+ S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);
+ if (!ha->tgt.dfs_tgt_port_database) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Unable to create debugFS tgt_port_database node.\n");
+ goto out;
+ }
+
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
@@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->tgt.dfs_tgt_sess = NULL;
}
+ if (ha->tgt.dfs_tgt_port_database) {
+ debugfs_remove(ha->tgt.dfs_tgt_port_database);
+ ha->tgt.dfs_tgt_port_database = NULL;
+ }
+
if (ha->dfs_fw_resource_cnt) {
debugfs_remove(ha->dfs_fw_resource_cnt);
ha->dfs_fw_resource_cnt = NULL;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index b3d6441d1d90..5b2451745e9f 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *);
void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *,
uint16_t *);
int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *);
+int qla24xx_async_abort_cmd(srb_t *);
/*
* Global Functions in qla_mid.c source file.
@@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
- uint32_t *, uint16_t, struct qla_tgt_cmd *);
+ uint32_t *, uint16_t, struct qla_tc_param *);
extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *);
extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *);
extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *,
@@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *,
extern int
qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
- dma_addr_t, uint);
+ dma_addr_t, uint16_t);
extern int qla24xx_abort_command(srb_t *);
extern int qla24xx_async_abort_command(srb_t *);
@@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
extern int
qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint);
+int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *);
+int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8);
+int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
+ uint16_t *);
+int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
+ struct port_database_24xx *);
+
/*
* Global Function Prototypes in qla_isr.c source file.
*/
@@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *,
uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **);
void qla24xx_delete_sess_fn(struct work_struct *);
void qlt_unknown_atio_work_fn(struct work_struct *);
+void qlt_update_host_map(struct scsi_qla_host *, port_id_t);
+void qlt_remove_target_resources(struct qla_hw_data *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 32fb9007f137..f9d2fe7b1ade 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
struct srb *sp = s;
struct scsi_qla_host *vha = sp->vha;
struct qla_hw_data *ha = vha->hw;
- uint64_t zero = 0;
struct port_database_24xx *pd;
fc_port_t *fcport = sp->fcport;
u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb;
@@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res)
pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in;
- /* Check for logged in state. */
- if (pd->current_login_state != PDS_PRLI_COMPLETE &&
- pd->last_login_state != PDS_PRLI_COMPLETE) {
- ql_dbg(ql_dbg_mbx, vha, 0xffff,
- "Unable to verify login-state (%x/%x) for "
- "loop_id %x.\n", pd->current_login_state,
- pd->last_login_state, fcport->loop_id);
- rval = QLA_FUNCTION_FAILED;
- goto gpd_error_out;
- }
-
- if (fcport->loop_id == FC_NO_LOOP_ID ||
- (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
- memcmp(fcport->port_name, pd->port_name, 8))) {
- /* We lost the device mid way. */
- rval = QLA_NOT_LOGGED_IN;
- goto gpd_error_out;
- }
-
- /* Names are little-endian. */
- memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
-
- /* Get port_id of device. */
- fcport->d_id.b.domain = pd->port_id[0];
- fcport->d_id.b.area = pd->port_id[1];
- fcport->d_id.b.al_pa = pd->port_id[2];
- fcport->d_id.b.rsvd_1 = 0;
-
- /* If not target must be initiator or unknown type. */
- if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
- fcport->port_type = FCT_INITIATOR;
- else
- fcport->port_type = FCT_TARGET;
-
- /* Passback COS information. */
- fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
- FC_COS_CLASS2 : FC_COS_CLASS3;
-
- if (pd->prli_svc_param_word_3[0] & BIT_7) {
- fcport->flags |= FCF_CONF_COMP_SUPPORTED;
- fcport->conf_compl_supported = 1;
- }
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
gpd_error_out:
memset(&ea, 0, sizeof(ea));
@@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
fcport->login_retry--;
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return 0;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return 0;
+ }
+
/* for pure Target Mode. Login will not be initiated */
if (vha->host->active_mode == MODE_TARGET)
return 0;
@@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha,
fcport->flags);
if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) ||
- (fcport->fw_login_state == DSC_LS_PLOGI_COMP) ||
(fcport->fw_login_state == DSC_LS_PRLI_PEND))
return;
+ if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+ if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
+ return;
+ }
+
if (fcport->flags & FCF_ASYNC_SENT) {
fcport->login_retry++;
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
@@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res)
complete(&abt->u.abt.comp);
}
-static int
+int
qla24xx_async_abort_cmd(srb_t *cmd_sp)
{
scsi_qla_host_t *vha = cmd_sp->vha;
@@ -3212,6 +3178,7 @@ next_check:
} else {
ql_dbg(ql_dbg_init, vha, 0x00d3,
"Init Firmware -- success.\n");
+ ha->flags.fw_started = 1;
}
return (rval);
@@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
uint8_t domain;
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
- unsigned long flags;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+ port_id_t id;
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
/* Save Host port and loop ID. */
/* byte order - Big Endian */
- vha->d_id.b.domain = domain;
- vha->d_id.b.area = area;
- vha->d_id.b.al_pa = al_pa;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ id.b.domain = domain;
+ id.b.area = area;
+ id.b.al_pa = al_pa;
+ id.b.rsvd_1 = 0;
+ qlt_update_host_map(vha, id);
if (!vha->flags.init_done)
ql_log(ql_log_info, vha, 0x2010,
@@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_READY);
ql_dbg(ql_dbg_disc, vha, 0x2069,
"LOOP READY.\n");
+ ha->flags.fw_init_done = 1;
/*
* Process any ATIO queue entries that came in
@@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
}
}
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
spin_unlock_irqrestore(&ha->vport_slock, flags);
}
@@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
ha->chip_reset++;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
return;
if (!ha->fw_major_version)
return;
+ if (!ha->flags.fw_started)
+ return;
ret = qla2x00_stop_firmware(vha);
for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
@@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
"Attempting retry of stop-firmware command.\n");
ret = qla2x00_stop_firmware(vha);
}
+
+ ha->flags.fw_started = 0;
+ ha->flags.fw_init_done = 0;
}
int
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 535079280288..ea027f6a7fd4 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
int
qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
struct scatterlist *sg_prot;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
-
uint32_t prot_int; /* protection interval */
uint32_t partial;
struct qla2_sgx sgx;
@@ -966,7 +965,7 @@ alloc_and_fill:
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
@@ -1005,7 +1004,7 @@ alloc_and_fill:
int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
- uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
@@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
int
qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
- uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
+ uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc)
{
void *next_dsd;
uint8_t avail_dsds = 0;
@@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
} else {
list_add_tail(&dsd_ptr->list,
&(tc->ctx->dsd_list));
- tc->ctx_dsd_alloced = 1;
+ *tc->ctx_dsd_alloced = 1;
}
/* add new list to cmd iocb or last list */
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 3c66ea29de27..3203367a4f42 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -708,6 +708,8 @@ skip_rio:
"mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);
ha->isp_ops->fw_dump(vha, 1);
+ ha->flags.fw_init_done = 0;
+ ha->flags.fw_started = 0;
if (IS_FWI2_CAPABLE(ha)) {
if (mb[1] == 0 && mb[2] == 0) {
@@ -761,6 +763,9 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
+ ha->flags.lip_ae = 1;
+ ha->flags.n2n_ae = 0;
+
ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
@@ -797,6 +802,10 @@ skip_rio:
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
+ ha->flags.n2n_ae = 0;
+ ha->flags.lip_ae = 0;
+ ha->current_topology = 0;
+
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
@@ -866,6 +875,9 @@ skip_rio:
/* case MBA_DCBX_COMPLETE: */
case MBA_POINT_TO_POINT: /* Point-to-Point */
+ ha->flags.lip_ae = 0;
+ ha->flags.n2n_ae = 1;
+
if (IS_QLA2100(ha))
break;
@@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, fcport->vha, 0x5034,
- "Async-%s error entry - hdl=%x"
+ "Async-%s error entry - %8phC hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, sp->handle, fcport->d_id.b.domain,
+ type, fcport->port_name, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
logio->entry_status);
ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
@@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
- "Async-%s complete - hdl=%x portid=%02x%02x%02x "
- "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, fcport->port_name, sp->handle,
+ fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
@@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
case LSC_SCODE_NPORT_USED:
data[0] = MBS_LOOP_ID_USED;
break;
+ case LSC_SCODE_CMD_FAILED:
+ if (iop[1] == 0x0606) {
+ /*
+ * PLOGI/PRLI Completed. We must have Recv PLOGI/PRLI,
+ * Target side acked.
+ */
+ data[0] = MBS_COMMAND_COMPLETE;
+ goto logio_done;
+ }
+ data[0] = MBS_COMMAND_ERROR;
+ break;
case LSC_SCODE_NOXCB:
vha->hw->exch_starvation++;
if (vha->hw->exch_starvation > 5) {
@@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
- "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, fcport->port_name,
+ sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle);
+ abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle);
sp->done(sp, 0);
}
@@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt;
struct qla_hw_data *ha = vha->hw;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 35079f417417..a113ab3592a7 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,6 +10,28 @@
#include <linux/delay.h>
#include <linux/gfp.h>
+static struct mb_cmd_name {
+ uint16_t cmd;
+ const char *str;
+} mb_str[] = {
+ {MBC_GET_PORT_DATABASE, "GPDB"},
+ {MBC_GET_ID_LIST, "GIDList"},
+ {MBC_GET_LINK_PRIV_STATS, "Stats"},
+};
+
+static const char *mb_to_str(uint16_t cmd)
+{
+ int i;
+ struct mb_cmd_name *e;
+
+ for (i = 0; i < ARRAY_SIZE(mb_str); i++) {
+ e = mb_str + i;
+ if (cmd == e->cmd)
+ return e->str;
+ }
+ return "unknown";
+}
+
static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
@@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
int
qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
- dma_addr_t stats_dma, uint options)
+ dma_addr_t stats_dma, uint16_t options)
{
int rval;
mbx_cmd_t mc;
@@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
"Entered %s.\n", __func__);
- mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
- mcp->mb[2] = MSW(stats_dma);
- mcp->mb[3] = LSW(stats_dma);
- mcp->mb[6] = MSW(MSD(stats_dma));
- mcp->mb[7] = LSW(MSD(stats_dma));
- mcp->mb[8] = sizeof(struct link_statistics) / 4;
- mcp->mb[9] = vha->vp_idx;
- mcp->mb[10] = options;
- mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
- mcp->in_mb = MBX_2|MBX_1|MBX_0;
- mcp->tov = MBX_TOV_SECONDS;
- mcp->flags = IOCTL_CMD;
- rval = qla2x00_mailbox_command(vha, mcp);
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_LINK_PRIV_STATS;
+ mc.mb[2] = MSW(stats_dma);
+ mc.mb[3] = LSW(stats_dma);
+ mc.mb[6] = MSW(MSD(stats_dma));
+ mc.mb[7] = LSW(MSD(stats_dma));
+ mc.mb[8] = sizeof(struct link_statistics) / 4;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16(options);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
if (rval == QLA_SUCCESS) {
if (mcp->mb[0] != MBS_COMMAND_COMPLETE) {
@@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
scsi_qla_host_t *vp = NULL;
unsigned long flags;
int found;
+ port_id_t id;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (rptid_entry->entry_status != 0)
return;
+ id.b.domain = rptid_entry->port_id[2];
+ id.b.area = rptid_entry->port_id[1];
+ id.b.al_pa = rptid_entry->port_id[0];
+ id.b.rsvd_1 = 0;
+
if (rptid_entry->format == 0) {
/* loop */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
+ ql_dbg(ql_dbg_async, vha, 0x10b7,
"Format 0 : Number of VPs setup %d, number of "
"VPs acquired %d.\n", rptid_entry->vp_setup,
rptid_entry->vp_acquired);
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
+ ql_dbg(ql_dbg_async, vha, 0x10b8,
"Primary port id %02x%02x%02x.\n",
rptid_entry->port_id[2], rptid_entry->port_id[1],
rptid_entry->port_id[0]);
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
} else if (rptid_entry->format == 1) {
/* fabric */
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
+ ql_dbg(ql_dbg_async, vha, 0x10b9,
"Format 1: VP[%d] enabled - status %d - with "
"port id %02x%02x%02x.\n", rptid_entry->vp_idx,
rptid_entry->vp_status,
@@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
WWN_SIZE);
}
- vha->d_id.b.domain = rptid_entry->port_id[2];
- vha->d_id.b.area = rptid_entry->port_id[1];
- vha->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vha, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vha, id);
}
fc_host_port_name(vha->host) =
@@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (!found)
return;
- vp->d_id.b.domain = rptid_entry->port_id[2];
- vp->d_id.b.area = rptid_entry->port_id[1];
- vp->d_id.b.al_pa = rptid_entry->port_id[0];
- spin_lock_irqsave(&ha->vport_slock, flags);
- qlt_update_vp_map(vp, SET_AL_PA);
- spin_unlock_irqrestore(&ha->vport_slock, flags);
+ qlt_update_host_map(vp, id);
/*
* Cannot configure here as we are still sitting on the
@@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha,
return rval;
}
+
+static void qla2x00_async_mb_sp_done(void *s, int res)
+{
+ struct srb *sp = s;
+
+ sp->u.iocb_cmd.u.mbx.rc = res;
+
+ complete(&sp->u.iocb_cmd.u.mbx.comp);
+ /* don't free sp here. Let the caller do the free */
+}
+
+/*
+ * This mailbox routine uses the IOCB interface to send an MB command.
+ * This allows non-critical (non chip-setup) commands to go
+ * out in parallel.
+ */
+int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ srb_t *sp;
+ struct srb_iocb *c;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
+ if (!sp)
+ goto done;
+
+ sp->type = SRB_MB_IOCB;
+ sp->name = mb_to_str(mcp->mb[0]);
+
+ qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
+
+ memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG);
+
+ c = &sp->u.iocb_cmd;
+ c->timeout = qla2x00_async_iocb_timeout;
+ init_completion(&c->u.mbx.comp);
+
+ sp->done = qla2x00_async_mb_sp_done;
+
+ rval = qla2x00_start_sp(sp);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %s Failed submission. %x.\n",
+ __func__, sp->name, rval);
+ goto done_free_sp;
+ }
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n",
+ sp->name, sp->handle);
+
+ wait_for_completion(&c->u.mbx.comp);
+ memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG);
+
+ rval = c->u.mbx.rc;
+ switch (rval) {
+ case QLA_FUNCTION_TIMEOUT:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n",
+ __func__, sp->name, rval);
+ break;
+ case QLA_SUCCESS:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n",
+ __func__, sp->name);
+ sp->free(sp);
+ break;
+ default:
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n",
+ __func__, sp->name, rval);
+ sp->free(sp);
+ break;
+ }
+
+ return rval;
+
+done_free_sp:
+ sp->free(sp);
+done:
+ return rval;
+}
+
+/*
+ * qla24xx_gpdb_wait
+ * NOTE: Do not call this routine from DPC thread
+ */
+int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ dma_addr_t pd_dma;
+ struct port_database_24xx *pd;
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
+ if (pd == NULL) {
+ ql_log(ql_log_warn, vha, 0xffff,
+ "Failed to allocate port database structure.\n");
+ goto done_free_sp;
+ }
+ memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE));
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_PORT_DATABASE;
+ mc.mb[1] = cpu_to_le16(fcport->loop_id);
+ mc.mb[2] = MSW(pd_dma);
+ mc.mb[3] = LSW(pd_dma);
+ mc.mb[6] = MSW(MSD(pd_dma));
+ mc.mb[7] = LSW(MSD(pd_dma));
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+ mc.mb[10] = cpu_to_le16((uint16_t)opt);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: %8phC fail\n", __func__, fcport->port_name);
+ goto done_free_sp;
+ }
+
+ rval = __qla24xx_parse_gpdb(vha, fcport, pd);
+
+ ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n",
+ __func__, fcport->port_name);
+
+done_free_sp:
+ if (pd)
+ dma_pool_free(ha->s_dma_pool, pd, pd_dma);
+done:
+ return rval;
+}
+
+int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport,
+ struct port_database_24xx *pd)
+{
+ int rval = QLA_SUCCESS;
+ uint64_t zero = 0;
+
+ /* Check for logged in state. */
+ if (pd->current_login_state != PDS_PRLI_COMPLETE &&
+ pd->last_login_state != PDS_PRLI_COMPLETE) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "Unable to verify login-state (%x/%x) for "
+ "loop_id %x.\n", pd->current_login_state,
+ pd->last_login_state, fcport->loop_id);
+ rval = QLA_FUNCTION_FAILED;
+ goto gpd_error_out;
+ }
+
+ if (fcport->loop_id == FC_NO_LOOP_ID ||
+ (memcmp(fcport->port_name, (uint8_t *)&zero, 8) &&
+ memcmp(fcport->port_name, pd->port_name, 8))) {
+ /* We lost the device mid way. */
+ rval = QLA_NOT_LOGGED_IN;
+ goto gpd_error_out;
+ }
+
+ /* Names are little-endian. */
+ memcpy(fcport->node_name, pd->node_name, WWN_SIZE);
+ memcpy(fcport->port_name, pd->port_name, WWN_SIZE);
+
+ /* Get port_id of device. */
+ fcport->d_id.b.domain = pd->port_id[0];
+ fcport->d_id.b.area = pd->port_id[1];
+ fcport->d_id.b.al_pa = pd->port_id[2];
+ fcport->d_id.b.rsvd_1 = 0;
+
+ /* If not target must be initiator or unknown type. */
+ if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0)
+ fcport->port_type = FCT_INITIATOR;
+ else
+ fcport->port_type = FCT_TARGET;
+
+ /* Passback COS information. */
+ fcport->supported_classes = (pd->flags & PDF_CLASS_2) ?
+ FC_COS_CLASS2 : FC_COS_CLASS3;
+
+ if (pd->prli_svc_param_word_3[0] & BIT_7) {
+ fcport->flags |= FCF_CONF_COMP_SUPPORTED;
+ fcport->conf_compl_supported = 1;
+ }
+
+gpd_error_out:
+ return rval;
+}
+
+/*
+ * qla24xx_gidlist_wait
+ * NOTE: don't call this routine from DPC thread.
+ */
+int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
+ void *id_list, dma_addr_t id_list_dma, uint16_t *entries)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ mbx_cmd_t mc;
+
+ if (!vha->hw->flags.fw_started)
+ goto done;
+
+ memset(&mc, 0, sizeof(mc));
+ mc.mb[0] = MBC_GET_ID_LIST;
+ mc.mb[2] = MSW(id_list_dma);
+ mc.mb[3] = LSW(id_list_dma);
+ mc.mb[6] = MSW(MSD(id_list_dma));
+ mc.mb[7] = LSW(MSD(id_list_dma));
+ mc.mb[8] = 0;
+ mc.mb[9] = cpu_to_le16(vha->vp_idx);
+
+ rval = qla24xx_send_mb_cmd(vha, &mc);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: fail\n", __func__);
+ } else {
+ *entries = mc.mb[1];
+ ql_dbg(ql_dbg_mbx, vha, 0xffff,
+ "%s: done\n", __func__);
+ }
+done:
+ return rval;
+}
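
qla24xx_send_mb_cmd() above is a synchronous wrapper around an asynchronous IOCB: the done callback records the result and fires a completion, while the submitting context sleeps on it. That is also why the comments forbid calling these helpers from the DPC thread; the waiter must not be the thread that services the completion. The shape reduced to its essentials, with illustrative example_* names:

#include <linux/completion.h>

struct example_req {
	struct completion comp;
	int rc;
};

/* Sketch: async done callback - record status, wake the submitter. */
static void example_done(struct example_req *req, int res)
{
	req->rc = res;
	complete(&req->comp);	/* the submitter owns freeing, not us */
}

/* Sketch: submit the request, then block until example_done() runs. */
static int example_issue(struct example_req *req,
			 int (*submit)(struct example_req *))
{
	int rval;

	init_completion(&req->comp);
	rval = submit(req);
	if (rval)
		return rval;
	wait_for_completion(&req->comp);
	return req->rc;
}
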
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c6d6f0d912ff..09a490c98763 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
* ensures no active vp_list traversal while the vport is removed
* from the queue)
*/
- spin_lock_irqsave(&ha->vport_slock, flags);
- while (atomic_read(&vha->vref_count)) {
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- msleep(500);
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+	    10*HZ);
- spin_lock_irqsave(&ha->vport_slock, flags);
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ if (atomic_read(&vha->vref_count)) {
+ ql_dbg(ql_dbg_vport, vha, 0xfffa,
+ "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+ vha->vref_count = (atomic_t)ATOMIC_INIT(0);
}
list_del(&vha->list);
qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
spin_lock_irqsave(&ha->vport_slock, flags);
atomic_dec(&vha->vref_count);
+ wake_up(&vha->vref_waitq);
}
i++;
}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 1fed235a1b4a..41d5b09f7326 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
return atomic_read(&vha->loop_state) == LOOP_READY;
}
+static void qla2x00_iocb_work_fn(struct work_struct *work)
+{
+ struct scsi_qla_host *vha = container_of(work,
+ struct scsi_qla_host, iocb_work);
+ int cnt = 0;
+
+ while (!list_empty(&vha->work_list)) {
+ qla2x00_do_work(vha);
+ cnt++;
+ if (cnt > 10)
+ break;
+ }
+}
+
/*
* PCI driver interface
*/
@@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
*/
qla2xxx_wake_dpc(base_vha);
+ INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);
INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error);
if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) {
@@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
qla2x00_free_sysfs_attr(base_vha, true);
fc_remove_host(base_vha->host);
+ qlt_remove_target_resources(ha);
scsi_remove_host(base_vha->host);
@@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
spin_lock_init(&vha->work_lock);
spin_lock_init(&vha->cmd_list_lock);
init_waitqueue_head(&vha->fcport_waitQ);
+ init_waitqueue_head(&vha->vref_waitq);
vha->gnl.size = sizeof(struct get_name_list_extended) *
(ha->max_loop_id + 1);
@@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
spin_lock_irqsave(&vha->work_lock, flags);
list_add_tail(&e->list, &vha->work_list);
spin_unlock_irqrestore(&vha->work_lock, flags);
- qla2xxx_wake_dpc(vha);
+
+ if (QLA_EARLY_LINKUP(vha->hw))
+ schedule_work(&vha->iocb_work);
+ else
+ qla2xxx_wake_dpc(vha);
return QLA_SUCCESS;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 45f5077684f0..0e03ca2ab3e5 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha,
fc_port_t *fcport, bool local);
void qlt_unreg_sess(struct fc_port *sess);
+static void qlt_24xx_handle_abts(struct scsi_qla_host *,
+ struct abts_recv_from_24xx *);
+
/*
* Global Variables
*/
@@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
static LIST_HEAD(qla_tgt_glist);
+static const char *prot_op_str(u32 prot_op)
+{
+ switch (prot_op) {
+ case TARGET_PROT_NORMAL: return "NORMAL";
+ case TARGET_PROT_DIN_INSERT: return "DIN_INSERT";
+ case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT";
+ case TARGET_PROT_DIN_STRIP: return "DIN_STRIP";
+ case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP";
+ case TARGET_PROT_DIN_PASS: return "DIN_PASS";
+ case TARGET_PROT_DOUT_PASS: return "DOUT_PASS";
+ default: return "UNKNOWN";
+ }
+}
+
/* This API intentionally takes dest as a parameter, rather than returning
* int value to avoid caller forgetting to issue wmb() after the store */
void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
@@ -170,21 +187,23 @@ static inline
struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
uint8_t *d_id)
{
- struct qla_hw_data *ha = vha->hw;
- uint8_t vp_idx;
-
- if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
- return NULL;
+ struct scsi_qla_host *host;
+ uint32_t key = 0;
- if (vha->d_id.b.al_pa == d_id[2])
+ if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) &&
+ (vha->d_id.b.al_pa == d_id[2]))
return vha;
- BUG_ON(ha->tgt.tgt_vp_map == NULL);
- vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
- if (likely(test_bit(vp_idx, ha->vp_idx_map)))
- return ha->tgt.tgt_vp_map[vp_idx].vha;
+ key = (uint32_t)d_id[0] << 16;
+ key |= (uint32_t)d_id[1] << 8;
+ key |= (uint32_t)d_id[2];
- return NULL;
+ host = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!host)
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Unable to find host %06x\n", key);
+
+ return host;
}
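
The rewritten lookup keys a btree_head32 (lib/btree, hence the new "select BTREE" in Kconfig) on the full 24-bit port ID rather than indexing a per-AL_PA array, so two hosts that share an AL_PA but differ in domain or area no longer collide. A sketch of the map operations this relies on; the example_* names are illustrative:

#include <linux/btree.h>
#include <linux/gfp.h>
#include <linux/types.h>

static struct btree_head32 example_map;	/* cf. ha->tgt.host_map */

static int example_map_init(void)
{
	return btree_init32(&example_map);
}

/* Sketch: compose the same 24-bit key as qlt_find_host_by_d_id(). */
static u32 example_key(u8 domain, u8 area, u8 al_pa)
{
	return ((u32)domain << 16) | ((u32)area << 8) | al_pa;
}

static int example_map_add(u8 d, u8 a, u8 p, void *host)
{
	return btree_insert32(&example_map, example_key(d, a, p),
			      host, GFP_ATOMIC);
}

static void *example_map_find(u8 d, u8 a, u8 p)
{
	return btree_lookup32(&example_map, example_key(d, a, p));
}
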
static inline
@@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
(struct abts_recv_from_24xx *)atio;
struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
entry->vp_index);
+ unsigned long flags;
+
if (unlikely(!host)) {
ql_dbg(ql_dbg_tgt, vha, 0xffff,
"qla_target(%d): Response pkt (ABTS_RECV_24XX) "
@@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
vha->vp_idx, entry->vp_index);
break;
}
- qlt_response_pkt(host, (response_t *)atio);
+ if (!ha_locked)
+ spin_lock_irqsave(&host->hw->hardware_lock, flags);
+ qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&host->hw->hardware_lock, flags);
break;
-
}
/* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */
@@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res)
sp->fcport->login_gen++;
sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP;
sp->fcport->logout_on_delete = 1;
+ sp->fcport->plogi_nack_done_deadline = jiffies + HZ;
break;
case SRB_NACK_PRLI:
@@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport,
break;
case SRB_NACK_PRLI:
fcport->fw_login_state = DSC_LS_PRLI_PEND;
+ fcport->deleted = 0;
c = "PRLI";
break;
case SRB_NACK_LOGO:
@@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
}
/* Get list of logged in devices */
- rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
@@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
request_t *pkt;
struct nack_to_isp *nack;
+ if (!ha->flags.fw_started)
+ return;
+
ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
/* Send marker if required */
@@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
}
EXPORT_SYMBOL(qlt_free_mcmd);
+/*
+ * ha->hardware_lock is supposed to be held on entry. It may be
+ * dropped and reacquired.
+ */
+void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
+ uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq)
+{
+ struct atio_from_isp *atio = &cmd->atio;
+ struct ctio7_to_24xx *ctio;
+ uint16_t temp;
+
+ ql_dbg(ql_dbg_tgt_dif, vha, 0x3066,
+ "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, "
+ "sense_key=%02x, asc=%02x, ascq=%02x",
+ vha, atio, scsi_status, sense_key, asc, ascq);
+
+ ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+ if (!ctio) {
+ ql_dbg(ql_dbg_async, vha, 0x3067,
+ "qla2x00t(%ld): %s failed: unable to allocate request packet",
+ vha->host_no, __func__);
+ goto out;
+ }
+
+ ctio->entry_type = CTIO_TYPE7;
+ ctio->entry_count = 1;
+ ctio->handle = QLA_TGT_SKIP_HANDLE;
+ ctio->nport_handle = cmd->sess->loop_id;
+ ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
+ ctio->vp_index = vha->vp_idx;
+ ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+ ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+ ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+ ctio->exchange_addr = atio->u.isp24.exchange_addr;
+ ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
+ cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS);
+ temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+ ctio->u.status1.ox_id = cpu_to_le16(temp);
+ ctio->u.status1.scsi_status =
+ cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status);
+ ctio->u.status1.response_len = cpu_to_le16(18);
+ ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio));
+
+ if (ctio->u.status1.residual != 0)
+ ctio->u.status1.scsi_status |=
+ cpu_to_le16(SS_RESIDUAL_UNDER);
+
+ /* Response code and sense key */
+ put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+ (&ctio->u.status1.sense_data)[0]);
+ /* Additional sense length */
+ put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
+ /* ASC and ASCQ */
+ put_unaligned_le32(((asc << 24) | (ascq << 16)),
+ (&ctio->u.status1.sense_data)[3]);
+
+ /* Memory Barrier */
+ wmb();
+
+ qla2x00_start_iocbs(vha, vha->req);
+out:
+ return;
+}
+
/* callback from target fabric module code */
void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
{
@@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
*/
return -EAGAIN;
} else
- ha->tgt.cmds[h-1] = prm->cmd;
+ ha->tgt.cmds[h - 1] = prm->cmd;
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
pkt->nport_handle = prm->cmd->loop_id;
@@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
return cmd->bufflen > 0;
}
+static void qlt_print_dif_err(struct qla_tgt_prm *prm)
+{
+ struct qla_tgt_cmd *cmd;
+ struct scsi_qla_host *vha;
+
+ /* asc 0x10=dif error */
+ if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) {
+ cmd = prm->cmd;
+ vha = cmd->vha;
+ /* ASCQ */
+ switch (prm->sense_buffer[13]) {
+ case 1:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 2:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ case 3:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ default:
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "BE detected Dif ERR: lba[%llx|%lld] len[%x] "
+ "se_cmd=%p tag[%x]",
+ cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr);
+ break;
+ }
+ ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16);
+ }
+}
+
/*
* Called without ha->hardware_lock held
*/
@@ -2512,18 +2649,9 @@ skip_explict_conf:
for (i = 0; i < prm->sense_buffer_len/4; i++)
((uint32_t *)ctio->u.status1.sense_data)[i] =
cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
-#if 0
- if (unlikely((prm->sense_buffer_len % 4) != 0)) {
- static int q;
- if (q < 10) {
- ql_dbg(ql_dbg_tgt, vha, 0xe04f,
- "qla_target(%d): %d bytes of sense "
- "lost", prm->tgt->ha->vp_idx,
- prm->sense_buffer_len % 4);
- q++;
- }
- }
-#endif
+
+ qlt_print_dif_err(prm);
+
} else {
ctio->u.status1.flags &=
~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
@@ -2537,19 +2665,9 @@ skip_explict_conf:
/* Sense with len > 24, is it possible ??? */
}
-
-
-/* diff */
static inline int
qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
{
- /*
- * Uncomment when corresponding SCSI changes are done.
- *
- if (!sp->cmd->prot_chk)
- return 0;
- *
- */
switch (se_cmd->prot_op) {
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_STRIP:
@@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
return 0;
}
+static inline int
+qla_tgt_ref_mask_check(struct se_cmd *se_cmd)
+{
+ switch (se_cmd->prot_op) {
+ case TARGET_PROT_DIN_INSERT:
+ case TARGET_PROT_DOUT_INSERT:
+ case TARGET_PROT_DIN_STRIP:
+ case TARGET_PROT_DOUT_STRIP:
+ case TARGET_PROT_DIN_PASS:
+ case TARGET_PROT_DOUT_PASS:
+ return 1;
+ default:
+ return 0;
+ }
+ return 0;
+}
+
/*
- * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command
- *
+ * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command
*/
-static inline void
-qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+static void
+qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx,
+ uint16_t *pfw_prot_opts)
{
+ struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+ scsi_qla_host_t *vha = cmd->tgt->vha;
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t t32 = 0;
- /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2
+ /*
+ * wait till Mode Sense/Select cmd, modepage Ah, subpage 2
* have been immplemented by TCM, before AppTag is avail.
* Look for modesense_handlers[]
*/
@@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
ctx->app_tag_mask[0] = 0x0;
ctx->app_tag_mask[1] = 0x0;
+ if (IS_PI_UNINIT_CAPABLE(ha)) {
+ if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+ (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+ *pfw_prot_opts |= PO_DIS_VALD_APP_ESC;
+ else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+ *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+ }
+
+ t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts);
+
switch (se_cmd->prot_type) {
case TARGET_DIF_TYPE0_PROT:
/*
- * No check for ql2xenablehba_err_chk, as it would be an
- * I/O error if hba tag generation is not done.
+ * No check for ql2xenablehba_err_chk, as it
+ * would be an I/O error if hba tag generation
+ * is not done.
*/
ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
/* enable ALL bytes of the ref tag */
ctx->ref_tag_mask[0] = 0xff;
ctx->ref_tag_mask[1] = 0xff;
ctx->ref_tag_mask[2] = 0xff;
ctx->ref_tag_mask[3] = 0xff;
break;
- /*
- * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
- * 16 bit app tag.
- */
case TARGET_DIF_TYPE1_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
- /*
- * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to
- * match LBA in CDB + N
- */
+ /*
+ * For TYPE 1 protection: 16 bit GUARD tag, 32 bit
+ * REF tag, and 16 bit app tag.
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE2_PROT:
- ctx->ref_tag = cpu_to_le32(lba);
-
- if (!qlt_hba_err_chk_enabled(se_cmd))
- break;
-
- /* enable ALL bytes of the ref tag */
- ctx->ref_tag_mask[0] = 0xff;
- ctx->ref_tag_mask[1] = 0xff;
- ctx->ref_tag_mask[2] = 0xff;
- ctx->ref_tag_mask[3] = 0xff;
- break;
-
- /* For Type 3 protection: 16 bit GUARD only */
+ /*
+ * For TYPE 2 protection: 16 bit GUARD + 32 bit REF
+ * tag has to match LBA in CDB + N
+ */
+ ctx->ref_tag = cpu_to_le32(lba);
+ if (!qla_tgt_ref_mask_check(se_cmd) ||
+ !(ha->tgt.tgt_ops->chk_dif_tags(t32))) {
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ break;
+ }
+ /* enable ALL bytes of the ref tag */
+ ctx->ref_tag_mask[0] = 0xff;
+ ctx->ref_tag_mask[1] = 0xff;
+ ctx->ref_tag_mask[2] = 0xff;
+ ctx->ref_tag_mask[3] = 0xff;
+ break;
case TARGET_DIF_TYPE3_PROT:
- ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
- ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
- break;
+ /* For TYPE 3 protection: 16 bit GUARD only */
+ *pfw_prot_opts |= PO_DIS_REF_TAG_VALD;
+ ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+ ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+ break;
}
}
-
static inline int
qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
{
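
The ref/app tag masks set in qla_tgt_set_dif_tags() select fields from the 8-byte T10 protection-information tuple carried with each logical block. For orientation, its layout is sketched below; this mirrors the kernel's struct t10_pi_tuple, and the name here is illustrative:

#include <linux/types.h>

/* Sketch: the 8 bytes of T10 PI per block, big-endian on the wire. */
struct example_t10_pi_tuple {
	__be16 guard;		/* CRC16 of the data block */
	__be16 app_tag;		/* application tag; masked off above */
	__be32 ref_tag;		/* typically the low 32 bits of the LBA */
};
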
@@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
struct se_cmd *se_cmd = &cmd->se_cmd;
uint32_t h;
struct atio_from_isp *atio = &prm->cmd->atio;
+ struct qla_tc_param tc;
uint16_t t16;
ha = vha->hw;
@@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
case TARGET_PROT_DIN_INSERT:
case TARGET_PROT_DOUT_STRIP:
transfer_length = data_bytes;
- data_bytes += dif_bytes;
+ if (cmd->prot_sg_cnt)
+ data_bytes += dif_bytes;
break;
-
case TARGET_PROT_DIN_STRIP:
case TARGET_PROT_DOUT_INSERT:
case TARGET_PROT_DIN_PASS:
case TARGET_PROT_DOUT_PASS:
transfer_length = data_bytes + dif_bytes;
break;
-
default:
BUG();
break;
@@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
break;
}
-
/* ---- PKT ---- */
/* Update entry type to indicate Command Type CRC_2 IOCB */
pkt->entry_type = CTIO_CRC2;
@@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
} else
ha->tgt.cmds[h-1] = prm->cmd;
-
pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
- pkt->nport_handle = prm->cmd->loop_id;
+ pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id);
pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
@@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
-
pkt->dseg_count = prm->tot_dsds;
/* Fibre channel byte count */
pkt->transfer_length = cpu_to_le32(transfer_length);
-
/* ----- CRC context -------- */
/* Allocate CRC context from global pool */
@@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
/* Set handle */
crc_ctx_pkt->handle = pkt->handle;
- qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+ qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts);
pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
-
if (!bundling) {
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
} else {
@@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
crc_ctx_pkt->guard_seed = cpu_to_le16(0);
+	memset((uint8_t *)&tc, 0, sizeof(tc));
+ tc.vha = vha;
+ tc.blk_sz = cmd->blk_sz;
+ tc.bufflen = cmd->bufflen;
+ tc.sg = cmd->sg;
+ tc.prot_sg = cmd->prot_sg;
+ tc.ctx = crc_ctx_pkt;
+ tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced;
/* Walks data segments */
pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
if (!bundling && prm->prot_seg_cnt) {
if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
- prm->tot_dsds, cmd))
+ prm->tot_dsds, &tc))
goto crc_queuing_error;
} else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
- (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+ (prm->tot_dsds - prm->prot_seg_cnt), &tc))
goto crc_queuing_error;
if (bundling && prm->prot_seg_cnt) {
@@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
- prm->prot_seg_cnt, cmd))
+ prm->prot_seg_cnt, &tc))
goto crc_queuing_error;
}
return QLA_SUCCESS;
crc_queuing_error:
/* Cleanup will be performed by the caller */
+ vha->hw->tgt.cmds[h - 1] = NULL;
return QLA_FUNCTION_FAILED;
}
-
/*
* Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
* QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
else
vha->tgt_counters.core_qla_que_buf++;
- if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
+ if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) {
/*
* Either the port is not online or this request was from
* previous life, just abort the processing.
@@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
+ if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
* Either the port is not online or this request was from
@@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer);
/*
- * Checks the guard or meta-data for the type of error
- * detected by the HBA.
+ * It is assumed that either the hardware_lock or the qpair lock is held.
*/
-static inline int
+static void
qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
- struct ctio_crc_from_fw *sts)
+ struct ctio_crc_from_fw *sts)
{
uint8_t *ap = &sts->actual_dif[0];
uint8_t *ep = &sts->expected_dif[0];
- uint32_t e_ref_tag, a_ref_tag;
- uint16_t e_app_tag, a_app_tag;
- uint16_t e_guard, a_guard;
uint64_t lba = cmd->se_cmd.t_task_lba;
+ uint8_t scsi_status, sense_key, asc, ascq;
+ unsigned long flags;
- a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
- a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
- a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-
- e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
- e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
- e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
-
- ql_dbg(ql_dbg_tgt, vha, 0xe075,
- "iocb(s) %p Returned STATUS.\n", sts);
-
- ql_dbg(ql_dbg_tgt, vha, 0xf075,
- "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
-
- /*
- * Ignore sector if:
- * For type 3: ref & app tag is all 'f's
- * For type 0,1,2: app tag is all 'f's
- */
- if ((a_app_tag == 0xffff) &&
- ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
- (a_ref_tag == 0xffffffff))) {
- uint32_t blocks_done;
-
- /* 2TB boundary case covered automatically with this */
- blocks_done = e_ref_tag - (uint32_t)lba + 1;
- cmd->se_cmd.bad_sector = e_ref_tag;
- cmd->se_cmd.pi_err = 0;
- ql_dbg(ql_dbg_tgt, vha, 0xf074,
- "need to return scsi good\n");
-
- /* Update protection tag */
- if (cmd->prot_sg_cnt) {
- uint32_t i, k = 0, num_ent;
- struct scatterlist *sg, *sgl;
-
-
- sgl = cmd->prot_sg;
-
- /* Patch the corresponding protection tags */
- for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
- num_ent = sg_dma_len(sg) / 8;
- if (k + num_ent < blocks_done) {
- k += num_ent;
- continue;
- }
- k = blocks_done;
- break;
- }
+ cmd->trc_flags |= TRC_DIF_ERR;
- if (k != blocks_done) {
- ql_log(ql_log_warn, vha, 0xf076,
- "unexpected tag values tag:lba=%u:%llu)\n",
- e_ref_tag, (unsigned long long)lba);
- goto out;
- }
+ cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0));
+ cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+ cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
-#if 0
- struct sd_dif_tuple *spt;
- /* TODO:
- * This section came from initiator. Is it valid here?
- * should ulp be override with actual val???
- */
- spt = page_address(sg_page(sg)) + sg->offset;
- spt += j;
+ cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0));
+ cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+ cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
- spt->app_tag = 0xffff;
- if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
- spt->ref_tag = 0xffffffff;
-#endif
- }
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xf075,
+ "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state);
- return 0;
- }
+ scsi_status = sense_key = asc = ascq = 0;
- /* check guard */
- if (e_guard != a_guard) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe076,
- "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check appl tag */
+ if (cmd->e_app_tag != cmd->a_app_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_APP;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x2;
}
/* check ref tag */
- if (e_ref_tag != a_ref_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = e_ref_tag;
-
- ql_log(ql_log_warn, vha, 0xe077,
- "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
+ if (cmd->e_ref_tag != cmd->a_ref_tag) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard[%x|%x] cmd=%p ox_id[%04x] ",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+
+ cmd->dif_err_code = DIF_ERR_REF;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x3;
goto out;
}
- /* check appl tag */
- if (e_app_tag != a_app_tag) {
- cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
- cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
-
- ql_log(ql_log_warn, vha, 0xe078,
- "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
- cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
- a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
- a_guard, e_guard, cmd);
- goto out;
+ /* check guard */
+ if (cmd->e_guard != cmd->a_guard) {
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] "
+ "Ref[%x|%x], App[%x|%x], "
+ "Guard [%x|%x] cmd=%p ox_id[%04x]",
+ cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks,
+ cmd->a_ref_tag, cmd->e_ref_tag,
+ cmd->a_app_tag, cmd->e_app_tag,
+ cmd->a_guard, cmd->e_guard,
+ cmd, cmd->atio.u.isp24.fcp_hdr.ox_id);
+ cmd->dif_err_code = DIF_ERR_GRD;
+ scsi_status = SAM_STAT_CHECK_CONDITION;
+ sense_key = ABORTED_COMMAND;
+ asc = 0x10;
+ ascq = 0x1;
}
out:
- return 1;
-}
+ switch (cmd->state) {
+ case QLA_TGT_STATE_NEED_DATA:
+ /* handle_data will load DIF error code */
+ cmd->state = QLA_TGT_STATE_DATA_IN;
+ vha->hw->tgt.tgt_ops->handle_data(cmd);
+ break;
+ default:
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq);
+ /* Assume the SCSI status makes it out on the wire.
+ * Do not wait for completion.
+ */
+ vha->hw->tgt.tgt_ops->free_cmd(cmd);
+ break;
+ }
+}
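For reference, the asc/ascq constants hard-coded in the checks above are the standard T10 protection-information sense codes (ASC 0x10, with the ASCQ selecting the failing tag type). A minimal illustrative helper, not part of this patch, that performs the same mapping from the new dif_err_code values:

	#include <linux/types.h>

	/* Illustrative only: map a DIF error class to the SPC sense codes
	 * used above (ASC 0x10; ASCQ 0x1 guard, 0x2 app tag, 0x3 ref tag).
	 * The DIF_ERR_* values match the qla_target.h additions below.
	 */
	static void dif_err_to_sense(int8_t dif_err_code, uint8_t *asc, uint8_t *ascq)
	{
		*asc = 0x10;
		switch (dif_err_code) {
		case 1:	/* DIF_ERR_GRD: LOGICAL BLOCK GUARD CHECK FAILED */
			*ascq = 0x1;
			break;
		case 2:	/* DIF_ERR_REF: LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
			*ascq = 0x3;
			break;
		case 3:	/* DIF_ERR_APP: LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
			*ascq = 0x2;
			break;
		default:	/* DIF_ERR_NONE */
			*asc = 0;
			*ascq = 0;
			break;
		}
	}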
/* If hardware_lock is held on entry, it may be dropped and then reacquired */
/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
@@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
"Sending TERM ELS CTIO (ha=%p)\n", ha);
- pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+ pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
if (pkt == NULL) {
ql_dbg(ql_dbg_tgt, vha, 0xe080,
"qla_target(%d): %s failed: unable to allocate "
@@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
{
int term = 0;
+ if (cmd->se_cmd.prot_op)
+ ql_dbg(ql_dbg_tgt_dif, vha, 0xffff,
+ "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] "
+ "se_cmd=%p tag[%x] op %#x/%s",
+ cmd->lba, cmd->lba,
+ cmd->num_blks, &cmd->se_cmd,
+ cmd->atio.u.isp24.exchange_addr,
+ cmd->se_cmd.prot_op,
+ prot_op_str(cmd->se_cmd.prot_op));
+
if (ctio != NULL) {
struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
term = !(c->flags &
@@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
struct ctio_crc_from_fw *crc =
(struct ctio_crc_from_fw *)ctio;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
- "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+ "qla_target(%d): CTIO with DIF_ERROR status %x "
+ "received (state %x, ulp_cmd %p) actual_dif[0x%llx] "
+ "expect_dif[0x%llx]\n",
vha->vp_idx, status, cmd->state, se_cmd,
*((u64 *)&crc->actual_dif[0]),
*((u64 *)&crc->expected_dif[0]));
- if (qlt_handle_dif_error(vha, cmd, ctio)) {
- if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
- /* scsi Write/xfer rdy complete */
- goto skip_term;
- } else {
- /* scsi read/xmit respond complete
- * call handle dif to send scsi status
- * rather than terminate exchange.
- */
- cmd->state = QLA_TGT_STATE_PROCESSED;
- ha->tgt.tgt_ops->handle_dif_err(cmd);
- return;
- }
- } else {
- /* Need to generate a SCSI good completion.
- * because FW did not send scsi status.
- */
- status = 0;
- goto skip_term;
- }
- break;
+ qlt_handle_dif_error(vha, cmd, ctio);
+ return;
}
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
@@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
return;
}
}
-skip_term:
if (cmd->state == QLA_TGT_STATE_PROCESSED) {
cmd->trc_flags |= TRC_CTIO_DONE;
@@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
if (sess != NULL) {
- if (sess->fw_login_state == DSC_LS_PLOGI_PEND) {
+ if (sess->fw_login_state != DSC_LS_PLOGI_PEND &&
+ sess->fw_login_state != DSC_LS_PLOGI_COMP) {
/*
* Impatient initiator sent PRLI before last
* PLOGI could finish. Will force him to re-try,
@@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Make session global (not used in fabric mode) */
if (ha->current_topology != ISP_CFG_F) {
- set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
- qla2xxx_wake_dpc(vha);
+ if (sess) {
+ ql_dbg(ql_dbg_disc, vha, 0xffff,
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
+ qla24xx_post_nack_work(vha, sess, iocb,
+ SRB_NACK_PRLI);
+ res = 0;
+ } else {
+ set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+ set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
} else {
if (sess) {
ql_dbg(ql_dbg_disc, vha, 0xffff,
- "%s %d %8phC post nack\n",
- __func__, __LINE__, sess->port_name);
-
+ "%s %d %8phC post nack\n",
+ __func__, __LINE__, sess->port_name);
qla24xx_post_nack_work(vha, sess, iocb,
SRB_NACK_PRLI);
res = 0;
@@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
}
break;
-
case ELS_TPRLO:
if (le16_to_cpu(iocb->u.isp24.flags) &
NOTIFY24XX_FLAGS_GLOBAL_TPRLO) {
@@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha,
static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, bool ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
+ unsigned long flags;
if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
return 0;
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
qlt_send_busy(vha, atio, status);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return 1;
}
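The new ha_locked parameter implements a common kernel idiom: a helper reachable from both lock-held and lock-free contexts takes the hardware lock only when the caller does not already hold it. A generic sketch of the idiom (the function and parameter names here are illustrative, not driver code):

	#include <linux/spinlock.h>

	/* Sketch of the conditional-locking idiom used by
	 * qlt_chk_qfull_thresh_hold() above: take the lock only when
	 * the caller does not already hold it.
	 */
	static void do_work_maybe_locked(spinlock_t *lock, bool caller_holds_lock)
	{
		unsigned long flags = 0;

		if (!caller_holds_lock)
			spin_lock_irqsave(lock, flags);

		/* ... work that requires the lock ... */

		if (!caller_holds_lock)
			spin_unlock_irqrestore(lock, flags);
	}

The two call sites updated in the hunks below pass ha_locked and true respectively, matching the context each runs in.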
@@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
unsigned long flags;
if (unlikely(tgt == NULL)) {
- ql_dbg(ql_dbg_io, vha, 0x3064,
+ ql_dbg(ql_dbg_tgt, vha, 0x3064,
"ATIO pkt, but no tgt (ha %p)", ha);
return;
}
@@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
if (rc != 0) {
tgt->irq_cmd_count--;
return;
@@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
fcport->loop_id = loop_id;
- rc = qla2x00_get_port_database(vha, fcport, 0);
+ rc = qla24xx_gpdb_wait(vha, fcport, 0);
if (rc != QLA_SUCCESS) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
"qla_target(%d): Failed to retrieve fcport "
@@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt,
}
}
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- if (tgt->tgt_stop)
- goto out_term;
-
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
+
if (rc != 0)
goto out_term;
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
return;
out_term2:
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
out_term:
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- if (sess)
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
}
static void qlt_tmr_work(struct qla_tgt *tgt,
@@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop)
- goto out_term;
+ goto out_term2;
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
@@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (!sess)
- goto out_term;
+ goto out_term2;
} else {
if (sess->deleted) {
sess = NULL;
- goto out_term;
+ goto out_term2;
}
if (!kref_get_unless_zero(&sess->sess_kref)) {
@@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
"%s: kref_get fail %8phC\n",
__func__, sess->port_name);
sess = NULL;
- goto out_term;
+ goto out_term2;
}
}
@@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
- if (rc != 0)
- goto out_term;
-
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
+ if (rc != 0)
+ goto out_term;
return;
+out_term2:
+ if (sess)
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
out_term:
qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
- ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}
static void qlt_sess_work_fn(struct work_struct *work)
@@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
- if (base_vha->fc_vport)
- return 0;
-
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
+ if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target)
+ ha->tgt.tgt_ops->add_target(base_vha);
+
return 0;
}
@@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
return 0;
}
+void qlt_remove_target_resources(struct qla_hw_data *ha)
+{
+ struct scsi_qla_host *node;
+ u32 key = 0;
+
+ btree_for_each_safe32(&ha->tgt.host_map, key, node)
+ btree_remove32(&ha->tgt.host_map, key);
+
+ btree_destroy32(&ha->tgt.host_map);
+}
+
static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
unsigned char *b)
{
@@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
struct atio_from_isp *pkt;
int cnt, i;
- if (!vha->flags.online)
+ if (!ha->flags.fw_started)
return;
while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
@@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha,
void
qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
{
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
@@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
qlt_unknown_atio_work_fn);
qlt_clear_mode(base_vha);
+
+ rc = btree_init32(&ha->tgt.host_map);
+ if (rc)
+ ql_log(ql_log_info, base_vha, 0xffff,
+ "Unable to initialize ha->host_map btree\n");
+
+ qlt_update_vp_map(base_vha, SET_VP_IDX);
}
irqreturn_t
@@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(op);
}
void
@@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha)
void
qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
{
+ void *slot;
+ u32 key;
+ int rc;
+
if (!QLA_TGT_MODE_ENABLED())
return;
+ key = vha->d_id.b24;
+
switch (cmd) {
case SET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
break;
case SET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (!slot) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "Save vha in host_map %p %06x\n", vha, key);
+ rc = btree_insert32(&vha->hw->tgt.host_map,
+ key, vha, GFP_ATOMIC);
+ if (rc)
+ ql_log(ql_log_info, vha, 0xffff,
+ "Unable to insert s_id into host_map: %06x\n",
+ key);
+ return;
+ }
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "replace existing vha in host_map %p %06x\n", vha, key);
+ btree_update32(&vha->hw->tgt.host_map, key, vha);
break;
case RESET_VP_IDX:
vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
break;
case RESET_AL_PA:
- vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "clear vha in host_map %p %06x\n", vha, key);
+ slot = btree_lookup32(&vha->hw->tgt.host_map, key);
+ if (slot)
+ btree_remove32(&vha->hw->tgt.host_map, key);
+ vha->d_id.b24 = 0;
break;
}
}
+void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id)
+{
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!vha->d_id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ } else if (vha->d_id.b24 != id.b24) {
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ qlt_update_vp_map(vha, RESET_AL_PA);
+ vha->d_id = id;
+ qlt_update_vp_map(vha, SET_AL_PA);
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+ }
+}
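qlt_update_vp_map() and qlt_update_host_map() above drive the host_map through the generic 32-bit B+tree API from <linux/btree.h>, the same calls qlt_probe_one_stage1() and qlt_remove_target_resources() use for init and teardown. A stand-alone sketch of that API's lifecycle, assuming a kernel context (demo names are hypothetical):

	#include <linux/btree.h>
	#include <linux/gfp.h>

	/* Stand-alone sketch of the <linux/btree.h> 32-bit API used for
	 * ha->tgt.host_map: init, insert, lookup, remove, destroy.
	 */
	static int demo_obj;

	static int btree32_demo(void)
	{
		struct btree_head32 head;
		u32 key = 0x010203;	/* e.g. a 24-bit FC port id (d_id.b24) */
		int rc;

		rc = btree_init32(&head);
		if (rc)
			return rc;

		rc = btree_insert32(&head, key, &demo_obj, GFP_ATOMIC);
		if (!rc && btree_lookup32(&head, key))	/* NULL if absent */
			btree_remove32(&head, key);

		btree_destroy32(&head);
		return rc;
	}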
+
static int __init qlt_parse_ini_mode(void)
{
if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index a7f90dcaae37..d64420251194 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
}
+static inline int get_datalen_for_atio(struct atio_from_isp *atio)
+{
+ int len = atio->u.isp24.fcp_cmnd.add_cdb_len;
+
+ return (be32_to_cpu(get_unaligned((uint32_t *)
+ &atio->u.isp24.fcp_cmnd.add_cdb[len * 4])));
+}
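The addressing in get_datalen_for_atio() follows the FCP_CMND layout: the additional-CDB length byte counts 4-byte words, and the big-endian DATA LENGTH field starts immediately after the additional CDB. An equivalent formulation using the unaligned big-endian accessor (illustrative, not part of the patch):

	#include <asm/unaligned.h>

	/* Equivalent form of the helper above: with add_cdb_len == 2 the
	 * additional CDB occupies 8 bytes, so the big-endian DATA LENGTH
	 * begins at &add_cdb[8].
	 */
	static inline u32 fcp_data_len(const u8 *add_cdb, u8 add_cdb_len)
	{
		return get_unaligned_be32(add_cdb + add_cdb_len * 4);
	}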
+
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
@@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl {
int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
unsigned char *, uint32_t, int, int, int);
void (*handle_data)(struct qla_tgt_cmd *);
- void (*handle_dif_err)(struct qla_tgt_cmd *);
int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t,
uint32_t);
void (*free_cmd)(struct qla_tgt_cmd *);
@@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl {
void (*clear_nacl_from_fcport_map)(struct fc_port *);
void (*put_sess)(struct fc_port *);
void (*shutdown_sess)(struct fc_port *);
+ int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts);
+ int (*chk_dif_tags)(uint32_t tag);
+ void (*add_target)(struct scsi_qla_host *);
};
int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
@@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
#define QLA_TGT_ABORT_ALL 0xFFFE
#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
#define QLA_TGT_NEXUS_LOSS 0xFFFC
-#define QLA_TGT_ABTS 0xFFFB
-#define QLA_TGT_2G_ABORT_TASK 0xFFFA
+#define QLA_TGT_ABTS 0xFFFB
+#define QLA_TGT_2G_ABORT_TASK 0xFFFA
/* Notify Acknowledge flags */
#define NOTIFY_ACK_RES_COUNT BIT_8
@@ -845,6 +855,7 @@ enum trace_flags {
TRC_CMD_FREE = BIT_17,
TRC_DATA_IN = BIT_18,
TRC_ABORT = BIT_19,
+ TRC_DIF_ERR = BIT_20,
};
struct qla_tgt_cmd {
@@ -862,7 +873,6 @@ struct qla_tgt_cmd {
unsigned int sg_mapped:1;
unsigned int free_sg:1;
unsigned int write_data_transferred:1;
- unsigned int ctx_dsd_alloced:1;
unsigned int q_full:1;
unsigned int term_exchg:1;
unsigned int cmd_sent_to_fw:1;
@@ -885,11 +895,25 @@ struct qla_tgt_cmd {
struct list_head cmd_list;
struct atio_from_isp atio;
- /* t10dif */
+
+ uint8_t ctx_dsd_alloced;
+
+ /* T10-DIF */
+#define DIF_ERR_NONE 0
+#define DIF_ERR_GRD 1
+#define DIF_ERR_REF 2
+#define DIF_ERR_APP 3
+ int8_t dif_err_code;
struct scatterlist *prot_sg;
uint32_t prot_sg_cnt;
- uint32_t blk_sz;
+ uint32_t blk_sz, num_blks;
+ uint8_t scsi_status, sense_key, asc, ascq;
+
struct crc_context *ctx;
+ uint8_t *cdb;
+ uint64_t lba;
+ uint16_t a_guard, e_guard, a_app_tag, e_app_tag;
+ uint32_t a_ref_tag, e_ref_tag;
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
@@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
extern void qlt_logo_completion_handler(fc_port_t *, int);
extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
+void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t,
+ uint8_t, uint8_t, uint8_t);
+
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 3cb1964b7786..45bc84e8e3bf 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.07.00.38-k"
+#define QLA2XXX_VERSION "9.00.00.00-k"
-#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 7
+#define QLA_DRIVER_MAJOR_VER 9
+#define QLA_DRIVER_MINOR_VER 0
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 8e8ab0fa9672..7443e4efa3ae 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
+ switch (cmd->dif_err_code) {
+ case DIF_ERR_GRD:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+ break;
+ case DIF_ERR_REF:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_APP:
+ cmd->se_cmd.pi_err =
+ TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+ break;
+ case DIF_ERR_NONE:
+ default:
+ break;
+ }
+
if (cmd->se_cmd.pi_err)
transport_generic_request_failure(&cmd->se_cmd,
cmd->se_cmd.pi_err);
@@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
-static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+static int tcm_qla2xxx_chk_dif_tags(uint32_t tag)
{
- struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-
- /* take an extra kref to prevent cmd free too early.
- * need to wait for SCSI status/check condition to
- * finish responding generate by transport_generic_request_failure.
- */
- kref_get(&cmd->se_cmd.cmd_kref);
- transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+ return 0;
}
-/*
- * Called from qla_target.c:qlt_do_ctio_completion()
- */
-static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd,
+ uint16_t *pfw_prot_opts)
{
- INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
- queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+ struct se_cmd *se_cmd = &cmd->se_cmd;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD))
+ *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK;
+
+ if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG))
+ *pfw_prot_opts |= PO_DIS_APP_TAG_VALD;
+
+ return 0;
}
/*
@@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id,
static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.handle_cmd = tcm_qla2xxx_handle_cmd,
.handle_data = tcm_qla2xxx_handle_data,
- .handle_dif_err = tcm_qla2xxx_handle_dif_err,
.handle_tmr = tcm_qla2xxx_handle_tmr,
.free_cmd = tcm_qla2xxx_free_cmd,
.free_mcmd = tcm_qla2xxx_free_mcmd,
@@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
.clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
.put_sess = tcm_qla2xxx_put_sess,
.shutdown_sess = tcm_qla2xxx_shutdown_sess,
+ .get_dif_tags = tcm_qla2xxx_dif_tags,
+ .chk_dif_tags = tcm_qla2xxx_chk_dif_tags,
};
static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba2286652ff6..19125d72f322 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
/**
* scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
* @sdev: device to block
+ * @wait: Whether or not to wait until ongoing .queuecommand() /
+ * .queue_rq() calls have finished.
*
* Block request made by scsi lld's to temporarily stop all
* scsi commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
* remove the rport mutex lock and unlock calls from srp_queuecommand().
*/
int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
{
struct request_queue *q = sdev->request_queue;
unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_quiesce_queue(q);
+ if (wait)
+ blk_mq_quiesce_queue(q);
+ else
+ blk_mq_stop_hw_queues(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- scsi_wait_for_queuecommand(sdev);
+ if (wait)
+ scsi_wait_for_queuecommand(sdev);
}
return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
static void
device_block(struct scsi_device *sdev, void *data)
{
- scsi_internal_device_block(sdev);
+ scsi_internal_device_block(sdev, true);
}
static int
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 99bfc985e190..f11bd102d6d5 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
*/
#define SCSI_DEVICE_BLOCK_MAX_TIMEOUT 600 /* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
- enum scsi_device_state new_state);
#endif /* _SCSI_PRIV_H */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index c7839f6c35cc..fcfeddc79331 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
{
int result = SCpnt->result;
unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+ unsigned int sector_size = SCpnt->device->sector_size;
+ unsigned int resid;
struct scsi_sense_hdr sshdr;
struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
scsi_set_resid(SCpnt, blk_rq_bytes(req));
}
break;
+ default:
+ /*
+ * In case of bogus fw or device, we could end up having
+ * an unaligned partial completion. Check this here and force
+ * alignment.
+ */
+ resid = scsi_get_resid(SCpnt);
+ if (resid & (sector_size - 1)) {
+ sd_printk(KERN_INFO, sdkp,
+ "Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+ resid, sector_size);
+ resid = min(scsi_bufflen(SCpnt),
+ round_up(resid, sector_size));
+ scsi_set_resid(SCpnt, resid);
+ }
}
if (result) {
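To make the rounding above concrete: round_up() aligns the residual to the next sector boundary (sector sizes are powers of two), and min() keeps the result within the transfer length. A small worked example under assumed values:

	#include <linux/kernel.h>	/* round_up(), min() */

	/* Worked example of the alignment fix above, with hypothetical
	 * values: fix_resid(1300, 512, 4096) == 1536, i.e. the residual
	 * is forced up to a whole number of 512-byte sectors.
	 */
	static unsigned int fix_resid(unsigned int resid, unsigned int sector_size,
				      unsigned int bufflen)
	{
		if (resid & (sector_size - 1))	/* not sector aligned? */
			resid = min(bufflen, round_up(resid, sector_size));
		return resid;
	}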
@@ -3075,23 +3092,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
put_device(&sdkp->dev);
}
-struct sd_devt {
- int idx;
- struct disk_devt disk_devt;
-};
-
-static void sd_devt_release(struct disk_devt *disk_devt)
-{
- struct sd_devt *sd_devt = container_of(disk_devt, struct sd_devt,
- disk_devt);
-
- spin_lock(&sd_index_lock);
- ida_remove(&sd_index_ida, sd_devt->idx);
- spin_unlock(&sd_index_lock);
-
- kfree(sd_devt);
-}
-
/**
* sd_probe - called during driver initialization and whenever a
* new scsi device is attached to the system. It is called once
@@ -3113,7 +3113,6 @@ static void sd_devt_release(struct disk_devt *disk_devt)
static int sd_probe(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
- struct sd_devt *sd_devt;
struct scsi_disk *sdkp;
struct gendisk *gd;
int index;
@@ -3139,13 +3138,9 @@ static int sd_probe(struct device *dev)
if (!sdkp)
goto out;
- sd_devt = kzalloc(sizeof(*sd_devt), GFP_KERNEL);
- if (!sd_devt)
- goto out_free;
-
gd = alloc_disk(SD_MINORS);
if (!gd)
- goto out_free_devt;
+ goto out_free;
do {
if (!ida_pre_get(&sd_index_ida, GFP_KERNEL))
@@ -3161,11 +3156,6 @@ static int sd_probe(struct device *dev)
goto out_put;
}
- atomic_set(&sd_devt->disk_devt.count, 1);
- sd_devt->disk_devt.release = sd_devt_release;
- sd_devt->idx = index;
- gd->disk_devt = &sd_devt->disk_devt;
-
error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
if (error) {
sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
@@ -3205,12 +3195,11 @@ static int sd_probe(struct device *dev)
return 0;
out_free_index:
- put_disk_devt(&sd_devt->disk_devt);
- sd_devt = NULL;
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, index);
+ spin_unlock(&sd_index_lock);
out_put:
put_disk(gd);
- out_free_devt:
- kfree(sd_devt);
out_free:
kfree(sdkp);
out:
@@ -3271,7 +3260,10 @@ static void scsi_disk_release(struct device *dev)
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct gendisk *disk = sdkp->disk;
- put_disk_devt(disk->disk_devt);
+ spin_lock(&sd_index_lock);
+ ida_remove(&sd_index_ida, sdkp->index);
+ spin_unlock(&sd_index_lock);
+
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 638e5f427c90..016639d7fef1 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -400,8 +400,6 @@ MODULE_PARM_DESC(storvsc_vcpus_per_sub_channel, "Ratio of VCPUs to subchannels")
*/
static int storvsc_timeout = 180;
-static int msft_blist_flags = BLIST_TRY_VPD_PAGES;
-
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
static struct scsi_transport_template *fc_transport_template;
#endif
@@ -1383,6 +1381,22 @@ static int storvsc_do_io(struct hv_device *device,
return ret;
}
+static int storvsc_device_alloc(struct scsi_device *sdevice)
+{
+ /*
+ * Set blist flag to permit the reading of the VPD pages even when
+ * the target may claim SPC-2 compliance. MSFT targets currently
+ * claim SPC-2 compliance while they implement post SPC-2 features.
+ * With this flag we can correctly handle WRITE_SAME_16 issues.
+ *
+ * Hypervisor reports SCSI_UNKNOWN type for DVD ROM device but
+ * still supports REPORT LUN.
+ */
+ sdevice->sdev_bflags = BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES;
+
+ return 0;
+}
+
static int storvsc_device_configure(struct scsi_device *sdevice)
{
@@ -1396,14 +1410,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
sdevice->no_write_same = 1;
/*
- * Add blist flags to permit the reading of the VPD pages even when
- * the target may claim SPC-2 compliance. MSFT targets currently
- * claim SPC-2 compliance while they implement post SPC-2 features.
- * With this patch we can correctly handle WRITE_SAME_16 issues.
- */
- sdevice->sdev_bflags |= msft_blist_flags;
-
- /*
* If the host is WIN8 or WIN8 R2, claim conformance to SPC-3
* if the device is a MSFT virtual device. If the host is
* WIN10 or newer, allow write_same.
@@ -1661,6 +1667,7 @@ static struct scsi_host_template scsi_driver = {
.eh_host_reset_handler = storvsc_host_reset_handler,
.proc_name = "storvsc_host",
.eh_timed_out = storvsc_eh_timed_out,
+ .slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 255,
.this_id = -1,
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 318e4a1f76c9..54deeb754db5 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
/* Descriptor idn for Query requests */
enum desc_idn {
QUERY_DESC_IDN_DEVICE = 0x0,
- QUERY_DESC_IDN_CONFIGURAION = 0x1,
+ QUERY_DESC_IDN_CONFIGURATION = 0x1,
QUERY_DESC_IDN_UNIT = 0x2,
QUERY_DESC_IDN_RFU_0 = 0x3,
QUERY_DESC_IDN_INTERCONNECT = 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
QUERY_DESC_DESC_TYPE_OFFSET = 0x01,
};
-enum ufs_desc_max_size {
- QUERY_DESC_DEVICE_MAX_SIZE = 0x40,
- QUERY_DESC_CONFIGURAION_MAX_SIZE = 0x90,
- QUERY_DESC_UNIT_MAX_SIZE = 0x23,
- QUERY_DESC_INTERCONNECT_MAX_SIZE = 0x06,
- /*
- * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
- * of descriptor header.
- */
- QUERY_DESC_STRING_MAX_SIZE = 0xFE,
- QUERY_DESC_GEOMETRY_MAX_SIZE = 0x44,
- QUERY_DESC_POWER_MAX_SIZE = 0x62,
- QUERY_DESC_RFU_MAX_SIZE = 0x00,
+enum ufs_desc_def_size {
+ QUERY_DESC_DEVICE_DEF_SIZE = 0x40,
+ QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
+ QUERY_DESC_UNIT_DEF_SIZE = 0x23,
+ QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_POWER_DEF_SIZE = 0x62,
};
/* Unit descriptor parameters offsets in bytes*/
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dc6efbd1be8e..1359913bf840 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
#define ufshcd_hex_dump(prefix_str, buf, len) \
print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
-static u32 ufs_query_desc_max_size[] = {
- QUERY_DESC_DEVICE_MAX_SIZE,
- QUERY_DESC_CONFIGURAION_MAX_SIZE,
- QUERY_DESC_UNIT_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_INTERCONNECT_MAX_SIZE,
- QUERY_DESC_STRING_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
- QUERY_DESC_GEOMETRY_MAX_SIZE,
- QUERY_DESC_POWER_MAX_SIZE,
- QUERY_DESC_RFU_MAX_SIZE,
-};
-
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
goto out;
}
- if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+ if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
__func__, *buf_len);
err = -EINVAL;
@@ -2938,6 +2925,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
}
/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+ enum desc_idn desc_id,
+ int desc_index,
+ int *desc_length)
+{
+ int ret;
+ u8 header[QUERY_DESC_HDR_SIZE];
+ int header_len = QUERY_DESC_HDR_SIZE;
+
+ if (desc_id >= QUERY_DESC_IDN_MAX)
+ return -EINVAL;
+
+ ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+ desc_id, desc_index, 0, header,
+ &header_len);
+
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+ __func__, desc_id);
+ return ret;
+ } else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+ dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+ __func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+ desc_id);
+ ret = -EINVAL;
+ }
+
+ *desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+ return ret;
+
+}
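ufshcd_read_desc_length() above depends on the UFS descriptor header layout: byte 0 (QUERY_DESC_LENGTH_OFFSET) holds the descriptor length and byte 1 (QUERY_DESC_DESC_TYPE_OFFSET) the descriptor type. A minimal illustration of that parse, assuming a raw header buffer:

	#include <linux/types.h>
	#include <linux/errno.h>

	/* Illustrative parse of a UFS descriptor header, mirroring the
	 * offsets checked in ufshcd_read_desc_length() above:
	 * byte 0 = bLength, byte 1 = bDescriptorType.
	 */
	static int parse_desc_hdr(const u8 *hdr, u8 expected_type, int *desc_len)
	{
		if (hdr[1] != expected_type)	/* header/IDN mismatch */
			return -EINVAL;
		*desc_len = hdr[0];
		return 0;
	}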
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+ enum desc_idn desc_id, int *desc_len)
+{
+ switch (desc_id) {
+ case QUERY_DESC_IDN_DEVICE:
+ *desc_len = hba->desc_size.dev_desc;
+ break;
+ case QUERY_DESC_IDN_POWER:
+ *desc_len = hba->desc_size.pwr_desc;
+ break;
+ case QUERY_DESC_IDN_GEOMETRY:
+ *desc_len = hba->desc_size.geom_desc;
+ break;
+ case QUERY_DESC_IDN_CONFIGURATION:
+ *desc_len = hba->desc_size.conf_desc;
+ break;
+ case QUERY_DESC_IDN_UNIT:
+ *desc_len = hba->desc_size.unit_desc;
+ break;
+ case QUERY_DESC_IDN_INTERCONNECT:
+ *desc_len = hba->desc_size.interc_desc;
+ break;
+ case QUERY_DESC_IDN_STRING:
+ *desc_len = QUERY_DESC_MAX_SIZE;
+ break;
+ case QUERY_DESC_IDN_RFU_0:
+ case QUERY_DESC_IDN_RFU_1:
+ *desc_len = 0;
+ break;
+ default:
+ *desc_len = 0;
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
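Because the helper is exported, code outside the ufshcd core can size descriptor buffers from the device-reported lengths. A hypothetical caller (the read itself is elided, since the query helpers are static to ufshcd.c):

	#include <linux/slab.h>

	/* Hypothetical user of the exported helper above: allocate a
	 * buffer sized from the device-reported unit-descriptor length.
	 */
	static int alloc_unit_desc_buf(struct ufs_hba *hba, u8 **bufp)
	{
		int len, err;

		err = ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &len);
		if (err || !len)
			return err ? err : -EINVAL;

		*bufp = kzalloc(len, GFP_KERNEL);
		if (!*bufp)
			return -ENOMEM;

		/* the caller would now issue a READ_DESC query into *bufp */
		return 0;
	}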
+
+/**
* ufshcd_read_desc_param - read the specified descriptor parameter
* @hba: Pointer to adapter instance
* @desc_id: descriptor idn value
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
static int ufshcd_read_desc_param(struct ufs_hba *hba,
enum desc_idn desc_id,
int desc_index,
- u32 param_offset,
+ u8 param_offset,
u8 *param_read_buf,
- u32 param_size)
+ u8 param_size)
{
int ret;
u8 *desc_buf;
- u32 buff_len;
+ int buff_len;
bool is_kmalloc = true;
- /* safety checks */
- if (desc_id >= QUERY_DESC_IDN_MAX)
+ /* Safety check */
+ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
return -EINVAL;
- buff_len = ufs_query_desc_max_size[desc_id];
- if ((param_offset + param_size) > buff_len)
- return -EINVAL;
+ /* Get the max length of the descriptor from the structure filled in at
+ * probe time.
+ */
+ ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
- if (!param_offset && (param_size == buff_len)) {
- /* memory space already available to hold full descriptor */
- desc_buf = param_read_buf;
- is_kmalloc = false;
- } else {
- /* allocate memory to hold full descriptor */
+ /* Sanity checks */
+ if (ret || !buff_len) {
+ dev_err(hba->dev, "%s: Failed to get full descriptor length",
+ __func__);
+ return ret;
+ }
+
+ /* Check whether we need temp memory */
+ if (param_offset != 0 || param_size < buff_len) {
desc_buf = kmalloc(buff_len, GFP_KERNEL);
if (!desc_buf)
return -ENOMEM;
+ } else {
+ desc_buf = param_read_buf;
+ is_kmalloc = false;
}
+ /* Request the full descriptor */
ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
- desc_id, desc_index, 0, desc_buf,
- &buff_len);
+ desc_id, desc_index, 0,
+ desc_buf, &buff_len);
if (ret) {
dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
__func__, desc_id, desc_index, param_offset, ret);
-
goto out;
}
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
goto out;
}
- /*
- * While reading variable size descriptors (like string descriptor),
- * some UFS devices may report the "LENGTH" (field in "Transaction
- * Specific fields" of Query Response UPIU) same as what was requested
- * in Query Request UPIU instead of reporting the actual size of the
- * variable size descriptor.
- * Although it's safe to ignore the "LENGTH" field for variable size
- * descriptors as we can always derive the length of the descriptor from
- * the descriptor header fields. Hence this change impose the length
- * match check only for fixed size descriptors (for which we always
- * request the correct size as part of Query Request UPIU).
- */
- if ((desc_id != QUERY_DESC_IDN_STRING) &&
- (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
- dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
- __func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
- ret = -EINVAL;
- goto out;
- }
+ /* Check that we will not copy more data than is available */
+ if (is_kmalloc && param_size > buff_len)
+ param_size = buff_len;
if (is_kmalloc)
memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -5919,8 +5983,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
static void ufshcd_init_icc_levels(struct ufs_hba *hba)
{
int ret;
- int buff_len = QUERY_DESC_POWER_MAX_SIZE;
- u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+ int buff_len = hba->desc_size.pwr_desc;
+ u8 desc_buf[hba->desc_size.pwr_desc];
ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
if (ret) {
@@ -6017,11 +6081,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
{
int err;
u8 model_index;
- u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
- u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+ u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+ u8 desc_buf[hba->desc_size.dev_desc];
- err = ufshcd_read_device_desc(hba, desc_buf,
- QUERY_DESC_DEVICE_MAX_SIZE);
+ err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
if (err) {
dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
__func__, err);
@@ -6038,14 +6101,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
- QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+ QUERY_DESC_MAX_SIZE, ASCII_STD);
if (err) {
dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
__func__, err);
goto out;
}
- str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+ str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
MAX_MODEL_LEN));
@@ -6251,6 +6314,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
hba->req_abort_count = 0;
}
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+ &hba->desc_size.dev_desc);
+ if (err)
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+ &hba->desc_size.pwr_desc);
+ if (err)
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+ &hba->desc_size.interc_desc);
+ if (err)
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+ &hba->desc_size.conf_desc);
+ if (err)
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+ &hba->desc_size.unit_desc);
+ if (err)
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+ err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+ &hba->desc_size.geom_desc);
+ if (err)
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+ hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+ hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+ hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+ hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+ hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+ hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
/**
* ufshcd_probe_hba - probe hba to detect device and initialize
* @hba: per-adapter instance
@@ -6285,6 +6393,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
if (ret)
goto out;
+ /* Initialize the device descriptor sizes */
+ ufshcd_init_desc_sizes(hba);
+
ret = ufs_get_device_desc(hba, &card);
if (ret) {
dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6431,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
/* set the state as operational after switching to desired gear */
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
/*
* If we are in error handling context or in power management callbacks
* context, no need to scan the host
@@ -7774,6 +7886,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->mmio_base = mmio_base;
hba->irq = irq;
+ /* Set descriptor lengths to specification defaults */
+ ufshcd_def_desc_sizes(hba);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 7630600217a2..cdc8bd05f7df 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
struct ufs_query query;
};
+struct ufs_desc_size {
+ int dev_desc;
+ int pwr_desc;
+ int geom_desc;
+ int interc_desc;
+ int unit_desc;
+ int conf_desc;
+};
+
/**
* struct ufs_clk_info - UFS clock related info
* @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
* device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
bool is_urgent_bkops_lvl_checked;
struct rw_semaphore clk_scaling_lock;
+ struct ufs_desc_size desc_size;
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
enum flag_idn idn, bool *flag_res);
int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+ int *desc_length);
+
u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index ef474a748744..c374e3b5c678 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
irq_flag &= ~PCI_IRQ_MSI;
error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
- if (error)
+ if (error < 0)
goto out_reset_adapter;
adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index b7b87ecefcdf..9fca8d225ee0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -532,7 +532,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
newsock->ops = sock->ops;
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
if (rc == -EAGAIN) {
/* Nothing ready, so wait for activity */
init_waitqueue_entry(&wait, current);
@@ -540,7 +540,7 @@ lnet_sock_accept(struct socket **newsockp, struct socket *sock)
set_current_state(TASK_INTERRUPTIBLE);
schedule();
remove_wait_queue(sk_sleep(sock->sk), &wait);
- rc = sock->ops->accept(sock, newsock, O_NONBLOCK);
+ rc = sock->ops->accept(sock, newsock, O_NONBLOCK, false);
}
if (rc)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 7f8cf875157c..65a285631994 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -336,7 +336,6 @@ static int cvm_oct_poll(struct oct_rx_group *rx_group, int budget)
if (likely((port < TOTAL_NUMBER_OF_PORTS) &&
cvm_oct_device[port])) {
struct net_device *dev = cvm_oct_device[port];
- struct octeon_ethernet *priv = netdev_priv(dev);
/*
* Only accept packets for devices that are
diff --git a/drivers/staging/vc04_services/Kconfig b/drivers/staging/vc04_services/Kconfig
index e61e4ca064a8..74094fff4367 100644
--- a/drivers/staging/vc04_services/Kconfig
+++ b/drivers/staging/vc04_services/Kconfig
@@ -1,6 +1,7 @@
config BCM2835_VCHIQ
tristate "Videocore VCHIQ"
depends on HAS_DMA
+ depends on OF
depends on RASPBERRYPI_FIRMWARE || (COMPILE_TEST && !RASPBERRYPI_FIRMWARE)
default y
help
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index f5e330099bfc..fd7c16a7ca6e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -43,7 +43,7 @@
#include "target_core_ua.h"
static sense_reason_t core_alua_check_transition(int state, int valid,
- int *primary);
+ int *primary, int explicit);
static int core_alua_set_tg_pt_secondary_state(
struct se_lun *lun, int explicit, int offline);
@@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* the state is a primary or secondary target port asymmetric
* access state.
*/
- rc = core_alua_check_transition(alua_access_state,
- valid_states, &primary);
+ rc = core_alua_check_transition(alua_access_state, valid_states,
+ &primary, 1);
if (rc) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
@@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd)
if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
return 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
return 0;
/*
@@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd)
* Check implicit and explicit ALUA state change request.
*/
static sense_reason_t
-core_alua_check_transition(int state, int valid, int *primary)
+core_alua_check_transition(int state, int valid, int *primary, int explicit)
{
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
@@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary)
*primary = 0;
break;
case ALUA_ACCESS_STATE_TRANSITION:
- /*
- * Transitioning is set internally, and
- * cannot be selected manually.
- */
- goto not_supported;
+ if (!(valid & ALUA_T_SUP) || explicit)
+ /*
+ * Transitioning is set internally and by the tcmu daemon,
+ * and cannot be selected through a STPG.
+ */
+ goto not_supported;
+ *primary = 0;
+ break;
default:
pr_err("Unknown ALUA access state: 0x%02x\n", state);
return TCM_INVALID_PARAMETER_LIST;
@@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp)
static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work,
- struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work);
+ struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work);
struct se_device *dev = tg_pt_gp->tg_pt_gp_dev;
bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status ==
ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG);
@@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt(
if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state)
return 0;
- if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION)
return -EAGAIN;
/*
* Flush any pending transitions
*/
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs &&
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) ==
- ALUA_ACCESS_STATE_TRANSITION) {
- /* Just in case */
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
- tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
- wait_for_completion(&wait);
- tg_pt_gp->tg_pt_gp_transition_complete = NULL;
- return 0;
- }
+ if (!explicit)
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
- tg_pt_gp->tg_pt_gp_alua_previous_state =
- atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
- tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
-
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ?
@@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt(
core_alua_queue_state_change_ua(tg_pt_gp);
+ if (new_state == ALUA_ACCESS_STATE_TRANSITION)
+ return 0;
+
+ tg_pt_gp->tg_pt_gp_alua_previous_state =
+ atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
+ tg_pt_gp->tg_pt_gp_alua_pending_state = new_state;
+
/*
* Check for the optional ALUA primary state transition delay
*/
@@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt(
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
- unsigned long transition_tmo;
-
- transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work,
- transition_tmo);
- } else {
+ schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ if (explicit) {
tg_pt_gp->tg_pt_gp_transition_complete = &wait;
- queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq,
- &tg_pt_gp->tg_pt_gp_transition_work, 0);
wait_for_completion(&wait);
tg_pt_gp->tg_pt_gp_transition_complete = NULL;
}
@@ -1149,8 +1138,12 @@ int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *tg_pt_gp;
int primary, valid_states, rc = 0;
+ if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
+ return -ENODEV;
+
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
- if (core_alua_check_transition(new_state, valid_states, &primary) != 0)
+ if (core_alua_check_transition(new_state, valid_states, &primary,
+ explicit) != 0)
return -EINVAL;
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
@@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev,
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
- INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
- core_alua_do_transition_tg_pt_work);
+ INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
+ core_alua_do_transition_tg_pt_work);
tg_pt_gp->tg_pt_gp_dev = dev;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED);
@@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp(
dev->t10_alua.alua_tg_pt_gps_counter--;
spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
- flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work);
+ flush_work(&tg_pt_gp->tg_pt_gp_transition_work);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
@@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit(
unsigned long tmp;
int ret;
- if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH ||
+ if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ||
(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
return -ENODEV;
@@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata(
int core_setup_alua(struct se_device *dev)
{
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
struct t10_alua_lu_gp_member *lu_gp_mem;
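The ALUA changes above convert tg_pt_gp_transition_work from a delayed_work to a plain work_struct, since the implicit-transition delay path was removed; container_of(), INIT_WORK(), schedule_work() and flush_work() all change accordingly. A generic sketch of that conversion pattern (struct and function names are illustrative):

	#include <linux/workqueue.h>

	/* Sketch of the delayed_work -> work_struct conversion applied to
	 * tg_pt_gp_transition_work above.
	 */
	struct demo_ctx {
		struct work_struct work;	/* was: struct delayed_work work */
	};

	static void demo_fn(struct work_struct *work)
	{
		struct demo_ctx *ctx =
			container_of(work, struct demo_ctx, work);
			/* was: container_of(work, struct demo_ctx, work.work) */
		(void)ctx;
	}

	static void demo_run(struct demo_ctx *ctx)
	{
		INIT_WORK(&ctx->work, demo_fn);	/* was: INIT_DELAYED_WORK() */
		schedule_work(&ctx->work);	/* was: queue_delayed_work(wq, ..., tmo) */
		flush_work(&ctx->work);		/* was: flush_delayed_work() */
	}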
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 54b36c9835be..38b5025e4c7a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo)
pr_err("Missing tfo->aborted_task()\n");
return -EINVAL;
}
+ if (!tfo->check_stop_free) {
+ pr_err("Missing tfo->check_stop_free()\n");
+ return -EINVAL;
+ }
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index a8f8e53f2f57..94cda7991e80 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
@@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate)
static const struct target_backend_ops pscsi_ops = {
.name = "pscsi",
.owner = THIS_MODULE,
- .transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
+ .transport_flags = TRANSPORT_FLAG_PASSTHROUGH |
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
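
The pscsi hunks above guard against zero-valued hardware limits with min_not_zero(). A runnable userspace restatement of the macro's semantics (GCC statement expressions, mirroring the kernel definition):

#include <stdio.h>

/* Pick the smaller of two values, ignoring a zero operand. */
#define min_not_zero(x, y) ({				\
	typeof(x) _x = (x);				\
	typeof(y) _y = (y);				\
	_x == 0 ? _y : (_y == 0 ? _x : (_x < _y ? _x : _y)); })

int main(void)
{
	int sector_size = 0;

	/* A probed sector size of 0 must not become the block size. */
	printf("%d\n", min_not_zero(sector_size, 512));	/* 512 */
	sector_size = 4096;
	printf("%d\n", min_not_zero(sector_size, 512));	/* 512 */
	return 0;
}
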
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 68d8aef7ab78..c194063f169b 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
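
The sbc hunk above selects 10-byte or 16-byte CDB decoding by opcode: VERIFY(10) carries a 32-bit LBA at bytes 2-5, while VERIFY(16) carries a 64-bit LBA at bytes 2-9 and a 32-bit length at bytes 10-13. A self-contained sketch of the big-endian extraction the transport_* helpers perform (helper names here are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | p[3];
}

static uint64_t get_be64(const uint8_t *p)
{
	return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

int main(void)
{
	/* VERIFY(16), opcode 0x8f: LBA 16, 8 sectors */
	uint8_t cdb[16] = { 0x8f, 0, 0, 0, 0, 0, 0, 0, 0, 0x10,
			    0, 0, 0, 8, 0, 0 };

	printf("lba=%llu sectors=%u\n",
	       (unsigned long long)get_be64(&cdb[2]),
	       (unsigned)get_be32(&cdb[10]));
	return 0;
}
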
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index c0dbfa016575..6fb191914f45 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -602,7 +602,8 @@ int core_tpg_add_lun(
if (ret)
goto out_kill_ref;
- if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
+ if (!(dev->transport->transport_flags &
+ TRANSPORT_FLAG_PASSTHROUGH_ALUA) &&
!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 434d9d693989..b1a3cdb29468 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
* Fabric modules are expected to return '1' here if the se_cmd being
* passed is released at this point, or zero if not being released.
*/
- return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd)
- : 0;
+ return cmd->se_tfo->check_stop_free(cmd);
}
static void transport_lun_remove_cmd(struct se_cmd *cmd)
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index c3adefe95e50..c6874c38a10b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -28,6 +28,7 @@
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/highmem.h>
+#include <linux/configfs.h>
#include <net/genetlink.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>
@@ -112,6 +113,7 @@ struct tcmu_dev {
spinlock_t commands_lock;
struct timer_list timeout;
+ unsigned int cmd_time_out;
char dev_config[TCMU_CONFIG_LEN];
};
@@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
+ if (udev->cmd_time_out)
+ tcmu_cmd->deadline = jiffies +
+ msecs_to_jiffies(udev->cmd_time_out);
idr_preload(GFP_KERNEL);
spin_lock_irq(&udev->commands_lock);
@@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_debug("sleeping for ring space\n");
spin_unlock_irq(&udev->cmdr_lock);
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+ if (udev->cmd_time_out)
+ ret = schedule_timeout(
+ msecs_to_jiffies(udev->cmd_time_out));
+ else
+ ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
@@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
- mod_timer(&udev->timeout,
- round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
+ if (udev->cmd_time_out)
+ mod_timer(&udev->timeout, round_jiffies_up(jiffies +
+ msecs_to_jiffies(udev->cmd_time_out)));
return TCM_NO_SENSE;
}
@@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
}
udev->hba = hba;
+ udev->cmd_time_out = TCMU_TIME_OUT;
init_waitqueue_head(&udev->wait_cmdr);
spin_lock_init(&udev->cmdr_lock);
@@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev)
if (dev->dev_attrib.hw_block_size == 0)
dev->dev_attrib.hw_block_size = 512;
/* Other attributes can be configured in userspace */
- dev->dev_attrib.hw_max_sectors = 128;
+ if (!dev->dev_attrib.hw_max_sectors)
+ dev->dev_attrib.hw_max_sectors = 128;
dev->dev_attrib.hw_queue_depth = 128;
ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
@@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p)
kfree(udev);
}
+static bool tcmu_dev_configured(struct tcmu_dev *udev)
+{
+ return udev->uio_info.uio_dev ? true : false;
+}
+
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
@@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev)
spin_unlock_irq(&udev->commands_lock);
WARN_ON(!all_expired);
- /* Device was configured */
- if (udev->uio_info.uio_dev) {
+ if (tcmu_dev_configured(udev)) {
tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
udev->uio_info.uio_dev->minor);
@@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev)
}
enum {
- Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
+ Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
+ Opt_err,
};
static match_table_t tokens = {
{Opt_dev_config, "dev_config=%s"},
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
+ {Opt_hw_max_sectors, "hw_max_sectors=%u"},
{Opt_err, NULL}
};
+static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib)
+{
+ unsigned long tmp_ul;
+ char *arg_p;
+ int ret;
+
+ arg_p = match_strdup(arg);
+ if (!arg_p)
+ return -ENOMEM;
+
+ ret = kstrtoul(arg_p, 0, &tmp_ul);
+ kfree(arg_p);
+ if (ret < 0) {
+ pr_err("kstrtoul() failed for dev attrib\n");
+ return ret;
+ }
+ if (!tmp_ul) {
+ pr_err("dev attrib must be nonzero\n");
+ return -EINVAL;
+ }
+ *dev_attrib = tmp_ul;
+ return 0;
+}
+
static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
const char *page, ssize_t count)
{
@@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
char *orig, *ptr, *opts, *arg_p;
substring_t args[MAX_OPT_ARGS];
int ret = 0, token;
- unsigned long tmp_ul;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
@@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
pr_err("kstrtoul() failed for dev_size=\n");
break;
case Opt_hw_block_size:
- arg_p = match_strdup(&args[0]);
- if (!arg_p) {
- ret = -ENOMEM;
- break;
- }
- ret = kstrtoul(arg_p, 0, &tmp_ul);
- kfree(arg_p);
- if (ret < 0) {
- pr_err("kstrtoul() failed for hw_block_size=\n");
- break;
- }
- if (!tmp_ul) {
- pr_err("hw_block_size must be nonzero\n");
- break;
- }
- dev->dev_attrib.hw_block_size = tmp_ul;
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_block_size));
+ break;
+ case Opt_hw_max_sectors:
+ ret = tcmu_set_dev_attrib(&args[0],
+ &(dev->dev_attrib.hw_max_sectors));
break;
default:
break;
}
+
+ if (ret)
+ break;
}
kfree(orig);
@@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return passthrough_parse_cdb(cmd, tcmu_queue_cmd);
}
-static const struct target_backend_ops tcmu_ops = {
+static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+
+ return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
+}
+
+static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page,
+ size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = container_of(da->da_dev,
+ struct tcmu_dev, se_dev);
+ u32 val;
+ int ret;
+
+ if (da->da_dev->export_count) {
+ pr_err("Unable to set tcmu cmd_time_out while exports exist\n");
+ return -EINVAL;
+ }
+
+ ret = kstrtou32(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ if (!val) {
+ pr_err("Illegal value for cmd_time_out\n");
+ return -EINVAL;
+ }
+
+ udev->cmd_time_out = val * MSEC_PER_SEC;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, cmd_time_out);
+
+static struct configfs_attribute **tcmu_attrs;
+
+static struct target_backend_ops tcmu_ops = {
.name = "user",
.owner = THIS_MODULE,
.transport_flags = TRANSPORT_FLAG_PASSTHROUGH,
@@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = {
.show_configfs_dev_params = tcmu_show_configfs_dev_params,
.get_device_type = sbc_get_device_type,
.get_blocks = tcmu_get_blocks,
- .tb_dev_attrib_attrs = passthrough_attrib_attrs,
+ .tb_dev_attrib_attrs = NULL,
};
static int __init tcmu_module_init(void)
{
- int ret;
+ int ret, i, len = 0;
BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ len += sizeof(struct configfs_attribute *);
+ }
+ len += sizeof(struct configfs_attribute *) * 2;
+
+ tcmu_attrs = kzalloc(len, GFP_KERNEL);
+ if (!tcmu_attrs) {
+ ret = -ENOMEM;
+ goto out_unreg_genl;
+ }
+
+ for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) {
+ tcmu_attrs[i] = passthrough_attrib_attrs[i];
+ }
+ tcmu_attrs[i] = &tcmu_attr_cmd_time_out;
+ tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs;
+
ret = transport_backend_register(&tcmu_ops);
if (ret)
- goto out_unreg_genl;
+ goto out_attrs;
return 0;
+out_attrs:
+ kfree(tcmu_attrs);
out_unreg_genl:
genl_unregister_family(&tcmu_genl_family);
out_unreg_device:
@@ -1194,6 +1287,7 @@ out_free_cache:
static void __exit tcmu_module_exit(void)
{
target_backend_unregister(&tcmu_ops);
+ kfree(tcmu_attrs);
genl_unregister_family(&tcmu_genl_family);
root_device_unregister(tcmu_root_device);
kmem_cache_destroy(tcmu_cmd_cache);
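
tcmu_module_init() above sizes a new attribute array from the NULL-terminated passthrough_attrib_attrs, leaving room for one extra attribute plus the terminator, then appends cmd_time_out. An illustrative userspace analogue of that copy-and-append pattern (the attribute names are placeholders):

#include <stdio.h>
#include <stdlib.h>

static const char *base_attrs[] = { "hw_block_size", "hw_max_sectors", NULL };

int main(void)
{
	size_t i, n = 0;
	const char **attrs;

	while (base_attrs[n])
		n++;

	/* n existing entries + 1 new attribute + NULL terminator */
	attrs = calloc(n + 2, sizeof(*attrs));
	if (!attrs)
		return 1;
	for (i = 0; i < n; i++)
		attrs[i] = base_attrs[i];
	attrs[n] = "cmd_time_out";

	for (i = 0; attrs[i]; i++)
		puts(attrs[i]);
	free(attrs);
	return 0;
}
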
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 1bacbc3b19a0..e94aea8c0d05 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -114,7 +114,7 @@
#define DEFAULT_TX_BUF_COUNT 3
struct n_hdlc_buf {
- struct n_hdlc_buf *link;
+ struct list_head list_item;
int count;
char buf[1];
};
@@ -122,8 +122,7 @@ struct n_hdlc_buf {
#define N_HDLC_BUF_SIZE (sizeof(struct n_hdlc_buf) + maxframe)
struct n_hdlc_buf_list {
- struct n_hdlc_buf *head;
- struct n_hdlc_buf *tail;
+ struct list_head list;
int count;
spinlock_t spinlock;
};
@@ -136,7 +135,6 @@ struct n_hdlc_buf_list {
* @backup_tty - TTY to use if tty gets closed
* @tbusy - reentrancy flag for tx wakeup code
* @woke_up - tx wakeup occurred while the transmit path was busy
- * @tbuf - currently transmitting tx buffer
* @tx_buf_list - list of pending transmit frame buffers
* @rx_buf_list - list of received frame buffers
* @tx_free_buf_list - list unused transmit frame buffers
@@ -149,7 +147,6 @@ struct n_hdlc {
struct tty_struct *backup_tty;
int tbusy;
int woke_up;
- struct n_hdlc_buf *tbuf;
struct n_hdlc_buf_list tx_buf_list;
struct n_hdlc_buf_list rx_buf_list;
struct n_hdlc_buf_list tx_free_buf_list;
@@ -159,6 +156,8 @@ struct n_hdlc {
/*
* HDLC buffer list manipulation functions
*/
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf);
static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
struct n_hdlc_buf *buf);
static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *list);
@@ -208,16 +207,9 @@ static void flush_tx_queue(struct tty_struct *tty)
{
struct n_hdlc *n_hdlc = tty2n_hdlc(tty);
struct n_hdlc_buf *buf;
- unsigned long flags;
while ((buf = n_hdlc_buf_get(&n_hdlc->tx_buf_list)))
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, buf);
- spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
- if (n_hdlc->tbuf) {
- n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, n_hdlc->tbuf);
- n_hdlc->tbuf = NULL;
- }
- spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
}
static struct tty_ldisc_ops n_hdlc_ldisc = {
@@ -283,7 +275,6 @@ static void n_hdlc_release(struct n_hdlc *n_hdlc)
} else
break;
}
- kfree(n_hdlc->tbuf);
kfree(n_hdlc);
} /* end of n_hdlc_release() */
@@ -402,13 +393,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
n_hdlc->woke_up = 0;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock, flags);
- /* get current transmit buffer or get new transmit */
- /* buffer from list of pending transmit buffers */
-
- tbuf = n_hdlc->tbuf;
- if (!tbuf)
- tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
-
+ tbuf = n_hdlc_buf_get(&n_hdlc->tx_buf_list);
while (tbuf) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)sending frame %p, count=%d\n",
@@ -420,7 +405,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* rollback was possible and has been done */
if (actual == -ERESTARTSYS) {
- n_hdlc->tbuf = tbuf;
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
/* if transmit error, throw frame away by */
@@ -435,10 +420,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
/* free current transmit buffer */
n_hdlc_buf_put(&n_hdlc->tx_free_buf_list, tbuf);
-
- /* this tx buffer is done */
- n_hdlc->tbuf = NULL;
-
+
/* wake up sleeping writers */
wake_up_interruptible(&tty->write_wait);
@@ -448,10 +430,12 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)frame %p pending\n",
__FILE__,__LINE__,tbuf);
-
- /* buffer not accepted by driver */
- /* set this buffer as pending buffer */
- n_hdlc->tbuf = tbuf;
+
+ /*
+ * the buffer was not accepted by the driver,
+ * so return it to the tx queue
+ */
+ n_hdlc_buf_return(&n_hdlc->tx_buf_list, tbuf);
break;
}
}
@@ -749,7 +733,8 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
int error = 0;
int count;
unsigned long flags;
-
+ struct n_hdlc_buf *buf = NULL;
+
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d)n_hdlc_tty_ioctl() called %d\n",
__FILE__,__LINE__,cmd);
@@ -763,8 +748,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
/* report count of read data available */
/* in next available frame (if any) */
spin_lock_irqsave(&n_hdlc->rx_buf_list.spinlock,flags);
- if (n_hdlc->rx_buf_list.head)
- count = n_hdlc->rx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->rx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count = buf->count;
else
count = 0;
spin_unlock_irqrestore(&n_hdlc->rx_buf_list.spinlock,flags);
@@ -776,8 +763,10 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
count = tty_chars_in_buffer(tty);
/* add size of next output frame in queue */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
- if (n_hdlc->tx_buf_list.head)
- count += n_hdlc->tx_buf_list.head->count;
+ buf = list_first_entry_or_null(&n_hdlc->tx_buf_list.list,
+ struct n_hdlc_buf, list_item);
+ if (buf)
+ count += buf->count;
spin_unlock_irqrestore(&n_hdlc->tx_buf_list.spinlock,flags);
error = put_user(count, (int __user *)arg);
break;
@@ -825,14 +814,14 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_wait(filp, &tty->write_wait, wait);
/* set bits for operations that won't block */
- if (n_hdlc->rx_buf_list.head)
+ if (!list_empty(&n_hdlc->rx_buf_list.list))
mask |= POLLIN | POLLRDNORM; /* readable */
if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
mask |= POLLHUP;
if (tty_hung_up_p(filp))
mask |= POLLHUP;
if (!tty_is_writelocked(tty) &&
- n_hdlc->tx_free_buf_list.head)
+ !list_empty(&n_hdlc->tx_free_buf_list.list))
mask |= POLLOUT | POLLWRNORM; /* writable */
}
return mask;
@@ -856,7 +845,12 @@ static struct n_hdlc *n_hdlc_alloc(void)
spin_lock_init(&n_hdlc->tx_free_buf_list.spinlock);
spin_lock_init(&n_hdlc->rx_buf_list.spinlock);
spin_lock_init(&n_hdlc->tx_buf_list.spinlock);
-
+
+ INIT_LIST_HEAD(&n_hdlc->rx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_free_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->rx_buf_list.list);
+ INIT_LIST_HEAD(&n_hdlc->tx_buf_list.list);
+
/* allocate free rx buffer list */
for(i=0;i<DEFAULT_RX_BUF_COUNT;i++) {
buf = kmalloc(N_HDLC_BUF_SIZE, GFP_KERNEL);
@@ -884,53 +878,65 @@ static struct n_hdlc *n_hdlc_alloc(void)
} /* end of n_hdlc_alloc() */
/**
+ * n_hdlc_buf_return - put an HDLC buffer back at the head of the specified list
+ * @buf_list - pointer to the buffer list
+ * @buf - pointer to the buffer
+ */
+static void n_hdlc_buf_return(struct n_hdlc_buf_list *buf_list,
+ struct n_hdlc_buf *buf)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
+}
+
+/**
* n_hdlc_buf_put - add specified HDLC buffer to tail of specified list
- * @list - pointer to buffer list
+ * @buf_list - pointer to buffer list
* @buf - pointer to buffer
*/
-static void n_hdlc_buf_put(struct n_hdlc_buf_list *list,
+static void n_hdlc_buf_put(struct n_hdlc_buf_list *buf_list,
struct n_hdlc_buf *buf)
{
unsigned long flags;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf->link=NULL;
- if (list->tail)
- list->tail->link = buf;
- else
- list->head = buf;
- list->tail = buf;
- (list->count)++;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
-
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ list_add_tail(&buf->list_item, &buf_list->list);
+ buf_list->count++;
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
} /* end of n_hdlc_buf_put() */
/**
* n_hdlc_buf_get - remove and return an HDLC buffer from list
- * @list - pointer to HDLC buffer list
+ * @buf_list - pointer to HDLC buffer list
*
* Remove and return an HDLC buffer from the head of the specified HDLC buffer
* list.
* Returns a pointer to HDLC buffer if available, otherwise %NULL.
*/
-static struct n_hdlc_buf* n_hdlc_buf_get(struct n_hdlc_buf_list *list)
+static struct n_hdlc_buf *n_hdlc_buf_get(struct n_hdlc_buf_list *buf_list)
{
unsigned long flags;
struct n_hdlc_buf *buf;
- spin_lock_irqsave(&list->spinlock,flags);
-
- buf = list->head;
+
+ spin_lock_irqsave(&buf_list->spinlock, flags);
+
+ buf = list_first_entry_or_null(&buf_list->list,
+ struct n_hdlc_buf, list_item);
if (buf) {
- list->head = buf->link;
- (list->count)--;
+ list_del(&buf->list_item);
+ buf_list->count--;
}
- if (!list->head)
- list->tail = NULL;
-
- spin_unlock_irqrestore(&list->spinlock,flags);
+
+ spin_unlock_irqrestore(&buf_list->spinlock, flags);
return buf;
-
} /* end of n_hdlc_buf_get() */
static char hdlc_banner[] __initdata =
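
The n_hdlc conversion above replaces an open-coded head/tail queue with the kernel's intrusive list_head, where the link lives inside the payload struct and container_of() recovers the payload from the embedded node. A minimal runnable userspace sketch of that idiom (simplified; the kernel versions also handle list poisoning, iteration helpers, etc.):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_node *n, struct list_node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static int list_empty(const struct list_node *h) { return h->next == h; }

struct frame {
	struct list_node link;	/* intrusive link, like list_item above */
	int count;
};

int main(void)
{
	struct list_node q;
	struct frame a = { .count = 42 };

	list_init(&q);
	list_add_tail(&a.link, &q);
	if (!list_empty(&q)) {
		struct frame *f = container_of(q.next, struct frame, link);
		printf("first frame count=%d\n", f->count);	/* 42 */
	}
	return 0;
}
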
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b4f86c219db1..7a17aedbf902 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1031,8 +1031,10 @@ static int s3c64xx_serial_startup(struct uart_port *port)
if (ourport->dma) {
ret = s3c24xx_serial_request_dma(ourport);
if (ret < 0) {
- dev_warn(port->dev, "DMA request failed\n");
- return ret;
+ dev_warn(port->dev,
+ "DMA request failed, DMA will not be used\n");
+ devm_kfree(port->dev, ourport->dma);
+ ourport->dma = NULL;
}
}
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 2092e46b1380..f8d0747810e7 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -250,6 +250,7 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
val = dwc3_omap_read_utmi_ctrl(omap);
val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG;
dwc3_omap_write_utmi_ctrl(omap, val);
+ break;
case OMAP_DWC3_VBUS_OFF:
val = dwc3_omap_read_utmi_ctrl(omap);
@@ -392,7 +393,7 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
{
u32 reg;
struct device_node *node = omap->dev->of_node;
- int utmi_mode = 0;
+ u32 utmi_mode = 0;
reg = dwc3_omap_read_utmi_ctrl(omap);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4db97ecae885..0d75158e43fe 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1342,6 +1342,68 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
if (r == req) {
/* wait until it is processed */
dwc3_stop_active_transfer(dwc, dep->number, true);
+
+ /*
+ * If request was already started, this means we had to
+ * stop the transfer. With that we also need to ignore
+ * all TRBs used by the request, however TRBs can only
+ * be modified after completion of END_TRANSFER
+ * command. So what we do here is that we wait for
+ * END_TRANSFER completion and only after that, we jump
+ * over TRBs by clearing HWO and incrementing dequeue
+ * pointer.
+ *
+ * Note that we have 2 possible types of transfers here:
+ *
+ * i) Linear buffer request
+ * ii) SG-list based request
+ *
+ * SG-list based requests will have r->num_pending_sgs
+ * set to a valid number (> 0). Linear requests
+ * normally use a single TRB.
+ *
+ * For each of these two cases, if r->unaligned flag is
+ * set, one extra TRB has been used to align transfer
+ * size to wMaxPacketSize.
+ *
+ * All of these cases need to be taken into
+ * consideration so we don't mess up our TRB ring
+ * pointers.
+ */
+ wait_event_lock_irq(dep->wait_end_transfer,
+ !(dep->flags & DWC3_EP_END_TRANSFER_PENDING),
+ dwc->lock);
+
+ if (!r->trb)
+ goto out1;
+
+ if (r->num_pending_sgs) {
+ struct dwc3_trb *trb;
+ int i = 0;
+
+ for (i = 0; i < r->num_pending_sgs; i++) {
+ trb = r->trb + i;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+
+ if (r->unaligned) {
+ trb = r->trb + r->num_pending_sgs + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ } else {
+ struct dwc3_trb *trb = r->trb;
+
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+
+ if (r->unaligned) {
+ trb = r->trb + 1;
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+ }
goto out1;
}
dev_err(dwc->dev, "request %p was not queued to %s\n",
@@ -1352,6 +1414,7 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
out1:
/* giveback the request */
+ dep->queued_requests--;
dwc3_gadget_giveback(dep, req, -ECONNRESET);
out0:
@@ -2126,12 +2189,12 @@ static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
return 1;
}
- if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
- return 1;
-
count = trb->size & DWC3_TRB_SIZE_MASK;
req->remaining += count;
+ if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
+ return 1;
+
if (dep->direction) {
if (count) {
trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
@@ -3228,15 +3291,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
int dwc3_gadget_suspend(struct dwc3 *dwc)
{
- int ret;
-
if (!dwc->gadget_driver)
return 0;
- ret = dwc3_gadget_run_stop(dwc, false, false);
- if (ret < 0)
- return ret;
-
+ dwc3_gadget_run_stop(dwc, false, false);
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
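
The dequeue hunk above sleeps on wait_end_transfer via wait_event_lock_irq(), which drops the given spinlock (re-enabling IRQs) while sleeping and re-acquires it before re-testing the condition, so the caller's locking invariant holds on both sides of the wait. A condensed sketch of that idiom with illustrative demo_* names:

#include <linux/wait.h>
#include <linux/spinlock.h>

#define DEMO_END_TRANSFER_PENDING	(1 << 0)

struct demo_ep {
	wait_queue_head_t wait_end_transfer;
	unsigned int flags;
};

/* Caller holds *lock, taken with spin_lock_irq(). */
static void demo_wait_for_end_transfer(struct demo_ep *ep, spinlock_t *lock)
{
	wait_event_lock_irq(ep->wait_end_transfer,
			    !(ep->flags & DEMO_END_TRANSFER_PENDING),
			    *lock);
}
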
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 3129bcf74d7d..265e223ab645 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -28,23 +28,23 @@ struct dwc3;
#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget))
/* DEPCFG parameter 1 */
-#define DWC3_DEPCFG_INT_NUM(n) ((n) << 0)
+#define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0)
#define DWC3_DEPCFG_XFER_COMPLETE_EN (1 << 8)
#define DWC3_DEPCFG_XFER_IN_PROGRESS_EN (1 << 9)
#define DWC3_DEPCFG_XFER_NOT_READY_EN (1 << 10)
#define DWC3_DEPCFG_FIFO_ERROR_EN (1 << 11)
#define DWC3_DEPCFG_STREAM_EVENT_EN (1 << 13)
-#define DWC3_DEPCFG_BINTERVAL_M1(n) ((n) << 16)
+#define DWC3_DEPCFG_BINTERVAL_M1(n) (((n) & 0xff) << 16)
#define DWC3_DEPCFG_STREAM_CAPABLE (1 << 24)
-#define DWC3_DEPCFG_EP_NUMBER(n) ((n) << 25)
+#define DWC3_DEPCFG_EP_NUMBER(n) (((n) & 0x1f) << 25)
#define DWC3_DEPCFG_BULK_BASED (1 << 30)
#define DWC3_DEPCFG_FIFO_BASED (1 << 31)
/* DEPCFG parameter 0 */
-#define DWC3_DEPCFG_EP_TYPE(n) ((n) << 1)
-#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) ((n) << 3)
-#define DWC3_DEPCFG_FIFO_NUMBER(n) ((n) << 17)
-#define DWC3_DEPCFG_BURST_SIZE(n) ((n) << 22)
+#define DWC3_DEPCFG_EP_TYPE(n) (((n) & 0x3) << 1)
+#define DWC3_DEPCFG_MAX_PACKET_SIZE(n) (((n) & 0x7ff) << 3)
+#define DWC3_DEPCFG_FIFO_NUMBER(n) (((n) & 0x1f) << 17)
+#define DWC3_DEPCFG_BURST_SIZE(n) (((n) & 0xf) << 22)
#define DWC3_DEPCFG_DATA_SEQ_NUM(n) ((n) << 26)
/* This applies for core versions earlier than 1.94a */
#define DWC3_DEPCFG_IGN_SEQ_NUM (1 << 31)
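
The gadget.h hunk masks each DEPCFG argument before shifting; without the mask, an oversized value silently spills past its field into neighbouring register bits. A runnable demonstration with an illustrative two-field layout (not the DWC3 register map):

#include <stdint.h>
#include <stdio.h>

#define FIELD_A(n)	(((n) & 0x1f) << 0)	/* bits 4:0 */
#define FIELD_B(n)	(((n) & 0xff) << 16)	/* bits 23:16 */

int main(void)
{
	/* 0x2a needs 6 bits and does not fit in FIELD_A's 5 */
	uint32_t unmasked = (0x2au << 0) | (0x01u << 16);
	uint32_t masked = FIELD_A(0x2a) | FIELD_B(0x01);

	printf("unmasked=0x%08x masked=0x%08x\n", unmasked, masked);
	/* unmasked=0x0001002a (bit 5 outside the field), masked=0x0001000a */
	return 0;
}
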
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 78c44979dde3..cbff3b02840d 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
ret = unregister_gadget(gi);
if (ret)
goto err;
+ kfree(name);
} else {
if (gi->composite.gadget_driver.udc_name) {
ret = -EBUSY;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a5b7cd615698..a0085571824d 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1834,11 +1834,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while(count--) {
struct usb_endpoint_descriptor *ds;
+ struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
+ int needs_comp_desc = false;
int desc_idx;
- if (ffs->gadget->speed == USB_SPEED_SUPER)
+ if (ffs->gadget->speed == USB_SPEED_SUPER) {
desc_idx = 2;
- else if (ffs->gadget->speed == USB_SPEED_HIGH)
+ needs_comp_desc = true;
+ } else if (ffs->gadget->speed == USB_SPEED_HIGH)
desc_idx = 1;
else
desc_idx = 0;
@@ -1855,6 +1858,14 @@ static int ffs_func_eps_enable(struct ffs_function *func)
ep->ep->driver_data = ep;
ep->ep->desc = ds;
+
+ comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
+ USB_DT_ENDPOINT_SIZE);
+ ep->ep->maxburst = comp_desc->bMaxBurst + 1;
+
+ if (needs_comp_desc)
+ ep->ep->comp_desc = comp_desc;
+
ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
@@ -2253,7 +2264,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) ||
d->bFirstInterfaceNumber >= ffs->interfaces_count ||
- d->Reserved1)
+ !d->Reserved1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
if (d->Reserved2[i])
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 27ed51b5082f..29b41b5dee04 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -258,13 +258,6 @@ uvc_function_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
memcpy(&uvc_event->req, ctrl, sizeof(uvc_event->req));
v4l2_event_queue(&uvc->vdev, &v4l2_event);
- /* Pass additional setup data to userspace */
- if (uvc->event_setup_out && uvc->event_length) {
- uvc->control_req->length = uvc->event_length;
- return usb_ep_queue(uvc->func.config->cdev->gadget->ep0,
- uvc->control_req, GFP_ATOMIC);
- }
-
return 0;
}
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index a2615d64d07c..a2c916869293 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -84,8 +84,7 @@ static int ep_open(struct inode *, struct file *);
/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
- /* DISBLED is the initial state.
- */
+ /* DISABLED is the initial state. */
STATE_DEV_DISABLED = 0,
/* Only one open() of /dev/gadget/$CHIP; only one file tracks
@@ -1782,8 +1781,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
spin_lock_irq (&dev->lock);
value = -EINVAL;
- if (dev->buf)
+ if (dev->buf) {
+ kfree(kbuf);
goto fail;
+ }
dev->buf = kbuf;
/* full or low speed config */
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 11bbce28bc23..2035906b8ced 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -610,7 +610,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
- unsigned long flags, ept_cfg, maxpacket;
+ unsigned long flags, maxpacket;
unsigned int nr_trans;
DBG(DBG_GADGET, "%s: ep_enable: desc=%p\n", ep->ep.name, desc);
@@ -630,7 +630,7 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
ep->is_in = 0;
DBG(DBG_ERR, "%s: EPT_CFG = 0x%lx (maxpacket = %lu)\n",
- ep->ep.name, ept_cfg, maxpacket);
+ ep->ep.name, ep->ept_cfg, maxpacket);
if (usb_endpoint_dir_in(desc)) {
ep->is_in = 1;
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index c60abe3a68f9..8cabc5944d5f 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1031,6 +1031,8 @@ static int dummy_udc_probe(struct platform_device *pdev)
int rc;
dum = *((void **)dev_get_platdata(&pdev->dev));
+ /* Clear the usb_gadget region before re-registering with udc-core */
+ memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
dum->gadget.max_speed = USB_SPEED_SUPER;
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 85504419ab31..3828c2ec8623 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
*/
while (!list_empty(&ep->queue)) {
struct net2280_request *req;
- u32 tmp;
+ u32 req_dma_count;
req = list_entry(ep->queue.next,
struct net2280_request, queue);
if (!req->valid)
break;
rmb();
- tmp = le32_to_cpup(&req->td->dmacount);
- if ((tmp & BIT(VALID_BIT)) != 0)
+ req_dma_count = le32_to_cpup(&req->td->dmacount);
+ if ((req_dma_count & BIT(VALID_BIT)) != 0)
break;
/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
@@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
*/
if (unlikely(req->td->dmadesc == 0)) {
/* paranoia */
- tmp = readl(&ep->dma->dmacount);
- if (tmp & DMA_BYTE_COUNT_MASK)
+ u32 const ep_dmacount = readl(&ep->dma->dmacount);
+
+ if (ep_dmacount & DMA_BYTE_COUNT_MASK)
break;
/* single transfer mode */
- dma_done(ep, req, tmp, 0);
+ dma_done(ep, req, req_dma_count, 0);
num_completed++;
break;
} else if (!ep->is_in &&
(req->req.length % ep->ep.maxpacket) &&
!(ep->dev->quirks & PLX_PCIE)) {
- tmp = readl(&ep->regs->ep_stat);
+ u32 const ep_stat = readl(&ep->regs->ep_stat);
/* AVOID TROUBLE HERE by not issuing short reads from
* your gadget driver. That helps avoid errata 0121,
* 0122, and 0124; not all cases trigger the warning.
*/
- if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
+ if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
ep_warn(ep->dev, "%s lost packet sync!\n",
ep->ep.name);
req->req.status = -EOVERFLOW;
} else {
- tmp = readl(&ep->regs->ep_avail);
- if (tmp) {
+ u32 const ep_avail = readl(&ep->regs->ep_avail);
+ if (ep_avail) {
/* fifo gets flushed later */
ep->out_overflow = 1;
ep_dbg(ep->dev,
"%s dma, discard %d len %d\n",
- ep->ep.name, tmp,
+ ep->ep.name, ep_avail,
req->req.length);
req->req.status = -EOVERFLOW;
}
}
}
- dma_done(ep, req, tmp, 0);
+ dma_done(ep, req, req_dma_count, 0);
num_completed++;
}
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index e1335ad5bce9..832c4fdbe985 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
usb_del_gadget_udc(&udc->gadget);
pxa_cleanup_debugfs(udc);
- if (!IS_ERR_OR_NULL(udc->transceiver))
+ if (!IS_ERR_OR_NULL(udc->transceiver)) {
usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
- usb_put_phy(udc->transceiver);
+ usb_put_phy(udc->transceiver);
+ }
udc->transceiver = NULL;
the_controller = NULL;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 414e3c376dbb..5302f988e7e6 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -350,7 +350,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "SetPortFeat: SUSPEND\n");
- if (valid_port(wIndex)) {
+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
ohci_at91_port_suspend(ohci_at91->sfr_regmap,
1);
return 0;
@@ -393,7 +393,7 @@ static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
case USB_PORT_FEAT_SUSPEND:
dev_dbg(hcd->self.controller, "ClearPortFeature: SUSPEND\n");
- if (valid_port(wIndex)) {
+ if (valid_port(wIndex) && ohci_at91->sfr_regmap) {
ohci_at91_port_suspend(ohci_at91->sfr_regmap,
0);
return 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 363d125300ea..2b4a00fa735d 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -109,7 +109,7 @@ static void xhci_print_cap_regs(struct xhci_hcd *xhci)
xhci_dbg(xhci, "RTSOFF 0x%x:\n", temp & RTSOFF_MASK);
/* xhci 1.1 controllers have the HCCPARAMS2 register */
- if (hci_version > 100) {
+ if (hci_version > 0x100) {
temp = readl(&xhci->cap_regs->hcc_params2);
xhci_dbg(xhci, "HCC PARAMS2 0x%x:\n", (unsigned int) temp);
xhci_dbg(xhci, " HC %s Force save context capability",
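
HCIVERSION is binary-coded decimal, so xHCI 1.0 reads as 0x0100 and 1.1 as 0x0110; the fix above compares against 0x100 instead of decimal 100. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t v10 = 0x0100, v11 = 0x0110;

	/* decimal literal: even a 1.0 controller (0x100 = 256) passes */
	printf("v10 > 100   -> %d (wrong)\n", v10 > 100);
	/* BCD-aware literal: only genuinely newer controllers pass */
	printf("v10 > 0x100 -> %d, v11 > 0x100 -> %d\n",
	       v10 > 0x100, v11 > 0x100);
	return 0;
}
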
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 9066ec9e0c2e..67d5dc79b6b5 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -382,7 +382,6 @@ static int usb_wakeup_of_property_parse(struct xhci_hcd_mtk *mtk,
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
- .extra_priv_size = sizeof(struct xhci_hcd),
.reset = xhci_mtk_setup,
};
@@ -678,13 +677,13 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto power_off_phys;
}
- if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
- xhci->shared_hcd->can_do_streams = 1;
-
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret)
goto put_usb3_hcd;
+ if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
+ xhci->shared_hcd->can_do_streams = 1;
+
ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
if (ret)
goto dealloc_usb2_hcd;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6d33b42ffcf5..bd02a6cd8e2c 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -286,6 +286,8 @@ static int xhci_plat_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
+
usb_remove_hcd(xhci->shared_hcd);
usb_phy_shutdown(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a59fafb4b329..74436f8ca538 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -1308,7 +1308,6 @@ static int tegra_xhci_setup(struct usb_hcd *hcd)
}
static const struct xhci_driver_overrides tegra_xhci_overrides __initconst = {
- .extra_priv_size = sizeof(struct xhci_hcd),
.reset = tegra_xhci_setup,
};
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6d6c46000e56..50aee8b7718b 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -868,7 +868,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
spin_lock_irqsave(&xhci->lock, flags);
- /* disble usb3 ports Wake bits*/
+ /* disable usb3 ports Wake bits */
port_index = xhci->num_usb3_ports;
port_array = xhci->usb3_ports;
while (port_index--) {
@@ -879,7 +879,7 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
writel(t2, port_array[port_index]);
}
- /* disble usb2 ports Wake bits*/
+ /* disable usb2 ports Wake bits */
port_index = xhci->num_usb2_ports;
port_array = xhci->usb2_ports;
while (port_index--) {
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 095778ff984d..37c63cb39714 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -781,12 +781,6 @@ static int iowarrior_probe(struct usb_interface *interface,
iface_desc = interface->cur_altsetting;
dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
- if (iface_desc->desc.bNumEndpoints < 1) {
- dev_err(&interface->dev, "Invalid number of endpoints\n");
- retval = -EINVAL;
- goto error;
- }
-
/* set up the endpoint information */
for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
endpoint = &iface_desc->endpoint[i].desc;
@@ -797,6 +791,21 @@ static int iowarrior_probe(struct usb_interface *interface,
/* this one will match for the IOWarrior56 only */
dev->int_out_endpoint = endpoint;
}
+
+ if (!dev->int_in_endpoint) {
+ dev_err(&interface->dev, "no interrupt-in endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+
+ if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+ if (!dev->int_out_endpoint) {
+ dev_err(&interface->dev, "no interrupt-out endpoint found\n");
+ retval = -ENODEV;
+ goto error;
+ }
+ }
+
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4e18600dc9b4..91f66d68bcb7 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -375,18 +375,24 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_get_property(np, "dynamic-power-switching", NULL))
hub->conf_data2 |= BIT(7);
- if (of_get_property(np, "oc-delay-100us", NULL)) {
- hub->conf_data2 &= ~BIT(5);
- hub->conf_data2 &= ~BIT(4);
- } else if (of_get_property(np, "oc-delay-4ms", NULL)) {
- hub->conf_data2 &= ~BIT(5);
- hub->conf_data2 |= BIT(4);
- } else if (of_get_property(np, "oc-delay-8ms", NULL)) {
- hub->conf_data2 |= BIT(5);
- hub->conf_data2 &= ~BIT(4);
- } else if (of_get_property(np, "oc-delay-16ms", NULL)) {
- hub->conf_data2 |= BIT(5);
- hub->conf_data2 |= BIT(4);
+ if (!of_property_read_u32(np, "oc-delay-us", property_u32)) {
+ if (*property_u32 == 100) {
+ /* 100 us */
+ hub->conf_data2 &= ~BIT(5);
+ hub->conf_data2 &= ~BIT(4);
+ } else if (*property_u32 == 4000) {
+ /* 4 ms */
+ hub->conf_data2 &= ~BIT(5);
+ hub->conf_data2 |= BIT(4);
+ } else if (*property_u32 == 16000) {
+ /* 16 ms */
+ hub->conf_data2 |= BIT(5);
+ hub->conf_data2 |= BIT(4);
+ } else {
+ /* 8 ms (DEFAULT) */
+ hub->conf_data2 |= BIT(5);
+ hub->conf_data2 &= ~BIT(4);
+ }
}
if (of_get_property(np, "compound-device", NULL))
@@ -432,30 +438,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
}
}
- hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
- if (!of_property_read_u32(np, "max-sp-power", property_u32))
- hub->max_power_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
- if (!of_property_read_u32(np, "max-bp-power", property_u32))
- hub->max_power_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
- if (!of_property_read_u32(np, "max-sp-current", property_u32))
- hub->max_current_sp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
- hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
- if (!of_property_read_u32(np, "max-bp-current", property_u32))
- hub->max_current_bp = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 250);
-
hub->power_on_time = USB251XB_DEF_POWER_ON_TIME;
- if (!of_property_read_u32(np, "power-on-time", property_u32))
- hub->power_on_time = min_t(u8, be32_to_cpu(*property_u32) / 2,
- 255);
+ if (!of_property_read_u32(np, "power-on-time-ms", property_u32))
+ hub->power_on_time = min_t(u8, *property_u32 / 2, 255);
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
@@ -492,6 +477,10 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
/* The following parameters are currently not exposed to devicetree, but
* may be as soon as needed.
*/
+ hub->max_power_sp = USB251XB_DEF_MAX_POWER_SELF;
+ hub->max_power_bp = USB251XB_DEF_MAX_POWER_BUS;
+ hub->max_current_sp = USB251XB_DEF_MAX_CURRENT_SELF;
+ hub->max_current_bp = USB251XB_DEF_MAX_CURRENT_BUS;
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_x = USB251XB_DEF_BOOST_X;
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index db68156568e6..b3b33cf7ddf6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -33,6 +33,12 @@ static const struct i2c_device_id isp1301_id[] = {
};
MODULE_DEVICE_TABLE(i2c, isp1301_id);
+static const struct of_device_id isp1301_of_match[] = {
+ {.compatible = "nxp,isp1301" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, isp1301_of_match);
+
static struct i2c_client *isp1301_i2c_client;
static int __isp1301_write(struct isp1301 *isp, u8 reg, u8 value, u8 clear)
@@ -130,6 +136,7 @@ static int isp1301_remove(struct i2c_client *client)
static struct i2c_driver isp1301_driver = {
.driver = {
.name = DRV_NAME,
+ .of_match_table = of_match_ptr(isp1301_of_match),
},
.probe = isp1301_probe,
.remove = isp1301_remove,
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index ab78111e0968..6537d3ca2797 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1500,7 +1500,7 @@ static int digi_read_oob_callback(struct urb *urb)
return -1;
/* handle each oob command */
- for (i = 0; i < urb->actual_length - 4; i += 4) {
+ for (i = 0; i < urb->actual_length - 3; i += 4) {
opcode = buf[i];
line = buf[i + 1];
status = buf[i + 2];
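
The digi_acceleport fix above adjusts the bound for a parser that consumes 4-byte records: "i < len - 4" wrongly drops the final record when len is an exact multiple of 4, while "i < len - 3" accepts a record exactly when bytes i..i+3 all exist. A check of the two bounds:

#include <stdio.h>

int main(void)
{
	int len = 8;	/* two complete 4-byte records */
	int i, old = 0, fixed = 0;

	for (i = 0; i < len - 4; i += 4)
		old++;
	for (i = 0; i < len - 3; i += 4)	/* i.e. i + 3 < len */
		fixed++;

	printf("old=%d fixed=%d\n", old, fixed);	/* old=1 fixed=2 */
	return 0;
}
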
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index ceaeebaa6f90..a76b95d32157 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -1674,6 +1674,12 @@ static void edge_interrupt_callback(struct urb *urb)
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
+
+ if (port_number >= edge_serial->serial->num_ports) {
+ dev_err(dev, "bad port number %d\n", port_number);
+ goto exit;
+ }
+
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
@@ -1755,7 +1761,7 @@ static void edge_bulk_in_callback(struct urb *urb)
port_number = edge_port->port->port_number;
- if (edge_port->lsr_event) {
+ if (urb->actual_length > 0 && edge_port->lsr_event) {
edge_port->lsr_event = 0;
dev_dbg(dev, "%s ===== Port %u LSR Status = %02x, Data = %02x ======\n",
__func__, port_number, edge_port->lsr_mask, *data);
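
The io_ti hunk above range-checks a port number parsed out of device-supplied interrupt data before using it as an array index. The same defensive pattern in a standalone sketch (sizes and names illustrative):

#include <stdio.h>

#define NUM_PORTS 2

static const char *const ports[NUM_PORTS] = { "port0", "port1" };

static const char *lookup_port(unsigned int port_number)
{
	if (port_number >= NUM_PORTS) {
		fprintf(stderr, "bad port number %u\n", port_number);
		return NULL;
	}
	return ports[port_number];
}

int main(void)
{
	printf("%s\n", lookup_port(1));		/* port1 */
	if (!lookup_port(7))			/* rejected */
		printf("out-of-range index refused\n");
	return 0;
}
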
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index a180b17d2432..dd706953b466 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -31,7 +31,6 @@
#define BT_IGNITIONPRO_ID 0x2000
/* function prototypes */
-static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port);
static void omninet_process_read_urb(struct urb *urb);
static void omninet_write_bulk_callback(struct urb *urb);
static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
@@ -60,7 +59,6 @@ static struct usb_serial_driver zyxel_omninet_device = {
.attach = omninet_attach,
.port_probe = omninet_port_probe,
.port_remove = omninet_port_remove,
- .open = omninet_open,
.write = omninet_write,
.write_room = omninet_write_room,
.write_bulk_callback = omninet_write_bulk_callback,
@@ -140,17 +138,6 @@ static int omninet_port_remove(struct usb_serial_port *port)
return 0;
}
-static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
-{
- struct usb_serial *serial = port->serial;
- struct usb_serial_port *wport;
-
- wport = serial->port[1];
- tty_port_tty_set(&wport->port, tty);
-
- return usb_serial_generic_open(tty, port);
-}
-
#define OMNINET_HEADERLEN 4
#define OMNINET_BULKOUTSIZE 64
#define OMNINET_PAYLOADSIZE (OMNINET_BULKOUTSIZE - OMNINET_HEADERLEN)
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 93c6c9b08daa..8a069aa154ed 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -200,6 +200,11 @@ static void safe_process_read_urb(struct urb *urb)
if (!safe)
goto out;
+ if (length < 2) {
+ dev_err(&port->dev, "malformed packet\n");
+ return;
+ }
+
fcs = fcs_compute10(data, length, CRC10_INITFCS);
if (fcs) {
dev_err(&port->dev, "%s - bad CRC %x\n", __func__, fcs);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 16cc18369111..9129f6cb8230 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2071,6 +2071,20 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/*
+ * Reported by Tobias Jakobi <tjakobi@math.uni-bielefeld.de>
+ * The INIC-3619 bridge is used in the StarTech SLSODDU33B
+ * SATA-USB enclosure for slimline optical drives.
+ *
+ * The quirk enables MakeMKV to properly exchange keys with
+ * an installed BD drive.
+ */
+UNUSUAL_DEV( 0x13fd, 0x3609, 0x0209, 0x0209,
+ "Initio Corporation",
+ "INIC-3619",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_RESIDUE ),
+
/* Reported by Qinglin Ye <yestyle@gmail.com> */
UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
"Kingston",
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index c77a0751a311..f3bf8f4e2d6c 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -36,6 +36,7 @@
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/refcount.h>
#include <xen/xen.h>
#include <xen/grant_table.h>
@@ -86,7 +87,7 @@ struct grant_map {
int index;
int count;
int flags;
- atomic_t users;
+ refcount_t users;
struct unmap_notify notify;
struct ioctl_gntdev_grant_ref *grants;
struct gnttab_map_grant_ref *map_ops;
@@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
add->index = 0;
add->count = count;
- atomic_set(&add->users, 1);
+ refcount_set(&add->users, 1);
return add;
@@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
if (!map)
return;
- if (!atomic_dec_and_test(&map->users))
+ if (!refcount_dec_and_test(&map->users))
return;
atomic_sub(map->count, &pages_mapped);
@@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma)
struct grant_map *map = vma->vm_private_data;
pr_debug("gntdev_vma_open %p\n", vma);
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
}
static void gntdev_vma_close(struct vm_area_struct *vma)
@@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
goto unlock_out;
}
- atomic_inc(&map->users);
+ refcount_inc(&map->users);
vma->vm_ops = &gntdev_vmops;
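
The gntdev hunks above convert the map's user count from atomic_t to refcount_t, which saturates instead of wrapping and WARNs on misuse, closing overflow-based use-after-free windows. A minimal sketch of the converted pattern (demo_* names are illustrative):

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_map {
	refcount_t users;
	/* ... payload ... */
};

static struct demo_map *demo_map_alloc(void)
{
	struct demo_map *m = kzalloc(sizeof(*m), GFP_KERNEL);

	if (m)
		refcount_set(&m->users, 1);	/* was atomic_set(..., 1) */
	return m;
}

static void demo_map_get(struct demo_map *m)
{
	refcount_inc(&m->users);		/* was atomic_inc() */
}

static void demo_map_put(struct demo_map *m)
{
	if (refcount_dec_and_test(&m->users))	/* was atomic_dec_and_test() */
		kfree(m);
}
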
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f8afc6dcc29f..e8cef1ad0fe3 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -681,3 +681,50 @@ xen_swiotlb_set_dma_mask(struct device *dev, u64 dma_mask)
return 0;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_set_dma_mask);
+
+/*
+ * Create a userspace mapping for the DMA-coherent memory.
+ * This function should be called with pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->mmap)
+ return __generic_dma_ops(dev)->mmap(dev, vma, cpu_addr,
+ dma_addr, size, attrs);
+#endif
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mmap);
+
+/*
+ * This function should be called with pages from the current domain only;
+ * passing pages mapped from other domains would lead to memory corruption.
+ */
+int
+xen_swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t handle, size_t size,
+ unsigned long attrs)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ if (__generic_dma_ops(dev)->get_sgtable) {
+#if 0
+ /*
+ * This check verifies that the page belongs to the current domain and
+ * is not one mapped from another domain.
+ * This check is for debugging only and should not be enabled in production builds.
+ */
+ unsigned long bfn = PHYS_PFN(dma_to_phys(dev, handle));
+ BUG_ON(!page_is_ram(bfn));
+#endif
+ return __generic_dma_ops(dev)->get_sgtable(dev, sgt, cpu_addr,
+ handle, size, attrs);
+ }
+#endif
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, handle, size);
+}
+EXPORT_SYMBOL_GPL(xen_swiotlb_get_sgtable);
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 4d343eed08f5..1f4733b80c87 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -55,7 +55,6 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
-#include <linux/init.h>
#include <xen/xenbus.h>
#include <xen/xen.h>