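/*
 * TTM move callback for nouveau BOs: bind GART backing when moving into
 * TT, turn trivial SYSTEM<->TT transitions into no-ops, ask TTM for a
 * multihop through TT for direct SYSTEM<->VRAM moves, and otherwise copy
 * with the hardware copy path, falling back to ttm_bo_move_memcpy() on
 * failure.
 */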
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
		struct ttm_operation_ctx *ctx,
		struct ttm_resource *new_reg,
		struct ttm_place *hop)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_resource *old_reg = bo->resource;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (new_reg->mem_type == TTM_PL_TT) {
		ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
		if (ret)
			return ret;
	}

	nouveau_bo_move_ntfy(bo, new_reg);
	ret = ttm_bo_wait_ctx(bo, ctx);
	if (ret)
		goto out_ntfy;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
		if (ret)
			goto out_ntfy;
	}

	/* Fake bo copy. */
	if (!old_reg || (old_reg->mem_type == TTM_PL_SYSTEM &&
			 !bo->ttm)) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_SYSTEM &&
	    new_reg->mem_type == TTM_PL_TT) {
		ttm_bo_move_null(bo, new_reg);
		goto out;
	}

	if (old_reg->mem_type == TTM_PL_TT &&
	    new_reg->mem_type == TTM_PL_SYSTEM) {
		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
		ttm_resource_free(bo, &bo->resource);
		ttm_bo_assign_mem(bo, new_reg);
		goto out;
	}

	/* Hardware assisted copy. */
	if (drm->ttm.move) {
		if ((old_reg->mem_type == TTM_PL_SYSTEM &&
		     new_reg->mem_type == TTM_PL_VRAM) ||
		    (old_reg->mem_type == TTM_PL_VRAM &&
		     new_reg->mem_type == TTM_PL_SYSTEM)) {
			hop->fpfn = 0;
			hop->lpfn = 0;
			hop->mem_type = TTM_PL_TT;
			hop->flags = 0;
			return -EMULTIHOP;
		}
		ret = nouveau_bo_move_m2mf(bo, evict, ctx,
					   new_reg);
	} else
		ret = -ENODEV;

	if (ret) {
		/* Fallback to software copy. */
		ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
	}

out:
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}
out_ntfy:
	if (ret) {
		nouveau_bo_move_ntfy(bo, bo->resource);
	}
	return ret;
}
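
/*
 * Undo a BAR mapping handle set up by nouveau_ttm_io_mem_reserve().
 * Caller holds drm->ttm.io_reserve_mutex.
 */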
static void
nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
			       struct ttm_resource *reg)
{
	struct nouveau_mem *mem = nouveau_mem(reg);

	if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
		switch (reg->mem_type) {
		case TTM_PL_TT:
			if (mem->kind)
				nvif_object_unmap_handle(&mem->mem.object);
			break;
		case TTM_PL_VRAM:
			nvif_object_unmap_handle(&mem->mem.object);
			break;
		default:
			break;
		}
	}
}
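
/*
 * Fill in reg->bus so TTM can map the resource for CPU access.  NV50+
 * memory classes may need a mapping handle from the nvif memory object;
 * if that fails with -ENOSPC, unmap another io-reserved BO and retry.
 */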

static int
nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	struct nouveau_mem *mem = nouveau_mem(reg);
	struct nvif_mmu *mmu = &drm->client.mmu;
	int ret;

	mutex_lock(&drm->ttm.io_reserve_mutex);
retry:
	switch (reg->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		ret = 0;
		goto out;
	case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
		if (drm->agp.bridge) {
			reg->bus.offset = (reg->start << PAGE_SHIFT) +
				drm->agp.base;
			reg->bus.is_iomem = !drm->agp.cma;
			reg->bus.caching = ttm_write_combined;
		}
#endif
		if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 ||
		    !mem->kind) {
			/* untiled */
			ret = 0;
			break;
		}
		fallthrough;	/* tiled memory */
	case TTM_PL_VRAM:
		reg->bus.offset = (reg->start << PAGE_SHIFT) +
			device->func->resource_addr(device, 1);
		reg->bus.is_iomem = true;

		/* Some BARs do not support being ioremapped WC */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
		    mmu->type[drm->ttm.type_vram].type & NVIF_MEM_UNCACHED)
			reg->bus.caching = ttm_uncached;
		else
			reg->bus.caching = ttm_write_combined;

		if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) {
			union {
				struct nv50_mem_map_v0 nv50;
				struct gf100_mem_map_v0 gf100;
			} args;
			u64 handle, length;
			u32 argc = 0;

			switch (mem->mem.object.oclass) {
			case NVIF_CLASS_MEM_NV50:
				args.nv50.version = 0;
				args.nv50.ro = 0;
				args.nv50.kind = mem->kind;
				args.nv50.comp = mem->comp;
				argc = sizeof(args.nv50);
				break;
			case NVIF_CLASS_MEM_GF100:
				args.gf100.version = 0;
				args.gf100.ro = 0;
				args.gf100.kind = mem->kind;
				argc = sizeof(args.gf100);
				break;
			default:
				WARN_ON(1);
				break;
			}

			ret = nvif_object_map_handle(&mem->mem.object,
						     &args, argc,
						     &handle, &length);
			if (ret != 1) {
				if (WARN_ON(ret == 0))
					ret = -EINVAL;
				goto out;
			}

			reg->bus.offset = handle;
		}
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

out:
	if (ret == -ENOSPC) {
		struct nouveau_bo *nvbo;

		nvbo = list_first_entry_or_null(&drm->ttm.io_reserve_lru,
						typeof(*nvbo),
						io_reserve_lru);
		if (nvbo) {
			list_del_init(&nvbo->io_reserve_lru);
			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
					   bdev->dev_mapping);
			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
			goto retry;
		}

	}
	mutex_unlock(&drm->ttm.io_reserve_mutex);
	return ret;
}

static void
nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	mutex_lock(&drm->ttm.io_reserve_mutex);
	nouveau_ttm_io_mem_free_locked(drm, reg);
	mutex_unlock(&drm->ttm.io_reserve_mutex);
}
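
/*
 * Fault notify: make the BO CPU-accessible before the fault is serviced.
 * Tiled (kind != 0) buffers in system memory on Tesla and newer are moved
 * into GART; on pre-Tesla hardware, VRAM buffers are pushed into the CPU
 * mappable window of VRAM.
 */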
vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_device *device = nvxx_device(&drm->client.device);
	u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
	int i, ret;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->resource->mem_type != TTM_PL_VRAM) {
		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
		    !nvbo->kind)
			return 0;

		if (bo->resource->mem_type != TTM_PL_SYSTEM)
			return 0;

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);

	} else {
		/* make sure bo is in mappable vram */
		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
		    bo->resource->start + PFN_UP(bo->resource->size) < mappable)
			return 0;

		for (i = 0; i < nvbo->placement.num_placement; ++i) {
			nvbo->placements[i].fpfn = 0;
			nvbo->placements[i].lpfn = mappable;
		}

		for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
			nvbo->busy_placements[i].fpfn = 0;
			nvbo->busy_placements[i].lpfn = mappable;
		}

		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
	}

	ret = nouveau_bo_validate(nvbo, false, false);
	if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
		return VM_FAULT_NOPAGE;
	else if (unlikely(ret))
		return VM_FAULT_SIGBUS;

	ttm_bo_move_to_lru_tail_unlocked(bo);
	return 0;
}
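
/*
 * Allocate backing pages for a ttm_tt.  External (dma-buf imported)
 * objects only need their sg table turned into a dma address array;
 * everything else comes from the TTM page pool.
 */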
static int
nouveau_ttm_tt_populate(struct ttm_device *bdev,
			struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	struct ttm_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (ttm_tt_is_populated(ttm))
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address,
					       ttm->num_pages);
		return 0;
	}

	drm = nouveau_bdev(bdev);

	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
			  struct ttm_tt *ttm)
{
	struct nouveau_drm *drm;
	bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);

	if (slave)
		return;
	nouveau_ttm_tt_unbind(bdev, ttm);

	drm = nouveau_bdev(bdev);
	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
}

static void
nouveau_ttm_tt_destroy(struct ttm_device *bdev,
		       struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	if (drm->agp.bridge) {
		ttm_agp_destroy(ttm);
		return;
	}
#endif
	nouveau_sgdma_destroy(bdev, ttm);
}
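
/*
 * Attach a fence to the BO's reservation object, as a write fence when
 * exclusive access is required, otherwise as a read fence.
 */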

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool exclusive)
{
	struct dma_resv *resv = nvbo->bo.base.resv;

	if (!fence)
		return;

	dma_resv_add_fence(resv, &fence->base, exclusive ?
			   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
}

static void
nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
	nouveau_bo_move_ntfy(bo, NULL);
}
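
/* TTM device callbacks for nouveau buffer objects. */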
struct ttm_device_funcs nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.ttm_tt_destroy = &nouveau_ttm_tt_destroy,
	.eviction_valuable = ttm_bo_eviction_valuable,
	.evict_flags = nouveau_bo_evict_flags,
	.delete_mem_notify = nouveau_bo_delete_mem_notify,
	.move = nouveau_bo_move,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};