linamh/sys-kernel/geos_one-sources/files/drm-nouveau.patch

diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 649757f..014fa6f 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -113,3 +113,9 @@ config DRM_SAVAGE
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
+
+config DRM_NOUVEAU
+ tristate "Nouveau (nvidia) cards"
+ depends on DRM
+ help
+ Choose this option if you want open-source support for nVidia graphics cards.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 48567a9..e68042b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
+obj-$(CONFIG_DRM_NOUVEAU) += nouveau/
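
With the Kconfig entry and Makefile hook above in place, nouveau builds like any other in-tree DRM driver. A minimal configuration fragment (assuming DRM itself is already enabled) is:

    CONFIG_DRM=y
    CONFIG_DRM_NOUVEAU=m

Built as =m, the objects listed in the per-driver Makefile added further down are linked into a single nouveau.ko module.
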
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 112ba7a..6205d56 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -49,8 +49,8 @@ unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource
EXPORT_SYMBOL(drm_get_resource_len);
-static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
- drm_local_map_t *map)
+struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ drm_local_map_t *map)
{
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
@@ -63,6 +63,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
return NULL;
}
+EXPORT_SYMBOL(drm_find_matching_map);
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle)
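
The only change to core DRM is un-static-ing and exporting drm_find_matching_map(), so the nouveau module can look up a mapping the core has already registered instead of adding a duplicate. A hedged sketch of the call pattern (the key fields and the consumer are illustrative, not taken from this patch):

    /* Sketch: find an existing map covering the register BAR, if any. */
    drm_local_map_t key = {
            .offset = drm_get_resource_start(dev, 0),
            .size   = drm_get_resource_len(dev, 0),
            .type   = _DRM_REGISTERS,
    };
    struct drm_map_list *entry = drm_find_matching_map(dev, &key);

    if (entry)
            use_existing_map(entry->map);   /* hypothetical consumer */
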
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
new file mode 100644
index 0000000..f01f82a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm
+nouveau-y := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
+ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_bo.o nouveau_fence.o \
+ nv04_timer.o \
+ nv04_mc.o nv40_mc.o nv50_mc.o \
+ nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_graph.o nv10_graph.o nv20_graph.o \
+ nv40_graph.o nv50_graph.o \
+ nv04_instmem.o nv50_instmem.o
+
+nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
+
+obj-$(CONFIG_DRM_NOUVEAU)+= nouveau.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
new file mode 100644
index 0000000..ab3b23a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+
+#include "drmP.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static struct drm_ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ return drm_agp_init_ttm(dev);
+ case NOUVEAU_GART_SGDMA:
+ return nouveau_sgdma_init_ttm(dev);
+ default:
+ DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
+ break;
+ }
+
+ return NULL;
+}
+
+static int
+nouveau_bo_fence_type(struct drm_buffer_object *bo,
+ uint32_t *fclass, uint32_t *type)
+{
+ /* When we get called, *fclass is set to the requested fence class */
+
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ *type = 3;
+ else
+ *type = 1;
+ return 0;
+
+}
+
+static int
+nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
+{
+ /* We'll do this from user space. */
+ return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_VRAM:
+ man->flags = _DRM_FLAG_MEMTYPE_FIXED |
+ _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_NEEDS_IOREMAP;
+ man->io_addr = NULL;
+ man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+ man->io_offset = drm_get_resource_start(dev, 1);
+ man->io_size = drm_get_resource_len(dev, 1);
+ if (man->io_size > nouveau_mem_fb_amount(dev))
+ man->io_size = nouveau_mem_fb_amount(dev);
+ break;
+ case DRM_BO_MEM_PRIV0:
+ /* Unmappable VRAM */
+ man->flags = _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_TT:
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT |
+ _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+ case NOUVEAU_GART_SGDMA:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT |
+ _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = _DRM_SCATTER_GATHER;
+ break;
+ default:
+ DRM_ERROR("Unknown GART type: %d\n",
+ dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ man->io_offset = dev_priv->gart_info.aper_base;
+ man->io_size = dev_priv->gart_info.aper_size;
+ man->io_addr = NULL;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static uint64_t
+nouveau_bo_evict_flags(struct drm_buffer_object *bo)
+{
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return DRM_BO_FLAG_MEM_LOCAL;
+ default:
+ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+ }
+ return 0;
+}
+
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
+ */
+static int
+nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ uint32_t srch, dsth, page_count;
+
+ /* Can happen during init/takedown */
+ if (!dchan->chan)
+ return -EINVAL;
+
+ srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+ dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+ if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
+ dchan->m2mf_dma_source = srch;
+ dchan->m2mf_dma_destin = dsth;
+
+ BEGIN_RING(NvSubM2MF,
+ NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
+ OUT_RING (dchan->m2mf_dma_source);
+ OUT_RING (dchan->m2mf_dma_destin);
+ }
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RING (old_mem->mm_node->start << PAGE_SHIFT);
+ OUT_RING (new_mem->mm_node->start << PAGE_SHIFT);
+ OUT_RING (PAGE_SIZE); /* src_pitch */
+ OUT_RING (PAGE_SIZE); /* dst_pitch */
+ OUT_RING (PAGE_SIZE); /* line_length */
+ OUT_RING (line_count);
+ OUT_RING ((1<<8)|(1<<0));
+ OUT_RING (0);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING (0);
+
+ page_count -= line_count;
+ }
+
+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
+ DRM_FENCE_TYPE_EXE, 0, new_mem);
+}
+
+/* Flip pages into the GART and move if we can. */
+static int
+nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_mem_reg tmp_mem;
+ int ret;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_FORCE_CACHING);
+
+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+ if (ret)
+ return ret;
+
+ ret = drm_ttm_bind(bo->ttm, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ mutex_lock(&dev->struct_mutex);
+ if (tmp_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(tmp_mem.mm_node);
+ tmp_mem.mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ return ret;
+}
+
+static int
+nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ else
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ else {
+ if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+
+ return 0;
+}
+
+static void
+nouveau_bo_flush_ttm(struct drm_ttm *ttm)
+{
+}
+
+static uint32_t nouveau_mem_prios[] = {
+ DRM_BO_MEM_PRIV0,
+ DRM_BO_MEM_VRAM,
+ DRM_BO_MEM_TT,
+ DRM_BO_MEM_LOCAL
+};
+static uint32_t nouveau_busy_prios[] = {
+ DRM_BO_MEM_TT,
+ DRM_BO_MEM_PRIV0,
+ DRM_BO_MEM_VRAM,
+ DRM_BO_MEM_LOCAL
+};
+
+struct drm_bo_driver nouveau_bo_driver = {
+ .mem_type_prio = nouveau_mem_prios,
+ .mem_busy_prio = nouveau_busy_prios,
+ .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
+ .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
+ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .fence_type = nouveau_bo_fence_type,
+ .invalidate_caches = nouveau_bo_invalidate_caches,
+ .init_mem_type = nouveau_bo_init_mem_type,
+ .evict_flags = nouveau_bo_evict_flags,
+ .move = nouveau_bo_move,
+ .ttm_cache_flush= nouveau_bo_flush_ttm,
+ .command_stream_barrier = NULL
+};
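
The two priority arrays steer buffer placement: a fresh allocation tries unmappable VRAM (PRIV0) first, then mappable VRAM, then the GART, then system memory, while a busy buffer prefers the GART so VRAM can be vacated. Conceptually the memory manager walks the chosen list until a pool has room; a hedged sketch of that consumption (space_available() is hypothetical, not part of this patch):

    /* Sketch only: pick the first memory type from the priority list
     * that can still hold the buffer. */
    static uint32_t pick_mem_type(struct drm_device *dev, int busy)
    {
            const uint32_t *prio = busy ? nouveau_busy_prios : nouveau_mem_prios;
            unsigned n = busy ? ARRAY_SIZE(nouveau_busy_prios)
                              : ARRAY_SIZE(nouveau_mem_prios);
            unsigned i;

            for (i = 0; i < n; i++)
                    if (space_available(dev, prio[i]))      /* hypothetical */
                            return prio[i];
            return DRM_BO_MEM_LOCAL;                        /* always an option */
    }
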
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
new file mode 100644
index 0000000..e519dc4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+int
+nouveau_dma_channel_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct mem_block *pushbuf;
+ int grclass, ret, i;
+
+ DRM_DEBUG("\n");
+
+ pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
+ NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
+ (struct drm_file *)-2);
+ if (!pushbuf) {
+ DRM_ERROR("Failed to allocate DMA push buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate channel */
+ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
+ pushbuf, NvDmaFB, NvDmaTT);
+ if (ret) {
+ DRM_ERROR("Error allocating GPU channel: %d\n", ret);
+ return ret;
+ }
+ DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);
+
+ /* Map push buffer */
+ drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
+ if (!dchan->chan->pushbuf_mem->map->handle) {
+ DRM_ERROR("Failed to ioremap push buffer\n");
+ return -EINVAL;
+ }
+ dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle;
+
+ /* Initialise DMA vars */
+ dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2;
+ dchan->put = dchan->chan->pushbuf_base >> 2;
+ dchan->cur = dchan->put;
+ dchan->free = dchan->max - dchan->cur;
+
+ /* Insert NOPS for NOUVEAU_DMA_SKIPS */
+ dchan->free -= NOUVEAU_DMA_SKIPS;
+ dchan->push_free = NOUVEAU_DMA_SKIPS;
+ for (i=0; i < NOUVEAU_DMA_SKIPS; i++)
+ OUT_RING(0);
+
+ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
+ if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
+ &dchan->notify0_offset))) {
+ DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
+ return ret;
+ }
+
+ /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
+ else grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
+ if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
+ DRM_ERROR("Error creating NvM2MF: %d\n", ret);
+ return ret;
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
+ gpuobj, NULL))) {
+ DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
+ return ret;
+ }
+ dchan->m2mf_dma_source = NvDmaFB;
+ dchan->m2mf_dma_destin = NvDmaFB;
+
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+ OUT_RING (NvM2MF);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
+ OUT_RING (NvNotify0);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
+ OUT_RING (dchan->m2mf_dma_source);
+ OUT_RING (dchan->m2mf_dma_destin);
+ FIRE_RING();
+
+ return 0;
+}
+
+void
+nouveau_dma_channel_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+
+ DRM_DEBUG("\n");
+
+ if (dchan->chan) {
+ nouveau_fifo_free(dchan->chan);
+ dchan->chan = NULL;
+ }
+}
+
+#define READ_GET() ((NV_READ(dchan->chan->get) - \
+ dchan->chan->pushbuf_base) >> 2)
+#define WRITE_PUT(val) do { \
+ NV_WRITE(dchan->chan->put, \
+ ((val) << 2) + dchan->chan->pushbuf_base); \
+} while(0)
+
+int
+nouveau_dma_wait(struct drm_device *dev, int size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ uint32_t get;
+
+ while (dchan->free < size) {
+ get = READ_GET();
+
+ if (dchan->put >= get) {
+ dchan->free = dchan->max - dchan->cur;
+
+ if (dchan->free < size) {
+ dchan->push_free = 1;
+ OUT_RING(0x20000000|dchan->chan->pushbuf_base);
+ if (get <= NOUVEAU_DMA_SKIPS) {
+ /*corner case - will be idle*/
+ if (dchan->put <= NOUVEAU_DMA_SKIPS)
+ WRITE_PUT(NOUVEAU_DMA_SKIPS + 1);
+
+ do {
+ get = READ_GET();
+ } while (get <= NOUVEAU_DMA_SKIPS);
+ }
+
+ WRITE_PUT(NOUVEAU_DMA_SKIPS);
+ dchan->cur = dchan->put = NOUVEAU_DMA_SKIPS;
+ dchan->free = get - (NOUVEAU_DMA_SKIPS + 1);
+ }
+ } else {
+ dchan->free = get - dchan->cur - 1;
+ }
+ }
+
+ return 0;
+}
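
nouveau_dma_wait() is standard ring-buffer accounting for the push buffer: while the writer is at or ahead of the GPU's GET pointer it may fill up to the end of the buffer, and if that still is not enough the driver emits a jump (0x20000000 | pushbuf_base) back to just past the NOUVEAU_DMA_SKIPS NOPs at the start; once GET is ahead, the writer may advance to one word short of it. A standalone sketch of the arithmetic (word units, names illustrative):

    /* Contiguous free space in a circular push buffer of 'max' words,
     * with the GPU reading at 'get' and the CPU writing at 'cur'. */
    static int ring_contig_free(int get, int cur, int max)
    {
            if (cur >= get)
                    return max - cur;       /* run to the end, wrap later */
            return get - cur - 1;           /* stop one word short of GET */
    }
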
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
new file mode 100644
index 0000000..ce3c58c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+typedef enum {
+ NvSubM2MF = 0,
+} nouveau_subchannel_id_t;
+
+typedef enum {
+ NvM2MF = 0x80039001,
+ NvDmaFB = 0x8003d001,
+ NvDmaTT = 0x8003d002,
+ NvNotify0 = 0x8003d003
+} nouveau_object_handle_t;
+
+#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
+
+#define BEGIN_RING(subc, mthd, cnt) do { \
+ int push_size = (cnt) + 1; \
+ if (dchan->push_free) { \
+ DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \
+ break; \
+ } \
+ if (dchan->free < push_size) { \
+ if (nouveau_dma_wait(dev, push_size)) { \
+ DRM_ERROR("FIFO timeout\n"); \
+ break; \
+ } \
+ } \
+ dchan->free -= push_size; \
+ dchan->push_free = push_size; \
+ OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \
+} while(0)
+
+#define OUT_RING(data) do { \
+ if (dchan->push_free == 0) { \
+ DRM_ERROR("no space left in packet\n"); \
+ break; \
+ } \
+ dchan->pushbuf[dchan->cur++] = (data); \
+ dchan->push_free--; \
+} while(0)
+
+#define FIRE_RING() do { \
+ if (dchan->push_free) { \
+ DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \
+ break; \
+ } \
+ if (dchan->cur != dchan->put) { \
+ DRM_MEMORYBARRIER(); \
+ dchan->put = dchan->cur; \
+ NV_WRITE(dchan->chan->put, dchan->put << 2); \
+ } \
+} while(0)
+
+#endif
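
These three macros are the whole push-buffer protocol used by the in-kernel channel: BEGIN_RING reserves cnt+1 words and writes the method header ((cnt << 18) | (subc << 15) | mthd), OUT_RING appends the data words, and FIRE_RING publishes the new PUT offset to the hardware. A minimal use, assuming the same dev/dchan locals that nouveau_dma.c sets up:

    /* Sketch: rebind the M2MF notifier, then kick the ring.  Mirrors the
     * initialisation sequence in nouveau_dma_channel_init(). */
    BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
    OUT_RING  (NvNotify0);
    FIRE_RING ();
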
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
new file mode 100644
index 0000000..4a4277f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ },
+ {
+ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+ .class = PCI_BASE_CLASS_DISPLAY << 16,
+ .class_mask = 0xff << 16,
+ }
+};
+
+extern struct drm_ioctl_desc nouveau_ioctls[];
+extern int nouveau_max_ioctl;
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+ .load = nouveau_load,
+ .firstopen = nouveau_firstopen,
+ .lastclose = nouveau_lastclose,
+ .unload = nouveau_unload,
+ .preclose = nouveau_preclose,
+ .irq_preinstall = nouveau_irq_preinstall,
+ .irq_postinstall = nouveau_irq_postinstall,
+ .irq_uninstall = nouveau_irq_uninstall,
+ .irq_handler = nouveau_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = nouveau_ioctls,
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ },
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ },
+
+ .bo_driver = &nouveau_bo_driver,
+ .fence_driver = &nouveau_fence_driver,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+#ifdef GIT_REVISION
+ .date = GIT_REVISION,
+#else
+ .date = DRIVER_DATE,
+#endif
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int __init nouveau_init(void)
+{
+ driver.num_ioctls = nouveau_max_ioctl;
+ return drm_init(&driver);
+}
+
+static void __exit nouveau_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(nouveau_init);
+module_exit(nouveau_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
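
Rather than enumerating device IDs, the PCI table claims every display-class device from NVIDIA (and the old SGS/NVidia joint vendor ID); class_mask restricts the comparison to the base class byte. The effective predicate is sketched below (illustrative, not part of the patch):

    /* Sketch: what pciidlist above ends up matching. */
    static int nouveau_matches(struct pci_dev *pdev)
    {
            if ((pdev->class >> 16) != PCI_BASE_CLASS_DISPLAY)
                    return 0;
            return pdev->vendor == PCI_VENDOR_ID_NVIDIA ||
                   pdev->vendor == PCI_VENDOR_ID_NVIDIA_SGS;
    }
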
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
new file mode 100644
index 0000000..a97e3e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -0,0 +1,621 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRV_H__
+#define __NOUVEAU_DRV_H__
+
+#define DRIVER_AUTHOR "Stephane Marchesin"
+#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net"
+
+#define DRIVER_NAME "nouveau"
+#define DRIVER_DESC "nVidia Riva/TNT/GeForce"
+#define DRIVER_DATE "20060213"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 11
+
+#define NOUVEAU_FAMILY 0x0000FFFF
+#define NOUVEAU_FLAGS 0xFFFF0000
+
+#include "nouveau_drm.h"
+#include "nouveau_reg.h"
+
+struct mem_block {
+ struct mem_block *next;
+ struct mem_block *prev;
+ uint64_t start;
+ uint64_t size;
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
+ int flags;
+ drm_local_map_t *map;
+ drm_handle_t map_handle;
+};
+
+enum nouveau_flags {
+ NV_NFORCE =0x10000000,
+ NV_NFORCE2 =0x20000000
+};
+
+#define NVOBJ_ENGINE_SW 0
+#define NVOBJ_ENGINE_GR 1
+#define NVOBJ_ENGINE_INT 0xdeadbeef
+
+#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
+#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
+#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_FAKE (1 << 3)
+struct nouveau_gpuobj {
+ struct list_head list;
+
+ int im_channel;
+ struct mem_block *im_pramin;
+ struct mem_block *im_backing;
+ int im_bound;
+
+ uint32_t flags;
+ int refcount;
+
+ uint32_t engine;
+ uint32_t class;
+
+ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
+ void *priv;
+};
+
+struct nouveau_gpuobj_ref {
+ struct list_head list;
+
+ struct nouveau_gpuobj *gpuobj;
+ uint32_t instance;
+
+ int channel;
+ int handle;
+};
+
+struct nouveau_channel
+{
+ struct drm_device *dev;
+ int id;
+
+ /* owner of this fifo */
+ struct drm_file *file_priv;
+ /* mapping of the fifo itself */
+ drm_local_map_t *map;
+ /* mapping of the regs controlling the fifo */
+ drm_local_map_t *regs;
+
+ /* Fencing */
+ uint32_t next_sequence;
+
+ /* DMA push buffer */
+ struct nouveau_gpuobj_ref *pushbuf;
+ struct mem_block *pushbuf_mem;
+ uint32_t pushbuf_base;
+
+ /* FIFO user control regs */
+ uint32_t user, user_size;
+ uint32_t put;
+ uint32_t get;
+ uint32_t ref_cnt;
+
+ /* Notifier memory */
+ struct mem_block *notifier_block;
+ struct mem_block *notifier_heap;
+ drm_local_map_t *notifier_map;
+
+ /* PFIFO context */
+ struct nouveau_gpuobj_ref *ramfc;
+
+ /* PGRAPH context */
+ /* XXX maybe merge these two pointers as private data? */
+ struct nouveau_gpuobj_ref *ramin_grctx;
+ void *pgraph_ctx;
+
+ /* NV50 VM */
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj_ref *vm_gart_pt;
+ struct nouveau_gpuobj_ref *vm_vram_pt;
+
+ /* Objects */
+ struct nouveau_gpuobj_ref *ramin; /* Private instmem */
+ struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_gpuobj_ref *ramht; /* Hash table */
+ struct list_head ramht_refs; /* Objects referenced by RAMHT */
+};
+
+struct nouveau_drm_channel {
+ struct nouveau_channel *chan;
+
+ /* DMA state */
+ int max, put, cur, free;
+ int push_free;
+ volatile uint32_t *pushbuf;
+
+ /* Notifiers */
+ uint32_t notify0_offset;
+
+ /* Buffer moves */
+ uint32_t m2mf_dma_source;
+ uint32_t m2mf_dma_destin;
+};
+
+struct nouveau_config {
+ struct {
+ int location;
+ int size;
+ } cmdbuf;
+};
+
+struct nouveau_instmem_engine {
+ void *priv;
+
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+
+ int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+ void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+};
+
+struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+};
+
+struct nouveau_fb_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_fifo_engine {
+ void *priv;
+
+ int channels;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*channel_id)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*save_context)(struct nouveau_channel *);
+};
+
+struct nouveau_pgraph_engine {
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*save_context)(struct nouveau_channel *);
+};
+
+struct nouveau_engine {
+ struct nouveau_instmem_engine instmem;
+ struct nouveau_mc_engine mc;
+ struct nouveau_timer_engine timer;
+ struct nouveau_fb_engine fb;
+ struct nouveau_pgraph_engine graph;
+ struct nouveau_fifo_engine fifo;
+};
+
+#define NOUVEAU_MAX_CHANNEL_NR 128
+struct drm_nouveau_private {
+ enum {
+ NOUVEAU_CARD_INIT_DOWN,
+ NOUVEAU_CARD_INIT_DONE,
+ NOUVEAU_CARD_INIT_FAILED
+ } init_state;
+
+ int ttm;
+
+ /* the card type, takes NV_* as values */
+ int card_type;
+ /* exact chipset, derived from NV_PMC_BOOT_0 */
+ int chipset;
+ int flags;
+
+ drm_local_map_t *mmio;
+ drm_local_map_t *fb;
+ drm_local_map_t *ramin; /* NV40 onwards */
+
+ int fifo_alloc_count;
+ struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
+
+ struct nouveau_engine Engine;
+ struct nouveau_drm_channel channel;
+
+ /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
+ struct nouveau_gpuobj *ramht;
+ uint32_t ramin_rsvd_vram;
+ uint32_t ramht_offset;
+ uint32_t ramht_size;
+ uint32_t ramht_bits;
+ uint32_t ramfc_offset;
+ uint32_t ramfc_size;
+ uint32_t ramro_offset;
+ uint32_t ramro_size;
+
+ /* base physical addresses */
+ uint64_t fb_phys;
+ uint64_t fb_available_size;
+
+ struct {
+ enum {
+ NOUVEAU_GART_NONE = 0,
+ NOUVEAU_GART_AGP,
+ NOUVEAU_GART_SGDMA
+ } type;
+ uint64_t aper_base;
+ uint64_t aper_size;
+
+ struct nouveau_gpuobj *sg_ctxdma;
+ struct page *sg_dummy_page;
+ dma_addr_t sg_dummy_bus;
+
+ /* nottm hack */
+ struct drm_ttm_backend *sg_be;
+ unsigned long sg_handle;
+ } gart_info;
+
+ /* G8x global VRAM page table */
+ struct nouveau_gpuobj *vm_vram_pt;
+
+ /* the mtrr covering the FB */
+ int fb_mtrr;
+
+ struct mem_block *agp_heap;
+ struct mem_block *fb_heap;
+ struct mem_block *fb_nomap_heap;
+ struct mem_block *ramin_heap;
+ struct mem_block *pci_heap;
+
+ /* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
+ uint32_t ctx_table_size;
+ struct nouveau_gpuobj_ref *ctx_table;
+
+ struct nouveau_config config;
+
+ struct list_head gpuobj_list;
+
+ struct nouveau_suspend_resume {
+ uint32_t fifo_mode;
+ uint32_t graph_ctx_control;
+ uint32_t graph_state;
+ uint32_t *ramin_copy;
+ uint64_t ramin_size;
+ } susres;
+};
+
+#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
+ DRM_ERROR("called without init\n"); \
+ return -EINVAL; \
+ } \
+} while(0)
+
+#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (!nouveau_fifo_owner(dev, (cl), (id))) { \
+ DRM_ERROR("pid %d doesn't own channel %d\n", \
+ DRM_CURRENTPID, (id)); \
+ return -EPERM; \
+ } \
+ (ch) = nv->fifos[(id)]; \
+} while(0)
+
+/* nouveau_state.c */
+extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern int nouveau_load(struct drm_device *, unsigned long flags);
+extern int nouveau_firstopen(struct drm_device *);
+extern void nouveau_lastclose(struct drm_device *);
+extern int nouveau_unload(struct drm_device *);
+extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern void nouveau_wait_for_idle(struct drm_device *);
+extern int nouveau_card_init(struct drm_device *);
+extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_resume(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_mem.c */
+extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
+ uint64_t size);
+extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
+ uint64_t size, int align2,
+ struct drm_file *, int tail);
+extern void nouveau_mem_takedown(struct mem_block **heap);
+extern void nouveau_mem_free_block(struct mem_block *);
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
+extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_mem_free(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_mem_tile(struct drm_device *, void *data,
+ struct drm_file *);
+extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
+ int alignment, uint64_t size,
+ int flags, struct drm_file *);
+extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
+extern int nouveau_mem_init(struct drm_device *);
+extern int nouveau_mem_init_ttm(struct drm_device *);
+extern void nouveau_mem_close(struct drm_device *);
+
+/* nouveau_notifier.c */
+extern int nouveau_notifier_init_channel(struct nouveau_channel *);
+extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
+extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
+ int cout, uint32_t *offset);
+extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_fifo.c */
+extern int nouveau_fifo_init(struct drm_device *);
+extern int nouveau_fifo_ctx_size(struct drm_device *);
+extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);
+extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *,
+ int channel);
+extern int nouveau_fifo_alloc(struct drm_device *dev,
+ struct nouveau_channel **chan,
+ struct drm_file *file_priv,
+ struct mem_block *pushbuf,
+ uint32_t fb_ctxdma, uint32_t tt_ctxdma);
+extern void nouveau_fifo_free(struct nouveau_channel *);
+extern int nouveau_channel_idle(struct nouveau_channel *chan);
+
+/* nouveau_object.c */
+extern int nouveau_gpuobj_early_init(struct drm_device *);
+extern int nouveau_gpuobj_init(struct drm_device *);
+extern void nouveau_gpuobj_takedown(struct drm_device *);
+extern void nouveau_gpuobj_late_takedown(struct drm_device *);
+extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
+ uint32_t vram_h, uint32_t tt_h);
+extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
+extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
+ uint32_t handle, struct nouveau_gpuobj *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_del(struct drm_device *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret);
+extern int nouveau_gpuobj_new_ref(struct drm_device *,
+ struct nouveau_channel *alloc_chan,
+ struct nouveau_channel *ref_chan,
+ uint32_t handle, int size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *,
+ uint32_t p_offset, uint32_t b_offset,
+ uint32_t size, uint32_t flags,
+ struct nouveau_gpuobj **,
+ struct nouveau_gpuobj_ref**);
+extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
+ uint64_t offset, uint64_t size,
+ int access, struct nouveau_gpuobj **,
+ uint32_t *o_ret);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
+extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
+ struct drm_file *);
+
+/* nouveau_irq.c */
+extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
+extern void nouveau_irq_preinstall(struct drm_device *);
+extern int nouveau_irq_postinstall(struct drm_device *);
+extern void nouveau_irq_uninstall(struct drm_device *);
+
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
+ uint32_t *page);
+extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
+extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
+
+/* nouveau_dma.c */
+extern int nouveau_dma_channel_init(struct drm_device *);
+extern void nouveau_dma_channel_takedown(struct drm_device *);
+extern int nouveau_dma_wait(struct drm_device *, int size);
+
+/* nv04_fb.c */
+extern int nv04_fb_init(struct drm_device *);
+extern void nv04_fb_takedown(struct drm_device *);
+
+/* nv10_fb.c */
+extern int nv10_fb_init(struct drm_device *);
+extern void nv10_fb_takedown(struct drm_device *);
+
+/* nv40_fb.c */
+extern int nv40_fb_init(struct drm_device *);
+extern void nv40_fb_takedown(struct drm_device *);
+
+/* nv04_fifo.c */
+extern int nv04_fifo_channel_id(struct drm_device *);
+extern int nv04_fifo_create_context(struct nouveau_channel *);
+extern void nv04_fifo_destroy_context(struct nouveau_channel *);
+extern int nv04_fifo_load_context(struct nouveau_channel *);
+extern int nv04_fifo_save_context(struct nouveau_channel *);
+
+/* nv10_fifo.c */
+extern int nv10_fifo_channel_id(struct drm_device *);
+extern int nv10_fifo_create_context(struct nouveau_channel *);
+extern void nv10_fifo_destroy_context(struct nouveau_channel *);
+extern int nv10_fifo_load_context(struct nouveau_channel *);
+extern int nv10_fifo_save_context(struct nouveau_channel *);
+
+/* nv40_fifo.c */
+extern int nv40_fifo_init(struct drm_device *);
+extern int nv40_fifo_create_context(struct nouveau_channel *);
+extern void nv40_fifo_destroy_context(struct nouveau_channel *);
+extern int nv40_fifo_load_context(struct nouveau_channel *);
+extern int nv40_fifo_save_context(struct nouveau_channel *);
+
+/* nv50_fifo.c */
+extern int nv50_fifo_init(struct drm_device *);
+extern void nv50_fifo_takedown(struct drm_device *);
+extern int nv50_fifo_channel_id(struct drm_device *);
+extern int nv50_fifo_create_context(struct nouveau_channel *);
+extern void nv50_fifo_destroy_context(struct nouveau_channel *);
+extern int nv50_fifo_load_context(struct nouveau_channel *);
+extern int nv50_fifo_save_context(struct nouveau_channel *);
+
+/* nv04_graph.c */
+extern void nouveau_nv04_context_switch(struct drm_device *);
+extern int nv04_graph_init(struct drm_device *);
+extern void nv04_graph_takedown(struct drm_device *);
+extern int nv04_graph_create_context(struct nouveau_channel *);
+extern void nv04_graph_destroy_context(struct nouveau_channel *);
+extern int nv04_graph_load_context(struct nouveau_channel *);
+extern int nv04_graph_save_context(struct nouveau_channel *);
+
+/* nv10_graph.c */
+extern void nouveau_nv10_context_switch(struct drm_device *);
+extern int nv10_graph_init(struct drm_device *);
+extern void nv10_graph_takedown(struct drm_device *);
+extern int nv10_graph_create_context(struct nouveau_channel *);
+extern void nv10_graph_destroy_context(struct nouveau_channel *);
+extern int nv10_graph_load_context(struct nouveau_channel *);
+extern int nv10_graph_save_context(struct nouveau_channel *);
+
+/* nv20_graph.c */
+extern int nv20_graph_create_context(struct nouveau_channel *);
+extern void nv20_graph_destroy_context(struct nouveau_channel *);
+extern int nv20_graph_load_context(struct nouveau_channel *);
+extern int nv20_graph_save_context(struct nouveau_channel *);
+extern int nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
+extern int nv30_graph_init(struct drm_device *);
+
+/* nv40_graph.c */
+extern int nv40_graph_init(struct drm_device *);
+extern void nv40_graph_takedown(struct drm_device *);
+extern int nv40_graph_create_context(struct nouveau_channel *);
+extern void nv40_graph_destroy_context(struct nouveau_channel *);
+extern int nv40_graph_load_context(struct nouveau_channel *);
+extern int nv40_graph_save_context(struct nouveau_channel *);
+
+/* nv50_graph.c */
+extern int nv50_graph_init(struct drm_device *);
+extern void nv50_graph_takedown(struct drm_device *);
+extern int nv50_graph_create_context(struct nouveau_channel *);
+extern void nv50_graph_destroy_context(struct nouveau_channel *);
+extern int nv50_graph_load_context(struct nouveau_channel *);
+extern int nv50_graph_save_context(struct nouveau_channel *);
+
+/* nv04_instmem.c */
+extern int nv04_instmem_init(struct drm_device *);
+extern void nv04_instmem_takedown(struct drm_device *);
+extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+
+/* nv50_instmem.c */
+extern int nv50_instmem_init(struct drm_device *);
+extern void nv50_instmem_takedown(struct drm_device *);
+extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+
+/* nv04_mc.c */
+extern int nv04_mc_init(struct drm_device *);
+extern void nv04_mc_takedown(struct drm_device *);
+
+/* nv40_mc.c */
+extern int nv40_mc_init(struct drm_device *);
+extern void nv40_mc_takedown(struct drm_device *);
+
+/* nv50_mc.c */
+extern int nv50_mc_init(struct drm_device *);
+extern void nv50_mc_takedown(struct drm_device *);
+
+/* nv04_timer.c */
+extern int nv04_timer_init(struct drm_device *);
+extern uint64_t nv04_timer_read(struct drm_device *);
+extern void nv04_timer_takedown(struct drm_device *);
+
+extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+
+/* nouveau_buffer.c */
+extern struct drm_bo_driver nouveau_bo_driver;
+
+/* nouveau_fence.c */
+extern struct drm_fence_driver nouveau_fence_driver;
+extern void nouveau_fence_handler(struct drm_device *dev, int channel);
+
+#if defined(__powerpc__)
+#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
+#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) )
+#else
+#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
+#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
+#endif
+
+/* PRAMIN access */
+#if defined(__powerpc__)
+#define NV_RI32(o) in_be32((void __iomem *)(dev_priv->ramin)->handle+(o))
+#define NV_WI32(o,v) out_be32((void __iomem*)(dev_priv->ramin)->handle+(o), (v))
+#else
+#define NV_RI32(o) DRM_READ32(dev_priv->ramin, (o))
+#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v))
+#endif
+
+#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2))
+#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
+
+#endif /* __NOUVEAU_DRV_H__ */
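
The access macros at the end of this header are the register protocol used throughout the driver: NV_READ/NV_WRITE go through the BAR0 MMIO mapping in dev_priv->mmio, NV_RI32/NV_WI32 through the instance-memory mapping in dev_priv->ramin (with big-endian accessors substituted on PowerPC), and INSTANCE_RD/INSTANCE_WR address a gpuobj's PRAMIN backing in 32-bit words. A short sketch, assuming dev_priv and a gpuobj are in scope as the macros require:

    /* Sketch: pulse PFIFO off and back on via PMC (as nouveau_fifo_init()
     * does later in this patch), then clear the first word of a gpuobj's
     * instance memory. */
    NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
    NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  NV_PMC_ENABLE_PFIFO);
    INSTANCE_WR(gpuobj, 0, 0x00000000);
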
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
new file mode 100644
index 0000000..4ad51ae
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+ /* DRM's channel always uses IRQs to signal fences */
+ if (class == dev_priv->channel.chan->id)
+ return 1;
+
+ /* Other channels don't use IRQs at all yet */
+ return 0;
+}
+
+static int
+nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
+ uint32_t *breadcrumb, uint32_t *native_type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[class];
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+
+ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+ /* We can't emit fences on client channels, update sequence number
+ * and userspace will emit the fence
+ */
+ *breadcrumb = ++chan->next_sequence;
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (chan != dchan->chan) {
+ DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
+ return 0;
+ }
+
+ DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
+ OUT_RING (*breadcrumb);
+ BEGIN_RING(NvSubM2MF, 0x0150, 1);
+ OUT_RING (0);
+ FIRE_RING ();
+
+ return 0;
+}
+
+static void
+nouveau_fence_poll(struct drm_device *dev, uint32_t class, uint32_t waiting_types)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
+ struct nouveau_channel *chan = dev_priv->fifos[class];
+
+ DRM_DEBUG("class=%d\n", class);
+ DRM_DEBUG("pending: 0x%08x 0x%08x\n", waiting_types, fc->waiting_types);
+
+ if (waiting_types & DRM_FENCE_TYPE_EXE) {
+ uint32_t sequence = NV_READ(chan->ref_cnt);
+
+ DRM_DEBUG("got 0x%08x\n", sequence);
+ drm_fence_handler(dev, class, sequence, waiting_types, 0);
+ }
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->fence_class[channel];
+
+ DRM_DEBUG("class=%d\n", channel);
+
+ write_lock(&fm->lock);
+ nouveau_fence_poll(dev, channel, fc->waiting_types);
+ write_unlock(&fm->lock);
+}
+
+struct drm_fence_driver nouveau_fence_driver = {
+ .num_classes = 8,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .has_irq = nouveau_fence_has_irq,
+ .emit = nouveau_fence_emit,
+ .flush = NULL,
+ .poll = nouveau_fence_poll,
+ .needed_flush = NULL,
+ .wait = NULL
+};
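
Breadcrumbs are written by the GPU via NV_MEMORY_TO_MEMORY_FORMAT_SET_REF and read back from the per-channel REF_CNT register, so deciding whether a fence has signalled reduces to a wrap-safe sequence comparison against that register. A hedged sketch of the check (the helper is illustrative, not part of this patch):

    /* Sketch: has the channel's hardware sequence passed 'seq' yet?
     * The signed difference keeps the test correct across 32-bit wrap. */
    static int breadcrumb_passed(struct drm_nouveau_private *dev_priv,
                                 struct nouveau_channel *chan, uint32_t seq)
    {
            uint32_t hw = NV_READ(chan->ref_cnt);
            return (int32_t)(hw - seq) >= 0;
    }
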
diff --git a/drivers/gpu/drm/nouveau/nouveau_fifo.c b/drivers/gpu/drm/nouveau/nouveau_fifo.c
new file mode 100644
index 0000000..92ea8fc
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_fifo.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright 2005-2006 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+
+/* returns the size of fifo context */
+int nouveau_fifo_ctx_size(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+
+ if (dev_priv->card_type >= NV_40)
+ return 128;
+ else if (dev_priv->card_type >= NV_17)
+ return 64;
+ else
+ return 32;
+}
+
+/***********************************
+ * functions doing the actual work
+ ***********************************/
+
+static int nouveau_fifo_instmem_configure(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV03_PFIFO_RAMHT,
+ (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht_bits - 9) << 16) |
+ (dev_priv->ramht_offset >> 8)
+ );
+
+ NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
+
+ switch(dev_priv->card_type)
+ {
+ case NV_40:
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ NV_WRITE(0x2230, 1);
+ break;
+ default:
+ break;
+ }
+ NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
+ break;
+ case NV_44:
+ NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
+ (2 << 16));
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_17:
+ NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
+ (1 << 16) /* 64 Bytes entry*/);
+ /* XXX the nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
+ break;
+ case NV_11:
+ case NV_10:
+ case NV_04:
+ NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
+ break;
+ }
+
+ return 0;
+}
+
+int nouveau_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PFIFO);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PFIFO);
+
+ /* Enable PFIFO error reporting */
+ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+
+ ret = nouveau_fifo_instmem_configure(dev);
+ if (ret) {
+ DRM_ERROR("Failed to configure instance memory\n");
+ return ret;
+ }
+
+ /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
+
+ DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
+
+ /* All channels into PIO mode */
+ NV_WRITE(NV04_PFIFO_MODE, 0x00000000);
+
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+ /* Channel 0 active, PIO mode */
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
+ /* PUT and GET to 0 */
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
+ /* No cmdbuf object */
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
+ NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
+ NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
+ NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0x00000000);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
+
+ /* FIXME on NV04 */
+ if (dev_priv->card_type >= NV_10) {
+ NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
+ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
+ if (dev_priv->card_type >= NV_40)
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
+ else
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
+ } else {
+ NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
+ NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
+ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
+ }
+
+ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
+ return 0;
+}
+
+static int
+nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mem_block *pb = chan->pushbuf_mem;
+ struct nouveau_gpuobj *pushbuf = NULL;
+ int ret;
+
+ if (pb->flags & NOUVEAU_MEM_AGP) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ &pushbuf,
+ &chan->pushbuf_base);
+ } else
+ if (pb->flags & NOUVEAU_MEM_PCI) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI_NONLINEAR,
+ &pushbuf);
+ chan->pushbuf_base = 0;
+ } else if (dev_priv->card_type != NV_04) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_VIDMEM, &pushbuf);
+ chan->pushbuf_base = 0;
+ } else {
+ /* NV04 cmdbuf hack, from original ddx.. not sure of its
+ * exact reason for existing :) PCI access to cmdbuf in
+ * VRAM.
+ */
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start +
+ drm_get_resource_start(dev, 1),
+ pb->size, NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI, &pushbuf);
+ chan->pushbuf_base = 0;
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
+ &chan->pushbuf))) {
+ DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
+ if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+ nouveau_gpuobj_del(dev, &pushbuf);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct mem_block *
+nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_config *config = &dev_priv->config;
+ struct mem_block *pb;
+ int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
+
+ /* Defaults for unconfigured values */
+ if (!config->cmdbuf.location)
+ config->cmdbuf.location = NOUVEAU_MEM_FB;
+ if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
+ config->cmdbuf.size = pb_min_size;
+
+ pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
+ config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
+ (struct drm_file *)-2);
+ if (!pb)
+ DRM_ERROR("Couldn't allocate DMA push buffer.\n");
+
+ return pb;
+}
+
+/* allocates and initializes a fifo for user space consumption */
+int
+nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ struct drm_file *file_priv, struct mem_block *pushbuf,
+ uint32_t vram_handle, uint32_t tt_handle)
+{
+ int ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_channel *chan;
+ int channel;
+
+ /*
+	 * Alright, here is the full story:
+	 * NVIDIA cards have multiple hardware FIFO contexts (praise them for
+	 * that, no complicated crash-prone context switches).
+	 * We allocate a new context for each app and let it write to it
+	 * directly (woo, full userspace command submission!).
+	 * When there are no more contexts, you lose.
+ */
+ for (channel = 0; channel < engine->fifo.channels; channel++) {
+ if (dev_priv->fifos[channel] == NULL)
+ break;
+ }
+
+	/* no more fifos. you lose. */
+ if (channel == engine->fifo.channels)
+ return -EINVAL;
+
+ dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
+ DRM_MEM_DRIVER);
+ if (!dev_priv->fifos[channel])
+ return -ENOMEM;
+ dev_priv->fifo_alloc_count++;
+ chan = dev_priv->fifos[channel];
+ chan->dev = dev;
+ chan->id = channel;
+ chan->file_priv = file_priv;
+ chan->pushbuf_mem = pushbuf;
+
+ DRM_INFO("Allocating FIFO number %d\n", channel);
+
+ /* Locate channel's user control regs */
+ if (dev_priv->card_type < NV_40) {
+ chan->user = NV03_USER(channel);
+ chan->user_size = NV03_USER_SIZE;
+ chan->put = NV03_USER_DMA_PUT(channel);
+ chan->get = NV03_USER_DMA_GET(channel);
+ chan->ref_cnt = NV03_USER_REF_CNT(channel);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ chan->user = NV40_USER(channel);
+ chan->user_size = NV40_USER_SIZE;
+ chan->put = NV40_USER_DMA_PUT(channel);
+ chan->get = NV40_USER_DMA_GET(channel);
+ chan->ref_cnt = NV40_USER_REF_CNT(channel);
+ } else {
+ chan->user = NV50_USER(channel);
+ chan->user_size = NV50_USER_SIZE;
+ chan->put = NV50_USER_DMA_PUT(channel);
+ chan->get = NV50_USER_DMA_GET(channel);
+ chan->ref_cnt = NV50_USER_REF_CNT(channel);
+ }
+
+ /* Allocate space for per-channel fixed notifier memory */
+ ret = nouveau_notifier_init_channel(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+ /* Setup channel's default objects */
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+ /* Create a dma object for the push buffer */
+ ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+ nouveau_wait_for_idle(dev);
+
+ /* disable the fifo caches */
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+
+ /* Create a graphics context for new channel */
+ ret = engine->graph.create_context(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+	/* Construct initial RAMFC for new channel */
+ ret = engine->fifo.create_context(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+ /* setup channel's default get/put values
+ * XXX: quite possibly extremely pointless..
+ */
+ NV_WRITE(chan->get, chan->pushbuf_base);
+ NV_WRITE(chan->put, chan->pushbuf_base);
+
+	/* If this is the first channel, set up PFIFO ourselves. For any
+ * other case, the GPU will handle this when it switches contexts.
+ */
+ if (dev_priv->fifo_alloc_count == 1) {
+ ret = engine->fifo.load_context(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+
+ ret = engine->graph.load_context(chan);
+ if (ret) {
+ nouveau_fifo_free(chan);
+ return ret;
+ }
+ }
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV03_PFIFO_CACHES, 1);
+
+ DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
+ *chan_ret = chan;
+ return 0;
+}
+
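+/* Report whether a channel has drained its push buffer. With PFIFO cache
+ * reassignment temporarily disabled, compare the pushbuf put/get pointers:
+ * from the channel's RAMFC (first two words) if it is not the active
+ * channel, or from CACHE1 DMA_PUT/DMA_GET if it is.
+ */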
+int
+nouveau_channel_idle(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ uint32_t caches;
+ int idle;
+
+ caches = NV_READ(NV03_PFIFO_CACHES);
+ NV_WRITE(NV03_PFIFO_CACHES, caches & ~1);
+
+ if (engine->fifo.channel_id(dev) != chan->id) {
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+
+ if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1))
+ idle = 0;
+ else
+ idle = 1;
+ } else {
+ idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) ==
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
+ }
+
+ NV_WRITE(NV03_PFIFO_CACHES, caches);
+ return idle;
+}
+
+/* stops and frees a fifo */
+void nouveau_fifo_free(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ uint64_t t_start;
+
+ DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
+
+ /* Give the channel a chance to idle, wait 2s (hopefully) */
+ t_start = engine->timer.read(dev);
+ while (!nouveau_channel_idle(chan)) {
+ if (engine->timer.read(dev) - t_start > 2000000000ULL) {
+			DRM_ERROR("Failed to idle channel %d before destroy. "
+				  "Prepare for strangeness..\n", chan->id);
+ break;
+ }
+ }
+
+ /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
+ * from CACHE1 too?
+ */
+
+ /* disable the fifo caches */
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+
+	/* FIXME: XXX needs more code */
+
+ engine->fifo.destroy_context(chan);
+
+ /* Cleanup PGRAPH state */
+ engine->graph.destroy_context(chan);
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
+
+ /* Deallocate push buffer */
+ nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ if (chan->pushbuf_mem) {
+ nouveau_mem_free(dev, chan->pushbuf_mem);
+ chan->pushbuf_mem = NULL;
+ }
+
+ /* Destroy objects belonging to the channel */
+ nouveau_gpuobj_channel_takedown(chan);
+
+ nouveau_notifier_takedown_channel(chan);
+
+ dev_priv->fifos[chan->id] = NULL;
+ dev_priv->fifo_alloc_count--;
+ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
+}
+
+/* cleans up all the FIFOs owned by file_priv */
+void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ int i;
+
+ DRM_DEBUG("clearing FIFO enables from file_priv\n");
+ for(i = 0; i < engine->fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->file_priv == file_priv)
+ nouveau_fifo_free(chan);
+ }
+}
+
+int
+nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
+ int channel)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+
+ if (channel >= engine->fifo.channels)
+ return 0;
+ if (dev_priv->fifos[channel] == NULL)
+ return 0;
+ return (dev_priv->fifos[channel]->file_priv == file_priv);
+}
+
+/***********************************
+ * ioctls wrapping the functions
+ ***********************************/
+
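+/* CHANNEL_ALLOC ioctl: allocate a push buffer and a channel, then hand the
+ * map handles for the channel control registers, the push buffer and the
+ * notifier block back to userspace.
+ */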
+static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct drm_map_list *entry;
+ struct nouveau_channel *chan;
+ struct mem_block *pushbuf;
+ int res;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
+ if (!pushbuf)
+ return -ENOMEM;
+
+ res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
+ if (res)
+ return res;
+ init->channel = chan->id;
+ init->put_base = chan->pushbuf_base;
+
+ /* make the fifo available to user space */
+ /* first, the fifo control regs */
+ init->ctrl = dev_priv->mmio->offset + chan->user;
+ init->ctrl_size = chan->user_size;
+ res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
+ 0, &chan->regs);
+ if (res != 0)
+ return res;
+
+ entry = drm_find_matching_map(dev, chan->regs);
+ if (!entry)
+ return -EINVAL;
+ init->ctrl = entry->user_token;
+
+ /* pass back FIFO map info to the caller */
+ init->cmdbuf = chan->pushbuf_mem->map_handle;
+ init->cmdbuf_size = chan->pushbuf_mem->size;
+
+ /* and the notifier block */
+ init->notifier = chan->notifier_block->map_handle;
+ init->notifier_size = chan->notifier_block->size;
+
+ return 0;
+}
+
+static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_channel_free *cfree = data;
+ struct nouveau_channel *chan;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
+
+ nouveau_fifo_free(chan);
+ return 0;
+}
+
+/***********************************
+ * finally, the ioctl table
+ ***********************************/
+
+struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH),
+};
+
+int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
new file mode 100644
index 0000000..4f53a50
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -0,0 +1,68 @@
+/**
+ * \file nouveau_ioc32.c
+ *
+ * 32-bit ioctl compatibility routines for the Nouveau DRM.
+ *
+ * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich
+ *
+ *
+ * Copyright (C) Paul Mackerras 2005
+ * Copyright (C) Egbert Eich 2003,2004
+ * Copyright (C) Dave Airlie 2005
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drm.h"
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ lock_kernel(); /* XXX for now */
+ if (fn != NULL)
+ ret = (*fn)(filp, cmd, arg);
+ else
+ ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
new file mode 100644
index 0000000..2a3d8a0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -0,0 +1,568 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+#include "nouveau_swmthd.h"
+
+void
+nouveau_irq_preinstall(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Master disable */
+ NV_WRITE(NV03_PMC_INTR_EN_0, 0);
+}
+
+int
+nouveau_irq_postinstall(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Master enable */
+ NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
+
+ return 0;
+}
+
+void
+nouveau_irq_uninstall(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Master disable */
+ NV_WRITE(NV03_PMC_INTR_EN_0, 0);
+}
+
+static void
+nouveau_fifo_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ uint32_t status, reassign;
+
+ reassign = NV_READ(NV03_PFIFO_CACHES) & 1;
+ while ((status = NV_READ(NV03_PFIFO_INTR_0))) {
+ uint32_t chid, get;
+
+ NV_WRITE(NV03_PFIFO_CACHES, 0);
+
+ chid = engine->fifo.channel_id(dev);
+ get = NV_READ(NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+ ptr = get >> 2;
+ if (dev_priv->card_type < NV_40) {
+ mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = NV_READ(NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = NV_READ(NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = NV_READ(NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ DRM_INFO("PFIFO_CACHE_ERROR - "
+ "Ch %d/%d Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc, data);
+
+ NV_WRITE(NV03_PFIFO_CACHE1_GET, get + 4);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ DRM_INFO("PFIFO_DMA_PUSHER - Ch %d\n", chid);
+
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
+ if (NV_READ(NV04_PFIFO_CACHE1_DMA_PUT) != get)
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, get + 4);
+ }
+
+ if (status) {
+ DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);
+ NV_WRITE(NV03_PFIFO_INTR_0, status);
+ NV_WRITE(NV03_PMC_INTR_EN_0, 0);
+ }
+
+ NV_WRITE(NV03_PFIFO_CACHES, reassign);
+ }
+
+ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
+
+struct nouveau_bitfield_names {
+ uint32_t mask;
+ const char * name;
+};
+
+static struct nouveau_bitfield_names nouveau_nstatus_names[] =
+{
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nouveau_nstatus_names_nv10[] =
+{
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nouveau_nsource_names[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+};
+
+static void
+nouveau_print_bitfield_names(uint32_t value,
+ const struct nouveau_bitfield_names *namelist,
+ const int namelist_len)
+{
+ int i;
+ for(i=0; i<namelist_len; ++i) {
+ uint32_t mask = namelist[i].mask;
+ if(value & mask) {
+ printk(" %s", namelist[i].name);
+ value &= ~mask;
+ }
+ }
+ if(value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+
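+/* Work out which channel PGRAPH is currently processing by matching the
+ * context pointer in 0x40032c against each channel's RAMIN grctx (NV40:
+ * instance address, NV50: contents of the first grctx word).
+ */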
+static int
+nouveau_graph_chid_from_grctx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int i;
+
+ if (dev_priv->card_type < NV_40)
+ return dev_priv->Engine.fifo.channels;
+ else
+ if (dev_priv->card_type < NV_50)
+ inst = (NV_READ(0x40032c) & 0xfffff) << 4;
+ else
+ inst = NV_READ(0x40032c) & 0xfffff;
+
+ for (i = 0; i < dev_priv->Engine.fifo.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (!chan || !chan->ramin_grctx)
+ continue;
+
+ if (dev_priv->card_type < NV_50) {
+ if (inst == chan->ramin_grctx->instance)
+ break;
+ } else {
+ if (inst == INSTANCE_RD(chan->ramin_grctx->gpuobj, 0))
+ break;
+ }
+ }
+
+ return i;
+}
+
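+/* Determine the channel responsible for a PGRAPH trap: pre-NV40 chips encode
+ * the channel id in PGRAPH_TRAPPED_ADDR, later chips need it derived from
+ * the active graphics context instead.
+ */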
+static int
+nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ int channel;
+
+ if (dev_priv->card_type < NV_10)
+ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
+ else
+ if (dev_priv->card_type < NV_40)
+ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ else
+ channel = nouveau_graph_chid_from_grctx(dev);
+
+ if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
+ DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
+ return -EINVAL;
+ }
+
+ *channel_ret = channel;
+ return 0;
+}
+
+struct nouveau_pgraph_trap {
+ int channel;
+ int class;
+ int subc, mthd, size;
+ uint32_t data, data2;
+ uint32_t nsource, nstatus;
+};
+
+static void
+nouveau_graph_trap_info(struct drm_device *dev,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t address;
+
+ trap->nsource = trap->nstatus = 0;
+ if (dev_priv->card_type < NV_50) {
+ trap->nsource = NV_READ(NV03_PGRAPH_NSOURCE);
+ trap->nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
+ }
+
+ if (nouveau_graph_trapped_channel(dev, &trap->channel))
+ trap->channel = -1;
+ address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
+
+ trap->mthd = address & 0x1FFC;
+ trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
+ if (dev_priv->card_type < NV_10) {
+ trap->subc = (address >> 13) & 0x7;
+ } else {
+ trap->subc = (address >> 16) & 0x7;
+ trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
+ }
+
+ if (dev_priv->card_type < NV_10) {
+ trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;
+ } else if (dev_priv->card_type < NV_40) {
+ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;
+ } else if (dev_priv->card_type < NV_50) {
+ trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;
+ } else {
+ trap->class = NV_READ(0x400814);
+ }
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
+ struct nouveau_pgraph_trap *trap)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
+
+ DRM_INFO("%s - nSource:", id);
+ nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
+ ARRAY_SIZE(nouveau_nsource_names));
+ printk(", nStatus:");
+ if (dev_priv->card_type < NV_10)
+ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
+ ARRAY_SIZE(nouveau_nstatus_names));
+ else
+ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names_nv10,
+ ARRAY_SIZE(nouveau_nstatus_names_nv10));
+ printk("\n");
+
+ DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n",
+ id, trap->channel, trap->subc, trap->class, trap->mthd,
+ trap->data2, trap->data);
+}
+
+static inline void
+nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ /* NV4 (nvidia TNT 1) reports software methods with
+ * PGRAPH NOTIFY ILLEGAL_MTHD
+ */
+		DRM_DEBUG("Got NV04 software method %x for class %#x\n",
+ trap.mthd, trap.class);
+
+ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
+ DRM_ERROR("Unable to execute NV04 software method %x "
+ "for object class %x. Please report.\n",
+ trap.mthd, trap.class);
+ unhandled = 1;
+ }
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled)
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
+}
+
+static inline void
+nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
+{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
+
+ nouveau_graph_trap_info(dev, &trap);
+ trap.nsource = nsource;
+
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (trap.channel >= 0 && trap.mthd == 0x0150) {
+ nouveau_fence_handler(dev, trap.channel);
+ } else
+ if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {
+ unhandled = 1;
+ }
+ } else {
+ unhandled = 1;
+ }
+
+ if (unhandled)
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
+}
+
+static inline void
+nouveau_pgraph_intr_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ uint32_t chid;
+
+ chid = engine->fifo.channel_id(dev);
+ DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);
+
+ switch(dev_priv->card_type) {
+ case NV_04:
+ case NV_05:
+ nouveau_nv04_context_switch(dev);
+ break;
+ case NV_10:
+ case NV_11:
+ case NV_17:
+ nouveau_nv10_context_switch(dev);
+ break;
+ default:
+ DRM_ERROR("Context switch not implemented\n");
+ break;
+ }
+}
+
+static void
+nouveau_pgraph_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status;
+
+ while ((status = NV_READ(NV03_PGRAPH_INTR))) {
+ uint32_t nsource = NV_READ(NV03_PGRAPH_NSOURCE);
+
+ if (status & NV_PGRAPH_INTR_NOTIFY) {
+ nouveau_pgraph_intr_notify(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_NOTIFY;
+ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
+ }
+
+ if (status & NV_PGRAPH_INTR_ERROR) {
+ nouveau_pgraph_intr_error(dev, nsource);
+
+ status &= ~NV_PGRAPH_INTR_ERROR;
+ NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
+ }
+
+ if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nouveau_pgraph_intr_context_switch(dev);
+
+ status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ NV_WRITE(NV03_PGRAPH_INTR,
+ NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ }
+
+ if (status) {
+ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ NV_WRITE(NV03_PGRAPH_INTR, status);
+ }
+
+ if ((NV_READ(NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
+ NV_WRITE(NV04_PGRAPH_FIFO, 1);
+ }
+
+ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nv50_pgraph_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status;
+
+ status = NV_READ(NV03_PGRAPH_INTR);
+
+ if (status & 0x00000020) {
+ nouveau_pgraph_intr_error(dev,
+ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+
+ status &= ~0x00000020;
+ NV_WRITE(NV03_PGRAPH_INTR, 0x00000020);
+ }
+
+ if (status & 0x00100000) {
+ nouveau_pgraph_intr_error(dev,
+ NV03_PGRAPH_NSOURCE_DATA_ERROR);
+
+ status &= ~0x00100000;
+ NV_WRITE(NV03_PGRAPH_INTR, 0x00100000);
+ }
+
+ if (status & 0x00200000) {
+ nouveau_pgraph_intr_error(dev,
+ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
+
+ status &= ~0x00200000;
+ NV_WRITE(NV03_PGRAPH_INTR, 0x00200000);
+ }
+
+ if (status) {
+ DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);
+ NV_WRITE(NV03_PGRAPH_INTR, status);
+ }
+
+ {
+ const int isb = (1 << 16) | (1 << 0);
+
+ if ((NV_READ(0x400500) & isb) != isb)
+ NV_WRITE(0x400500, NV_READ(0x400500) | isb);
+ }
+
+ NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
+}
+
+static void
+nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (crtc&1) {
+ NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
+ }
+
+ if (crtc&2) {
+ NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
+ }
+}
+
+static void
+nouveau_nv50_display_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t val = NV_READ(NV50_DISPLAY_SUPERVISOR);
+
+ DRM_INFO("NV50_DISPLAY_INTR - 0x%08X\n", val);
+
+ NV_WRITE(NV50_DISPLAY_SUPERVISOR, val);
+}
+
+static void
+nouveau_nv50_i2c_irq_handler(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_INFO("NV50_I2C_INTR - 0x%08X\n", NV_READ(NV50_I2C_CONTROLLER));
+
+ /* This seems to be the way to acknowledge an interrupt. */
+ NV_WRITE(NV50_I2C_CONTROLLER, 0x7FFF7FFF);
+}
+
+irqreturn_t
+nouveau_irq_handler(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device*)arg;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status;
+
+ status = NV_READ(NV03_PMC_INTR_0);
+ if (!status)
+ return IRQ_NONE;
+
+ if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
+ nouveau_fifo_irq_handler(dev);
+ status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
+ if (dev_priv->card_type >= NV_50)
+ nv50_pgraph_irq_handler(dev);
+ else
+ nouveau_pgraph_irq_handler(dev);
+
+ status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
+ nouveau_crtc_irq_handler(dev, (status>>24)&3);
+ status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
+ nouveau_nv50_display_irq_handler(dev);
+ status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
+ }
+
+ if (status & NV_PMC_INTR_0_NV50_I2C_PENDING) {
+ nouveau_nv50_i2c_irq_handler(dev);
+ status &= ~NV_PMC_INTR_0_NV50_I2C_PENDING;
+ }
+
+ if (status)
+ DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
new file mode 100644
index 0000000..5e7ac9e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -0,0 +1,868 @@
+/*
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ * Copyright 2005 Stephane Marchesin
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Keith Whitwell <keith@tungstengraphics.com>
+ */
+
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+
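+/* Simple first-fit range allocator used for the VRAM/GART heaps: blocks form
+ * a circular doubly-linked list, a NULL file_priv marks a block as free, and
+ * split_block() carves the requested range out of a free block by creating
+ * new blocks for any leading or trailing remainder.
+ */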
+static struct mem_block *
+split_block(struct mem_block *p, uint64_t start, uint64_t size,
+ struct drm_file *file_priv)
+{
+ /* Maybe cut off the start of an existing block */
+ if (start > p->start) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start;
+ newblock->size = p->size - (start - p->start);
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size -= newblock->size;
+ p = newblock;
+ }
+
+ /* Maybe cut off the end of an existing block */
+ if (size < p->size) {
+ struct mem_block *newblock =
+ drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
+ if (!newblock)
+ goto out;
+ newblock->start = start + size;
+ newblock->size = p->size - size;
+ newblock->file_priv = NULL;
+ newblock->next = p->next;
+ newblock->prev = p;
+ p->next->prev = newblock;
+ p->next = newblock;
+ p->size = size;
+ }
+
+out:
+ /* Our block is in the middle */
+ p->file_priv = file_priv;
+ return p;
+}
+
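+/* Find a free block that can hold `size` bytes aligned to 2^align2 and carve
+ * the allocation out of it. With `tail` set, the heap is searched backwards
+ * and the range is taken from the end of a block; nouveau_mem_alloc() uses
+ * this for allocations without NOUVEAU_MEM_USER set.
+ */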
+struct mem_block *
+nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
+ int align2, struct drm_file *file_priv, int tail)
+{
+ struct mem_block *p;
+ uint64_t mask = (1 << align2) - 1;
+
+ if (!heap)
+ return NULL;
+
+ if (tail) {
+ list_for_each_prev(p, heap) {
+ uint64_t start = ((p->start + p->size) - size) & ~mask;
+
+ if (p->file_priv == 0 && start >= p->start &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ } else {
+ list_for_each(p, heap) {
+ uint64_t start = (p->start + mask) & ~mask;
+
+ if (p->file_priv == 0 &&
+ start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
+ }
+ }
+
+ return NULL;
+}
+
+static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
+{
+ struct mem_block *p;
+
+ list_for_each(p, heap)
+ if (p->start == start)
+ return p;
+
+ return NULL;
+}
+
+void nouveau_mem_free_block(struct mem_block *p)
+{
+ p->file_priv = NULL;
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ if (p->next->file_priv == 0) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_BUFS);
+ }
+
+ if (p->prev->file_priv == 0) {
+ struct mem_block *q = p->prev;
+ q->size += p->size;
+ q->next = p->next;
+ q->next->prev = q;
+ drm_free(p, sizeof(*q), DRM_MEM_BUFS);
+ }
+}
+
+/* Initialize. How to check for an uninitialized heap?
+ */
+int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
+ uint64_t size)
+{
+ struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
+
+ if (!blocks)
+ return -ENOMEM;
+
+ *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
+ if (!*heap) {
+ drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
+ return -ENOMEM;
+ }
+
+ blocks->start = start;
+ blocks->size = size;
+ blocks->file_priv = NULL;
+ blocks->next = blocks->prev = *heap;
+
+ memset(*heap, 0, sizeof(**heap));
+ (*heap)->file_priv = (struct drm_file *) - 1;
+ (*heap)->next = (*heap)->prev = blocks;
+ return 0;
+}
+
+/*
+ * Free all blocks associated with the releasing file_priv
+ */
+void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
+{
+ struct mem_block *p;
+
+ if (!heap || !heap->next)
+ return;
+
+ list_for_each(p, heap) {
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
+ }
+
+ /* Assumes a single contiguous range. Needs a special file_priv in
+ * 'heap' to stop it being subsumed.
+ */
+ list_for_each(p, heap) {
+ while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
+ (p->next!=heap)) {
+ struct mem_block *q = p->next;
+ p->size += q->size;
+ p->next = q->next;
+ p->next->prev = p;
+ drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+ }
+ }
+}
+
+/*
+ * Cleanup everything
+ */
+void nouveau_mem_takedown(struct mem_block **heap)
+{
+ struct mem_block *p;
+
+ if (!*heap)
+ return;
+
+ for (p = (*heap)->next; p != *heap;) {
+ struct mem_block *q = p;
+ p = p->next;
+ drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
+ }
+
+ drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
+ *heap = NULL;
+}
+
+void nouveau_mem_close(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_mem_takedown(&dev_priv->agp_heap);
+ nouveau_mem_takedown(&dev_priv->fb_heap);
+ if (dev_priv->pci_heap)
+ nouveau_mem_takedown(&dev_priv->pci_heap);
+}
+
+/* XXX: won't work on BSD because of pci_read_config_dword */
+static uint32_t
+nouveau_mem_fb_amount_igp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
+ if (!bridge) {
+ DRM_ERROR("no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->flags&NV_NFORCE) {
+ pci_read_config_dword(bridge, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+ } else
+ if(dev_priv->flags&NV_NFORCE2) {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ }
+
+ DRM_ERROR("impossible!\n");
+
+ return 0;
+}
+
+/* returns the amount of FB ram in bytes */
+uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type)
+ {
+ case NV_04:
+ case NV_05:
+ if (NV_READ(NV03_BOOT_0) & 0x00000100) {
+ return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
+ } else
+ switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
+ {
+ case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ return 32*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ return 16*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ return 8*1024*1024;
+ case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ return 4*1024*1024;
+ }
+ break;
+ case NV_10:
+ case NV_11:
+ case NV_17:
+ case NV_20:
+ case NV_30:
+ case NV_40:
+ case NV_44:
+ case NV_50:
+ default:
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ return nouveau_mem_fb_amount_igp(dev);
+ } else {
+ uint64_t mem;
+
+ mem = (NV_READ(NV04_FIFO_DATA) &
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+ return mem*1024*1024;
+ }
+ break;
+ }
+
+ DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
+ return 0;
+}
+
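+/* Poke the AGP interface back into a sane state: temporarily clear the
+ * busmaster, SBA and AGP enable bits, power-cycle PGRAPH if it was enabled,
+ * then restore the saved PCI config values.
+ */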
+static void nouveau_mem_reset_agp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+
+ saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
+ saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);
+
+ /* clear busmaster bit */
+ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
+ /* clear SBA and AGP bits */
+ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+
+ /* power cycle pgraph, if enabled */
+ pmc_enable = NV_READ(NV03_PMC_ENABLE);
+ if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
+ NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+ }
+
+ /* and restore (gives effect of resetting AGP) */
+ NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
+ NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
+}
+
+static int
+nouveau_mem_init_agp(struct drm_device *dev, int ttm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+ int ret;
+
+ nouveau_mem_reset_agp(dev);
+
+ ret = drm_agp_acquire(dev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
+
+ ret = drm_agp_info(dev, &info);
+ if (ret) {
+ DRM_ERROR("Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
+
+ /* see agp.h for the AGPSTAT_* modes available */
+ mode.mode = info.mode;
+ ret = drm_agp_enable(dev, mode);
+ if (ret) {
+ DRM_ERROR("Unable to enable AGP: %d\n", ret);
+ return ret;
+ }
+
+ if (!ttm) {
+ struct drm_agp_buffer agp_req;
+ struct drm_agp_binding bind_req;
+
+ agp_req.size = info.aperture_size;
+ agp_req.type = 0;
+ ret = drm_agp_alloc(dev, &agp_req);
+ if (ret) {
+ DRM_ERROR("Unable to alloc AGP: %d\n", ret);
+ return ret;
+ }
+
+ bind_req.handle = agp_req.handle;
+ bind_req.offset = 0;
+ ret = drm_agp_bind(dev, &bind_req);
+ if (ret) {
+ DRM_ERROR("Unable to bind AGP: %d\n", ret);
+ return ret;
+ }
+ }
+
+ dev_priv->gart_info.type = NOUVEAU_GART_AGP;
+ dev_priv->gart_info.aper_base = info.aperture_base;
+ dev_priv->gart_info.aper_size = info.aperture_size;
+ return 0;
+}
+
+#define HACK_OLD_MM
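+/* TTM-based memory setup: register BO ranges for non-BAR1-mappable VRAM
+ * (PRIV0), mappable VRAM and the GART aperture, preferring AGP and falling
+ * back to SGDMA. With HACK_OLD_MM, part of VRAM is additionally handed to
+ * the legacy heap allocator above.
+ */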
+int
+nouveau_mem_init_ttm(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t vram_size, bar1_size;
+ int ret;
+
+ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
+ dev_priv->fb_phys = drm_get_resource_start(dev,1);
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+ drm_bo_driver_init(dev);
+
+ /* non-mappable vram */
+ dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
+ dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
+ vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
+ bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
+ if (bar1_size < vram_size) {
+ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
+ bar1_size, vram_size - bar1_size, 1))) {
+ DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
+ return ret;
+ }
+ vram_size = bar1_size;
+ }
+
+ /* mappable vram */
+#ifdef HACK_OLD_MM
+ vram_size /= 4;
+#endif
+ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
+ DRM_ERROR("Failed VRAM mm init: %d\n", ret);
+ return ret;
+ }
+
+ /* GART */
+#if !defined(__powerpc__) && !defined(__ia64__)
+ if (drm_device_is_agp(dev) && dev->agp) {
+ if ((ret = nouveau_mem_init_agp(dev, 1)))
+ DRM_ERROR("Error initialising AGP: %d\n", ret);
+ }
+#endif
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
+ if ((ret = nouveau_sgdma_init(dev)))
+ DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
+ }
+
+ if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
+ dev_priv->gart_info.aper_size >>
+ PAGE_SHIFT, 1))) {
+ DRM_ERROR("Failed TT mm init: %d\n", ret);
+ return ret;
+ }
+
+#ifdef HACK_OLD_MM
+ vram_size <<= PAGE_SHIFT;
+ DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
+ if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
+ return -ENOMEM;
+#endif
+
+ return 0;
+}
+
+int nouveau_mem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_size;
+ int ret = 0;
+
+ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
+ dev_priv->fb_phys = 0;
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
+ /* setup a mtrr over the FB */
+ dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
+ nouveau_mem_fb_amount(dev),
+ DRM_MTRR_WC);
+
+ /* Init FB */
+ dev_priv->fb_phys=drm_get_resource_start(dev,1);
+ fb_size = nouveau_mem_fb_amount(dev);
+ /* On G80, limit VRAM to 512MiB temporarily due to limits in how
+ * we handle VRAM page tables.
+ */
+ if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
+ fb_size = (512 * 1024 * 1024);
+ /* On at least NV40, RAMIN is actually at the end of vram.
+ * We don't want to allocate this... */
+ if (dev_priv->card_type >= NV_40)
+ fb_size -= dev_priv->ramin_rsvd_vram;
+ dev_priv->fb_available_size = fb_size;
+ DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);
+
+ if (fb_size>256*1024*1024) {
+		/* On cards with > 256MiB, you can't map everything.
+ * So we create a second FB heap for that type of memory */
+ if (nouveau_mem_init_heap(&dev_priv->fb_heap,
+ 0, 256*1024*1024))
+ return -ENOMEM;
+ if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
+ 256*1024*1024, fb_size-256*1024*1024))
+ return -ENOMEM;
+ } else {
+ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
+ return -ENOMEM;
+ dev_priv->fb_nomap_heap=NULL;
+ }
+
+#if !defined(__powerpc__) && !defined(__ia64__)
+ /* Init AGP / NV50 PCIEGART */
+ if (drm_device_is_agp(dev) && dev->agp) {
+ if ((ret = nouveau_mem_init_agp(dev, 0)))
+ DRM_ERROR("Error initialising AGP: %d\n", ret);
+ }
+#endif
+
+	/* Note: this is *not* just NV50 code, but only used on NV50 for now */
+ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
+ dev_priv->card_type >= NV_50) {
+ ret = nouveau_sgdma_init(dev);
+ if (!ret) {
+ ret = nouveau_sgdma_nottm_hack_init(dev);
+ if (ret)
+ nouveau_sgdma_takedown(dev);
+ }
+
+ if (ret)
+ DRM_ERROR("Error initialising SG DMA: %d\n", ret);
+ }
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ if (nouveau_mem_init_heap(&dev_priv->agp_heap,
+ 0, dev_priv->gart_info.aper_size)) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ nouveau_sgdma_nottm_hack_takedown(dev);
+ nouveau_sgdma_takedown(dev);
+ }
+ }
+ }
+
+ /* NV04-NV40 PCIEGART */
+ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
+ struct drm_scatter_gather sgreq;
+
+ DRM_DEBUG("Allocating sg memory for PCI DMA\n");
+		sgreq.size = 16 << 20; /* 16MiB of PCI scatter-gather zone */
+
+ if (drm_sg_alloc(dev, &sgreq)) {
+ DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
+ " pages for PCI DMA!",sgreq.size>>20);
+ } else {
+ if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
+ dev->sg->pages * PAGE_SIZE)) {
+ DRM_ERROR("Unable to initialize pci_heap!");
+ }
+ }
+ }
+
+ /* G8x: Allocate shared page table to map real VRAM pages into */
+ if (dev_priv->card_type >= NV_50) {
+ unsigned size = ((512 * 1024 * 1024) / 65536) * 8;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->vm_vram_pt);
+ if (ret) {
+ DRM_ERROR("Error creating VRAM page table: %d\n", ret);
+ return ret;
+ }
+ }
+
+
+ return 0;
+}
+
+struct mem_block *
+nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size,
+ int flags, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mem_block *block;
+ int type, tail = !(flags & NOUVEAU_MEM_USER);
+
+ /*
+ * Make things easier on ourselves: all allocations are page-aligned.
+ * We need that to map allocated regions into the user space
+ */
+ if (alignment < PAGE_SHIFT)
+ alignment = PAGE_SHIFT;
+
+ /* Align allocation sizes to 64KiB blocks on G8x. We use a 64KiB
+ * page size in the GPU VM.
+ */
+ if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50) {
+ size = (size + 65535) & ~65535;
+ if (alignment < 16)
+ alignment = 16;
+ }
+
+ /*
+	 * Warn about zero-sized allocations, but let them go through; they'll return one page.
+ */
+ if (size == 0)
+ DRM_INFO("warning : 0 byte allocation\n");
+
+ /*
+ * Keep alloc size a multiple of the page size to keep drm_addmap() happy
+ */
+ if (size & (~PAGE_MASK))
+ size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
+
+
+#define NOUVEAU_MEM_ALLOC_AGP {\
+ type=NOUVEAU_MEM_AGP;\
+ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
+ alignment, file_priv, tail); \
+ if (block) goto alloc_ok;\
+ }
+
+#define NOUVEAU_MEM_ALLOC_PCI {\
+ type = NOUVEAU_MEM_PCI;\
+ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
+ alignment, file_priv, tail); \
+ if ( block ) goto alloc_ok;\
+ }
+
+#define NOUVEAU_MEM_ALLOC_FB {\
+ type=NOUVEAU_MEM_FB;\
+ if (!(flags&NOUVEAU_MEM_MAPPED)) {\
+ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
+ size, alignment, \
+ file_priv, tail); \
+ if (block) goto alloc_ok;\
+ }\
+ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
+ alignment, file_priv, tail);\
+ if (block) goto alloc_ok;\
+ }
+
+
+ if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
+ if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
+ if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
+ if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
+ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
+ if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
+
+
+ return NULL;
+
+alloc_ok:
+ block->flags=type;
+
+ /* On G8x, map memory into VM */
+ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
+ !(flags & NOUVEAU_MEM_NOVM)) {
+ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
+ unsigned offset = block->start;
+ unsigned count = block->size / 65536;
+ unsigned tile = 0;
+
+ if (!pt) {
+ DRM_ERROR("vm alloc without vm pt\n");
+ nouveau_mem_free_block(block);
+ return NULL;
+ }
+
+ /* The tiling stuff is *not* what NVIDIA does - but both the
+ * 2D and 3D engines seem happy with this simpler method.
+ * Should look into why NVIDIA do what they do at some point.
+ */
+ if (flags & NOUVEAU_MEM_TILE) {
+ if (flags & NOUVEAU_MEM_TILE_ZETA)
+ tile = 0x00002800;
+ else
+ tile = 0x00007000;
+ }
+
+ while (count--) {
+ unsigned pte = offset / 65536;
+
+ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
+ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
+ offset += 65536;
+ }
+ } else {
+ block->flags |= NOUVEAU_MEM_NOVM;
+ }
+
+ if (flags&NOUVEAU_MEM_MAPPED)
+ {
+ struct drm_map_list *entry;
+ int ret = 0;
+ block->flags|=NOUVEAU_MEM_MAPPED;
+
+ if (type == NOUVEAU_MEM_AGP) {
+ if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_AGP, 0, &block->map);
+ else
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_SCATTER_GATHER, 0, &block->map);
+ }
+ else if (type == NOUVEAU_MEM_FB)
+ ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
+ block->size, _DRM_FRAME_BUFFER,
+ 0, &block->map);
+ else if (type == NOUVEAU_MEM_PCI)
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_SCATTER_GATHER, 0, &block->map);
+
+ if (ret) {
+ nouveau_mem_free_block(block);
+ return NULL;
+ }
+
+ entry = drm_find_matching_map(dev, block->map);
+ if (!entry) {
+ nouveau_mem_free_block(block);
+ return NULL;
+ }
+ block->map_handle = entry->user_token;
+ }
+
+ DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags);
+ return block;
+}
+
+void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
+
+ if (block->flags&NOUVEAU_MEM_MAPPED)
+ drm_rmmap(dev, block->map);
+
+ /* G8x: Remove pages from vm */
+ if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
+ !(block->flags & NOUVEAU_MEM_NOVM)) {
+ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
+ unsigned offset = block->start;
+ unsigned count = block->size / 65536;
+
+ if (!pt) {
+ DRM_ERROR("vm free without vm pt\n");
+ goto out_free;
+ }
+
+ while (count--) {
+ unsigned pte = offset / 65536;
+ INSTANCE_WR(pt, (pte * 2) + 0, 0);
+ INSTANCE_WR(pt, (pte * 2) + 1, 0);
+ offset += 65536;
+ }
+ }
+
+out_free:
+ nouveau_mem_free_block(block);
+}
+
+/*
+ * Ioctls
+ */
+
+int
+nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_mem_alloc *alloc = data;
+ struct mem_block *block;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (alloc->flags & NOUVEAU_MEM_INTERNAL)
+ return -EINVAL;
+
+ block = nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
+ alloc->flags | NOUVEAU_MEM_USER, file_priv);
+ if (!block)
+ return -ENOMEM;
+ alloc->map_handle=block->map_handle;
+ alloc->offset=block->start;
+ alloc->flags=block->flags;
+
+ if (dev_priv->card_type >= NV_50 && alloc->flags & NOUVEAU_MEM_FB)
+ alloc->offset += 512*1024*1024;
+
+ return 0;
+}
+
+int
+nouveau_ioctl_mem_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_mem_free *memfree = data;
+ struct mem_block *block;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (dev_priv->card_type >= NV_50 && memfree->flags & NOUVEAU_MEM_FB)
+ memfree->offset -= 512*1024*1024;
+
+ block=NULL;
+ if (memfree->flags & NOUVEAU_MEM_FB)
+ block = find_block(dev_priv->fb_heap, memfree->offset);
+ else if (memfree->flags & NOUVEAU_MEM_AGP)
+ block = find_block(dev_priv->agp_heap, memfree->offset);
+ else if (memfree->flags & NOUVEAU_MEM_PCI)
+ block = find_block(dev_priv->pci_heap, memfree->offset);
+ if (!block)
+ return -EFAULT;
+ if (block->file_priv != file_priv)
+ return -EPERM;
+
+ nouveau_mem_free(dev, block);
+ return 0;
+}
+
+int
+nouveau_ioctl_mem_tile(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_mem_tile *memtile = data;
+ struct mem_block *block = NULL;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ if (dev_priv->card_type < NV_50)
+ return -EINVAL;
+
+ if (memtile->flags & NOUVEAU_MEM_FB) {
+ memtile->offset -= 512*1024*1024;
+ block = find_block(dev_priv->fb_heap, memtile->offset);
+ }
+
+ if (!block)
+ return -EINVAL;
+
+ if (block->file_priv != file_priv)
+ return -EPERM;
+
+ {
+ struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
+ unsigned offset = block->start + memtile->delta;
+ unsigned count = memtile->size / 65536;
+ unsigned tile = 0;
+
+ if (memtile->flags & NOUVEAU_MEM_TILE) {
+ if (memtile->flags & NOUVEAU_MEM_TILE_ZETA)
+ tile = 0x00002800;
+ else
+ tile = 0x00007000;
+ }
+
+ while (count--) {
+ unsigned pte = offset / 65536;
+
+ INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
+ INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000 | tile);
+ offset += 65536;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
new file mode 100644
index 0000000..edece4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nouveau_notifier_init_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ int flags, ret;
+
+ flags = (NOUVEAU_MEM_PCI | NOUVEAU_MEM_MAPPED |
+ NOUVEAU_MEM_FB_ACCEPTABLE);
+
+ chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
+ (struct drm_file *)-2);
+ if (!chan->notifier_block)
+ return -ENOMEM;
+ DRM_DEBUG("Allocated notifier block in 0x%08x\n",
+ chan->notifier_block->flags);
+
+ ret = nouveau_mem_init_heap(&chan->notifier_heap,
+ 0, chan->notifier_block->size);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ if (chan->notifier_block) {
+ nouveau_mem_free(dev, chan->notifier_block);
+ chan->notifier_block = NULL;
+ }
+
+ nouveau_mem_takedown(&chan->notifier_heap);
+}
+
+static void
+nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
+ struct nouveau_gpuobj *gpuobj)
+{
+ DRM_DEBUG("\n");
+
+ if (gpuobj->priv)
+ nouveau_mem_free_block(gpuobj->priv);
+}
+
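+/* Carve a `count * 32` byte slot out of the channel's notifier block and
+ * expose it to the channel as a DMA object under `handle`, picking the DMA
+ * target to match where the notifier block ended up (VRAM, AGP or PCI).
+ */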
+int
+nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
+ int count, uint32_t *b_offset)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *nobj = NULL;
+ struct mem_block *mem;
+ uint32_t offset;
+ int target, ret;
+
+ if (!chan->notifier_heap) {
+ DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
+ chan->id);
+ return -EINVAL;
+ }
+
+ mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
+ (struct drm_file *)-2, 0);
+ if (!mem) {
+ DRM_ERROR("Channel %d notifier block full\n", chan->id);
+ return -ENOMEM;
+ }
+ mem->flags = NOUVEAU_MEM_NOTIFIER;
+
+ offset = chan->notifier_block->start;
+ if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
+ target = NV_DMA_TARGET_VIDMEM;
+ } else
+ if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
+ dev_priv->card_type < NV_50) {
+ ret = nouveau_sgdma_get_page(dev, offset, &offset);
+ if (ret)
+ return ret;
+ target = NV_DMA_TARGET_PCI;
+ } else {
+ target = NV_DMA_TARGET_AGP;
+ }
+ } else
+ if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
+ target = NV_DMA_TARGET_PCI_NONLINEAR;
+ } else {
+ DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
+ chan->notifier_block->flags);
+ return -EINVAL;
+ }
+ offset += mem->start;
+
+ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset, mem->size,
+ NV_DMA_ACCESS_RW, target, &nobj))) {
+ nouveau_mem_free_block(mem);
+ DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) {
+ nouveau_gpuobj_del(dev, &nobj);
+ nouveau_mem_free_block(mem);
+ DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ *b_offset = mem->start;
+ return 0;
+}
+
+int
+nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
+
+ ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset);
+ if (ret)
+ return ret;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
new file mode 100644
index 0000000..ea2ed5a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -0,0 +1,1173 @@
+/*
+ * Copyright (C) 2006 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Ben Skeggs <darktama@iinet.net.au>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/* NVidia uses context objects to drive drawing operations.
+
+ Context objects can be selected into 8 subchannels in the FIFO,
+ and then used via DMA command buffers.
+
+   A context object is referenced by a user-defined handle (CARD32). The HW
+   looks up graphics objects in a hash table in the instance RAM.
+
+   An entry in the hash table consists of 2 CARD32s. The first CARD32 contains
+   the handle, the second a bitfield that contains the address of the
+   object in instance RAM.
+
+ The format of the second CARD32 seems to be:
+
+ NV4 to NV30:
+
+ 15: 0 instance_addr >> 4
+ 17:16 engine (here uses 1 = graphics)
+ 28:24 channel id (here uses 0)
+ 31 valid (use 1)
+
+ NV40:
+
+ 15: 0 instance_addr >> 4 (maybe 19-0)
+ 21:20 engine (here uses 1 = graphics)
+ I'm unsure about the other bits, but using 0 seems to work.
+
+ The key into the hash table depends on the object handle and channel id and
+ is given as:
+*/
+static uint32_t
+nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ uint32_t hash = 0;
+ int i;
+
+ DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);
+
+ for (i=32;i>0;i-=dev_priv->ramht_bits) {
+ hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
+ handle >>= dev_priv->ramht_bits;
+ }
+ if (dev_priv->card_type < NV_50)
+ hash ^= channel << (dev_priv->ramht_bits - 4);
+ hash <<= 3;
+
+ DRM_DEBUG("hash=0x%08x\n", hash);
+ return hash;
+}
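+
+/* Illustrative example of the hash above, assuming the default 4KiB RAMHT
+ * (ramht_bits = 9): for handle 0x12345678 on channel 1,
+ *
+ *   0x078 ^ 0x02b ^ 0x08d ^ 0x002 = 0x0dc   (9-bit slices of the handle)
+ *   0x0dc ^ (1 << 5)              = 0x0fc   (mix in the channel id)
+ *   0x0fc << 3                    = 0x7e0   (byte offset of an 8-byte entry)
+ *
+ * so nouveau_ramht_insert()/remove() start their search at RAMHT offset
+ * 0x7e0 and walk forward 8 bytes at a time on collisions.
+ */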
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ uint32_t offset)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ struct nouveau_gpuobj *gpuobj = ref->gpuobj;
+ uint32_t ctx, co, ho;
+
+ if (!ramht) {
+ DRM_ERROR("No hash table!\n");
+ return -EINVAL;
+ }
+
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+ (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (ref->instance >> 4) |
+ (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ ctx = (ref->instance >> 4) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ }
+
+ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ ref->channel, co, ref->handle, ctx);
+ INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
+ INSTANCE_WR(ramht, (co + 4)/4, ctx);
+
+ list_add_tail(&ref->list, &chan->ramht_refs);
+ return 0;
+ }
+ DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
+ ref->channel, co, INSTANCE_RD(ramht, co/4));
+
+ co += 8;
+ if (co >= dev_priv->ramht_size) {
+ DRM_INFO("no space left after collision\n");
+ co = 0;
+			/* bail out here: wrapping around and continuing the
+			 * search has been seen to crash with nouveau_demo and
+			 * a 0xdead0001 object */
+ break;
+ }
+ } while (co != ho);
+
+ DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
+ return -ENOMEM;
+}
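+
+/* Worked example of the context word above (values chosen for illustration):
+ * on NV04-NV3x, an object at instance address 0x12000, referenced by channel
+ * 2 and owned by the graphics engine, gets
+ *
+ *   ctx = NV_RAMHT_CONTEXT_VALID | (0x12000 >> 4) | (2 << 24) | (1 << 16)
+ *       = 0x82011200
+ *
+ * which is stored in the second CARD32 of the hash table entry.
+ */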
+
+static void
+nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ uint32_t co, ho;
+
+ if (!ramht) {
+ DRM_ERROR("No hash table!\n");
+ return;
+ }
+
+ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
+ DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ ref->channel, co, ref->handle,
+				  INSTANCE_RD(ramht, (co + 4)/4));
+ INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
+ INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
+
+ list_del(&ref->list);
+ return;
+ }
+
+ co += 8;
+ if (co >= dev_priv->ramht_size)
+ co = 0;
+ } while (co != ho);
+
+ DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ ref->channel, ref->handle);
+}
+
+int
+nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags,
+ struct nouveau_gpuobj **gpuobj_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_gpuobj *gpuobj;
+ struct mem_block *pramin = NULL;
+ int ret;
+
+ DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
+ chan ? chan->id : -1, size, align, flags);
+
+ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ if (!gpuobj)
+ return -ENOMEM;
+ DRM_DEBUG("gpuobj %p\n", gpuobj);
+ gpuobj->flags = flags;
+ gpuobj->im_channel = chan ? chan->id : -1;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ /* Choose between global instmem heap, and per-channel private
+ * instmem heap. On <NV50 allow requests for private instmem
+ * to be satisfied from global heap if no per-channel area
+ * available.
+ */
+ if (chan) {
+ if (chan->ramin_heap) {
+ DRM_DEBUG("private heap\n");
+ pramin = chan->ramin_heap;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ DRM_DEBUG("global heap fallback\n");
+ pramin = dev_priv->ramin_heap;
+ }
+ } else {
+ DRM_DEBUG("global heap\n");
+ pramin = dev_priv->ramin_heap;
+ }
+
+ if (!pramin) {
+ DRM_ERROR("No PRAMIN heap!\n");
+ return -EINVAL;
+ }
+
+ if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ /* Allocate a chunk of the PRAMIN aperture */
+ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+ drm_order(align),
+ (struct drm_file *)-2, 0);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
+
+ if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ int i;
+
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ INSTANCE_WR(gpuobj, i/4, 0);
+ }
+
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+int
+nouveau_gpuobj_early_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ if (dev_priv->card_type < NV_50) {
+ if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
+ ~0, dev_priv->ramht_size,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->ramht, NULL)))
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ nouveau_gpuobj_del(dev, &dev_priv->ramht);
+}
+
+void
+nouveau_gpuobj_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct list_head *entry, *tmp;
+
+ DRM_DEBUG("\n");
+
+ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
+ gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
+
+ DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
+ gpuobj, gpuobj->refcount);
+ gpuobj->refcount = 0;
+ nouveau_gpuobj_del(dev, &gpuobj);
+ }
+}
+
+int
+nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_gpuobj *gpuobj;
+
+ DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+ if (!dev_priv || !pgpuobj || !(*pgpuobj))
+ return -EINVAL;
+ gpuobj = *pgpuobj;
+
+ if (gpuobj->refcount != 0) {
+ DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
+ return -EINVAL;
+ }
+
+ if (gpuobj->dtor)
+ gpuobj->dtor(dev, gpuobj);
+
+ if (gpuobj->im_backing) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ drm_free(gpuobj->im_backing,
+ sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
+ else
+ engine->instmem.clear(dev, gpuobj);
+ }
+
+ if (gpuobj->im_pramin) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
+ DRM_MEM_DRIVER);
+ else
+ nouveau_mem_free_block(gpuobj->im_pramin);
+ }
+
+ list_del(&gpuobj->list);
+
+ *pgpuobj = NULL;
+ drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(struct drm_device *dev,
+ struct nouveau_channel *chan,
+ struct nouveau_gpuobj *gpuobj, uint32_t *inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *cpramin;
+
+ /* <NV50 use PRAMIN address everywhere */
+ if (dev_priv->card_type < NV_50) {
+ *inst = gpuobj->im_pramin->start;
+ return 0;
+ }
+
+ if (chan && gpuobj->im_channel != chan->id) {
+ DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
+ gpuobj->im_channel, chan->id);
+ return -EINVAL;
+ }
+
+ /* NV50 channel-local instance */
+	if (chan) {
+ cpramin = chan->ramin->gpuobj;
+ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+ return 0;
+ }
+
+ /* NV50 global (VRAM) instance */
+ if (gpuobj->im_channel < 0) {
+ /* ...from global heap */
+ if (!gpuobj->im_backing) {
+ DRM_ERROR("AII, no VRAM backing gpuobj\n");
+ return -EINVAL;
+ }
+ *inst = gpuobj->im_backing->start;
+ return 0;
+ } else {
+ /* ...from local heap */
+ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
+ *inst = cpramin->im_backing->start +
+ (gpuobj->im_pramin->start - cpramin->im_pramin->start);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t handle, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_ref *ref;
+ uint32_t instance;
+ int ret;
+
+ DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
+ chan ? chan->id : -1, handle, gpuobj);
+
+ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+ return -EINVAL;
+
+ if (!chan && !ref_ret)
+ return -EINVAL;
+
+ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
+ if (ret)
+ return ret;
+
+ ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
+ if (!ref)
+ return -ENOMEM;
+ ref->gpuobj = gpuobj;
+ ref->channel = chan ? chan->id : -1;
+ ref->instance = instance;
+
+ if (!ref_ret) {
+ ref->handle = handle;
+
+ ret = nouveau_ramht_insert(dev, ref);
+ if (ret) {
+ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
+ return ret;
+ }
+ } else {
+ ref->handle = ~0;
+ *ref_ret = ref;
+ }
+
+ ref->gpuobj->refcount++;
+ return 0;
+}
+
+int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
+{
+ struct nouveau_gpuobj_ref *ref;
+
+ DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
+
+ if (!dev || !pref || *pref == NULL)
+ return -EINVAL;
+ ref = *pref;
+
+ if (ref->handle != ~0)
+ nouveau_ramht_remove(dev, ref);
+
+ if (ref->gpuobj) {
+ ref->gpuobj->refcount--;
+
+ if (ref->gpuobj->refcount == 0) {
+ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+ nouveau_gpuobj_del(dev, &ref->gpuobj);
+ }
+ }
+
+ *pref = NULL;
+	drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
+ return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(struct drm_device *dev,
+ struct nouveau_channel *oc, struct nouveau_channel *rc,
+ uint32_t handle, int size, int align, uint32_t flags,
+ struct nouveau_gpuobj_ref **ref)
+{
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
+ return ret;
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct nouveau_gpuobj_ref *ref;
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ if (ref->handle == handle) {
+ if (ref_ret)
+ *ref_ret = ref;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
+ uint32_t b_offset, uint32_t size,
+ uint32_t flags, struct nouveau_gpuobj **pgpuobj,
+ struct nouveau_gpuobj_ref **pref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int i;
+
+ DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
+ p_offset, b_offset, size, flags);
+
+ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ if (!gpuobj)
+ return -ENOMEM;
+ DRM_DEBUG("gpuobj %p\n", gpuobj);
+ gpuobj->im_channel = -1;
+ gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ if (p_offset != ~0) {
+ gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
+ DRM_MEM_DRIVER);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->start = p_offset;
+ gpuobj->im_pramin->size = size;
+ }
+
+ if (b_offset != ~0) {
+ gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
+ DRM_MEM_DRIVER);
+ if (!gpuobj->im_backing) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_backing->start = b_offset;
+ gpuobj->im_backing->size = size;
+ }
+
+	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC)) {
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ INSTANCE_WR(gpuobj, i/4, 0);
+ }
+
+ if (pref) {
+ if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return i;
+ }
+ }
+
+ if (pgpuobj)
+ *pgpuobj = gpuobj;
+ return 0;
+}
+
+
+static int
+nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*XXX: dodgy hack for now */
+ if (dev_priv->card_type >= NV_50)
+ return 24;
+ if (dev_priv->card_type >= NV_40)
+ return 32;
+ return 16;
+}
+
+/*
+ DMA objects are used to reference a piece of memory in the
+ framebuffer, PCI or AGP address space. Each object is 16 bytes big
+ and looks as follows:
+
+ entry[0]
+ 11:0 class (seems like I can always use 0 here)
+ 12 page table present?
+ 13 page entry linear?
+ 15:14 access: 0 rw, 1 ro, 2 wo
+ 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
+ 31:20 dma adjust (bits 0-11 of the address)
+ entry[1]
+ dma limit (size of transfer)
+ entry[X]
+ 1 0 readonly, 1 readwrite
+ 31:12 dma frame address of the page (bits 12-31 of the address)
+ entry[N]
+     page table terminator; nvidia uses the same value as the first pte,
+     rivatv uses 0xffffffff
+
+ Non linear page tables need a list of frame addresses afterwards,
+ the rivatv project has some info on this.
+
+ The method below creates a DMA object in instance RAM and returns a handle
+ to it that can be used to set up context objects.
+*/
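+
+/* As an illustration of the layout above (example values only): a read/write
+ * linear ctxdma of class NV_CLASS_DMA_IN_MEMORY (0x3d) covering 64KiB of
+ * VRAM at offset 0x1000 comes out as
+ *
+ *   entry[0] = (1<<12) | (1<<13) | (0<<20) | (0<<14) | (0<<16) | 0x3d
+ *            = 0x0000303d
+ *   entry[1] = 0x0000ffff                  (size - 1)
+ *   entry[2] = entry[3] = 0x00001002       (frame | readwrite pte bit)
+ *
+ * which matches what the <NV50 linear path below writes.
+ */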
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+ uint32_t is_scatter_gather = 0;
+
+ /* Total number of pages covered by the request.
+ */
+ const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+
+ DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+ chan->id, class, offset, size);
+ DRM_DEBUG("access=%d target=%d\n", access, target);
+
+ switch (target) {
+ case NV_DMA_TARGET_AGP:
+ offset += dev_priv->gart_info.aper_base;
+ break;
+ case NV_DMA_TARGET_PCI_NONLINEAR:
+		/* assume the "offset" is a virtual memory address */
+		is_scatter_gather = 1;
+		/* put back the right value */
+		target = NV_DMA_TARGET_PCI;
+ break;
+ default:
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan,
+				 is_scatter_gather ?
+					((page_count << 2) + 12) :
+					nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ DRM_ERROR("Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ uint32_t frame, adjust, pte_flags = 0;
+ adjust = offset & 0x00000fff;
+ if (access != NV_DMA_ACCESS_RO)
+ pte_flags |= (1<<1);
+
+ if ( ! is_scatter_gather )
+ {
+ frame = offset & ~0x00000fff;
+
+ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ INSTANCE_WR(*gpuobj, 1, size - 1);
+ INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
+ INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
+ }
+ else
+ {
+			/* Initial page entry in the scatter-gather area that
+ * corresponds to the base offset
+ */
+ unsigned int idx = offset / PAGE_SIZE;
+
+ uint32_t instance_offset;
+ unsigned int i;
+
+ if ((idx + page_count) > dev->sg->pages) {
+				DRM_ERROR("Requested page range exceeds "
+					  "allocated scatter-gather range!\n");
+ return -E2BIG;
+ }
+
+			DRM_DEBUG("Creating PCI DMA object using virtual zone "
+				  "starting at %#llx, size %d\n",
+				  offset, (uint32_t)size);
+ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1);
+
+
+			/* write starting at the third dword */
+			instance_offset = 2;
+
+			/* for each PAGE, get its bus address, fill in the
+			 * page table entry, and advance */
+ for (i = 0; i < page_count; i++) {
+ if (dev->sg->busaddr[idx] == 0) {
+ dev->sg->busaddr[idx] =
+ pci_map_page(dev->pdev,
+ dev->sg->pagelist[idx],
+ 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+				if (pci_dma_mapping_error(dev->pdev, dev->sg->busaddr[idx])) {
+ return -ENOMEM;
+ }
+ }
+
+ frame = (uint32_t) dev->sg->busaddr[idx];
+ INSTANCE_WR(*gpuobj, instance_offset,
+ frame | pte_flags);
+
+ idx++;
+				instance_offset++;
+ }
+ }
+ } else {
+ uint32_t flags0, flags5;
+
+ if (target == NV_DMA_TARGET_VIDMEM) {
+ flags0 = 0x00190000;
+ flags5 = 0x00010000;
+ } else {
+ flags0 = 0x7fc00000;
+ flags5 = 0x00080000;
+ }
+
+ INSTANCE_WR(*gpuobj, 0, flags0 | class);
+ INSTANCE_WR(*gpuobj, 1, offset + size - 1);
+ INSTANCE_WR(*gpuobj, 2, offset);
+ INSTANCE_WR(*gpuobj, 5, flags5);
+ }
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_SW;
+ (*gpuobj)->class = class;
+ return 0;
+}
+
+int
+nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
+ uint64_t offset, uint64_t size, int access,
+ struct nouveau_gpuobj **gpuobj,
+ uint32_t *o_ret)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+ (dev_priv->card_type >= NV_50 &&
+ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset, size, access,
+ NV_DMA_TARGET_AGP, gpuobj);
+ if (o_ret)
+ *o_ret = 0;
+ } else
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ if (offset & ~0xffffffffULL) {
+ DRM_ERROR("obj offset exceeds 32-bits\n");
+ return -EINVAL;
+ }
+ if (o_ret)
+ *o_ret = (uint32_t)offset;
+ ret = (*gpuobj != NULL) ? 0 : -EINVAL;
+ } else {
+ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+/* Context objects in the instance RAM have the following structure.
+ * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.
+
+ NV4 - NV30:
+
+ entry[0]
+ 11:0 class
+ 12 chroma key enable
+ 13 user clip enable
+ 14 swizzle enable
+ 17:15 patch config:
+ scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
+ 18 synchronize enable
+ 19 endian: 1 big, 0 little
+ 21:20 dither mode
+ 23 single step enable
+ 24 patch status: 0 invalid, 1 valid
+ 25 context_surface 0: 1 valid
+ 26 context surface 1: 1 valid
+ 27 context pattern: 1 valid
+ 28 context rop: 1 valid
+ 29,30 context beta, beta4
+ entry[1]
+ 7:0 mono format
+ 15:8 color format
+ 31:16 notify instance address
+ entry[2]
+ 15:0 dma 0 instance address
+ 31:16 dma 1 instance address
+ entry[3]
+ dma method traps
+
+ NV40:
+   No idea what the exact format is. Here's what can be deduced:
+
+ entry[0]:
+ 11:0 class (maybe uses more bits here?)
+ 17 user clip enable
+ 21:19 patch config
+ 25 patch status valid ?
+ entry[1]:
+ 15:0 DMA notifier (maybe 20:0)
+ entry[2]:
+ 15:0 DMA 0 instance (maybe 20:0)
+ 24 big endian
+ entry[3]:
+ 15:0 DMA 1 instance (maybe 20:0)
+ entry[4]:
+ entry[5]:
+ set to 0?
+*/
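+
+/* Decoding an entry with the NV4-NV30 layout above (purely illustrative):
+ * e.g. a context-surfaces object of class 0x0062 with "patch status: valid"
+ * set has entry[0] = (1 << 24) | 0x062 = 0x01000062; the remaining words
+ * carry the notifier and DMA instance addresses as described above.
+ */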
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ DRM_ERROR("Error creating gpuobj: %d\n", ret);
+ return ret;
+ }
+
+ if (dev_priv->card_type >= NV_50) {
+ INSTANCE_WR(*gpuobj, 0, class);
+ INSTANCE_WR(*gpuobj, 5, 0x00010000);
+ } else {
+ switch (class) {
+ case NV_CLASS_NULL:
+ INSTANCE_WR(*gpuobj, 0, 0x00001030);
+ INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
+ break;
+ default:
+ if (dev_priv->card_type >= NV_40) {
+ INSTANCE_WR(*gpuobj, 0, class);
+#ifdef __BIG_ENDIAN
+ INSTANCE_WR(*gpuobj, 2, 0x01000000);
+#endif
+ } else {
+#ifdef __BIG_ENDIAN
+ INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
+#else
+ INSTANCE_WR(*gpuobj, 0, class);
+#endif
+ }
+ }
+ }
+
+ (*gpuobj)->engine = NVOBJ_ENGINE_GR;
+ (*gpuobj)->class = class;
+ return 0;
+}
+
+static int
+nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pramin = NULL;
+ int size, base, ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ /* Base amount for object storage (4KiB enough?) */
+ size = 0x1000;
+ base = 0;
+
+ /* PGRAPH context */
+
+ if (dev_priv->card_type == NV_50) {
+ /* Various fixed table thingos */
+ size += 0x1400; /* mostly unknown stuff */
+ size += 0x4000; /* vm pd */
+ base = 0x6000;
+ /* RAMHT, not sure about setting size yet, 32KiB to be safe */
+ size += 0x8000;
+ /* RAMFC */
+ size += 0x1000;
+ /* PGRAPH context */
+ size += 0x60000;
+ }
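+
+	/* For reference, on NV50 this works out to 0x6f400 bytes (~445KiB)
+	 * of PRAMIN per channel, most of it taken by the 0x60000-byte
+	 * PGRAPH context.
+	 */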
+
+ DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+ chan->id, size, base);
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
+ &chan->ramin);
+ if (ret) {
+ DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
+ return ret;
+ }
+ pramin = chan->ramin->gpuobj;
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap,
+ pramin->im_pramin->start + base, size);
+ if (ret) {
+ DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
+ uint32_t vram_h, uint32_t tt_h)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *vram = NULL, *tt = NULL;
+ int ret, i;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+
+ /* Reserve a block of PRAMIN for the channel
+ *XXX: maybe on <NV50 too at some point
+ */
+ if (0 || dev_priv->card_type == NV_50) {
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret)
+ return ret;
+ }
+
+ /* NV50 VM
+ * - Allocate per-channel page-directory
+ * - Point offset 0-512MiB at shared PCIEGART table
+ * - Point offset 512-1024MiB at shared VRAM table
+ */
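+	/* The page directory below is written 8 bytes per entry: entry 0
+	 * covers the GART range and entry 1 the VRAM range, which implies
+	 * each PDE spans 512MiB of the channel's virtual address space.
+	 */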
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vm_offset;
+
+ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+ vm_offset += chan->ramin->gpuobj->im_pramin->start;
+ if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
+ 0, &chan->vm_pd, NULL)))
+ return ret;
+ for (i=0; i<0x4000; i+=8) {
+ INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
+ INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt)))
+ return ret;
+ INSTANCE_WR(chan->vm_pd, (0+0)/4,
+ chan->vm_gart_pt->instance | 0x03);
+ INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->vm_vram_pt,
+ &chan->vm_vram_pt)))
+ return ret;
+ INSTANCE_WR(chan->vm_pd, (8+0)/4,
+ chan->vm_vram_pt->instance | 0x61);
+ INSTANCE_WR(chan->vm_pd, (8+4)/4, 0x00000000);
+ }
+
+ /* RAMHT */
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
+ 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ }
+
+ /* VRAM ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, 0x100000000ULL,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_AGP, &vram);
+ if (ret) {
+ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+ } else
+ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram))) {
+ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
+ DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ /* TT memory ctxdma */
+ if (dev_priv->card_type >= NV_50) {
+ tt = vram;
+ } else
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &tt, NULL);
+ } else
+ if (dev_priv->pci_heap) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev->sg->pages * PAGE_SIZE,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_PCI_NONLINEAR, &tt);
+ } else {
+ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+ ret = -EINVAL;
+ }
+
+ if (ret) {
+ DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
+ if (ret) {
+ DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct list_head *entry, *tmp;
+ struct nouveau_gpuobj_ref *ref;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ nouveau_gpuobj_ref_del(dev, &ref);
+ }
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramht);
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+ nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt);
+
+ if (chan->ramin_heap)
+ nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+
+}
+
+int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct nouveau_channel *chan;
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_gpuobj *gr = NULL;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
+
+	/* FIXME: check args, only allow trusted objects to be created */
+
+ if (init->handle == ~0)
+ return -EINVAL;
+
+ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ return -EEXIST;
+
+ ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
+ if (ret) {
+ DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ return ret;
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
+		DRM_ERROR("Error referencing gr object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ nouveau_gpuobj_del(dev, &gr);
+ return ret;
+ }
+
+ return 0;
+}
+
+int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_gpuobj_ref *ref;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+
+ if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
+ return ret;
+ nouveau_gpuobj_ref_del(dev, &ref);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
new file mode 100644
index 0000000..1ae0177
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -0,0 +1,593 @@
+
+
+#define NV03_BOOT_0 0x00100000
+# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
+# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
+# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
+# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
+# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+
+#define NV04_FIFO_DATA 0x0010020c
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+
+#define NV_RAMIN 0x00700000
+
+#define NV_RAMHT_HANDLE_OFFSET 0
+#define NV_RAMHT_CONTEXT_OFFSET 4
+# define NV_RAMHT_CONTEXT_VALID (1<<31)
+# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24
+# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16
+# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0
+# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1
+# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23
+# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20
+# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0
+
+/* DMA object defines */
+#define NV_DMA_ACCESS_RW 0
+#define NV_DMA_ACCESS_RO 1
+#define NV_DMA_ACCESS_WO 2
+#define NV_DMA_TARGET_VIDMEM 0
+#define NV_DMA_TARGET_PCI 2
+#define NV_DMA_TARGET_AGP 3
+/* The following is not a real value used by nvidia cards; it is translated
+ * into a real target by nouveau_gpuobj_dma_new() */
+#define NV_DMA_TARGET_PCI_NONLINEAR 8
+
+/* Some object classes we care about in the drm */
+#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
+#define NV_CLASS_DMA_TO_MEMORY 0x00000003
+#define NV_CLASS_NULL 0x00000030
+#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
+
+#define NV03_USER(i) (0x00800000+(i*NV03_USER_SIZE))
+#define NV03_USER__SIZE 16
+#define NV10_USER__SIZE 32
+#define NV03_USER_SIZE 0x00010000
+#define NV03_USER_DMA_PUT(i) (0x00800040+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_PUT__SIZE 16
+#define NV10_USER_DMA_PUT__SIZE 32
+#define NV03_USER_DMA_GET(i) (0x00800044+(i*NV03_USER_SIZE))
+#define NV03_USER_DMA_GET__SIZE 16
+#define NV10_USER_DMA_GET__SIZE 32
+#define NV03_USER_REF_CNT(i) (0x00800048+(i*NV03_USER_SIZE))
+#define NV03_USER_REF_CNT__SIZE 16
+#define NV10_USER_REF_CNT__SIZE 32
+
+#define NV40_USER(i) (0x00c00000+(i*NV40_USER_SIZE))
+#define NV40_USER_SIZE 0x00001000
+#define NV40_USER_DMA_PUT(i) (0x00c00040+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_PUT__SIZE 32
+#define NV40_USER_DMA_GET(i) (0x00c00044+(i*NV40_USER_SIZE))
+#define NV40_USER_DMA_GET__SIZE 32
+#define NV40_USER_REF_CNT(i) (0x00c00048+(i*NV40_USER_SIZE))
+#define NV40_USER_REF_CNT__SIZE 32
+
+#define NV50_USER(i) (0x00c00000+(i*NV50_USER_SIZE))
+#define NV50_USER_SIZE 0x00002000
+#define NV50_USER_DMA_PUT(i) (0x00c00040+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_PUT__SIZE 128
+#define NV50_USER_DMA_GET(i) (0x00c00044+(i*NV50_USER_SIZE))
+#define NV50_USER_DMA_GET__SIZE 128
+/*XXX: I don't think this actually exists.. */
+#define NV50_USER_REF_CNT(i) (0x00c00048+(i*NV50_USER_SIZE))
+#define NV50_USER_REF_CNT__SIZE 128
+
+#define NV03_FIFO_SIZE 0x8000UL
+
+#define NV03_PMC_BOOT_0 0x00000000
+#define NV03_PMC_BOOT_1 0x00000004
+#define NV03_PMC_INTR_0 0x00000100
+# define NV_PMC_INTR_0_PFIFO_PENDING (1<< 8)
+# define NV_PMC_INTR_0_PGRAPH_PENDING (1<<12)
+# define NV_PMC_INTR_0_NV50_I2C_PENDING (1<<21)
+# define NV_PMC_INTR_0_CRTC0_PENDING (1<<24)
+# define NV_PMC_INTR_0_CRTC1_PENDING (1<<25)
+# define NV_PMC_INTR_0_NV50_DISPLAY_PENDING (1<<26)
+# define NV_PMC_INTR_0_CRTCn_PENDING (3<<24)
+#define NV03_PMC_INTR_EN_0 0x00000140
+# define NV_PMC_INTR_EN_0_MASTER_ENABLE (1<< 0)
+#define NV03_PMC_ENABLE 0x00000200
+# define NV_PMC_ENABLE_PFIFO (1<< 8)
+# define NV_PMC_ENABLE_PGRAPH (1<<12)
+/* Disabling the below bit breaks newer (G7X only?) mobile chipsets,
+ * the card will hang early on in the X init process.
+ */
+# define NV_PMC_ENABLE_UNK13 (1<<13)
+#define NV40_PMC_1700 0x00001700
+#define NV40_PMC_1704 0x00001704
+#define NV40_PMC_1708 0x00001708
+#define NV40_PMC_170C 0x0000170C
+
+/* probably PMC ? */
+#define NV50_PUNK_BAR0_PRAMIN 0x00001700
+#define NV50_PUNK_BAR_CFG_BASE 0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA 0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_UNK1710 0x00001710
+
+#define NV04_PBUS_PCI_NV_1 0x00001804
+#define NV04_PBUS_PCI_NV_19 0x0000184C
+
+#define NV04_PTIMER_INTR_0 0x00009100
+#define NV04_PTIMER_INTR_EN_0 0x00009140
+#define NV04_PTIMER_NUMERATOR 0x00009200
+#define NV04_PTIMER_DENOMINATOR 0x00009210
+#define NV04_PTIMER_TIME_0 0x00009400
+#define NV04_PTIMER_TIME_1 0x00009410
+#define NV04_PTIMER_ALARM_0 0x00009420
+
+#define NV50_I2C_CONTROLLER 0x0000E054
+
+#define NV04_PFB_CFG0 0x00100200
+#define NV04_PFB_CFG1 0x00100204
+#define NV40_PFB_020C 0x0010020C
+#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE 8
+#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
+#define NV10_PFB_CLOSE_PAGE2 0x0010033C
+#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0 12
+#define NV40_PFB_TILE__SIZE_1 15
+#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
+
+#define NV04_PGRAPH_DEBUG_0 0x00400080
+#define NV04_PGRAPH_DEBUG_1 0x00400084
+#define NV04_PGRAPH_DEBUG_2 0x00400088
+#define NV04_PGRAPH_DEBUG_3 0x0040008c
+#define NV10_PGRAPH_DEBUG_4 0x00400090
+#define NV03_PGRAPH_INTR 0x00400100
+#define NV03_PGRAPH_NSTATUS 0x00400104
+# define NV04_PGRAPH_NSTATUS_STATE_IN_USE (1<<11)
+# define NV04_PGRAPH_NSTATUS_INVALID_STATE (1<<12)
+# define NV04_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<13)
+# define NV04_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<14)
+# define NV10_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
+# define NV10_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
+# define NV10_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
+# define NV10_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
+#define NV03_PGRAPH_NSOURCE 0x00400108
+# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0)
+# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1)
+# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2)
+# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3)
+# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4)
+# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5)
+# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6)
+# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7)
+# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8)
+# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9)
+# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
+# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
+# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
+# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
+# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
+# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
+# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
+#define NV03_PGRAPH_INTR_EN 0x00400140
+#define NV40_PGRAPH_INTR_EN 0x0040013C
+# define NV_PGRAPH_INTR_NOTIFY (1<< 0)
+# define NV_PGRAPH_INTR_MISSING_HW (1<< 4)
+# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12)
+# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16)
+# define NV_PGRAPH_INTR_ERROR (1<<20)
+#define NV10_PGRAPH_CTX_CONTROL 0x00400144
+#define NV10_PGRAPH_CTX_USER 0x00400148
+#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
+#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
+#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
+#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
+#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
+#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE1 0x00400160
+#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
+#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
+#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
+#define NV04_PGRAPH_CTX_CONTROL 0x00400170
+#define NV04_PGRAPH_CTX_USER 0x00400174
+#define NV04_PGRAPH_CTX_CACHE1 0x00400180
+#define NV10_PGRAPH_CTX_CACHE2 0x00400180
+#define NV03_PGRAPH_CTX_CONTROL 0x00400190
+#define NV03_PGRAPH_CTX_USER 0x00400194
+#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
+#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
+#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
+#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
+#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
+#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
+#define NV40_PGRAPH_CTXCTL_0304 0x00400304
+#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
+#define NV40_PGRAPH_CTXCTL_0310 0x00400310
+#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
+#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
+#define NV40_PGRAPH_CTXCTL_030C 0x0040030c
+#define NV40_PGRAPH_CTXCTL_UCODE_INDEX 0x00400324
+#define NV40_PGRAPH_CTXCTL_UCODE_DATA 0x00400328
+#define NV40_PGRAPH_CTXCTL_CUR 0x0040032c
+#define NV40_PGRAPH_CTXCTL_CUR_LOADED 0x01000000
+#define NV40_PGRAPH_CTXCTL_CUR_INST_MASK 0x000FFFFF
+#define NV03_PGRAPH_ABS_X_RAM 0x00400400
+#define NV03_PGRAPH_ABS_Y_RAM 0x00400480
+#define NV03_PGRAPH_X_MISC 0x00400500
+#define NV03_PGRAPH_Y_MISC 0x00400504
+#define NV04_PGRAPH_VALID1 0x00400508
+#define NV04_PGRAPH_SOURCE_COLOR 0x0040050C
+#define NV04_PGRAPH_MISC24_0 0x00400510
+#define NV03_PGRAPH_XY_LOGIC_MISC0 0x00400514
+#define NV03_PGRAPH_XY_LOGIC_MISC1 0x00400518
+#define NV03_PGRAPH_XY_LOGIC_MISC2 0x0040051C
+#define NV03_PGRAPH_XY_LOGIC_MISC3 0x00400520
+#define NV03_PGRAPH_CLIPX_0 0x00400524
+#define NV03_PGRAPH_CLIPX_1 0x00400528
+#define NV03_PGRAPH_CLIPY_0 0x0040052C
+#define NV03_PGRAPH_CLIPY_1 0x00400530
+#define NV03_PGRAPH_ABS_ICLIP_XMAX 0x00400534
+#define NV03_PGRAPH_ABS_ICLIP_YMAX 0x00400538
+#define NV03_PGRAPH_ABS_UCLIP_XMIN 0x0040053C
+#define NV03_PGRAPH_ABS_UCLIP_YMIN 0x00400540
+#define NV03_PGRAPH_ABS_UCLIP_XMAX 0x00400544
+#define NV03_PGRAPH_ABS_UCLIP_YMAX 0x00400548
+#define NV03_PGRAPH_ABS_UCLIPA_XMIN 0x00400560
+#define NV03_PGRAPH_ABS_UCLIPA_YMIN 0x00400564
+#define NV03_PGRAPH_ABS_UCLIPA_XMAX 0x00400568
+#define NV03_PGRAPH_ABS_UCLIPA_YMAX 0x0040056C
+#define NV04_PGRAPH_MISC24_1 0x00400570
+#define NV04_PGRAPH_MISC24_2 0x00400574
+#define NV04_PGRAPH_VALID2 0x00400578
+#define NV04_PGRAPH_PASSTHRU_0 0x0040057C
+#define NV04_PGRAPH_PASSTHRU_1 0x00400580
+#define NV04_PGRAPH_PASSTHRU_2 0x00400584
+#define NV10_PGRAPH_DIMX_TEXTURE 0x00400588
+#define NV10_PGRAPH_WDIMX_TEXTURE 0x0040058C
+#define NV04_PGRAPH_COMBINE_0_ALPHA 0x00400590
+#define NV04_PGRAPH_COMBINE_0_COLOR 0x00400594
+#define NV04_PGRAPH_COMBINE_1_ALPHA 0x00400598
+#define NV04_PGRAPH_COMBINE_1_COLOR 0x0040059C
+#define NV04_PGRAPH_FORMAT_0 0x004005A8
+#define NV04_PGRAPH_FORMAT_1 0x004005AC
+#define NV04_PGRAPH_FILTER_0 0x004005B0
+#define NV04_PGRAPH_FILTER_1 0x004005B4
+#define NV03_PGRAPH_MONO_COLOR0 0x00400600
+#define NV04_PGRAPH_ROP3 0x00400604
+#define NV04_PGRAPH_BETA_AND 0x00400608
+#define NV04_PGRAPH_BETA_PREMULT 0x0040060C
+#define NV04_PGRAPH_LIMIT_VIOL_PIX 0x00400610
+#define NV04_PGRAPH_FORMATS 0x00400618
+#define NV10_PGRAPH_DEBUG_2 0x00400620
+#define NV04_PGRAPH_BOFFSET0 0x00400640
+#define NV04_PGRAPH_BOFFSET1 0x00400644
+#define NV04_PGRAPH_BOFFSET2 0x00400648
+#define NV04_PGRAPH_BOFFSET3 0x0040064C
+#define NV04_PGRAPH_BOFFSET4 0x00400650
+#define NV04_PGRAPH_BOFFSET5 0x00400654
+#define NV04_PGRAPH_BBASE0 0x00400658
+#define NV04_PGRAPH_BBASE1 0x0040065C
+#define NV04_PGRAPH_BBASE2 0x00400660
+#define NV04_PGRAPH_BBASE3 0x00400664
+#define NV04_PGRAPH_BBASE4 0x00400668
+#define NV04_PGRAPH_BBASE5 0x0040066C
+#define NV04_PGRAPH_BPITCH0 0x00400670
+#define NV04_PGRAPH_BPITCH1 0x00400674
+#define NV04_PGRAPH_BPITCH2 0x00400678
+#define NV04_PGRAPH_BPITCH3 0x0040067C
+#define NV04_PGRAPH_BPITCH4 0x00400680
+#define NV04_PGRAPH_BLIMIT0 0x00400684
+#define NV04_PGRAPH_BLIMIT1 0x00400688
+#define NV04_PGRAPH_BLIMIT2 0x0040068C
+#define NV04_PGRAPH_BLIMIT3 0x00400690
+#define NV04_PGRAPH_BLIMIT4 0x00400694
+#define NV04_PGRAPH_BLIMIT5 0x00400698
+#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
+#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
+#define NV03_PGRAPH_STATUS 0x004006B0
+#define NV04_PGRAPH_STATUS 0x00400700
+#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
+#define NV04_PGRAPH_SURFACE 0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
+#define NV04_PGRAPH_STATE 0x00400710
+#define NV10_PGRAPH_SURFACE 0x00400710
+#define NV04_PGRAPH_NOTIFY 0x00400714
+#define NV10_PGRAPH_STATE 0x00400714
+#define NV10_PGRAPH_NOTIFY 0x00400718
+
+#define NV04_PGRAPH_FIFO 0x00400720
+
+#define NV04_PGRAPH_BPIXEL 0x00400724
+#define NV10_PGRAPH_RDI_INDEX 0x00400750
+#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
+#define NV10_PGRAPH_RDI_DATA 0x00400754
+#define NV04_PGRAPH_DMA_PITCH 0x00400760
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_DMA_PITCH 0x00400770
+#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
+#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
+#define NV20_PGRAPH_CHANNEL_CTX_TABLE 0x00400780
+#define NV20_PGRAPH_CHANNEL_CTX_POINTER 0x00400784
+#define NV20_PGRAPH_CHANNEL_CTX_XFER 0x00400788
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD 0x00000001
+#define NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE 0x00000002
+#define NV04_PGRAPH_PATT_COLOR0 0x00400800
+#define NV04_PGRAPH_PATT_COLOR1 0x00400804
+#define NV04_PGRAPH_PATTERN 0x00400808
+#define NV04_PGRAPH_PATTERN_SHAPE 0x00400810
+#define NV04_PGRAPH_CHROMA 0x00400814
+#define NV04_PGRAPH_CONTROL0 0x00400818
+#define NV04_PGRAPH_CONTROL1 0x0040081C
+#define NV04_PGRAPH_CONTROL2 0x00400820
+#define NV04_PGRAPH_BLEND 0x00400824
+#define NV04_PGRAPH_STORED_FMT 0x00400830
+#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
+#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
+#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
+#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
+#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
+#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
+#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
+#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
+#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
+#define NV04_PGRAPH_U_RAM 0x00400D00
+#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
+#define NV04_PGRAPH_V_RAM 0x00400D40
+#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
+#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
+#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
+#define NV10_PGRAPH_XFMODE0 0x00400F40
+#define NV10_PGRAPH_XFMODE1 0x00400F44
+#define NV10_PGRAPH_GLOBALSTATE0 0x00400F48
+#define NV10_PGRAPH_GLOBALSTATE1 0x00400F4C
+#define NV10_PGRAPH_PIPE_ADDRESS 0x00400F50
+#define NV10_PGRAPH_PIPE_DATA 0x00400F54
+#define NV04_PGRAPH_DMA_START_0 0x00401000
+#define NV04_PGRAPH_DMA_START_1 0x00401004
+#define NV04_PGRAPH_DMA_LENGTH 0x00401008
+#define NV04_PGRAPH_DMA_MISC 0x0040100C
+#define NV04_PGRAPH_DMA_DATA_0 0x00401020
+#define NV04_PGRAPH_DMA_DATA_1 0x00401024
+#define NV04_PGRAPH_DMA_RM 0x00401030
+#define NV04_PGRAPH_DMA_A_XLATE_INST 0x00401040
+#define NV04_PGRAPH_DMA_A_CONTROL 0x00401044
+#define NV04_PGRAPH_DMA_A_LIMIT 0x00401048
+#define NV04_PGRAPH_DMA_A_TLB_PTE 0x0040104C
+#define NV04_PGRAPH_DMA_A_TLB_TAG 0x00401050
+#define NV04_PGRAPH_DMA_A_ADJ_OFFSET 0x00401054
+#define NV04_PGRAPH_DMA_A_OFFSET 0x00401058
+#define NV04_PGRAPH_DMA_A_SIZE 0x0040105C
+#define NV04_PGRAPH_DMA_A_Y_SIZE 0x00401060
+#define NV04_PGRAPH_DMA_B_XLATE_INST 0x00401080
+#define NV04_PGRAPH_DMA_B_CONTROL 0x00401084
+#define NV04_PGRAPH_DMA_B_LIMIT 0x00401088
+#define NV04_PGRAPH_DMA_B_TLB_PTE 0x0040108C
+#define NV04_PGRAPH_DMA_B_TLB_TAG 0x00401090
+#define NV04_PGRAPH_DMA_B_ADJ_OFFSET 0x00401094
+#define NV04_PGRAPH_DMA_B_OFFSET 0x00401098
+#define NV04_PGRAPH_DMA_B_SIZE 0x0040109C
+#define NV04_PGRAPH_DMA_B_Y_SIZE 0x004010A0
+#define NV40_PGRAPH_TILE1(i) (0x00406900 + (i*16))
+#define NV40_PGRAPH_TLIMIT1(i) (0x00406904 + (i*16))
+#define NV40_PGRAPH_TSIZE1(i) (0x00406908 + (i*16))
+#define NV40_PGRAPH_TSTATUS1(i) (0x0040690C + (i*16))
+
+
+/* It's a guess that this works on NV03. Confirmed on NV04, though */
+#define NV04_PFIFO_DELAY_0 0x00002040
+#define NV04_PFIFO_DMA_TIMESLICE 0x00002044
+#define NV04_PFIFO_NEXT_CHANNEL 0x00002050
+#define NV03_PFIFO_INTR_0 0x00002100
+#define NV03_PFIFO_INTR_EN_0 0x00002140
+# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0)
+# define NV_PFIFO_INTR_RUNOUT (1<< 4)
+# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8)
+# define NV_PFIFO_INTR_DMA_PUSHER (1<<12)
+# define NV_PFIFO_INTR_DMA_PT (1<<16)
+# define NV_PFIFO_INTR_SEMAPHORE (1<<20)
+# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24)
+#define NV03_PFIFO_RAMHT 0x00002210
+#define NV03_PFIFO_RAMFC 0x00002214
+#define NV03_PFIFO_RAMRO 0x00002218
+#define NV40_PFIFO_RAMFC 0x00002220
+#define NV03_PFIFO_CACHES 0x00002500
+#define NV04_PFIFO_MODE 0x00002504
+#define NV04_PFIFO_DMA 0x00002508
+#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
+#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
+#define NV03_PFIFO_CACHE0_PULL0 0x00003040
+#define NV04_PFIFO_CACHE0_PULL0 0x00003050
+#define NV04_PFIFO_CACHE0_PULL1 0x00003054
+#define NV03_PFIFO_CACHE1_PUSH0 0x00003200
+#define NV03_PFIFO_CACHE1_PUSH1 0x00003204
+#define NV03_PFIFO_CACHE1_PUSH1_DMA (1<<8)
+#define NV40_PFIFO_CACHE1_PUSH1_DMA (1<<16)
+#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000000f
+#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000001f
+#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK 0x0000007f
+#define NV03_PFIFO_CACHE1_PUT 0x00003210
+#define NV04_PFIFO_CACHE1_DMA_PUSH 0x00003220
+#define NV04_PFIFO_CACHE1_DMA_FETCH 0x00003224
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_16_BYTES 0x00000008
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_24_BYTES 0x00000010
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_32_BYTES 0x00000018
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_40_BYTES 0x00000020
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_48_BYTES 0x00000028
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_56_BYTES 0x00000030
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_64_BYTES 0x00000038
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_72_BYTES 0x00000040
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_80_BYTES 0x00000048
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_88_BYTES 0x00000050
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_96_BYTES 0x00000058
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_104_BYTES 0x00000060
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES 0x00000068
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_120_BYTES 0x00000070
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES 0x00000078
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_136_BYTES 0x00000080
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_144_BYTES 0x00000088
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_152_BYTES 0x00000090
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_160_BYTES 0x00000098
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_168_BYTES 0x000000A0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_176_BYTES 0x000000A8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_184_BYTES 0x000000B0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_192_BYTES 0x000000B8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_200_BYTES 0x000000C0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_208_BYTES 0x000000C8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_216_BYTES 0x000000D0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_224_BYTES 0x000000D8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_232_BYTES 0x000000E0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_240_BYTES 0x000000E8
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_248_BYTES 0x000000F0
+# define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_256_BYTES 0x000000F8
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_32_BYTES 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_64_BYTES 0x00002000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_96_BYTES 0x00004000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES 0x00006000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_160_BYTES 0x00008000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_192_BYTES 0x0000A000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_224_BYTES 0x0000C000
+# define NV_PFIFO_CACHE1_DMA_FETCH_SIZE_256_BYTES 0x0000E000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS 0x001F0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_0 0x00000000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_1 0x00010000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_2 0x00020000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_3 0x00030000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 0x00040000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_5 0x00050000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_6 0x00060000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_7 0x00070000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 0x00080000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_9 0x00090000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_10 0x000A0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_11 0x000B0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_12 0x000C0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_13 0x000D0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_14 0x000E0000
+# define NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_15 0x000F0000
+# define NV_PFIFO_CACHE1_ENDIAN 0x80000000
+# define NV_PFIFO_CACHE1_LITTLE_ENDIAN 0x7FFFFFFF
+# define NV_PFIFO_CACHE1_BIG_ENDIAN 0x80000000
+#define NV04_PFIFO_CACHE1_DMA_STATE 0x00003228
+#define NV04_PFIFO_CACHE1_DMA_INSTANCE 0x0000322c
+#define NV04_PFIFO_CACHE1_DMA_CTL 0x00003230
+#define NV04_PFIFO_CACHE1_DMA_PUT 0x00003240
+#define NV04_PFIFO_CACHE1_DMA_GET 0x00003244
+#define NV10_PFIFO_CACHE1_REF_CNT 0x00003248
+#define NV10_PFIFO_CACHE1_DMA_SUBROUTINE 0x0000324C
+#define NV03_PFIFO_CACHE1_PULL0 0x00003240
+#define NV04_PFIFO_CACHE1_PULL0 0x00003250
+#define NV03_PFIFO_CACHE1_PULL1 0x00003250
+#define NV04_PFIFO_CACHE1_PULL1 0x00003254
+#define NV04_PFIFO_CACHE1_HASH 0x00003258
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT 0x00003260
+#define NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP 0x00003264
+#define NV10_PFIFO_CACHE1_ACQUIRE_VALUE 0x00003268
+#define NV10_PFIFO_CACHE1_SEMAPHORE 0x0000326C
+#define NV03_PFIFO_CACHE1_GET 0x00003270
+#define NV04_PFIFO_CACHE1_ENGINE 0x00003280
+#define NV04_PFIFO_CACHE1_DMA_DCOUNT 0x000032A0
+#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0
+#define NV40_PFIFO_UNK32E4 0x000032E4
+#define NV04_PFIFO_CACHE1_METHOD(i) (0x00003800+(i*8))
+#define NV04_PFIFO_CACHE1_DATA(i) (0x00003804+(i*8))
+#define NV40_PFIFO_CACHE1_METHOD(i) (0x00090000+(i*8))
+#define NV40_PFIFO_CACHE1_DATA(i) (0x00090004+(i*8))
+
+#define NV_CRTC0_INTSTAT 0x00600100
+#define NV_CRTC0_INTEN 0x00600140
+#define NV_CRTC1_INTSTAT 0x00602100
+#define NV_CRTC1_INTEN 0x00602140
+# define NV_CRTC_INTR_VBLANK (1<<0)
+
+/* This name is a partial guess. */
+#define NV50_DISPLAY_SUPERVISOR 0x00610024
+
+/* FIFO commands. These are neither registers nor masks */
+#define NV03_FIFO_CMD_JUMP 0x20000000
+#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc
+#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK))
+
+/* RAMFC offsets */
+#define NV04_RAMFC_DMA_PUT 0x00
+#define NV04_RAMFC_DMA_GET 0x04
+#define NV04_RAMFC_DMA_INSTANCE 0x08
+#define NV04_RAMFC_DMA_STATE 0x0C
+#define NV04_RAMFC_DMA_FETCH 0x10
+#define NV04_RAMFC_ENGINE 0x14
+#define NV04_RAMFC_PULL1_ENGINE 0x18
+
+#define NV10_RAMFC_DMA_PUT 0x00
+#define NV10_RAMFC_DMA_GET 0x04
+#define NV10_RAMFC_REF_CNT 0x08
+#define NV10_RAMFC_DMA_INSTANCE 0x0C
+#define NV10_RAMFC_DMA_STATE 0x10
+#define NV10_RAMFC_DMA_FETCH 0x14
+#define NV10_RAMFC_ENGINE 0x18
+#define NV10_RAMFC_PULL1_ENGINE 0x1C
+#define NV10_RAMFC_ACQUIRE_VALUE 0x20
+#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24
+#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28
+#define NV10_RAMFC_SEMAPHORE 0x2C
+#define NV10_RAMFC_DMA_SUBROUTINE 0x30
+
+#define NV40_RAMFC_DMA_PUT 0x00
+#define NV40_RAMFC_DMA_GET 0x04
+#define NV40_RAMFC_REF_CNT 0x08
+#define NV40_RAMFC_DMA_INSTANCE 0x0C
+#define NV40_RAMFC_DMA_DCOUNT /* ? */ 0x10
+#define NV40_RAMFC_DMA_STATE 0x14
+#define NV40_RAMFC_DMA_FETCH 0x18
+#define NV40_RAMFC_ENGINE 0x1C
+#define NV40_RAMFC_PULL1_ENGINE 0x20
+#define NV40_RAMFC_ACQUIRE_VALUE 0x24
+#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28
+#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C
+#define NV40_RAMFC_SEMAPHORE 0x30
+#define NV40_RAMFC_DMA_SUBROUTINE 0x34
+#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38
+#define NV40_RAMFC_DMA_TIMESLICE 0x3C
+#define NV40_RAMFC_UNK_40 0x40
+#define NV40_RAMFC_UNK_44 0x44
+#define NV40_RAMFC_UNK_48 0x48
+#define NV40_RAMFC_UNK_4C 0x4C
+#define NV40_RAMFC_UNK_50 0x50
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
new file mode 100644
index 0000000..b35bfb7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -0,0 +1,342 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <linux/pagemap.h>
+
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+ struct drm_ttm_backend backend;
+ struct drm_device *dev;
+
+ int pages;
+ int pages_populated;
+ dma_addr_t *pagelist;
+ int is_bound;
+
+ unsigned int pte_start;
+};
+
+static int
+nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
+{
+ return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int
+nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
+ struct page **pages, struct page *dummy_read_page)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ int p, d, o;
+
+ DRM_DEBUG("num_pages = %ld\n", num_pages);
+
+ if (nvbe->pagelist)
+ return -EINVAL;
+ nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
+	nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
+				   DRM_MEM_PAGES);
+	if (!nvbe->pagelist)
+		return -ENOMEM;
+
+ nvbe->pages_populated = d = 0;
+ for (p = 0; p < num_pages; p++) {
+ for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+ struct page *page = pages[p];
+ if (!page)
+ page = dummy_read_page;
+ nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
+ page, o,
+ NV_CTXDMA_PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(nvbe->dev->pdev, nvbe->pagelist[d])) {
+ be->func->clear(be);
+ DRM_ERROR("pci_map_page failed\n");
+ return -EINVAL;
+ }
+ nvbe->pages_populated = ++d;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct drm_ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ int d;
+
+ DRM_DEBUG("\n");
+
+ if (nvbe && nvbe->pagelist) {
+ if (nvbe->is_bound)
+ be->func->unbind(be);
+
+ for (d = 0; d < nvbe->pages_populated; d++) {
+ pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
+ NV_CTXDMA_PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
+ DRM_MEM_PAGES);
+ }
+}
+
+static int
+nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
+ uint32_t i;
+
+	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
+		  offset, (mem->flags & DRM_BO_FLAG_CACHED) != 0);
+
+ if (offset & NV_CTXDMA_PAGE_MASK)
+ return -EINVAL;
+ nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50)
+ nvbe->pte_start += 2; /* skip ctxdma header */
+
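+	/* Pre-NV50 cards use one 32-bit PTE per 4KiB page; NV50 uses a
+	 * two-word entry, hence the (i << 1) indexing in the else branch. */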
+ for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
+ uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];
+
+ if (pteval & NV_CTXDMA_PAGE_MASK) {
+ DRM_ERROR("Bad pteval 0x%llx\n", pteval);
+ return -EINVAL;
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ INSTANCE_WR(gpuobj, i, pteval | 3);
+ } else {
+ INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
+ INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
+ }
+ }
+
+ nvbe->is_bound = 1;
+ return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct drm_ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ if (nvbe->is_bound) {
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned int pte;
+
+ pte = nvbe->pte_start;
+ while (pte < (nvbe->pte_start + nvbe->pages)) {
+ uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
+
+ if (dev_priv->card_type < NV_50) {
+ INSTANCE_WR(gpuobj, pte, pteval | 3);
+ } else {
+ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
+ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
+ }
+
+ pte++;
+ }
+
+ nvbe->is_bound = 0;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct drm_ttm_backend *be)
+{
+ DRM_DEBUG("\n");
+ if (be) {
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ if (nvbe) {
+ if (nvbe->pagelist)
+ be->func->clear(be);
+ drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
+ }
+ }
+}
+
+static struct drm_ttm_backend_func nouveau_sgdma_backend = {
+ .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust,
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nouveau_sgdma_bind,
+ .unbind = nouveau_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+struct drm_ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_sgdma_be *nvbe;
+
+ if (!dev_priv->gart_info.sg_ctxdma)
+ return NULL;
+
+ nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
+ if (!nvbe)
+ return NULL;
+
+ nvbe->dev = dev;
+
+ nvbe->backend.func = &nouveau_sgdma_backend;
+
+ return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ uint32_t aper_size, obj_size;
+ int i, ret;
+
+ if (dev_priv->card_type < NV_50) {
+ aper_size = (64 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+ obj_size += 8; /* ctxdma header */
+ } else {
+ /* 1 entire VM page table */
+ aper_size = (512 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+ }
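+	/* e.g. pre-NV50: 64MiB/4KiB = 16384 PTEs * 4 bytes + 8 byte header;
+	 * NV50: 512MiB/4KiB = 131072 entries * 8 bytes each. */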
+
+ if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ALLOW_NO_REFS |
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
+ DRM_ERROR("Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
+
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ if (dev_priv->card_type < NV_50) {
+ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
+ * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
+ * on those cards? */
+ INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ INSTANCE_WR(gpuobj, 1, aper_size - 1);
+ for (i=2; i<2+(aper_size>>12); i++) {
+ INSTANCE_WR(gpuobj, i,
+ dev_priv->gart_info.sg_dummy_bus | 3);
+ }
+ } else {
+ for (i=0; i<obj_size; i+=8) {
+ INSTANCE_WR(gpuobj, (i+0)/4,
+ dev_priv->gart_info.sg_dummy_bus | 0x21);
+ INSTANCE_WR(gpuobj, (i+4)/4, 0);
+ }
+ }
+
+ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gart_info.sg_dummy_page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ unlock_page(dev_priv->gart_info.sg_dummy_page);
+ __free_page(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_page = NULL;
+ dev_priv->gart_info.sg_dummy_bus = 0;
+ }
+
+ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_ttm_backend *be;
+ struct drm_scatter_gather sgreq;
+ struct drm_mm_node mm_node;
+ struct drm_bo_mem_reg mem;
+ int ret;
+
+ dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
+ if (!dev_priv->gart_info.sg_be)
+ return -ENOMEM;
+ be = dev_priv->gart_info.sg_be;
+
+ /* Hack the aperture size down to the amount of system memory
+ * we're going to bind into it.
+ */
+ if (dev_priv->gart_info.aper_size > 32*1024*1024)
+ dev_priv->gart_info.aper_size = 32*1024*1024;
+
+ sgreq.size = dev_priv->gart_info.aper_size;
+ if ((ret = drm_sg_alloc(dev, &sgreq))) {
+ DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
+ return ret;
+ }
+ dev_priv->gart_info.sg_handle = sgreq.handle;
+
+ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
+ DRM_ERROR("failed populate: %d\n", ret);
+ return ret;
+ }
+
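+	/* Fake a buffer-object memory region at offset 0 of the aperture so
+	 * that bind() maps the scatter-gather pages at the start of the GART. */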
+ mm_node.start = 0;
+ mem.mm_node = &mm_node;
+
+ if ((ret = be->func->bind(be, &mem))) {
+ DRM_ERROR("failed bind: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
+{
+}
+
+int
+nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ int pte;
+
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50) {
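+		/* +2 skips the two-word ctxdma header, as in nouveau_sgdma_bind(). */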
+ *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ return 0;
+ }
+
+ DRM_ERROR("Unimplemented on NV50\n");
+ return -EINVAL;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
new file mode 100644
index 0000000..8d0430f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -0,0 +1,866 @@
+/*
+ * Copyright 2005 Stephane Marchesin
+ * Copyright 2008 Stuart Bennett
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+static int nouveau_init_card_mappings(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ /* resource 0 is mmio regs */
+ /* resource 1 is linear FB */
+ /* resource 2 is RAMIN (mmio regs + 0x1000000) */
+ /* resource 6 is bios */
+
+ /* map the mmio regs */
+ ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+ drm_get_resource_len(dev, 0),
+ _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
+ if (ret) {
+ DRM_ERROR("Unable to initialize the mmio mapping (%d). "
+ "Please report your setup to " DRIVER_EMAIL "\n",
+ ret);
+ return -EINVAL;
+ }
+ DRM_DEBUG("regs mapped ok at 0x%lx\n", dev_priv->mmio->offset);
+
+ /* map larger RAMIN aperture on NV40 cards */
+ dev_priv->ramin = NULL;
+ if (dev_priv->card_type >= NV_40) {
+ int ramin_resource = 2;
+ if (drm_get_resource_len(dev, ramin_resource) == 0)
+ ramin_resource = 3;
+
+ ret = drm_addmap(dev,
+ drm_get_resource_start(dev, ramin_resource),
+ drm_get_resource_len(dev, ramin_resource),
+ _DRM_REGISTERS, _DRM_READ_ONLY,
+ &dev_priv->ramin);
+ if (ret) {
+ DRM_ERROR("Failed to init RAMIN mapping, "
+ "limited instance memory available\n");
+ dev_priv->ramin = NULL;
+ }
+ }
+
+ /* On older cards (or if the above failed), create a map covering
+ * the BAR0 PRAMIN aperture */
+ if (!dev_priv->ramin) {
+ ret = drm_addmap(dev,
+ drm_get_resource_start(dev, 0) + NV_RAMIN,
+ (1*1024*1024),
+ _DRM_REGISTERS, _DRM_READ_ONLY,
+ &dev_priv->ramin);
+ if (ret) {
+ DRM_ERROR("Failed to map BAR0 PRAMIN: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
+static void nouveau_stub_takedown(struct drm_device *dev) {}
+
+static int nouveau_init_engine_ptrs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x00:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv04_fb_init;
+ engine->fb.takedown = nv04_fb_takedown;
+ engine->graph.init = nv04_graph_init;
+ engine->graph.takedown = nv04_graph_takedown;
+ engine->graph.create_context = nv04_graph_create_context;
+ engine->graph.destroy_context = nv04_graph_destroy_context;
+ engine->graph.load_context = nv04_graph_load_context;
+ engine->graph.save_context = nv04_graph_save_context;
+ engine->fifo.channels = 16;
+ engine->fifo.init = nouveau_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.channel_id = nv04_fifo_channel_id;
+ engine->fifo.create_context = nv04_fifo_create_context;
+ engine->fifo.destroy_context = nv04_fifo_destroy_context;
+ engine->fifo.load_context = nv04_fifo_load_context;
+ engine->fifo.save_context = nv04_fifo_save_context;
+ break;
+ case 0x10:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.init = nv10_graph_init;
+ engine->graph.takedown = nv10_graph_takedown;
+ engine->graph.create_context = nv10_graph_create_context;
+ engine->graph.destroy_context = nv10_graph_destroy_context;
+ engine->graph.load_context = nv10_graph_load_context;
+ engine->graph.save_context = nv10_graph_save_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nouveau_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.save_context = nv10_fifo_save_context;
+ break;
+ case 0x20:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.init = nv20_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.save_context = nv20_graph_save_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nouveau_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.save_context = nv10_fifo_save_context;
+ break;
+ case 0x30:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->mc.init = nv04_mc_init;
+ engine->mc.takedown = nv04_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv10_fb_init;
+ engine->fb.takedown = nv10_fb_takedown;
+ engine->graph.init = nv30_graph_init;
+ engine->graph.takedown = nv20_graph_takedown;
+ engine->graph.create_context = nv20_graph_create_context;
+ engine->graph.destroy_context = nv20_graph_destroy_context;
+ engine->graph.load_context = nv20_graph_load_context;
+ engine->graph.save_context = nv20_graph_save_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nouveau_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv10_fifo_create_context;
+ engine->fifo.destroy_context = nv10_fifo_destroy_context;
+ engine->fifo.load_context = nv10_fifo_load_context;
+ engine->fifo.save_context = nv10_fifo_save_context;
+ break;
+ case 0x40:
+ case 0x60:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
+ engine->mc.init = nv40_mc_init;
+ engine->mc.takedown = nv40_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nv40_fb_init;
+ engine->fb.takedown = nv40_fb_takedown;
+ engine->graph.init = nv40_graph_init;
+ engine->graph.takedown = nv40_graph_takedown;
+ engine->graph.create_context = nv40_graph_create_context;
+ engine->graph.destroy_context = nv40_graph_destroy_context;
+ engine->graph.load_context = nv40_graph_load_context;
+ engine->graph.save_context = nv40_graph_save_context;
+ engine->fifo.channels = 32;
+ engine->fifo.init = nv40_fifo_init;
+ engine->fifo.takedown = nouveau_stub_takedown;
+ engine->fifo.channel_id = nv10_fifo_channel_id;
+ engine->fifo.create_context = nv40_fifo_create_context;
+ engine->fifo.destroy_context = nv40_fifo_destroy_context;
+ engine->fifo.load_context = nv40_fifo_load_context;
+ engine->fifo.save_context = nv40_fifo_save_context;
+ break;
+ case 0x50:
+ case 0x80: /* gotta love NVIDIA's consistency.. */
+ case 0x90:
+ engine->instmem.init = nv50_instmem_init;
+ engine->instmem.takedown= nv50_instmem_takedown;
+ engine->instmem.populate = nv50_instmem_populate;
+ engine->instmem.clear = nv50_instmem_clear;
+ engine->instmem.bind = nv50_instmem_bind;
+ engine->instmem.unbind = nv50_instmem_unbind;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nouveau_stub_init;
+ engine->fb.takedown = nouveau_stub_takedown;
+ engine->graph.init = nv50_graph_init;
+ engine->graph.takedown = nv50_graph_takedown;
+ engine->graph.create_context = nv50_graph_create_context;
+ engine->graph.destroy_context = nv50_graph_destroy_context;
+ engine->graph.load_context = nv50_graph_load_context;
+ engine->graph.save_context = nv50_graph_save_context;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nv50_fifo_init;
+ engine->fifo.takedown = nv50_fifo_takedown;
+ engine->fifo.channel_id = nv50_fifo_channel_id;
+ engine->fifo.create_context = nv50_fifo_create_context;
+ engine->fifo.destroy_context = nv50_fifo_destroy_context;
+ engine->fifo.load_context = nv50_fifo_load_context;
+ engine->fifo.save_context = nv50_fifo_save_context;
+ break;
+ default:
+ DRM_ERROR("NV%02x unsupported\n", dev_priv->chipset);
+ return 1;
+ }
+
+ return 0;
+}
+
+int
+nouveau_card_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine;
+ int ret;
+
+ DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
+ return 0;
+ dev_priv->ttm = 0;
+
+ /* Determine exact chipset we're running on */
+ if (dev_priv->card_type < NV_10)
+ dev_priv->chipset = dev_priv->card_type;
+ else
+ dev_priv->chipset =
+ (NV_READ(NV03_PMC_BOOT_0) & 0x0ff00000) >> 20;
+
+ /* Initialise internal driver API hooks */
+ ret = nouveau_init_engine_ptrs(dev);
+ if (ret) return ret;
+ engine = &dev_priv->Engine;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+
+ ret = nouveau_gpuobj_early_init(dev);
+ if (ret) return ret;
+
+ /* Initialise instance memory, must happen before mem_init so we
+ * know exactly how much VRAM we're able to use for "normal"
+ * purposes.
+ */
+ ret = engine->instmem.init(dev);
+ if (ret) return ret;
+
+ /* Setup the memory manager */
+ if (dev_priv->ttm) {
+ ret = nouveau_mem_init_ttm(dev);
+ if (ret) return ret;
+ } else {
+ ret = nouveau_mem_init(dev);
+ if (ret) return ret;
+ }
+
+ ret = nouveau_gpuobj_init(dev);
+ if (ret) return ret;
+
+ /* Parse BIOS tables / Run init tables? */
+
+ /* PMC */
+ ret = engine->mc.init(dev);
+ if (ret) return ret;
+
+ /* PTIMER */
+ ret = engine->timer.init(dev);
+ if (ret) return ret;
+
+ /* PFB */
+ ret = engine->fb.init(dev);
+ if (ret) return ret;
+
+ /* PGRAPH */
+ ret = engine->graph.init(dev);
+ if (ret) return ret;
+
+ /* PFIFO */
+ ret = engine->fifo.init(dev);
+ if (ret) return ret;
+
+	/* This calls irq_preinstall, registers the irq handler and
+	 * calls irq_postinstall
+	 */
+ ret = drm_irq_install(dev);
+ if (ret) return ret;
+
+ /* what about PVIDEO/PCRTC/PRAMDAC etc? */
+
+ ret = nouveau_dma_channel_init(dev);
+ if (ret) return ret;
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
+ return 0;
+}
+
+static void nouveau_card_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+
+ DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+ nouveau_dma_channel_takedown(dev);
+
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->mc.takedown(dev);
+
+ nouveau_sgdma_nottm_hack_takedown(dev);
+ nouveau_sgdma_takedown(dev);
+
+ nouveau_gpuobj_takedown(dev);
+ nouveau_gpuobj_del(dev, &dev_priv->vm_vram_pt);
+
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
+
+ drm_irq_uninstall(dev);
+
+ nouveau_gpuobj_late_takedown(dev);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+ }
+}
+
+/* Called when a client dies: release everything that was allocated for its
+ * file_priv */
+void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_fifo_cleanup(dev, file_priv);
+ nouveau_mem_release(file_priv,dev_priv->fb_heap);
+ nouveau_mem_release(file_priv,dev_priv->agp_heap);
+ nouveau_mem_release(file_priv,dev_priv->pci_heap);
+}
+
+/* First module load: set up the mmio/fb mappings */
+int nouveau_firstopen(struct drm_device *dev)
+{
+#if defined(__powerpc__)
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct device_node *dn;
+#endif
+ int ret;
+ /* Map any PCI resources we need on the card */
+ ret = nouveau_init_card_mappings(dev);
+ if (ret) return ret;
+
+#if defined(__powerpc__)
+ /* Put the card in BE mode if it's not */
+ if (NV_READ(NV03_PMC_BOOT_1))
+ NV_WRITE(NV03_PMC_BOOT_1,0x00000001);
+
+ DRM_MEMORYBARRIER();
+#endif
+
+#if defined(__linux__) && defined(__powerpc__)
+ /* if we have an OF card, copy vbios to RAMIN */
+ dn = pci_device_to_OF_node(dev->pdev);
+ if (dn)
+ {
+ int size;
+ const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size);
+ if (bios)
+ {
+ int i;
+ for(i=0;i<size;i+=4)
+ NV_WI32(i, bios[i/4]);
+ DRM_INFO("OF bios successfully copied (%d bytes)\n",size);
+ }
+ else
+ DRM_INFO("Unable to get the OF bios\n");
+ }
+ else
+ DRM_INFO("Unable to get the OF node\n");
+#endif
+ return 0;
+}
+
+#define NV40_CHIPSET_MASK 0x00000baf
+#define NV44_CHIPSET_MASK 0x00005450
+
+int nouveau_load(struct drm_device *dev, unsigned long flags)
+{
+ struct drm_nouveau_private *dev_priv;
+ void __iomem *regs;
+ uint32_t reg0,reg1;
+ uint8_t architecture = 0;
+
+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
+ if (!dev_priv)
+ return -ENOMEM;
+
+ dev_priv->flags = flags & NOUVEAU_FLAGS;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+
+ DRM_DEBUG("vendor: 0x%X device: 0x%X class: 0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);
+
+ /* Time to determine the card architecture */
+ regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);
+ if (!regs) {
+		DRM_ERROR("Could not ioremap registers to determine the card architecture\n");
+ return -ENOMEM;
+ }
+
+ reg0 = readl(regs+NV03_PMC_BOOT_0);
+ reg1 = readl(regs+NV03_PMC_BOOT_1);
+#if defined(__powerpc__)
+ if (reg1)
+ reg0=___swab32(reg0);
+#endif
+
+ /* We're dealing with >=NV10 */
+	if ((reg0 & 0x0f000000) > 0) {
+		/* Bits 27:20 contain the architecture */
+ architecture = (reg0 & 0xff00000) >> 20;
+ /* NV04 or NV05 */
+ } else if ((reg0 & 0xff00fff0) == 0x20004000) {
+ architecture = 0x04;
+ }
+
+ iounmap(regs);
+
+ if (architecture >= 0x80) {
+ dev_priv->card_type = NV_50;
+ } else if (architecture >= 0x60) {
+ /* FIXME we need to figure out who's who for NV6x */
+ dev_priv->card_type = NV_44;
+ } else if (architecture >= 0x50) {
+ dev_priv->card_type = NV_50;
+ } else if (architecture >= 0x40) {
+ uint8_t subarch = architecture & 0xf;
+ /* Selection criteria borrowed from NV40EXA */
+ if (NV40_CHIPSET_MASK & (1 << subarch)) {
+ dev_priv->card_type = NV_40;
+ } else if (NV44_CHIPSET_MASK & (1 << subarch)) {
+ dev_priv->card_type = NV_44;
+ } else {
+ dev_priv->card_type = NV_UNKNOWN;
+ }
+ } else if (architecture >= 0x30) {
+ dev_priv->card_type = NV_30;
+ } else if (architecture >= 0x20) {
+ dev_priv->card_type = NV_20;
+ } else if (architecture >= 0x17) {
+ dev_priv->card_type = NV_17;
+ } else if (architecture >= 0x11) {
+ dev_priv->card_type = NV_11;
+ } else if (architecture >= 0x10) {
+ dev_priv->card_type = NV_10;
+ } else if (architecture >= 0x04) {
+ dev_priv->card_type = NV_04;
+ } else {
+ dev_priv->card_type = NV_UNKNOWN;
+ }
+
+ DRM_INFO("Detected an NV%d generation card (0x%08x)\n", dev_priv->card_type,reg0);
+
+ if (dev_priv->card_type == NV_UNKNOWN) {
+ return -EINVAL;
+ }
+
+ /* Special flags */
+ if (dev->pci_device == 0x01a0) {
+ dev_priv->flags |= NV_NFORCE;
+ } else if (dev->pci_device == 0x01f0) {
+ dev_priv->flags |= NV_NFORCE2;
+ }
+
+ dev->dev_private = (void *)dev_priv;
+
+ return 0;
+}
+
+void nouveau_lastclose(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* In the case of an error, dev_priv may not be allocated yet */
+ if (dev_priv && dev_priv->card_type) {
+ nouveau_card_takedown(dev);
+
+		if (dev_priv->fb_mtrr > 0) {
+			drm_mtrr_del(dev_priv->fb_mtrr,
+				     drm_get_resource_start(dev, 1),
+				     nouveau_mem_fb_amount(dev),
+				     DRM_MTRR_WC);
+			dev_priv->fb_mtrr = 0;
+		}
+ }
+}
+
+int nouveau_unload(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ return 0;
+}
+
+int
+nouveau_ioctl_card_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return nouveau_card_init(dev);
+}
+
+int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
+ case NOUVEAU_GETPARAM_PCI_VENDOR:
+ getparam->value=dev->pci_vendor;
+ break;
+ case NOUVEAU_GETPARAM_PCI_DEVICE:
+ getparam->value=dev->pci_device;
+ break;
+ case NOUVEAU_GETPARAM_BUS_TYPE:
+ if (drm_device_is_agp(dev))
+ getparam->value=NV_AGP;
+ else if (drm_device_is_pcie(dev))
+ getparam->value=NV_PCIE;
+ else
+ getparam->value=NV_PCI;
+ break;
+ case NOUVEAU_GETPARAM_FB_PHYSICAL:
+ getparam->value=dev_priv->fb_phys;
+ break;
+ case NOUVEAU_GETPARAM_AGP_PHYSICAL:
+ getparam->value=dev_priv->gart_info.aper_base;
+ break;
+ case NOUVEAU_GETPARAM_PCI_PHYSICAL:
+		if (dev->sg) {
+			getparam->value = (unsigned long)dev->sg->virtual;
+		} else {
+			DRM_ERROR("Requested PCIGART address, "
+				  "while no PCIGART was created\n");
+			return -EINVAL;
+		}
+ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value=dev_priv->fb_available_size;
+ break;
+ case NOUVEAU_GETPARAM_AGP_SIZE:
+ getparam->value=dev_priv->gart_info.aper_size;
+ break;
+ default:
+ DRM_ERROR("unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_setparam *setparam = data;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ switch (setparam->param) {
+ case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
+ switch (setparam->value) {
+ case NOUVEAU_MEM_AGP:
+ case NOUVEAU_MEM_FB:
+ case NOUVEAU_MEM_PCI:
+ case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
+ break;
+ default:
+ DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
+ setparam->value);
+ return -EINVAL;
+ }
+ dev_priv->config.cmdbuf.location = setparam->value;
+ break;
+ case NOUVEAU_SETPARAM_CMDBUF_SIZE:
+ dev_priv->config.cmdbuf.size = setparam->value;
+ break;
+ default:
+ DRM_ERROR("unknown parameter %lld\n", setparam->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* waits for idle */
+void nouveau_wait_for_idle(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type) {
+ case NV_50:
+ break;
+ default: {
+ /* This stuff is more or less a copy of what is seen
+ * in nv28 kmmio dump.
+ */
+ uint64_t started = dev_priv->Engine.timer.read(dev);
+ uint64_t stopped = started;
+ uint32_t status;
+ do {
+ uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
+ (void)pmc_e;
+ status = NV_READ(NV04_PGRAPH_STATUS);
+ if (!status)
+ break;
+ stopped = dev_priv->Engine.timer.read(dev);
+ /* It'll never wrap anyway... */
+ } while (stopped - started < 1000000000ULL);
+ if (status)
+ DRM_ERROR("timed out with status 0x%08x\n",
+ status);
+ }
+ }
+}
+
+static int nouveau_suspend(struct drm_device *dev)
+{
+ struct mem_block *p;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_suspend_resume *susres = &dev_priv->susres;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ int i;
+
+ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
+ susres->ramin_size = 0;
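+	/* Back up RAMIN up to the end of the highest in-use allocation. */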
+ list_for_each(p, dev_priv->ramin_heap)
+ if (p->file_priv && (p->start + p->size) > susres->ramin_size)
+ susres->ramin_size = p->start + p->size;
+ if (!(susres->ramin_copy = drm_alloc(susres->ramin_size, DRM_MEM_DRIVER))) {
+ DRM_ERROR("Couldn't alloc RAMIN backing for suspend\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < engine->fifo.channels; i++) {
+ uint64_t t_start = engine->timer.read(dev);
+
+ if (dev_priv->fifos[i] == NULL)
+ continue;
+
+ /* Give the channel a chance to idle, wait 2s (hopefully) */
+ while (!nouveau_channel_idle(dev_priv->fifos[i]))
+ if (engine->timer.read(dev) - t_start > 2000000000ULL) {
+				DRM_ERROR("Failed to idle channel %d before "
+					  "suspend.", dev_priv->fifos[i]->id);
+ return -EBUSY;
+ }
+ }
+ nouveau_wait_for_idle(dev);
+
+ NV_WRITE(NV04_PGRAPH_FIFO, 0);
+ /* disable the fifo caches */
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+
+ susres->fifo_mode = NV_READ(NV04_PFIFO_MODE);
+
+ if (dev_priv->card_type >= NV_10) {
+ susres->graph_state = NV_READ(NV10_PGRAPH_STATE);
+ susres->graph_ctx_control = NV_READ(NV10_PGRAPH_CTX_CONTROL);
+ } else {
+ susres->graph_state = NV_READ(NV04_PGRAPH_STATE);
+ susres->graph_ctx_control = NV_READ(NV04_PGRAPH_CTX_CONTROL);
+ }
+
+ engine->fifo.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
+ engine->graph.save_context(dev_priv->fifos[engine->fifo.channel_id(dev)]);
+ nouveau_wait_for_idle(dev);
+
+ for (i = 0; i < susres->ramin_size / 4; i++)
+ susres->ramin_copy[i] = NV_RI32(i << 2);
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
+ NV_WRITE(NV04_PGRAPH_FIFO, 1);
+
+ return 0;
+}
+
+static int nouveau_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_suspend_resume *susres = &dev_priv->susres;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ int i;
+
+ if (!susres->ramin_copy)
+ return -EINVAL;
+
+ DRM_DEBUG("Doing resume\n");
+
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+
+ /* agp bridge drivers don't re-enable agp on resume. lame. */
+ if ((i = drm_agp_info(dev, &info))) {
+ DRM_ERROR("Unable to get AGP info: %d\n", i);
+ return i;
+ }
+ mode.mode = info.mode;
+ if ((i = drm_agp_enable(dev, mode))) {
+ DRM_ERROR("Unable to enable AGP: %d\n", i);
+ return i;
+ }
+ }
+
+ for (i = 0; i < susres->ramin_size / 4; i++)
+ NV_WI32(i << 2, susres->ramin_copy[i]);
+
+ engine->mc.init(dev);
+ engine->timer.init(dev);
+ engine->fb.init(dev);
+ engine->graph.init(dev);
+ engine->fifo.init(dev);
+
+ NV_WRITE(NV04_PGRAPH_FIFO, 0);
+ /* disable the fifo caches */
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
+
+ /* PMC power cycling PFIFO in init clobbers some of the stuff stored in
+ * PRAMIN (such as NV04_PFIFO_CACHE1_DMA_INSTANCE). this is unhelpful
+ */
+ for (i = 0; i < susres->ramin_size / 4; i++)
+ NV_WI32(i << 2, susres->ramin_copy[i]);
+
+ engine->fifo.load_context(dev_priv->fifos[0]);
+ NV_WRITE(NV04_PFIFO_MODE, susres->fifo_mode);
+
+ engine->graph.load_context(dev_priv->fifos[0]);
+ nouveau_wait_for_idle(dev);
+
+ if (dev_priv->card_type >= NV_10) {
+ NV_WRITE(NV10_PGRAPH_STATE, susres->graph_state);
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
+ } else {
+ NV_WRITE(NV04_PGRAPH_STATE, susres->graph_state);
+ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, susres->graph_ctx_control);
+ }
+
+ /* reenable the fifo caches */
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
+ NV_WRITE(NV04_PGRAPH_FIFO, 0x1);
+
+ if (dev->irq_enabled)
+ nouveau_irq_postinstall(dev);
+
+ drm_free(susres->ramin_copy, susres->ramin_size, DRM_MEM_DRIVER);
+ susres->ramin_copy = NULL;
+ susres->ramin_size = 0;
+
+ return 0;
+}
+
+int nouveau_ioctl_suspend(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ return nouveau_suspend(dev);
+}
+
+int nouveau_ioctl_resume(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+
+ return nouveau_resume(dev);
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_swmthd.c b/drivers/gpu/drm/nouveau/nouveau_swmthd.c
new file mode 100644
index 0000000..c3666bf
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_swmthd.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2007 Arthur Huillet.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Arthur Huillet <arthur.huillet AT free DOT fr>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_reg.h"
+
+/*TODO: add a "card_type" attribute*/
+typedef struct {
+	uint32_t oclass; /* object class for this software method */
+	uint32_t mthd;   /* method number */
+	/* pointer to the function that does the work */
+	void (*method_code)(struct drm_device *dev, uint32_t oclass,
+			    uint32_t mthd);
+} nouveau_software_method_t;
+
+
+/* This function handles the NV04 setcontext software methods.
+ * One function for all because they are very similar. */
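+/* The selected bit is OR'd into the object's context word in RAMIN (the
+ * instance comes from NV04_PGRAPH_CTX_SWITCH4), and the same value is also
+ * written to PGRAPH's live copy in CTX_SWITCH1 and the CTX_CACHE1 slot
+ * derived from NV04_PGRAPH_TRAPPED_ADDR, keeping all three in sync. */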
+static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t oclass, uint32_t mthd) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst_loc = NV_READ(NV04_PGRAPH_CTX_SWITCH4) & 0xFFFF;
+ uint32_t value_to_set = 0, bit_to_set = 0;
+
+ switch ( oclass ) {
+ case 0x4a:
+ switch ( mthd ) {
+ case 0x188 :
+ case 0x18c :
+ bit_to_set = 0;
+ break;
+ case 0x198 :
+ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
+ break;
+ case 0x2fc :
+ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
+ break;
+ default : ;
+ };
+ break;
+ case 0x5c:
+ switch ( mthd ) {
+ case 0x184:
+ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
+ break;
+ case 0x188:
+ case 0x18c:
+ bit_to_set = 0;
+ break;
+ case 0x198:
+ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
+ break;
+ case 0x2fc :
+ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
+ break;
+ };
+ break;
+ case 0x5f:
+ switch ( mthd ) {
+ case 0x184 :
+ bit_to_set = 1 << 12; /*CHROMA_KEY_ENABLE*/
+ break;
+ case 0x188 :
+ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
+ break;
+ case 0x18c :
+ case 0x190 :
+ bit_to_set = 0;
+ break;
+ case 0x19c :
+ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
+ break;
+ case 0x2fc :
+ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
+ break;
+ };
+ break;
+ case 0x61:
+ switch ( mthd ) {
+ case 0x188 :
+ bit_to_set = 1 << 13; /*USER_CLIP_ENABLE*/
+ break;
+ case 0x18c :
+ case 0x190 :
+ bit_to_set = 0;
+ break;
+ case 0x19c :
+ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
+ break;
+ case 0x2fc :
+ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; /*PATCH_CONFIG = NV04_PGRAPH_TRAPPED_DATA*/
+ break;
+ };
+ break;
+ case 0x77:
+ switch ( mthd ) {
+ case 0x198 :
+ bit_to_set = 1 << 24; /*PATCH_STATUS_VALID*/
+ break;
+ case 0x304 :
+ bit_to_set = NV_READ(NV04_PGRAPH_TRAPPED_DATA) << 15; //PATCH_CONFIG
+ break;
+ };
+ break;
+ default :;
+ };
+
+ value_to_set = (NV_READ(0x00700000 | inst_loc << 4))| bit_to_set;
+
+ /*RAMIN*/
+ nouveau_wait_for_idle(dev);
+ NV_WRITE(0x00700000 | inst_loc << 4, value_to_set);
+
+ /*DRM_DEBUG("CTX_SWITCH1 value is %#x\n", NV_READ(NV04_PGRAPH_CTX_SWITCH1));*/
+ NV_WRITE(NV04_PGRAPH_CTX_SWITCH1, value_to_set);
+
+ /*DRM_DEBUG("CTX_CACHE1 + xxx value is %#x\n", NV_READ(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2)));*/
+ NV_WRITE(NV04_PGRAPH_CTX_CACHE1 + (((NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7) << 2), value_to_set);
+}
+
+ nouveau_software_method_t nouveau_sw_methods[] = {
+ /*NV04 context software methods*/
+ { 0x4a, 0x188, nouveau_NV04_setcontext_sw_method },
+ { 0x4a, 0x18c, nouveau_NV04_setcontext_sw_method },
+ { 0x4a, 0x198, nouveau_NV04_setcontext_sw_method },
+ { 0x4a, 0x2fc, nouveau_NV04_setcontext_sw_method },
+ { 0x5c, 0x184, nouveau_NV04_setcontext_sw_method },
+ { 0x5c, 0x188, nouveau_NV04_setcontext_sw_method },
+ { 0x5c, 0x18c, nouveau_NV04_setcontext_sw_method },
+ { 0x5c, 0x198, nouveau_NV04_setcontext_sw_method },
+ { 0x5c, 0x2fc, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x184, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x188, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x18c, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x190, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x19c, nouveau_NV04_setcontext_sw_method },
+ { 0x5f, 0x2fc, nouveau_NV04_setcontext_sw_method },
+ { 0x61, 0x188, nouveau_NV04_setcontext_sw_method },
+ { 0x61, 0x18c, nouveau_NV04_setcontext_sw_method },
+ { 0x61, 0x190, nouveau_NV04_setcontext_sw_method },
+ { 0x61, 0x19c, nouveau_NV04_setcontext_sw_method },
+ { 0x61, 0x2fc, nouveau_NV04_setcontext_sw_method },
+ { 0x77, 0x198, nouveau_NV04_setcontext_sw_method },
+ { 0x77, 0x304, nouveau_NV04_setcontext_sw_method },
+ /*terminator*/
+ { 0x0, 0x0, NULL, },
+ };
+
+int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass,
+			      uint32_t method)
+{
+	int i = 0;
+
+	while (nouveau_sw_methods[i].method_code != NULL) {
+		if (nouveau_sw_methods[i].oclass == oclass &&
+		    nouveau_sw_methods[i].mthd == method) {
+			nouveau_sw_methods[i].method_code(dev, oclass, method);
+			return 0;
+		}
+		i++;
+	}
+
+	return 1;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_swmthd.h b/drivers/gpu/drm/nouveau/nouveau_swmthd.h
new file mode 100644
index 0000000..5b9409f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_swmthd.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2007 Arthur Huillet.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+/*
+ * Authors:
+ * Arthur Huillet <arthur.huillet AT free DOT fr>
+ */
+
+int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */
diff --git a/drivers/gpu/drm/nouveau/nv04_fb.c b/drivers/gpu/drm/nouveau/nv04_fb.c
new file mode 100644
index 0000000..58a9247
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fb.c
@@ -0,0 +1,23 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
+ * nvidia reading PFB_CFG_0, then writing back its original value.
+ * (which was 0x701114 in this case)
+ */
+ NV_WRITE(NV04_PFB_CFG0, 0x1114);
+
+ return 0;
+}
+
+void
+nv04_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
new file mode 100644
index 0000000..88186fe
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4)
+#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
+#define NV04_RAMFC__SIZE 32
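+/* NV04 RAMFC contexts hold the NV04_RAMFC_* words (0x00-0x18), padded to
+ * 32 bytes per channel. */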
+
+int
+nv04_fifo_channel_id(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK);
+}
+
+int
+nv04_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
+ NV04_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
+
+ /* Setup initial state */
+ RAMFC_WR(DMA_PUT, chan->pushbuf_base);
+ RAMFC_WR(DMA_GET, chan->pushbuf_base);
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0));
+
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id));
+ return 0;
+}
+
+void
+nv04_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+int
+nv04_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
+
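+	/* RAMFC DMA_INSTANCE packs the instance in the low 16 bits and
+	 * DCOUNT in the high 16 (see nv04_fifo_save_context below). */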
+ tmp = RAMFC_RD(DMA_INSTANCE);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));
+ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE));
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL1, RAMFC_RD(PULL1_ENGINE));
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv04_fifo_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ RAMFC_WR(DMA_PUT, NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
+ RAMFC_WR(DMA_GET, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
+ tmp |= NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE);
+ RAMFC_WR(DMA_INSTANCE, tmp);
+
+ RAMFC_WR(DMA_STATE, NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
+ RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
+ RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));
+ RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
new file mode 100644
index 0000000..6caae25
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+static uint32_t nv04_graph_ctx_regs [] = {
+ NV04_PGRAPH_CTX_SWITCH1,
+ NV04_PGRAPH_CTX_SWITCH2,
+ NV04_PGRAPH_CTX_SWITCH3,
+ NV04_PGRAPH_CTX_SWITCH4,
+ NV04_PGRAPH_CTX_CACHE1,
+ NV04_PGRAPH_CTX_CACHE2,
+ NV04_PGRAPH_CTX_CACHE3,
+ NV04_PGRAPH_CTX_CACHE4,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ 0x00400174,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV04_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV04_PGRAPH_SURFACE,
+ NV04_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV04_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM+0x00,
+ NV04_PGRAPH_PATT_COLORRAM+0x01,
+ NV04_PGRAPH_PATT_COLORRAM+0x02,
+ NV04_PGRAPH_PATT_COLORRAM+0x03,
+ NV04_PGRAPH_PATT_COLORRAM+0x04,
+ NV04_PGRAPH_PATT_COLORRAM+0x05,
+ NV04_PGRAPH_PATT_COLORRAM+0x06,
+ NV04_PGRAPH_PATT_COLORRAM+0x07,
+ NV04_PGRAPH_PATT_COLORRAM+0x08,
+ NV04_PGRAPH_PATT_COLORRAM+0x09,
+ NV04_PGRAPH_PATT_COLORRAM+0x0A,
+ NV04_PGRAPH_PATT_COLORRAM+0x0B,
+ NV04_PGRAPH_PATT_COLORRAM+0x0C,
+ NV04_PGRAPH_PATT_COLORRAM+0x0D,
+ NV04_PGRAPH_PATT_COLORRAM+0x0E,
+ NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x10,
+ NV04_PGRAPH_PATT_COLORRAM+0x11,
+ NV04_PGRAPH_PATT_COLORRAM+0x12,
+ NV04_PGRAPH_PATT_COLORRAM+0x13,
+ NV04_PGRAPH_PATT_COLORRAM+0x14,
+ NV04_PGRAPH_PATT_COLORRAM+0x15,
+ NV04_PGRAPH_PATT_COLORRAM+0x16,
+ NV04_PGRAPH_PATT_COLORRAM+0x17,
+ NV04_PGRAPH_PATT_COLORRAM+0x18,
+ NV04_PGRAPH_PATT_COLORRAM+0x19,
+ NV04_PGRAPH_PATT_COLORRAM+0x1A,
+ NV04_PGRAPH_PATT_COLORRAM+0x1B,
+ NV04_PGRAPH_PATT_COLORRAM+0x1C,
+ NV04_PGRAPH_PATT_COLORRAM+0x1D,
+ NV04_PGRAPH_PATT_COLORRAM+0x1E,
+ NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x20,
+ NV04_PGRAPH_PATT_COLORRAM+0x21,
+ NV04_PGRAPH_PATT_COLORRAM+0x22,
+ NV04_PGRAPH_PATT_COLORRAM+0x23,
+ NV04_PGRAPH_PATT_COLORRAM+0x24,
+ NV04_PGRAPH_PATT_COLORRAM+0x25,
+ NV04_PGRAPH_PATT_COLORRAM+0x26,
+ NV04_PGRAPH_PATT_COLORRAM+0x27,
+ NV04_PGRAPH_PATT_COLORRAM+0x28,
+ NV04_PGRAPH_PATT_COLORRAM+0x29,
+ NV04_PGRAPH_PATT_COLORRAM+0x2A,
+ NV04_PGRAPH_PATT_COLORRAM+0x2B,
+ NV04_PGRAPH_PATT_COLORRAM+0x2C,
+ NV04_PGRAPH_PATT_COLORRAM+0x2D,
+ NV04_PGRAPH_PATT_COLORRAM+0x2E,
+ NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x30,
+ NV04_PGRAPH_PATT_COLORRAM+0x31,
+ NV04_PGRAPH_PATT_COLORRAM+0x32,
+ NV04_PGRAPH_PATT_COLORRAM+0x33,
+ NV04_PGRAPH_PATT_COLORRAM+0x34,
+ NV04_PGRAPH_PATT_COLORRAM+0x35,
+ NV04_PGRAPH_PATT_COLORRAM+0x36,
+ NV04_PGRAPH_PATT_COLORRAM+0x37,
+ NV04_PGRAPH_PATT_COLORRAM+0x38,
+ NV04_PGRAPH_PATT_COLORRAM+0x39,
+ NV04_PGRAPH_PATT_COLORRAM+0x3A,
+ NV04_PGRAPH_PATT_COLORRAM+0x3B,
+ NV04_PGRAPH_PATT_COLORRAM+0x3C,
+ NV04_PGRAPH_PATT_COLORRAM+0x3D,
+ NV04_PGRAPH_PATT_COLORRAM+0x3E,
+ NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATTERN,
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ 0x00400600,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ NV04_PGRAPH_CONTROL0,
+ NV04_PGRAPH_CONTROL1,
+ NV04_PGRAPH_CONTROL2,
+ NV04_PGRAPH_BLEND,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400400,
+ 0x00400480,
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400534,
+ 0x00400538,
+ 0x00400514,
+ 0x00400518,
+ 0x0040051c,
+ 0x00400520,
+ 0x00400524,
+ 0x00400528,
+ 0x0040052c,
+ 0x00400530,
+ 0x00400d00,
+ 0x00400d40,
+ 0x00400d80,
+ 0x00400d04,
+ 0x00400d44,
+ 0x00400d84,
+ 0x00400d08,
+ 0x00400d48,
+ 0x00400d88,
+ 0x00400d0c,
+ 0x00400d4c,
+ 0x00400d8c,
+ 0x00400d10,
+ 0x00400d50,
+ 0x00400d90,
+ 0x00400d14,
+ 0x00400d54,
+ 0x00400d94,
+ 0x00400d18,
+ 0x00400d58,
+ 0x00400d98,
+ 0x00400d1c,
+ 0x00400d5c,
+ 0x00400d9c,
+ 0x00400d20,
+ 0x00400d60,
+ 0x00400da0,
+ 0x00400d24,
+ 0x00400d64,
+ 0x00400da4,
+ 0x00400d28,
+ 0x00400d68,
+ 0x00400da8,
+ 0x00400d2c,
+ 0x00400d6c,
+ 0x00400dac,
+ 0x00400d30,
+ 0x00400d70,
+ 0x00400db0,
+ 0x00400d34,
+ 0x00400d74,
+ 0x00400db4,
+ 0x00400d38,
+ 0x00400d78,
+ 0x00400db8,
+ 0x00400d3c,
+ 0x00400d7c,
+ 0x00400dbc,
+ 0x00400590,
+ 0x00400594,
+ 0x00400598,
+ 0x0040059c,
+ 0x004005a8,
+ 0x004005ac,
+ 0x004005b0,
+ 0x004005b4,
+ 0x004005c0,
+ 0x004005c4,
+ 0x004005c8,
+ 0x004005cc,
+ 0x004005d0,
+ 0x004005d4,
+ 0x004005d8,
+ 0x004005dc,
+ 0x004005e0,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV04_PGRAPH_DVD_COLORFMT,
+ NV04_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ 0x00400500,
+ 0x00400504,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2
+};
+
+struct graph_state {
+ int nv04[sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0])];
+};
+
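+/* PGRAPH context switch for NV04-class cards.  This appears to be called
+ * from the PGRAPH interrupt path (the message below calls it a context
+ * switch interrupt): PGRAPH fetching is stalled, the outgoing channel's
+ * context registers are saved to its software copy, the incoming channel's
+ * copy is loaded, and CTX_USER/CTX_CONTROL are repointed at the new
+ * channel before fetching is re-enabled.
+ */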
+void nouveau_nv04_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_engine *engine;
+ struct nouveau_channel *next, *last;
+ int chid;
+
+ if (!dev) {
+ DRM_DEBUG("Invalid drm_device\n");
+ return;
+ }
+ dev_priv = dev->dev_private;
+ if (!dev_priv) {
+ DRM_DEBUG("Invalid drm_nouveau_private\n");
+ return;
+ }
+ if (!dev_priv->fifos) {
+ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
+ return;
+ }
+ engine = &dev_priv->Engine;
+
+ chid = engine->fifo.channel_id(dev);
+ next = dev_priv->fifos[chid];
+
+ if (!next) {
+ DRM_DEBUG("Invalid next channel\n");
+ return;
+ }
+
+ chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
+ last = dev_priv->fifos[chid];
+
+ if (!last) {
+ DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
+ next->id);
+ } else {
+ DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
+ last->id, next->id);
+ }
+
+/* NV_WRITE(NV03_PFIFO_CACHES, 0x0);
+ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/
+ NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+
+ if (last)
+ nv04_graph_save_context(last);
+
+ nouveau_wait_for_idle(dev);
+
+ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
+
+ nouveau_wait_for_idle(dev);
+
+ nv04_graph_load_context(next);
+
+ NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+ NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
+ NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
+
+/* NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+ NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/
+ NV_WRITE(NV04_PGRAPH_FIFO,0x1);
+}
+
+int nv04_graph_create_context(struct nouveau_channel *chan)
+{
+ struct graph_state* pgraph_ctx;
+
+ DRM_DEBUG("nv04_graph_create_context %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
+ DRM_MEM_DRIVER);
+
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ //dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
+ pgraph_ctx->nv04[0] = 0x0001ffff;
+ /* is it really needed ??? */
+ //dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
+ //dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
+
+ return 0;
+}
+
+void nv04_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+
+ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
+ chan->pgraph_ctx = NULL;
+}
+
+int nv04_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ int i;
+
+ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
+ NV_WRITE(nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
+
+ return 0;
+}
+
+int nv04_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ int i;
+
+ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
+ pgraph_ctx->nv04[i] = NV_READ(nv04_graph_ctx_regs[i]);
+
+ return 0;
+}
+
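+/* Bring up PGRAPH: pulse its bit in PMC_ENABLE to reset the unit, unmask
+ * all PGRAPH interrupts and program the DEBUG registers with values taken
+ * from dumps of the binary driver (see the per-register comments below).
+ */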
+int nv04_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ /* Enable PGRAPH interrupts */
+ NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ NV_WRITE(NV04_PGRAPH_VALID1, 0);
+ NV_WRITE(NV04_PGRAPH_VALID2, 0);
+ /*NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
+ /*1231C000 blob, 001 haiku*/
+ //*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111100);
+ /*0x72111100 blob , 01 haiku*/
+ /*NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+ /*haiku same*/
+
+ /*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+ /*haiku and blob 10d4*/
+
+ NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100);
+ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
+
+ /* These don't belong here, they're part of a per-channel context */
+ NV_WRITE(NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+ NV_WRITE(NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
+
+ return 0;
+}
+
+void nv04_graph_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
new file mode 100644
index 0000000..804f9a7
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -0,0 +1,159 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+static void
+nv04_instmem_determine_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Figure out how much instance memory we need */
+ if (dev_priv->card_type >= NV_40) {
+ /* We'll want more instance memory than this on some NV4x cards.
+ * There's a 16MB aperture to play with that maps onto the end
+ * of vram. For now, only reserve a small piece until we know
+ * more about what each chipset requires.
+ */
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ } else {
+ /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
+ * exist in vram on those cards as well?
+ */
+ dev_priv->ramin_rsvd_vram = (512*1024);
+ }
+ DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
+
+ /* Clear all of it, except the BIOS image that's in the first 64KiB */
+ for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
+ NV_WI32(i, 0x00000000);
+}
+
+static void
+nv04_instmem_configure_fixed_tables(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+
+ /* FIFO hash table (RAMHT)
+ * use 4k hash table at RAMIN+0x10000
+ * TODO: extend the hash table
+ */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
+ dev_priv->ramht_size);
+
+ /* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
+ dev_priv->ramro_offset = 0x11200;
+ dev_priv->ramro_size = 512;
+ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
+ dev_priv->ramro_size);
+
+ /* FIFO context table (RAMFC)
+ * NV40 : Not sure exactly how to position RAMFC on some cards,
+ * 0x30002 seems to position it at RAMIN+0x20000 on these
+ * cards. RAMFC is 4KiB (32 FIFOs, 128-byte entries).
+ * Others: Position RAMFC at RAMIN+0x11400
+ */
+ switch (dev_priv->card_type) {
+ case NV_40:
+ case NV_44:
+ dev_priv->ramfc_offset = 0x20000;
+ dev_priv->ramfc_size = engine->fifo.channels *
+ nouveau_fifo_ctx_size(dev);
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_17:
+ case NV_11:
+ case NV_10:
+ case NV_04:
+ default:
+ dev_priv->ramfc_offset = 0x11400;
+ dev_priv->ramfc_size = engine->fifo.channels *
+ nouveau_fifo_ctx_size(dev);
+ break;
+ }
+ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
+ dev_priv->ramfc_size);
+}
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ int ret = 0;
+
+ nv04_instmem_determine_amount(dev);
+ nv04_instmem_configure_fixed_tables(dev);
+
+ /* Create a heap to manage RAMIN allocations, we don't allocate
+ * the space that was reserved for RAMHT/FC/RO.
+ */
+ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+
+ /* On my NV4E, there's *something* clobbering the 16KiB just after
+ * where we setup these fixed tables. No idea what it is just yet,
+ * so reserve this space on all NV4X cards for now.
+ */
+ if (dev_priv->card_type >= NV_40)
+ offset += 16*1024;
+
+ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ offset, dev_priv->ramin_rsvd_vram - offset);
+ if (ret) {
+ dev_priv->ramin_heap = NULL;
+ DRM_ERROR("Failed to init RAMIN heap\n");
+ }
+
+ return ret;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+}
+
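+/* The NV04 instmem hooks below have no backing memory to manage: populate
+ * only rejects objects that already have backing, clear drops any backing
+ * reference, and bind/unbind simply track the im_bound flag.
+ */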
+int
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ return 0;
+}
+
+void
+nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->Engine.instmem.unbind(dev, gpuobj);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (!gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
new file mode 100644
index 0000000..24c1f7b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -0,0 +1,22 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_mc_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ return 0;
+}
+
+void
+nv04_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
new file mode 100644
index 0000000..616f197
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -0,0 +1,53 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_timer_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000);
+ NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF);
+
+ /* Just use the pre-existing values when possible for now; these regs
+ * are not written by the nv driver (its author missed a /4 on the
+ * address), and writing 8 and 3 to the correct regs breaks the timings
+ * on the LVDS hardware sequencing microcode.
+ * A correct solution (involving calculations with the GPU PLL) can
+ * be done when kernel modesetting lands.
+ */
+ if (!NV_READ(NV04_PTIMER_NUMERATOR) || !NV_READ(NV04_PTIMER_DENOMINATOR)) {
+ NV_WRITE(NV04_PTIMER_NUMERATOR, 0x00000008);
+ NV_WRITE(NV04_PTIMER_DENOMINATOR, 0x00000003);
+ }
+
+ return 0;
+}
+
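+/* Return the full 64-bit PTIMER value.  TIME_0 supplies the low dword and
+ * TIME_1 the high dword; the high dword is re-read until two consecutive
+ * reads match, so a carry between the two register reads cannot produce a
+ * torn result.
+ */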
+uint64_t
+nv04_timer_read(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t low;
+ /* From kmmio dumps on nv28 this looks like how the blob does this.
+ * It reads the high dword twice, before and after.
+ * The only explanation seems to be that the 64-bit timer counter
+ * advances between high and low dword reads and may corrupt the
+ * result. Not confirmed.
+ */
+ uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1);
+ uint32_t high1;
+ do {
+ high1 = high2;
+ low = NV_READ(NV04_PTIMER_TIME_0);
+ high2 = NV_READ(NV04_PTIMER_TIME_1);
+ } while(high1 != high2);
+ return (((uint64_t)high2) << 32) | (uint64_t)low;
+}
+
+void
+nv04_timer_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fb.c b/drivers/gpu/drm/nouveau/nv10_fb.c
new file mode 100644
index 0000000..6e0773a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fb.c
@@ -0,0 +1,25 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
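+/* For each PFB tiling region, clear the tile register and program the
+ * region limit to the framebuffer BAR size, which presumably leaves all
+ * of VRAM untiled at startup.
+ */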
+int
+nv10_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_bar_size;
+ int i;
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
+ NV_WRITE(NV10_PFB_TILE(i), 0);
+ NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+
+ return 0;
+}
+
+void
+nv10_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
new file mode 100644
index 0000000..6d50b6c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV10_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV10_RAMFC_##offset/4)
+#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
+#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
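+/* RAMFC_WR/RAMFC_RD access this channel's FIFO context entry in instance
+ * memory.  NV10_RAMFC() locates the entry inside the RAMFC table; entries
+ * are 64 bytes on NV17+ chipsets and 32 bytes on earlier ones.
+ */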
+
+int
+nv10_fifo_channel_id(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
+ NV10_PFIFO_CACHE1_PUSH1_CHID_MASK);
+}
+
+int
+nv10_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
+ NV10_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
+
+ /* Fill in the entries that are seen filled in dumps of the nvidia driver
+ * just after the channel is put into DMA mode.
+ */
+ RAMFC_WR(DMA_PUT , chan->pushbuf_base);
+ RAMFC_WR(DMA_GET , chan->pushbuf_base);
+ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0);
+
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
+ return 0;
+}
+
+void
+nv10_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+int
+nv10_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
+ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT));
+
+ tmp = RAMFC_RD(DMA_INSTANCE);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , tmp & 0xFFFF);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , tmp >> 16);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , RAMFC_RD(DMA_FETCH));
+ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE));
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE));
+
+ if (dev_priv->chipset >= 0x17) {
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE,
+ RAMFC_RD(ACQUIRE_VALUE));
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP,
+ RAMFC_RD(ACQUIRE_TIMESTAMP));
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT,
+ RAMFC_RD(ACQUIRE_TIMEOUT));
+ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE,
+ RAMFC_RD(SEMAPHORE));
+ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE,
+ RAMFC_RD(DMA_SUBROUTINE));
+ }
+
+ /* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv10_fifo_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
+ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
+
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE) & 0xFFFF;
+ tmp |= (NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16);
+ RAMFC_WR(DMA_INSTANCE , tmp);
+
+ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
+ RAMFC_WR(DMA_FETCH , NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));
+ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
+ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
+
+ if (dev_priv->chipset >= 0x17) {
+ RAMFC_WR(ACQUIRE_VALUE,
+ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ RAMFC_WR(ACQUIRE_TIMESTAMP,
+ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP));
+ RAMFC_WR(ACQUIRE_TIMEOUT,
+ NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ RAMFC_WR(SEMAPHORE,
+ NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
+ RAMFC_WR(DMA_SUBROUTINE,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
new file mode 100644
index 0000000..9bf6c7e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -0,0 +1,871 @@
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+
+#define NV10_FIFO_NUMBER 32
+
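+/* Software shadow of the PGRAPH 3D pipe state.  Each array covers one
+ * range of the pipe address space: the field name gives the starting pipe
+ * address and the array length is the size of the range in dwords.
+ */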
+struct pipe_state {
+ uint32_t pipe_0x0000[0x040/4];
+ uint32_t pipe_0x0040[0x010/4];
+ uint32_t pipe_0x0200[0x0c0/4];
+ uint32_t pipe_0x4400[0x080/4];
+ uint32_t pipe_0x6400[0x3b0/4];
+ uint32_t pipe_0x6800[0x2f0/4];
+ uint32_t pipe_0x6c00[0x030/4];
+ uint32_t pipe_0x7000[0x130/4];
+ uint32_t pipe_0x7400[0x0c0/4];
+ uint32_t pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs [] = {
+NV10_PGRAPH_CTX_SWITCH1,
+NV10_PGRAPH_CTX_SWITCH2,
+NV10_PGRAPH_CTX_SWITCH3,
+NV10_PGRAPH_CTX_SWITCH4,
+NV10_PGRAPH_CTX_SWITCH5,
+NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
+NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
+NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
+NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
+NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
+0x00400164,
+0x00400184,
+0x004001a4,
+0x004001c4,
+0x004001e4,
+0x00400168,
+0x00400188,
+0x004001a8,
+0x004001c8,
+0x004001e8,
+0x0040016c,
+0x0040018c,
+0x004001ac,
+0x004001cc,
+0x004001ec,
+0x00400170,
+0x00400190,
+0x004001b0,
+0x004001d0,
+0x004001f0,
+0x00400174,
+0x00400194,
+0x004001b4,
+0x004001d4,
+0x004001f4,
+0x00400178,
+0x00400198,
+0x004001b8,
+0x004001d8,
+0x004001f8,
+0x0040017c,
+0x0040019c,
+0x004001bc,
+0x004001dc,
+0x004001fc,
+NV10_PGRAPH_CTX_USER,
+NV04_PGRAPH_DMA_START_0,
+NV04_PGRAPH_DMA_START_1,
+NV04_PGRAPH_DMA_LENGTH,
+NV04_PGRAPH_DMA_MISC,
+NV10_PGRAPH_DMA_PITCH,
+NV04_PGRAPH_BOFFSET0,
+NV04_PGRAPH_BBASE0,
+NV04_PGRAPH_BLIMIT0,
+NV04_PGRAPH_BOFFSET1,
+NV04_PGRAPH_BBASE1,
+NV04_PGRAPH_BLIMIT1,
+NV04_PGRAPH_BOFFSET2,
+NV04_PGRAPH_BBASE2,
+NV04_PGRAPH_BLIMIT2,
+NV04_PGRAPH_BOFFSET3,
+NV04_PGRAPH_BBASE3,
+NV04_PGRAPH_BLIMIT3,
+NV04_PGRAPH_BOFFSET4,
+NV04_PGRAPH_BBASE4,
+NV04_PGRAPH_BLIMIT4,
+NV04_PGRAPH_BOFFSET5,
+NV04_PGRAPH_BBASE5,
+NV04_PGRAPH_BLIMIT5,
+NV04_PGRAPH_BPITCH0,
+NV04_PGRAPH_BPITCH1,
+NV04_PGRAPH_BPITCH2,
+NV04_PGRAPH_BPITCH3,
+NV04_PGRAPH_BPITCH4,
+NV10_PGRAPH_SURFACE,
+NV10_PGRAPH_STATE,
+NV04_PGRAPH_BSWIZZLE2,
+NV04_PGRAPH_BSWIZZLE5,
+NV04_PGRAPH_BPIXEL,
+NV10_PGRAPH_NOTIFY,
+NV04_PGRAPH_PATT_COLOR0,
+NV04_PGRAPH_PATT_COLOR1,
+NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+0x00400904,
+0x00400908,
+0x0040090c,
+0x00400910,
+0x00400914,
+0x00400918,
+0x0040091c,
+0x00400920,
+0x00400924,
+0x00400928,
+0x0040092c,
+0x00400930,
+0x00400934,
+0x00400938,
+0x0040093c,
+0x00400940,
+0x00400944,
+0x00400948,
+0x0040094c,
+0x00400950,
+0x00400954,
+0x00400958,
+0x0040095c,
+0x00400960,
+0x00400964,
+0x00400968,
+0x0040096c,
+0x00400970,
+0x00400974,
+0x00400978,
+0x0040097c,
+0x00400980,
+0x00400984,
+0x00400988,
+0x0040098c,
+0x00400990,
+0x00400994,
+0x00400998,
+0x0040099c,
+0x004009a0,
+0x004009a4,
+0x004009a8,
+0x004009ac,
+0x004009b0,
+0x004009b4,
+0x004009b8,
+0x004009bc,
+0x004009c0,
+0x004009c4,
+0x004009c8,
+0x004009cc,
+0x004009d0,
+0x004009d4,
+0x004009d8,
+0x004009dc,
+0x004009e0,
+0x004009e4,
+0x004009e8,
+0x004009ec,
+0x004009f0,
+0x004009f4,
+0x004009f8,
+0x004009fc,
+NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
+0x0040080c,
+NV04_PGRAPH_PATTERN_SHAPE,
+NV03_PGRAPH_MONO_COLOR0,
+NV04_PGRAPH_ROP3,
+NV04_PGRAPH_CHROMA,
+NV04_PGRAPH_BETA_AND,
+NV04_PGRAPH_BETA_PREMULT,
+0x00400e70,
+0x00400e74,
+0x00400e78,
+0x00400e7c,
+0x00400e80,
+0x00400e84,
+0x00400e88,
+0x00400e8c,
+0x00400ea0,
+0x00400ea4,
+0x00400ea8,
+0x00400e90,
+0x00400e94,
+0x00400e98,
+0x00400e9c,
+NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00 to 0x400f1c */
+NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20 to 0x400f3c */
+0x00400f04,
+0x00400f24,
+0x00400f08,
+0x00400f28,
+0x00400f0c,
+0x00400f2c,
+0x00400f10,
+0x00400f30,
+0x00400f14,
+0x00400f34,
+0x00400f18,
+0x00400f38,
+0x00400f1c,
+0x00400f3c,
+NV10_PGRAPH_XFMODE0,
+NV10_PGRAPH_XFMODE1,
+NV10_PGRAPH_GLOBALSTATE0,
+NV10_PGRAPH_GLOBALSTATE1,
+NV04_PGRAPH_STORED_FMT,
+NV04_PGRAPH_SOURCE_COLOR,
+NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
+NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
+0x00400404,
+0x00400484,
+0x00400408,
+0x00400488,
+0x0040040c,
+0x0040048c,
+0x00400410,
+0x00400490,
+0x00400414,
+0x00400494,
+0x00400418,
+0x00400498,
+0x0040041c,
+0x0040049c,
+0x00400420,
+0x004004a0,
+0x00400424,
+0x004004a4,
+0x00400428,
+0x004004a8,
+0x0040042c,
+0x004004ac,
+0x00400430,
+0x004004b0,
+0x00400434,
+0x004004b4,
+0x00400438,
+0x004004b8,
+0x0040043c,
+0x004004bc,
+0x00400440,
+0x004004c0,
+0x00400444,
+0x004004c4,
+0x00400448,
+0x004004c8,
+0x0040044c,
+0x004004cc,
+0x00400450,
+0x004004d0,
+0x00400454,
+0x004004d4,
+0x00400458,
+0x004004d8,
+0x0040045c,
+0x004004dc,
+0x00400460,
+0x004004e0,
+0x00400464,
+0x004004e4,
+0x00400468,
+0x004004e8,
+0x0040046c,
+0x004004ec,
+0x00400470,
+0x004004f0,
+0x00400474,
+0x004004f4,
+0x00400478,
+0x004004f8,
+0x0040047c,
+0x004004fc,
+NV03_PGRAPH_ABS_UCLIP_XMIN,
+NV03_PGRAPH_ABS_UCLIP_XMAX,
+NV03_PGRAPH_ABS_UCLIP_YMIN,
+NV03_PGRAPH_ABS_UCLIP_YMAX,
+0x00400550,
+0x00400558,
+0x00400554,
+0x0040055c,
+NV03_PGRAPH_ABS_UCLIPA_XMIN,
+NV03_PGRAPH_ABS_UCLIPA_XMAX,
+NV03_PGRAPH_ABS_UCLIPA_YMIN,
+NV03_PGRAPH_ABS_UCLIPA_YMAX,
+NV03_PGRAPH_ABS_ICLIP_XMAX,
+NV03_PGRAPH_ABS_ICLIP_YMAX,
+NV03_PGRAPH_XY_LOGIC_MISC0,
+NV03_PGRAPH_XY_LOGIC_MISC1,
+NV03_PGRAPH_XY_LOGIC_MISC2,
+NV03_PGRAPH_XY_LOGIC_MISC3,
+NV03_PGRAPH_CLIPX_0,
+NV03_PGRAPH_CLIPX_1,
+NV03_PGRAPH_CLIPY_0,
+NV03_PGRAPH_CLIPY_1,
+NV10_PGRAPH_COMBINER0_IN_ALPHA,
+NV10_PGRAPH_COMBINER1_IN_ALPHA,
+NV10_PGRAPH_COMBINER0_IN_RGB,
+NV10_PGRAPH_COMBINER1_IN_RGB,
+NV10_PGRAPH_COMBINER_COLOR0,
+NV10_PGRAPH_COMBINER_COLOR1,
+NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+NV10_PGRAPH_COMBINER0_OUT_RGB,
+NV10_PGRAPH_COMBINER1_OUT_RGB,
+NV10_PGRAPH_COMBINER_FINAL0,
+NV10_PGRAPH_COMBINER_FINAL1,
+0x00400e00,
+0x00400e04,
+0x00400e08,
+0x00400e0c,
+0x00400e10,
+0x00400e14,
+0x00400e18,
+0x00400e1c,
+0x00400e20,
+0x00400e24,
+0x00400e28,
+0x00400e2c,
+0x00400e30,
+0x00400e34,
+0x00400e38,
+0x00400e3c,
+NV04_PGRAPH_PASSTHRU_0,
+NV04_PGRAPH_PASSTHRU_1,
+NV04_PGRAPH_PASSTHRU_2,
+NV10_PGRAPH_DIMX_TEXTURE,
+NV10_PGRAPH_WDIMX_TEXTURE,
+NV10_PGRAPH_DVD_COLORFMT,
+NV10_PGRAPH_SCALED_FORMAT,
+NV04_PGRAPH_MISC24_0,
+NV04_PGRAPH_MISC24_1,
+NV04_PGRAPH_MISC24_2,
+NV03_PGRAPH_X_MISC,
+NV03_PGRAPH_Y_MISC,
+NV04_PGRAPH_VALID1,
+NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs [] = {
+NV10_PGRAPH_DEBUG_4,
+0x004006b0,
+0x00400eac,
+0x00400eb0,
+0x00400eb4,
+0x00400eb8,
+0x00400ebc,
+0x00400ec0,
+0x00400ec4,
+0x00400ec8,
+0x00400ecc,
+0x00400ed0,
+0x00400ed4,
+0x00400ed8,
+0x00400edc,
+0x00400ee0,
+0x00400a00,
+0x00400a04,
+};
+
+struct graph_state {
+ int nv10[sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0])];
+ int nv17[sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0])];
+ struct pipe_state pipe_state;
+};
+
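+/* The 3D pipe state is accessed through an address/data register pair:
+ * PIPE_ADDRESS selects a pipe offset and repeated PIPE_DATA accesses then
+ * appear to stream consecutive dwords from that offset.  PIPE_SAVE copies
+ * each range into the pipe_state shadow; nv10_graph_load_pipe restores it.
+ */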
+static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ int i;
+#define PIPE_SAVE(addr) \
+ do { \
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+ fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+ PIPE_SAVE(0x4400);
+ PIPE_SAVE(0x0200);
+ PIPE_SAVE(0x6400);
+ PIPE_SAVE(0x6800);
+ PIPE_SAVE(0x6c00);
+ PIPE_SAVE(0x7000);
+ PIPE_SAVE(0x7400);
+ PIPE_SAVE(0x7800);
+ PIPE_SAVE(0x0040);
+ PIPE_SAVE(0x0000);
+
+#undef PIPE_SAVE
+}
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ int i;
+ uint32_t xfmode0, xfmode1;
+#define PIPE_RESTORE(addr) \
+ do { \
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
+ } while (0)
+
+
+ nouveau_wait_for_idle(dev);
+ /* XXX check haiku comments */
+ xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
+ xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
+ NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
+ NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+
+ PIPE_RESTORE(0x0200);
+ nouveau_wait_for_idle(dev);
+
+ /* restore XFMODE */
+ NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
+ NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(0x6400);
+ PIPE_RESTORE(0x6800);
+ PIPE_RESTORE(0x6c00);
+ PIPE_RESTORE(0x7000);
+ PIPE_RESTORE(0x7400);
+ PIPE_RESTORE(0x7800);
+ PIPE_RESTORE(0x4400);
+ PIPE_RESTORE(0x0000);
+ PIPE_RESTORE(0x0040);
+ nouveau_wait_for_idle(dev);
+
+#undef PIPE_RESTORE
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+{
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ uint32_t *fifo_pipe_state_addr;
+ int i;
+#define PIPE_INIT(addr) \
+ do { \
+ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ } while (0)
+#define PIPE_INIT_END(addr) \
+ do { \
+ if (fifo_pipe_state_addr != \
+ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
+ DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \
+ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
+ } while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
+
+ PIPE_INIT(0x0200);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0200);
+
+ PIPE_INIT(0x6400);
+ for (i = 0; i < 211; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ PIPE_INIT_END(0x6400);
+
+ PIPE_INIT(0x6800);
+ for (i = 0; i < 162; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ for (i = 0; i < 25; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6800);
+
+ PIPE_INIT(0x6c00);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0xbf800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6c00);
+
+ PIPE_INIT(0x7000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ for (i = 0; i < 35; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7000);
+
+ PIPE_INIT(0x7400);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7400);
+
+ PIPE_INIT(0x7800);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7800);
+
+ PIPE_INIT(0x4400);
+ for (i = 0; i < 32; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x4400);
+
+ PIPE_INIT(0x0000);
+ for (i = 0; i < 16; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0000);
+
+ PIPE_INIT(0x0040);
+ for (i = 0; i < 4; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
+ if (nv10_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ DRM_ERROR("unknow offset nv10_ctx_regs %d\n", reg);
+ return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++) {
+ if (nv17_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ DRM_ERROR("unknow offset nv17_ctx_regs %d\n", reg);
+ return -1;
+}
+
+int nv10_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ int i;
+
+ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
+ NV_WRITE(nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
+ if (dev_priv->chipset>=0x17) {
+ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
+ NV_WRITE(nv17_graph_ctx_regs[i], pgraph_ctx->nv17[i]);
+ }
+
+ nv10_graph_load_pipe(chan);
+
+ return 0;
+}
+
+int nv10_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ int i;
+
+ for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
+ pgraph_ctx->nv10[i] = NV_READ(nv10_graph_ctx_regs[i]);
+ if (dev_priv->chipset>=0x17) {
+ for (i = 0; i < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++)
+ pgraph_ctx->nv17[i] = NV_READ(nv17_graph_ctx_regs[i]);
+ }
+
+ nv10_graph_save_pipe(chan);
+
+ return 0;
+}
+
+void nouveau_nv10_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_engine *engine;
+ struct nouveau_channel *next, *last;
+ int chid;
+
+ if (!dev) {
+ DRM_DEBUG("Invalid drm_device\n");
+ return;
+ }
+ dev_priv = dev->dev_private;
+ if (!dev_priv) {
+ DRM_DEBUG("Invalid drm_nouveau_private\n");
+ return;
+ }
+ if (!dev_priv->fifos) {
+ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
+ return;
+ }
+ engine = &dev_priv->Engine;
+
+ chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) &
+ (engine->fifo.channels - 1);
+ next = dev_priv->fifos[chid];
+
+ if (!next) {
+ DRM_ERROR("Invalid next channel\n");
+ return;
+ }
+
+ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) &
+ (engine->fifo.channels - 1);
+ last = dev_priv->fifos[chid];
+
+ if (!last) {
+ DRM_INFO("WARNING: Invalid last channel, switch to %x\n",
+ next->id);
+ } else {
+ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
+ last->id, next->id);
+ }
+
+ NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+ if (last) {
+ nouveau_wait_for_idle(dev);
+ nv10_graph_save_context(last);
+ }
+
+ nouveau_wait_for_idle(dev);
+
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+
+ nouveau_wait_for_idle(dev);
+
+ nv10_graph_load_context(next);
+
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_FIFO,0x1);
+}
+
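+/* NV_WRITE_CTX/NV17_WRITE_CTX stage an initial value into the channel's
+ * software context image (pgraph_ctx->nv10/nv17) rather than writing the
+ * hardware directly; the value reaches the registers when the context is
+ * first loaded by nv10_graph_load_context().
+ */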
+#define NV_WRITE_CTX(reg, val) do { \
+ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv10[offset] = val; \
+ } while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset >= 0) \
+ pgraph_ctx->nv17[offset] = val; \
+ } while (0)
+
+int nv10_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state* pgraph_ctx;
+
+ DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
+
+ chan->pgraph_ctx = pgraph_ctx = drm_calloc(1, sizeof(*pgraph_ctx),
+ DRM_MEM_DRIVER);
+
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ /* mmio traces suggest this should be done in the DDX with methods/objects */
+
+ NV_WRITE_CTX(0x00400e88, 0x08000000);
+ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+ NV_WRITE_CTX(0x00400e10, 0x00001000);
+ NV_WRITE_CTX(0x00400e14, 0x00001000);
+ NV_WRITE_CTX(0x00400e30, 0x00080008);
+ NV_WRITE_CTX(0x00400e34, 0x00080008);
+ if (dev_priv->chipset>=0x17) {
+ /* is it really needed ??? */
+ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
+ NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+ NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+ }
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+
+ nv10_graph_create_pipe(chan);
+ return 0;
+}
+
+void nv10_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct graph_state* pgraph_ctx = chan->pgraph_ctx;
+ int chid;
+
+ drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);
+ chan->pgraph_ctx = NULL;
+
+ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);
+
+ /* This code seems to corrupt the 3D pipe, but the blob seems to do
+ * similar things; it is not yet clear whether that matters.
+ */
+ if (chid == chan->id) {
+ DRM_INFO("cleaning a channel with graph in current context\n");
+ }
+}
+
+int nv10_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
+ //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
+ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
+ (1<<29) |
+ (1<<31));
+ if (dev_priv->chipset>=0x17) {
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ NV_WRITE(0x004006b0, 0x40000020);
+ }
+ else
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
+
+ /* copy tile info from PFB */
+ for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
+ NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i)));
+ NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i)));
+ NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i)));
+ NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
+ }
+
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
+
+ return 0;
+}
+
+void nv10_graph_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
new file mode 100644
index 0000000..05a2ff3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -0,0 +1,893 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/*
+ * NV20
+ * -----
+ * There are 3 families:
+ * NV20 is 0x10de:0x020*
+ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
+ * NV2A is 0x10de:0x02A0
+ *
+ * NV30
+ * -----
+ * There are 3 families:
+ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
+ * NV34 is 0x10de:0x032*
+ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35):
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
+ */
+
+#define NV20_GRCTX_SIZE (3580*4)
+#define NV25_GRCTX_SIZE (3529*4)
+#define NV2A_GRCTX_SIZE (3500*4)
+
+#define NV30_31_GRCTX_SIZE (24392)
+#define NV34_GRCTX_SIZE (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
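+/* The nv*_graph_context_init() functions below fill a freshly allocated
+ * context object with its non-zero initial values.  The offsets and values
+ * were derived from mmio-trace dumps of the binary driver writing the
+ * context image into PRAMIN; the raw dump excerpts are kept in the
+ * comments for reference.
+ */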
+static void nv20_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+/*
+write32 #1 block at +0x00740adc NV_PRAMIN+0x40adc of 3369 (0xd29) elements:
++0x00740adc: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b3c: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740bbc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740bdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740bfc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+
++0x00740c1c: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
++0x00740c3c: 00000000 00000000 00000000 44400000 00000000 00000000 00000000 00000000
++0x00740c5c: 00000000 00000000 00000000 00000000 00000000 00000000 00030303 00030303
++0x00740c7c: 00030303 00030303 00000000 00000000 00000000 00000000 00080000 00080000
++0x00740c9c: 00080000 00080000 00000000 00000000 01012000 01012000 01012000 01012000
++0x00740cbc: 000105b8 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008
++0x00740cdc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740cfc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
++0x00740d1c: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
++0x00740d3c: 00000000 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000
+
++0x00740d5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740d7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740d9c: 00000001 00000000 00004000 00000000 00000000 00000001 00000000 00040000
++0x00740dbc: 00010000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740ddc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+...
+*/
+ INSTANCE_WR(ctx, (0x33c/4)+0, 0xffff0000);
+ INSTANCE_WR(ctx, (0x33c/4)+25, 0x0fff0000);
+ INSTANCE_WR(ctx, (0x33c/4)+26, 0x0fff0000);
+ INSTANCE_WR(ctx, (0x33c/4)+80, 0x00000101);
+ INSTANCE_WR(ctx, (0x33c/4)+85, 0x00000111);
+ INSTANCE_WR(ctx, (0x33c/4)+91, 0x44400000);
+ for (i = 0; i < 4; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+102+i, 0x00030303);
+ for (i = 0; i < 4; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+110+i, 0x00080000);
+ for (i = 0; i < 4; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+116+i, 0x01012000);
+ for (i = 0; i < 4; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+120+i, 0x000105b8);
+ for (i = 0; i < 4; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+124+i, 0x00080008);
+ for (i = 0; i < 16; ++i)
+ INSTANCE_WR(ctx, (0x33c/4)+136+i, 0x07ff0000);
+ INSTANCE_WR(ctx, (0x33c/4)+154, 0x4b7fffff);
+ INSTANCE_WR(ctx, (0x33c/4)+176, 0x00000001);
+ INSTANCE_WR(ctx, (0x33c/4)+178, 0x00004000);
+ INSTANCE_WR(ctx, (0x33c/4)+181, 0x00000001);
+ INSTANCE_WR(ctx, (0x33c/4)+183, 0x00040000);
+ INSTANCE_WR(ctx, (0x33c/4)+184, 0x00010000);
+
+/*
+...
++0x0074239c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x007423bc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x007423dc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x007423fc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+...
++0x00742bdc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742bfc: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742c1c: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742c3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+...
+*/
+ for (i = 0; i < 0x880; i += 0x10) {
+ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+0, 0x10700ff9);
+ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+1, 0x0436086c);
+ INSTANCE_WR(ctx, ((0x1c1c + i)/4)+2, 0x000c001b);
+ }
+
+/*
+write32 #1 block at +0x00742fbc NV_PRAMIN+0x42fbc of 4 (0x4) elements:
++0x00742fbc: 3f800000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x281c/4), 0x3f800000);
+
+/*
+write32 #1 block at +0x00742ffc NV_PRAMIN+0x42ffc of 12 (0xc) elements:
++0x00742ffc: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
++0x0074301c: 00000000 bf800000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x285c/4)+0, 0x40000000);
+ INSTANCE_WR(ctx, (0x285c/4)+1, 0x3f800000);
+ INSTANCE_WR(ctx, (0x285c/4)+2, 0x3f000000);
+ INSTANCE_WR(ctx, (0x285c/4)+4, 0x40000000);
+ INSTANCE_WR(ctx, (0x285c/4)+5, 0x3f800000);
+ INSTANCE_WR(ctx, (0x285c/4)+7, 0xbf800000);
+ INSTANCE_WR(ctx, (0x285c/4)+9, 0xbf800000);
+
+/*
+write32 #1 block at +0x00742fcc NV_PRAMIN+0x42fcc of 4 (0x4) elements:
++0x00742fcc: 00000000 3f800000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x282c/4)+1, 0x3f800000);
+
+/*
+write32 #1 block at +0x0074302c NV_PRAMIN+0x4302c of 4 (0x4) elements:
++0x0074302c: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x00743c9c NV_PRAMIN+0x43c9c of 4 (0x4) elements:
++0x00743c9c: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x00743c3c NV_PRAMIN+0x43c3c of 8 (0x8) elements:
++0x00743c3c: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x349c/4)+2, 0x000fe000);
+
+/*
+write32 #1 block at +0x00743c6c NV_PRAMIN+0x43c6c of 4 (0x4) elements:
++0x00743c6c: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x00743ccc NV_PRAMIN+0x43ccc of 4 (0x4) elements:
++0x00743ccc: 00000000 000003f8 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x352c/4)+1, 0x000003f8);
+
+/* write32 #1 NV_PRAMIN+0x43ce0 <- 0x002fe000 */
+ INSTANCE_WR(ctx, 0x3540/4, 0x002fe000);
+
+/*
+write32 #1 block at +0x00743cfc NV_PRAMIN+0x43cfc of 8 (0x8) elements:
++0x00743cfc: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
+*/
+ for (i = 0; i < 8; ++i)
+ INSTANCE_WR(ctx, (0x355c/4)+i, 0x001c527c);
+}
+
+static void nv2a_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x33c/4, 0xffff0000);
+ for(i = 0x3a0; i< 0x3a8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x47c/4, 0x00000101);
+ INSTANCE_WR(ctx, 0x490/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x4a8/4, 0x44400000);
+ for(i = 0x4d4; i< 0x4e4; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x4f4; i< 0x504; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080000);
+ for(i = 0x50c; i< 0x51c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x51c; i< 0x52c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x000105b8);
+ for(i = 0x52c; i< 0x53c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for(i = 0x55c; i< 0x59c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x5a4/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x5fc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x604/4, 0x00004000);
+ INSTANCE_WR(ctx, 0x610/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x618/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x61c/4, 0x00010000);
+
+ for (i = 0x1a9c; i <= 0x22fc; i += 32) {
+ INSTANCE_WR(ctx, i/4 , 0x10700ff9);
+ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+ }
+
+ INSTANCE_WR(ctx, 0x269c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x26b0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x26dc/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x26e0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x26e4/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x26ec/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x26f0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x26f8/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x2700/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x3024/4, 0x000fe000);
+ INSTANCE_WR(ctx, 0x30a0/4, 0x000003f8);
+ INSTANCE_WR(ctx, 0x33fc/4, 0x002fe000);
+ for(i = 0x341c; i< 0x343c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x001c527c);
+}
+
+static void nv25_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+/*
+write32 #1 block at +0x00740a7c NV_PRAMIN.GRCTX0+0x35c of 173 (0xad) elements:
++0x00740a7c: ffff0000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740a9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740abc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740adc: 00000000 0fff0000 0fff0000 00000000 00000000 00000000 00000000 00000000
++0x00740afc: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b1c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b3c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b5c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+
++0x00740b7c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740b9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740bbc: 00000101 00000000 00000000 00000000 00000000 00000111 00000000 00000000
++0x00740bdc: 00000000 00000000 00000000 00000080 ffff0000 00000001 00000000 00000000
++0x00740bfc: 00000000 00000000 44400000 00000000 00000000 00000000 00000000 00000000
++0x00740c1c: 4b800000 00000000 00000000 00000000 00000000 00030303 00030303 00030303
++0x00740c3c: 00030303 00000000 00000000 00000000 00000000 00080000 00080000 00080000
++0x00740c5c: 00080000 00000000 00000000 01012000 01012000 01012000 01012000 000105b8
+
++0x00740c7c: 000105b8 000105b8 000105b8 00080008 00080008 00080008 00080008 00000000
++0x00740c9c: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07ff0000
++0x00740cbc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000
++0x00740cdc: 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 07ff0000 00000000
++0x00740cfc: 00000000 4b7fffff 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740d1c: 00000000 00000000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x35c/4)+0, 0xffff0000);
+ INSTANCE_WR(ctx, (0x35c/4)+25, 0x0fff0000);
+ INSTANCE_WR(ctx, (0x35c/4)+26, 0x0fff0000);
+ INSTANCE_WR(ctx, (0x35c/4)+80, 0x00000101);
+ INSTANCE_WR(ctx, (0x35c/4)+85, 0x00000111);
+ INSTANCE_WR(ctx, (0x35c/4)+91, 0x00000080);
+ INSTANCE_WR(ctx, (0x35c/4)+92, 0xffff0000);
+ INSTANCE_WR(ctx, (0x35c/4)+93, 0x00000001);
+ INSTANCE_WR(ctx, (0x35c/4)+98, 0x44400000);
+ INSTANCE_WR(ctx, (0x35c/4)+104, 0x4b800000);
+ INSTANCE_WR(ctx, (0x35c/4)+109, 0x00030303);
+ INSTANCE_WR(ctx, (0x35c/4)+110, 0x00030303);
+ INSTANCE_WR(ctx, (0x35c/4)+111, 0x00030303);
+ INSTANCE_WR(ctx, (0x35c/4)+112, 0x00030303);
+ INSTANCE_WR(ctx, (0x35c/4)+117, 0x00080000);
+ INSTANCE_WR(ctx, (0x35c/4)+118, 0x00080000);
+ INSTANCE_WR(ctx, (0x35c/4)+119, 0x00080000);
+ INSTANCE_WR(ctx, (0x35c/4)+120, 0x00080000);
+ INSTANCE_WR(ctx, (0x35c/4)+123, 0x01012000);
+ INSTANCE_WR(ctx, (0x35c/4)+124, 0x01012000);
+ INSTANCE_WR(ctx, (0x35c/4)+125, 0x01012000);
+ INSTANCE_WR(ctx, (0x35c/4)+126, 0x01012000);
+ INSTANCE_WR(ctx, (0x35c/4)+127, 0x000105b8);
+ INSTANCE_WR(ctx, (0x35c/4)+128, 0x000105b8);
+ INSTANCE_WR(ctx, (0x35c/4)+129, 0x000105b8);
+ INSTANCE_WR(ctx, (0x35c/4)+130, 0x000105b8);
+ INSTANCE_WR(ctx, (0x35c/4)+131, 0x00080008);
+ INSTANCE_WR(ctx, (0x35c/4)+132, 0x00080008);
+ INSTANCE_WR(ctx, (0x35c/4)+133, 0x00080008);
+ INSTANCE_WR(ctx, (0x35c/4)+134, 0x00080008);
+ for (i=0; i<16; ++i)
+ INSTANCE_WR(ctx, (0x35c/4)+143+i, 0x07ff0000);
+ INSTANCE_WR(ctx, (0x35c/4)+161, 0x4b7fffff);
+
+/*
+write32 #1 block at +0x00740d34 NV_PRAMIN.GRCTX0+0x614 of 3136 (0xc40) elements:
++0x00740d34: 00000000 00000000 00000000 00000080 30201000 70605040 b0a09080 f0e0d0c0
++0x00740d54: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00740d74: 00000000 00000000 00000000 00000000 00000001 00000000 00004000 00000000
++0x00740d94: 00000000 00000001 00000000 00040000 00010000 00000000 00000000 00000000
++0x00740db4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+...
++0x00742214: 00000000 00000000 00000000 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742234: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742254: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742274: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
+...
++0x00742a34: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742a54: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742a74: 10700ff9 0436086c 000c001b 00000000 10700ff9 0436086c 000c001b 00000000
++0x00742a94: 10700ff9 0436086c 000c001b 00000000 00000000 00000000 00000000 00000000
++0x00742ab4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
++0x00742ad4: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x614/4)+3, 0x00000080);
+ INSTANCE_WR(ctx, (0x614/4)+4, 0x30201000);
+ INSTANCE_WR(ctx, (0x614/4)+5, 0x70605040);
+ INSTANCE_WR(ctx, (0x614/4)+6, 0xb0a09080);
+ INSTANCE_WR(ctx, (0x614/4)+7, 0xf0e0d0c0);
+ INSTANCE_WR(ctx, (0x614/4)+20, 0x00000001);
+ INSTANCE_WR(ctx, (0x614/4)+22, 0x00004000);
+ INSTANCE_WR(ctx, (0x614/4)+25, 0x00000001);
+ INSTANCE_WR(ctx, (0x614/4)+27, 0x00040000);
+ INSTANCE_WR(ctx, (0x614/4)+28, 0x00010000);
+ for (i=0; i < 0x880/4; i+=4) {
+ INSTANCE_WR(ctx, (0x1b04/4)+i+0, 0x10700ff9);
+ INSTANCE_WR(ctx, (0x1b04/4)+i+1, 0x0436086c);
+ INSTANCE_WR(ctx, (0x1b04/4)+i+2, 0x000c001b);
+ }
+
+/*
+write32 #1 block at +0x00742e24 NV_PRAMIN.GRCTX0+0x2704 of 4 (0x4) elements:
++0x00742e24: 3f800000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x2704/4), 0x3f800000);
+
+/*
+write32 #1 block at +0x00742e64 NV_PRAMIN.GRCTX0+0x2744 of 12 (0xc) elements:
++0x00742e64: 40000000 3f800000 3f000000 00000000 40000000 3f800000 00000000 bf800000
++0x00742e84: 00000000 bf800000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x2744/4)+0, 0x40000000);
+ INSTANCE_WR(ctx, (0x2744/4)+1, 0x3f800000);
+ INSTANCE_WR(ctx, (0x2744/4)+2, 0x3f000000);
+ INSTANCE_WR(ctx, (0x2744/4)+4, 0x40000000);
+ INSTANCE_WR(ctx, (0x2744/4)+5, 0x3f800000);
+ INSTANCE_WR(ctx, (0x2744/4)+7, 0xbf800000);
+ INSTANCE_WR(ctx, (0x2744/4)+9, 0xbf800000);
+
+/*
+write32 #1 block at +0x00742e34 NV_PRAMIN.GRCTX0+0x2714 of 4 (0x4) elements:
++0x00742e34: 00000000 3f800000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x2714/4)+1, 0x3f800000);
+
+/*
+write32 #1 block at +0x00742e94 NV_PRAMIN.GRCTX0+0x2774 of 4 (0x4) elements:
++0x00742e94: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x00743804 NV_PRAMIN.GRCTX0+0x30e4 of 4 (0x4) elements:
++0x00743804: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x007437a4 NV_PRAMIN.GRCTX0+0x3084 of 8 (0x8) elements:
++0x007437a4: 00000000 00000000 000fe000 00000000 00000000 00000000 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x3084/4)+2, 0x000fe000);
+
+/*
+write32 #1 block at +0x007437d4 NV_PRAMIN.GRCTX0+0x30b4 of 4 (0x4) elements:
++0x007437d4: 00000000 00000000 00000000 00000000
+write32 #1 block at +0x00743824 NV_PRAMIN.GRCTX0+0x3104 of 4 (0x4) elements:
++0x00743824: 00000000 000003f8 00000000 00000000
+*/
+ INSTANCE_WR(ctx, (0x3104/4)+1, 0x000003f8);
+
+/* write32 #1 NV_PRAMIN.GRCTX0+0x3468 <- 0x002fe000 */
+ INSTANCE_WR(ctx, 0x3468/4, 0x002fe000);
+
+/*
+write32 #1 block at +0x00743ba4 NV_PRAMIN.GRCTX0+0x3484 of 8 (0x8) elements:
++0x00743ba4: 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c 001c527c
+*/
+ for (i=0; i<8; ++i)
+ INSTANCE_WR(ctx, (0x3484/4)+i, 0x001c527c);
+}
+
+static void nv30_31_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x410/4, 0x00000101);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x428/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x444/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x448/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x44c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x460/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x48c/4, 0xffff0000);
+ for(i = 0x4e0; i< 0x4e8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4ec/4, 0x00011100);
+ for(i = 0x508; i< 0x548; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x58c/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x590/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x594/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x598/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000);
+ for(i = 0x600; i< 0x640; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x640; i< 0x680; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6c0; i< 0x700; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x700; i< 0x740; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x740; i< 0x780; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x85c/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x860/4, 0x00010000);
+ for(i = 0x864; i< 0x874; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ for(i = 0x1f18; i<= 0x3088 ; i+= 16) {
+ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
+ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+ }
+ for(i = 0x30b8; i< 0x30c8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x344c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3808/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x381c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3848/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x384c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3850/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x3858/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x385c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3864/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);
+}
+
+static void nv34_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x40c/4, 0x01000101);
+ INSTANCE_WR(ctx, 0x420/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x440/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x448/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x480/4, 0xffff0000);
+ for(i = 0x4d4; i< 0x4dc; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4e0/4, 0x00011100);
+ for(i = 0x4fc; i< 0x53c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x57c/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x580/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x584/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x588/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000);
+ for(i = 0x5f0; i< 0x630; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x630; i< 0x670; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6b0; i< 0x6f0; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x6f0; i< 0x730; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x730; i< 0x770; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x850/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x854/4, 0x00010000);
+ for(i = 0x858; i< 0x868; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ for(i = 0x15ac; i<= 0x271c ; i+= 16) {
+ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
+ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+ }
+ for(i = 0x274c; i< 0x275c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2edc/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x2eec/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000);
+}
+
+static void nv35_36_graph_context_init(struct drm_device *dev,
+ struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x40c/4, 0x00000101);
+ INSTANCE_WR(ctx, 0x420/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x440/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x448/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x488/4, 0xffff0000);
+ for(i = 0x4dc; i< 0x4e4; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4e8/4, 0x00011100);
+ for(i = 0x504; i< 0x544; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x588/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x58c/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x590/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x594/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000);
+ for(i = 0x604; i< 0x644; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x644; i< 0x684; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6c4; i< 0x704; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x704; i< 0x744; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x744; i< 0x784; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x860/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x864/4, 0x00010000);
+ for(i = 0x868; i< 0x878; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ for(i = 0x1f1c; i<= 0x308c ; i+= 16) {
+ INSTANCE_WR(ctx, i/4 + 0, 0x10700ff9);
+ INSTANCE_WR(ctx, i/4 + 1, 0x0436086c);
+ INSTANCE_WR(ctx, i/4 + 2, 0x000c001b);
+ }
+ for(i = 0x30bc; i< 0x30cc; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x380c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3820/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x384c/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x3850/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3854/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x385c/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x3860/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3868/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);
+}
+
+int nv20_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+ unsigned int ctx_size;
+ unsigned int idoffs = 0x28/4;
+ int ret;
+
+ switch (dev_priv->chipset) {
+ case 0x20:
+ ctx_size = NV20_GRCTX_SIZE;
+ ctx_init = nv20_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x25:
+ case 0x28:
+ ctx_size = NV25_GRCTX_SIZE;
+ ctx_init = nv25_graph_context_init;
+ break;
+ case 0x2a:
+ ctx_size = NV2A_GRCTX_SIZE;
+ ctx_init = nv2a_graph_context_init;
+ idoffs = 0;
+ break;
+ case 0x30:
+ case 0x31:
+ ctx_size = NV30_31_GRCTX_SIZE;
+ ctx_init = nv30_31_graph_context_init;
+ break;
+ case 0x34:
+ ctx_size = NV34_GRCTX_SIZE;
+ ctx_init = nv34_graph_context_init;
+ break;
+ case 0x35:
+ case 0x36:
+ ctx_size = NV35_36_GRCTX_SIZE;
+ ctx_init = nv35_36_graph_context_init;
+ break;
+ default:
+ ctx_size = 0;
+ ctx_init = nv35_36_graph_context_init;
+ DRM_ERROR("Please contact the devs if you want your NV%x"
+ " card to work\n", dev_priv->chipset);
+ return -ENOSYS;
+ break;
+ }
+
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx)))
+ return ret;
+
+ /* Initialise default context values */
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+ /* nv20: INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
+ INSTANCE_WR(chan->ramin_grctx->gpuobj, idoffs, (chan->id<<24)|0x1);
+ /* CTX_USER */
+
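+	/* Point this channel's slot in the PGRAPH context pointer table at
+	 * the new grctx; instance addresses are stored in 16-byte units. */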
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
+
+ return 0;
+}
+
+void nv20_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (chan->ramin_grctx)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
+}
+
+int nv20_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
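+	/* Tell PGRAPH where this channel's grctx lives (instance address in
+	 * 16-byte units) and trigger a hardware context load. */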
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD);
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+
+ nouveau_wait_for_idle(dev);
+ return 0;
+}
+
+int nv20_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+ nouveau_wait_for_idle(dev);
+ return 0;
+}
+
+static void nv20_graph_rdi(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, writecount = 32;
+ uint32_t rdi_index = 0x2c80000;
+
+ if (dev_priv->chipset == 0x20) {
+ rdi_index = 0x3d0000;
+ writecount = 15;
+ }
+
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index);
+ for (i = 0; i < writecount; i++)
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);
+
+ nouveau_wait_for_idle(dev);
+}
+
+int nv20_graph_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp, vramsz;
+ int ret, i;
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
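+		/* One 32-bit entry per channel; sized for 32 channels. */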
+ dev_priv->ctx_table_size = 32 * 4;
+ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table)))
+ return ret;
+ }
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ nv20_graph_rdi(dev);
+
+ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
+ NV_WRITE(0x40009C , 0x00000040);
+
+ if (dev_priv->chipset >= 0x25) {
+ NV_WRITE(0x400890, 0x00080000);
+ NV_WRITE(0x400610, 0x304B1FB6);
+ NV_WRITE(0x400B80, 0x18B82880);
+ NV_WRITE(0x400B84, 0x44000000);
+ NV_WRITE(0x400098, 0x40000080);
+ NV_WRITE(0x400B88, 0x000000ff);
+ } else {
+ NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */
+ NV_WRITE(0x400094, 0x00000005);
+ NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */
+ NV_WRITE(0x400B84, 0x24000000);
+ NV_WRITE(0x400098, 0x00000040);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000030);
+ }
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i)));
+ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i)));
+ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i)));
+ }
+ for (i = 0; i < 8; i++) {
+ NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4));
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));
+ }
+ NV_WRITE(0x4009a0, NV_READ(0x100324));
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324));
+
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
+
+ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
+ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
+ NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , NV_READ(NV04_PFB_CFG1));
+ NV_WRITE(0x400820, 0);
+ NV_WRITE(0x400824, 0);
+ NV_WRITE(0x400864, vramsz-1);
+ NV_WRITE(0x400868, vramsz-1);
+
+ /* interesting.. the below overwrites some of the tile setup above.. */
+ NV_WRITE(0x400B20, 0x00000000);
+ NV_WRITE(0x400B04, 0xFFFFFFFF);
+
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+ return 0;
+}
+
+void nv20_graph_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+}
+
+int nv30_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+// uint32_t vramsz, tmp;
+ int ret, i;
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ if (!dev_priv->ctx_table) {
+ /* Create Context Pointer Table */
+ dev_priv->ctx_table_size = 32 * 4;
+ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table)))
+ return ret;
+ }
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
+
+ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ NV_WRITE(0x400890, 0x01b463ff);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
+ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ NV_WRITE(0x400B80, 0x1003d888);
+ NV_WRITE(0x400B84, 0x0c000000);
+ NV_WRITE(0x400098, 0x00000000);
+ NV_WRITE(0x40009C, 0x0005ad00);
+	NV_WRITE(0x400B88, 0x62ff00ff);	/* suspiciously like PGRAPH_DEBUG_2 */
+ NV_WRITE(0x4000a0, 0x00000000);
+ NV_WRITE(0x4000a4, 0x00000008);
+ NV_WRITE(0x4008a8, 0xb784a400);
+ NV_WRITE(0x400ba0, 0x002f8685);
+ NV_WRITE(0x400ba4, 0x00231f3f);
+ NV_WRITE(0x4008a4, 0x40000020);
+
+ if (dev_priv->chipset == 0x34) {
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00200201);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000008);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000032);
+ NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ NV_WRITE(NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
+ NV_WRITE(0x4000c0, 0x00000016);
+
+ /* copy tile info from PFB */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
+ NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i)));
+ /* which is NV40_PGRAPH_TLIMIT0(i) ?? */
+ NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i)));
+ /* which is NV40_PGRAPH_TSIZE0(i) ?? */
+ NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i)));
+ /* which is NV40_PGRAPH_TILE0(i) ?? */
+ }
+
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(0x0040075c , 0x00000001);
+ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
+
+ /* begin RAM config */
+// vramsz = drm_get_resource_len(dev, 0) - 1;
+ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
+ if (dev_priv->chipset != 0x34) {
+ NV_WRITE(0x400750, 0x00EA0000);
+ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x400750, 0x00EA0004);
+ NV_WRITE(0x400754, NV_READ(NV04_PFB_CFG1));
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fb.c b/drivers/gpu/drm/nouveau/nv40_fb.c
new file mode 100644
index 0000000..ae784cb
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fb.c
@@ -0,0 +1,62 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_bar_size, tmp;
+ int num_tiles;
+ int i;
+
+	/* This is strictly an NV4x register (don't know about NV5x).
+	 * The blob sets these to all kinds of values, and they mess up our
+	 * setup. I got value 0x52802 instead. For some cards the blob even
+	 * sets it back to 0x1.
+	 * Note: the blob doesn't read this value, so I'm pretty sure this is
+	 * safe for all cards. Any idea what this is?
+	 */
+ NV_WRITE(NV40_PFB_UNK_800, 0x1);
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ tmp = NV_READ(NV10_PFB_CLOSE_PAGE2);
+ NV_WRITE(NV10_PFB_CLOSE_PAGE2, tmp & ~(1<<15));
+ num_tiles = NV10_PFB_TILE__SIZE;
+ break;
+ case 0x46: /* G72 */
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ case 0x4c: /* C51 (G7X version) */
+ num_tiles = NV40_PFB_TILE__SIZE_1;
+ break;
+ default:
+ num_tiles = NV40_PFB_TILE__SIZE_0;
+ break;
+ }
+
+ fb_bar_size = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ for (i=0; i<num_tiles; i++) {
+ NV_WRITE(NV10_PFB_TILE(i), 0);
+ NV_WRITE(NV10_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ default:
+ for (i=0; i<num_tiles; i++) {
+ NV_WRITE(NV40_PFB_TILE(i), 0);
+ NV_WRITE(NV40_PFB_TLIMIT(i), fb_bar_size);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
new file mode 100644
index 0000000..fc38bb9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+
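+/* Helpers for accessing this channel's RAMFC block through instance
+ * memory: offsets are the byte-based NV40_RAMFC_* constants, converted
+ * to 32-bit word indices for INSTANCE_WR/INSTANCE_RD. */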
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV40_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV40_RAMFC_##offset/4)
+#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE))
+#define NV40_RAMFC__SIZE 128
+
+int
+nv40_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
+ NV40_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
+
+	/* Fill in the entries that are seen populated in dumps of the NVIDIA
+	 * driver just after a channel is put into DMA mode.
+	 */
+ RAMFC_WR(DMA_PUT , chan->pushbuf_base);
+ RAMFC_WR(DMA_GET , chan->pushbuf_base);
+ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
+ RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ 0x30000000 /* no idea.. */);
+ RAMFC_WR(DMA_SUBROUTINE, 0);
+ RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4);
+ RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
+
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
+ return 0;
+}
+
+void
+nv40_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
+
+ if (chan->ramfc)
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+int
+nv40_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp, tmp2;
+
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
+ NV_WRITE(NV10_PFIFO_CACHE1_REF_CNT , RAMFC_RD(REF_CNT));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE , RAMFC_RD(DMA_INSTANCE));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT , RAMFC_RD(DMA_DCOUNT));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE , RAMFC_RD(DMA_STATE));
+
+ /* No idea what 0x2058 is.. */
+ tmp = RAMFC_RD(DMA_FETCH);
+ tmp2 = NV_READ(0x2058) & 0xFFF;
+ tmp2 |= (tmp & 0x30000000);
+ NV_WRITE(0x2058, tmp2);
+ tmp &= ~0x30000000;
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH , tmp);
+
+ NV_WRITE(NV04_PFIFO_CACHE1_ENGINE , RAMFC_RD(ENGINE));
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL1 , RAMFC_RD(PULL1_ENGINE));
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_VALUE , RAMFC_RD(ACQUIRE_VALUE));
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP, RAMFC_RD(ACQUIRE_TIMESTAMP));
+ NV_WRITE(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT , RAMFC_RD(ACQUIRE_TIMEOUT));
+ NV_WRITE(NV10_PFIFO_CACHE1_SEMAPHORE , RAMFC_RD(SEMAPHORE));
+ NV_WRITE(NV10_PFIFO_CACHE1_DMA_SUBROUTINE , RAMFC_RD(DMA_SUBROUTINE));
+ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE , RAMFC_RD(GRCTX_INSTANCE));
+ NV_WRITE(0x32e4, RAMFC_RD(UNK_40));
+ /* NVIDIA does this next line twice... */
+ NV_WRITE(0x32e8, RAMFC_RD(UNK_44));
+ NV_WRITE(0x2088, RAMFC_RD(UNK_4C));
+ NV_WRITE(0x3300, RAMFC_RD(UNK_50));
+
+ /* not sure what part is PUT, and which is GET.. never seen a non-zero
+ * value appear in a mmio-trace yet..
+ */
+
+ /* Don't clobber the TIMEOUT_ENABLED flag when restoring from RAMFC */
+ tmp = NV_READ(NV04_PFIFO_DMA_TIMESLICE) & ~0x1FFFF;
+ tmp |= RAMFC_RD(DMA_TIMESLICE) & 0x1FFFF;
+ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
+
+ /* Set channel active, and in DMA mode */
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1,
+ NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
+
+ /* Reset DMA_CTL_AT_INFO to INVALID */
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
+
+ return 0;
+}
+
+int
+nv40_fifo_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+ RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
+ RAMFC_WR(DMA_GET , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+ RAMFC_WR(REF_CNT , NV_READ(NV10_PFIFO_CACHE1_REF_CNT));
+ RAMFC_WR(DMA_INSTANCE , NV_READ(NV04_PFIFO_CACHE1_DMA_INSTANCE));
+ RAMFC_WR(DMA_DCOUNT , NV_READ(NV04_PFIFO_CACHE1_DMA_DCOUNT));
+ RAMFC_WR(DMA_STATE , NV_READ(NV04_PFIFO_CACHE1_DMA_STATE));
+
+ tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH);
+ tmp |= NV_READ(0x2058) & 0x30000000;
+ RAMFC_WR(DMA_FETCH , tmp);
+
+ RAMFC_WR(ENGINE , NV_READ(NV04_PFIFO_CACHE1_ENGINE));
+ RAMFC_WR(PULL1_ENGINE , NV_READ(NV04_PFIFO_CACHE1_PULL1));
+ RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_VALUE));
+ tmp = NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP);
+ RAMFC_WR(ACQUIRE_TIMESTAMP, tmp);
+ RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT));
+ RAMFC_WR(SEMAPHORE , NV_READ(NV10_PFIFO_CACHE1_SEMAPHORE));
+
+	/* NVIDIA reads 0x3228 first, then writes DMA_GET here.. maybe
+	 * something more involved depending on the value of 0x3228?
+	 */
+ RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
+
+ RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE));
+
+ /* No idea what the below is for exactly, ripped from a mmio-trace */
+ RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4));
+
+	/* NVIDIA does this next line twice.. bug? */
+ RAMFC_WR(UNK_44 , NV_READ(0x32e8));
+ RAMFC_WR(UNK_4C , NV_READ(0x2088));
+ RAMFC_WR(UNK_50 , NV_READ(0x3300));
+
+ return 0;
+}
+
+int
+nv40_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_fifo_init(dev)))
+ return ret;
+
+ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
new file mode 100644
index 0000000..de178f5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -0,0 +1,2193 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+/* TODO: decipher what each offset in the context represents. The
+ * contexts below are taken from dumps just after the 3D object is
+ * created.
+ */
+static void
+nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Always has the "instance address" of itself at offset 0 */
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ /* unknown */
+ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x0016c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00170/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00174/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00180/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00184/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00188/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0018c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001c0/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x00480/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00498/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x004b4/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x004bc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x004d0/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00504/4, 0x00011100);
+ for (i=0x00520; i<=0x0055c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x00594/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x00598/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x005b4/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x005cc/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0060c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00610/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00618/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00628/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00);
+ /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */
+ /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */
+ for (i=0x006C0; i<=0x006fc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */
+ for (i=0x00700; i<=0x0073c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */
+ /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */
+ for (i=0x00780; i<=0x007bc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */
+ for (i=0x007c0; i<=0x007fc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */
+ for (i=0x00800; i<=0x0083c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */
+ /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */
+ for (i=0x00880; i<=0x008bc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ /* unknown */
+ for (i=0x00910; i<=0x0091c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x00920; i<=0x0092c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x00940; i<=0x0094c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x00960; i<=0x0096c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x00980/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x009b4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00);
+ INSTANCE_WR(ctx, 0x009d4/4, 0x00020000);
+ INSTANCE_WR(ctx, 0x00a08/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x00aac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00af0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00af8/4, 0x80800001);
+ INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c00/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c04/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c08/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c44/4, 0x00000001);
+ for (i=0x03008; i<=0x03080; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x05288; i<=0x08570; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x08628; i<=0x08e18; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x0bd28; i<=0x0f010; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0f0c8; i<=0x0f8b8; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x127c8; i<=0x15ab0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x15b68; i<=0x16358; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x19268; i<=0x1c550; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x1c608; i<=0x1cdf8; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x1fd08; i<=0x22ff0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x230a8; i<=0x23898; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x267a8; i<=0x29a90; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x29b48; i<=0x2a338; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
+ for (i = 0x00000178; i <= 0x00000180; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
+ for (i = 0x00000194; i <= 0x000001b0; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
+ for (i = 0x00000350; i <= 0x0000035c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000408/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00000430/4, 0x00011100);
+ for (i = 0x0000044c; i <= 0x00000488; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00000538/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00);
+ for (i = 0x000005dc; i <= 0x00000618; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i = 0x0000061c; i <= 0x00000658; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i = 0x0000069c; i <= 0x000006d8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i = 0x000006dc; i <= 0x00000718; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i = 0x0000071c; i <= 0x00000758; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i = 0x0000079c; i <= 0x000007d8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i = 0x0000082c; i <= 0x00000838; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i = 0x0000083c; i <= 0x00000848; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i = 0x0000085c; i <= 0x00000868; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i = 0x0000087c; i <= 0x00000888; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00);
+ INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000);
+ INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff);
+ for (i = 0x00000ad4; i <= 0x00000ae4; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001);
+ for (i = 0x00002ee8; i <= 0x00002f60; i += 8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x00005168; i <= 0x00007358; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00007368; i <= 0x00007758; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x0000a068; i <= 0x0000c258; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x0000c268; i <= 0x0000c658; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x0000ef68; i <= 0x00011158; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00011168; i <= 0x00011558; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x00013e68; i <= 0x00016058; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00016068; i <= 0x00016458; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00194/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00198/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0019c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001a0/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001a4/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001a8/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001ac/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001b0/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
+ for (i=0x0044c; i<=0x00488; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00);
+ for (i=0x005dc; i<=0x00618; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x0061c; i<=0x00658; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x0069c; i<=0x006d8; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x006dc; i<=0x00718; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x0071c; i<=0x00758; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x0079c; i<=0x007d8; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x0082c; i<=0x00838; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x0083c; i<=0x00848; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x0085c; i<=0x00868; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x0087c; i<=0x00888; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x0089c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x008d0/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
+ INSTANCE_WR(ctx, 0x008f4/4, 0x00020000);
+ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
+ for (i=0x02ec0; i<=0x02f38; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x04c80; i<=0x06e70; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x06e80; i<=0x07270; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x096c0; i<=0x0b8b0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0b8c0; i<=0x0bcb0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x0e100; i<=0x102f0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x10300; i<=0x106f0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00138/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x0013c/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00144/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00178/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00180/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00184/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00188/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0018c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00190/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00194/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00198/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0019c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001a4/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001ec/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x0036c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00370/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00374/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00378/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003a4/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x003b8/4, 0x00003010);
+ INSTANCE_WR(ctx, 0x003dc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003e0/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003e4/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003e8/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003ec/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003f0/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003f8/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003fc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00400/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00404/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00408/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0040c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00410/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00414/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00418/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004b0/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004b4/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x004d0/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x004d4/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x004d8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x004ec/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x004fc/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00500/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00504/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00508/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0050c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00510/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00514/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00518/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0051c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00520/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00524/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00528/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0052c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00530/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00534/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00538/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0053c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00550/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00554/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x00011100);
+ for (i=0x00578; i<0x005b4; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c0/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x005e8/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x005ec/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x005f0/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x005f4/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x00608/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x00624/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00658/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x0065c/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00660/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00664/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00674/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00678/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x0067c/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0068c/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x006c8/4, 0x00ffff00);
+ for (i=0x0070c; i<=0x00748; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x0074c; i<=0x00788; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x007cc; i<=0x00808; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x0080c; i<=0x00848; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x0084c; i<=0x00888; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x008cc; i<=0x00908; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x0095c; i<=0x00968; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x0096c; i<=0x00978; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x0098c; i<=0x00998; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x009ac; i<=0x009b8; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x009cc/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00a00/4, 0x00000421);
+ INSTANCE_WR(ctx, 0x00a04/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x00a08/4, 0x00011001);
+ INSTANCE_WR(ctx, 0x00a14/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x00a18/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x00a1c/4, 0x0c103f00);
+ INSTANCE_WR(ctx, 0x00a28/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00a60/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x00aec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00b30/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00b38/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00b3c/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00bc0/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00bcc/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00be8/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00bec/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00bf0/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00bf4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00c2c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00c30/4, 0x08e00001);
+ INSTANCE_WR(ctx, 0x00c34/4, 0x000e3000);
+ for (i=0x017f8; i<=0x01870; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x035b8; i<=0x057a8; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x057b8; i<=0x05ba8; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x07f38; i<=0x0a128; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0a138; i<=0x0a528; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x0c8b8; i<=0x0eaa8; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0eab8; i<=0x0eea8; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+/* This may only work on 7800 AGP cards; a warning will be included */
+static void
+nv47_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00000178/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0000017c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00000180/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
+ for (i=0x00000194; i<=0x000001b0; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00000350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00000354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00000358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0000035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
+ for (i=0x000003c0; i<=0x000003fc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00000454/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00000458/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x00000474/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x00000478/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x0000047c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000490/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x000004a0/4, 0xffff0000);
+ for (i=0x000004a4; i<=0x000004e0; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x000004f4/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x000004f8/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00000500/4, 0x00011100);
+ for (i=0x0000051c; i<=0x00000558; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00000564/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x0000058c/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x00000590/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x00000594/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x00000598/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x000005ac/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x000005c8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x000005fc/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00000600/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00000604/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00000608/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00000618/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0000061c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00000620/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000630/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x0000066c/4, 0x00ffff00);
+ for (i=0x000006b0; i<=0x000006ec; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x000006f0; i<=0x0000072c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x00000770; i<=0x000007ac; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x000007b0; i<=0x000007ec; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x000007f0; i<=0x0000082c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x00000870; i<=0x000008ac; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ INSTANCE_WR(ctx, 0x00000900/4, 0x0001bc80);
+ INSTANCE_WR(ctx, 0x00000904/4, 0x0001bc80);
+ INSTANCE_WR(ctx, 0x00000908/4, 0x0001bc80);
+ INSTANCE_WR(ctx, 0x0000090c/4, 0x0001bc80);
+ INSTANCE_WR(ctx, 0x00000910/4, 0x00000202);
+ INSTANCE_WR(ctx, 0x00000914/4, 0x00000202);
+ INSTANCE_WR(ctx, 0x00000918/4, 0x00000202);
+ INSTANCE_WR(ctx, 0x0000091c/4, 0x00000202);
+ for (i=0x00000930; i<=0x0000095c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x00000970/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x000009a4/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x000009a8/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x000009b4/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x000009b8/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x000009bc/4, 0x40103f00);
+ INSTANCE_WR(ctx, 0x000009c8/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00000a00/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x00000a8c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000ad0/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00000adc/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00000ae0/4, 0x00888001);
+ for (i=0x00000b10; i<=0x00000b8c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00000bb4/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00000bc0/4, 0x0000ffff);
+ for (i=0x00000bdc; i<=0x00000bf8; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00000bfc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000c34/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000c38/4, 0x08e00001);
+ INSTANCE_WR(ctx, 0x00000c3c/4, 0x000e3000);
+ for (i=0x00003000; i<=0x00003078; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x00004dc0; i<=0x00006fb0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x00006fc0; i<=0x000073b0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x00009800; i<=0x0000b9f0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0000ba00; i<=0x00010430; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x00010440; i<=0x00010830; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x00012c80; i<=0x00014e70; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x00014e80; i<=0x00015270; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x000176c0; i<=0x000198b0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x000198c0; i<=0x00019cb0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x0001c100; i<=0x0001e2f0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0001e300; i<=0x0001e6f0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
+ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
+ for (i=0x00750; i<=0x0078c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x00790; i<=0x007cc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x00810; i<=0x0084c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x00850; i<=0x0088c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x00890; i<=0x008cc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x00910; i<=0x0094c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x009a0; i<=0x009ac; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x009b0; i<=0x009bc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x009d0; i<=0x009dc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x009f0; i<=0x009fc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
+ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
+ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
+ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
+ for(i=0x030a0; i<=0x03118; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x098a0; i<=0x0ba90; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x0baa0; i<=0x0be90; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x0e2e0; i<=0x0fff0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x10008; i<=0x104d0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x104e0; i<=0x108d0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x12d20; i<=0x14f10; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x14f20; i<=0x15310; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x17760; i<=0x19950; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x19960; i<=0x19d50; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x1c1a0; i<=0x1e390; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x1e3a0; i<=0x1e790; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x20be0; i<=0x22dd0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x22de0; i<=0x231d0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00003010);
+ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
+ for (i=0x0044c; i<=0x00488; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
+ for (i=0x005d8; i<=0x00614; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x00618; i<=0x00654; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x00698; i<=0x006d4; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x006d8; i<=0x00714; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x00718; i<=0x00754; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x00798; i<=0x007d4; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x00828; i<=0x00834; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x00838; i<=0x00844; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x00858; i<=0x00864; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x00878; i<=0x00884; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x008cc/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
+ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
+ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00abc/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00af8/4, 0x00000001);
+ for (i=0x016c0; i<=0x01738; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x03840; i<=0x05670; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x05680; i<=0x05a70; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x07e00; i<=0x09ff0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0a000; i<=0x0a3f0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x0c780; i<=0x0e970; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x0e980; i<=0x0ed70; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00010/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00014/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00018/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x0001c/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x00020/4, 0x0000c040);
+ INSTANCE_WR(ctx, 0x000c4/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x000c8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x000d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x001bc/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x001c0/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x001c8/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00218/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0021c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00220/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00228/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00234/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00238/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0023c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00240/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00244/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00248/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x0024c/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00250/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x00270/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x003e0/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x003f0/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003f8/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x003fc/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0043c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x00460/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00464/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00468/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0046c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00470/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00474/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00478/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0047c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00480/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00484/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00488/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0048c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00490/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00494/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x00498/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x0049c/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004f4/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x00514/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x00518/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x0051c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00530/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00540/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00544/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00548/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0054c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00550/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00554/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00558/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00560/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00564/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00568/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0056c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00570/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00574/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00578/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x0057c/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00580/4, 0x88888888);
+ INSTANCE_WR(ctx, 0x00594/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00598/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x005a0/4, 0x00011100);
+ INSTANCE_WR(ctx, 0x005bc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005c8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005cc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005d8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005dc/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005e8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005ec/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f0/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f4/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x005f8/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00604/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x0062c/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x00630/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x00634/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x00638/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x0064c/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x00668/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0069c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x006a0/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x006a4/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x006a8/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x006b8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x006bc/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x006c0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x006d0/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x0070c/4, 0x00ffff00);
+ for (i=0x00750; i<=0x0078c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x00790; i<=0x007cc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x00810; i<=0x0084c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x00850; i<=0x0088c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x00890; i<=0x008cc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x00910; i<=0x0094c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x009a0; i<=0x009ac; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x009b0; i<=0x009bc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x009d0; i<=0x009dc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x009f0; i<=0x009fc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x00a10/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00a44/4, 0x00000421);
+ INSTANCE_WR(ctx, 0x00a48/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x00a54/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x00a58/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x00a5c/4, 0x20103f00);
+ INSTANCE_WR(ctx, 0x00a68/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00aa0/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x00b2c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00b70/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00b7c/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00b80/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00bb0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bb4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bb8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bbc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bc8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bcc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bd8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bdc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00be8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bec/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf0/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf4/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bf8/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00bfc/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c00/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c04/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c08/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c0c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c10/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c14/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c18/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c1c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c20/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c24/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c28/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c2c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00c54/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00c60/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00c7c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c80/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c84/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c88/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c8c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c90/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c94/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c98/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00c9c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00cd4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00cd8/4, 0x08e00001);
+ INSTANCE_WR(ctx, 0x00cdc/4, 0x000e3000);
+ for(i=0x030a0; i<=0x03118; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x098a0; i<=0x0ba90; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x0baa0; i<=0x0be90; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x0e2e0; i<=0x0fff0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x10008; i<=0x104d0; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x104e0; i<=0x108d0; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x12d20; i<=0x14f10; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x14f20; i<=0x15310; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for(i=0x17760; i<=0x19950; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for(i=0x19960; i<=0x19d50; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x003d0/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003d4/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x003f0/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x003f4/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x003f8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0040c/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x0041c/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x0042c/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00434/4, 0x00011100);
+ for (i=0x00450; i<0x0048c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00498/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x004c0/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x004c4/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x004c8/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x004cc/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x004e0/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x004fc/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00530/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00534/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00538/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x0053c/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x0054c/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x00550/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00554/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00564/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x0059c/4, 0x00ffff00);
+ for (i=0x005e0; i<=0x0061c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x00620; i<=0x0065c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x006a0; i<=0x006dc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x006e0; i<=0x0071c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x00720; i<=0x0075c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x007a0; i<=0x007dc; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x00830; i<=0x0083c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x00840; i<=0x0084c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x00860; i<=0x0086c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x00880; i<=0x0088c; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x008a0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x008d4/4, 0x00000020);
+ INSTANCE_WR(ctx, 0x008d8/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x008dc/4, 0x00011001);
+ INSTANCE_WR(ctx, 0x008e8/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x008ec/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x008f0/4, 0x0c103f00);
+ INSTANCE_WR(ctx, 0x008fc/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00934/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x009c0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00a04/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00a0c/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00a10/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00a74/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00a80/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00a9c/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00ad8/4, 0x00000001);
+ for (i=0x016a0; i<0x01718; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x03460; i<0x05650; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x05660; i<0x05a50; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+static void
+nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00128/4, 0x02008821);
+ INSTANCE_WR(ctx, 0x00158/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0015c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00160/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00164/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00168/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0016c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00170/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00174/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00178/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x0017c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00180/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00188/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00340/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x00350/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00354/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00358/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x003cc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x003d0/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x003ec/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x003f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00408/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00418/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00430/4, 0x00011100);
+ for (i=0x0044c; i<=0x00488; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x004bc/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x004c0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x004dc/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00530/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00538/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00548/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00);
+ for (i=0x005d8; i<=0x00614; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i=0x00618; i<=0x00654; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i=0x00698; i<=0x006d4; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i=0x006d8; i<=0x00714; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i=0x00718; i<=0x00754; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i=0x00798; i<=0x007d4; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i=0x00828; i<=0x00834; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i=0x00838; i<=0x00844; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i=0x00858; i<=0x00864; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i=0x00878; i<=0x00884; i+=4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x00898/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x008cc/4, 0x00000020);
+ INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x008d4/4, 0x00011001);
+ INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00);
+ INSTANCE_WR(ctx, 0x008f4/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x0092c/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x009b8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x009fc/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00a04/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00a08/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00a94/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00a98/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001);
+ for (i=0x01668; i<=0x016e0; i+=8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i=0x03428; i<=0x05618; i+=24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i=0x05628; i<=0x05a18; i+=16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+}
+
+int
+nv40_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+ int ret;
+
+ /* These functions populate the graphics context with a whole heap
+	 * of default state. All of these functions are very similar, with
+	 * only minimal chipset-specific changes. However, as we're currently
+	 * dependent on the context programs used by the NVIDIA binary driver,
+	 * these functions must match the layout those programs expect.
+	 * Hopefully at some point this will all change.
+ */
+ switch (dev_priv->chipset) {
+ case 0x40:
+ ctx_init = nv40_graph_context_init;
+ break;
+ case 0x41:
+ case 0x42:
+ ctx_init = nv41_graph_context_init;
+ break;
+ case 0x43:
+ ctx_init = nv43_graph_context_init;
+ break;
+ case 0x46:
+ ctx_init = nv46_graph_context_init;
+ break;
+ case 0x47:
+ ctx_init = nv47_graph_context_init;
+ break;
+ case 0x49:
+ ctx_init = nv49_graph_context_init;
+ break;
+ case 0x44:
+ case 0x4a:
+ ctx_init = nv4a_graph_context_init;
+ break;
+ case 0x4b:
+ ctx_init = nv4b_graph_context_init;
+ break;
+ case 0x4c:
+ case 0x67:
+ ctx_init = nv4c_graph_context_init;
+ break;
+ case 0x4e:
+ ctx_init = nv4e_graph_context_init;
+ break;
+ default:
+ ctx_init = nv40_graph_context_init;
+ break;
+ }
+
+ /* Allocate a 175KiB block of PRAMIN to store the context. This
+ * is massive overkill for a lot of chipsets, but it should be safe
+	 * until we're able to implement this properly (which will happen at
+	 * more or less the same time we're able to write our own context
+	 * programs).
+ */
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx)))
+ return ret;
+
+ /* Initialise default context values */
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+ return 0;
+}
+
+void
+nv40_graph_destroy_context(struct nouveau_channel *chan)
+{
+ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
+}
+
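+/* Trigger a PGRAPH context transfer between PRAMIN and the on-chip state.
+ * "inst" is the instance address (in 16-byte units) of a channel's grctx
+ * object; "save" selects the direction: non-zero saves PGRAPH state out to
+ * PRAMIN, zero loads the saved state back into PGRAPH.
+ */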
+static int
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_cp, tv = 1000, tmp;
+ int i;
+
+ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+
+ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310);
+ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
+
+ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
+ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
+
+ nouveau_wait_for_idle(dev);
+
+ for (i = 0; i < tv; i++) {
+ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
+ DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
+ DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
+ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
+ DRM_ERROR("0x40030C = 0x%08x\n",
+ NV_READ(NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/* Save current context (from PGRAPH) into the channel's context */
+int
+nv40_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ return nv40_graph_transfer_context(dev, inst, 1);
+}
+
+/* Restore the context for a specific channel into PGRAPH */
+int
+nv40_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+ int ret;
+
+ if (!chan->ramin_grctx)
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
+
+ ret = nv40_graph_transfer_context(dev, inst, 0);
+ if (ret)
+ return ret;
+
+	/* 0x40032C, no idea of its exact function. It could simply be a
+	 * record of the currently active PGRAPH context. What bit 24 does
+	 * is currently unknown; the nv ddx has it set, so we will set it
+	 * here too.
+	 */
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR,
+ (inst & NV40_PGRAPH_CTXCTL_CUR_INST_MASK) |
+ NV40_PGRAPH_CTXCTL_CUR_LOADED);
+ /* 0x32E0 records the instance address of the active FIFO's PGRAPH
+ * context. If at any time this doesn't match 0x40032C, you will
+	 * receive a PGRAPH_INTR_CONTEXT_SWITCH interrupt.
+ */
+ NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst);
+ return 0;
+}
+
+/* These blocks of "magic numbers" are actually microcode that the GPU uses
+ * to control how graphics contexts get saved and restored between PRAMIN
+ * and PGRAPH during a context switch. We're currently using values seen
+ * in mmio-traces of the binary driver.
+ */
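+/* Each program is terminated by an all-ones word (~0); the upload loop in
+ * nv40_graph_init() uses this as its end-of-program sentinel.
+ */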
+static uint32_t nv40_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406,
+ 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
+ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216,
+ 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100,
+ 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029,
+ 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6,
+ 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b,
+ 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a,
+ 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
+ 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008,
+ 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901,
+ 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038,
+ 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
+ 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684,
+ 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68,
+ 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006,
+ 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004,
+ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284,
+ 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a,
+ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
+ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
+ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
+ 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
+ ~0
+};
+
+static uint32_t nv41_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
+ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1,
+ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800,
+ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
+ 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
+ 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
+ 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
+ 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480,
+ 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a,
+ 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001,
+ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
+ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv43_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
+ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
+ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
+ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
+ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a,
+ 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965,
+ 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
+ 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
+ 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
+ 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350,
+ 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006,
+ 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004,
+ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
+ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
+ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
+ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
+ 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
+ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
+ ~0
+};
+
+static uint32_t nv44_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
+ 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
+ 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
+ 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
+ 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
+ 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
+ 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
+ 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
+ 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
+ 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
+ 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
+ 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
+ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
+ 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
+ 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
+ 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
+ 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
+ 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
+ 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
+ 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
+ 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
+ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
+ 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
+ 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
+ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv46_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
+ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200008, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x00500060, 0x00403f87, 0x0060000d, 0x004079e6, 0x002000f7, 0x0060000a,
+ 0x00200045, 0x00100620, 0x00104668, 0x0017466d, 0x0011068b, 0x00168691,
+ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x00200022,
+ 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1,
+ 0x00500060, 0x0020027f, 0x0060000a, 0x00104800, 0x00108901, 0x00104910,
+ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00, 0x00108a14,
+ 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08, 0x00104d80,
+ 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x00105406, 0x00105709,
+ 0x00200316, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
+ 0x00800001, 0x0020055e, 0x0060000a, 0x002037e0, 0x0040788a, 0x00201320,
+ 0x00800029, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x00200081,
+ 0x0060000a, 0x00104280, 0x00200316, 0x0060000a, 0x00200004, 0x00800001,
+ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
+ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv47_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409265, 0x00409606,
+ 0x0040a368, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d12,
+ 0x00500060, 0x00403f87, 0x0060000d, 0x00407ce6, 0x002000f0, 0x0060000a,
+ 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
+ 0x001043e1, 0x00500060, 0x00200268, 0x0060000a, 0x00104800, 0x00108901,
+ 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00, 0x00104a19,
+ 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e, 0x0010cd00,
+ 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
+ 0x00104f06, 0x00105406, 0x00105709, 0x00200318, 0x0060000a, 0x00300000,
+ 0x00200680, 0x00407500, 0x00200684, 0x00800001, 0x00200b60, 0x0060000a,
+ 0x00209540, 0x00407b8a, 0x00201350, 0x00800041, 0x00408c00, 0x00600006,
+ 0x004088e6, 0x00700080, 0x0020007a, 0x0060000a, 0x00104280, 0x00200318,
+ 0x0060000a, 0x00200004, 0x00800001, 0x00700000, 0x00200000, 0x0060000a,
+ 0x00106002, 0x0040a368, 0x00700000, 0x00200000, 0x0060000a, 0x00106002,
+ 0x00700080, 0x00400a68, 0x00500060, 0x00600007, 0x00409688, 0x0060000f,
+ 0x00500060, 0x00200000, 0x0060000a, 0x00700000, 0x00106001, 0x0091a880,
+ 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c,
+ 0x00402168, 0x0040a506, 0x0040a605, 0x00600009, 0x00700005, 0x00700006,
+ 0x0060000e, ~0
+};
+
+/* This program is used for both nv49 and nv4b */
+static uint32_t nv49_4b_ctx_prog[] = {
+ 0x00400564, 0x00400505, 0x00408165, 0x00408206, 0x00409e68, 0x00200020,
+ 0x0060000a, 0x00700080, 0x00104042, 0x00200020, 0x0060000a, 0x00700000,
+ 0x001040c5, 0x00400f26, 0x00401068, 0x0060000d, 0x0070008f, 0x0070000e,
+ 0x00408d68, 0x004015e6, 0x007000a0, 0x00700080, 0x0040180f, 0x00700000,
+ 0x00200029, 0x0060000a, 0x0011814d, 0x00110158, 0x00105401, 0x0020003a,
+ 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9, 0x0010c1dc, 0x00150210,
+ 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0, 0x00200040, 0x00100280,
+ 0x00128100, 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140,
+ 0x00104029, 0x00110400, 0x00104d12, 0x00500060, 0x004071e6, 0x00200118,
+ 0x0060000a, 0x00200020, 0x00100620, 0x00154650, 0x00104668, 0x0017466d,
+ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
+ 0x001146c6, 0x00200022, 0x001006cc, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200290, 0x0060000a, 0x00104800,
+ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00144a00,
+ 0x00104a19, 0x0010ca1c, 0x00110b00, 0x00200028, 0x00100b08, 0x00134c2e,
+ 0x0010cd00, 0x0010cd04, 0x00120d08, 0x00104d80, 0x00104e00, 0x0012d600,
+ 0x00105c00, 0x00104f06, 0x00105406, 0x00105709, 0x00200340, 0x0060000a,
+ 0x00300000, 0x00200680, 0x00406a0f, 0x00200684, 0x00800001, 0x00200b88,
+ 0x0060000a, 0x00209540, 0x0040708a, 0x00201350, 0x00800041, 0x00407c0f,
+ 0x00600006, 0x00407ce6, 0x00700080, 0x002000a2, 0x0060000a, 0x00104280,
+ 0x00200340, 0x0060000a, 0x00200004, 0x00800001, 0x0070008e, 0x00408d68,
+ 0x0040020f, 0x00600006, 0x00409e68, 0x00600007, 0x0070000f, 0x0070000e,
+ 0x00408d68, 0x0091a880, 0x00901ffe, 0x10940000, 0x00200020, 0x0060000b,
+ 0x00500069, 0x0060000c, 0x00401568, 0x00700000, 0x00200001, 0x0040910e,
+ 0x00200021, 0x0060000a, 0x00409b0d, 0x00104a40, 0x00104a50, 0x00104a60,
+ 0x00104a70, 0x00104a80, 0x00104a90, 0x00104aa0, 0x00104ab0, 0x00407e0e,
+ 0x0040130f, 0x00408568, 0x0040a006, 0x0040a105, 0x00600009, 0x00700005,
+ 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv4a_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,
+ 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
+ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1,
+ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
+ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
+ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
+ 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00,
+ 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
+ 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
+ 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a,
+ 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100,
+ 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a,
+ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004,
+ 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080,
+ 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88,
+ 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000,
+ 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv4c_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
+ 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
+ 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
+ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
+ 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
+ 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
+ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
+ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
+ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
+ 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
+ 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
+ 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
+ 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
+ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
+ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
+ 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
+ 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
+ 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
+ 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
+static uint32_t nv4e_ctx_prog[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
+ 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,
+ 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1,
+ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,
+ 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,
+ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
+ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
+ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06,
+ 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084,
+ 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06,
+ 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006,
+ 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004,
+ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884,
+ 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a,
+ 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000,
+ 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe,
+ 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68,
+ 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e,
+ ~0
+};
+
+/* Codename -> chipset ID, for reference:
+ *   G70      0x47
+ *   G71      0x49
+ *   NV45     0x48
+ *   G72[M]   0x46
+ *   G73      0x4b
+ *   C51_G7X  0x4c
+ *   C51      0x4e
+ */
+int
+nv40_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
+ uint32_t *ctx_prog;
+ uint32_t vramsz, tmp;
+ int i, j;
+
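+	/* Toggle the PGRAPH bit in PMC_ENABLE off and on to reset the engine */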
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ switch (dev_priv->chipset) {
+ case 0x40: ctx_prog = nv40_ctx_prog; break;
+ case 0x41:
+ case 0x42: ctx_prog = nv41_ctx_prog; break;
+ case 0x43: ctx_prog = nv43_ctx_prog; break;
+ case 0x44: ctx_prog = nv44_ctx_prog; break;
+ case 0x46: ctx_prog = nv46_ctx_prog; break;
+ case 0x47: ctx_prog = nv47_ctx_prog; break;
+ case 0x49: ctx_prog = nv49_4b_ctx_prog; break;
+ case 0x4a: ctx_prog = nv4a_ctx_prog; break;
+ case 0x4b: ctx_prog = nv49_4b_ctx_prog; break;
+ case 0x4c:
+ case 0x67: ctx_prog = nv4c_ctx_prog; break;
+ case 0x4e: ctx_prog = nv4e_ctx_prog; break;
+ default:
+ DRM_ERROR("Context program for 0x%02x unavailable\n",
+ dev_priv->chipset);
+ ctx_prog = NULL;
+ break;
+ }
+
+ /* Load the context program onto the card */
+ if (ctx_prog) {
+ DRM_DEBUG("Loading context program\n");
+ i = 0;
+
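+		/* Reset the ucode load pointer; the index register presumably
+		 * auto-increments as each word is written to UCODE_DATA. */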
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ while (ctx_prog[i] != ~0) {
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_prog[i]);
+ i++;
+ }
+ }
+
+ /* No context present currently */
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+ NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
+ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+ NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
+
+ j = NV_READ(0x1540) & 0xff;
+ if (j) {
+ for (i=0; !(j&1); j>>=1, i++);
+ NV_WRITE(0x405000, i);
+ }
+
+ if (dev_priv->chipset == 0x40) {
+ NV_WRITE(0x4009b0, 0x83280fff);
+ NV_WRITE(0x4009b4, 0x000000a0);
+ } else {
+ NV_WRITE(0x400820, 0x83280eff);
+ NV_WRITE(0x400824, 0x000000a0);
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ NV_WRITE(0x4009b8, 0x0078e366);
+ NV_WRITE(0x4009bc, 0x0000014c);
+ break;
+ case 0x41:
+ case 0x42: /* pciid also 0x00Cx */
+// case 0x0120: //XXX (pciid)
+ NV_WRITE(0x400828, 0x007596ff);
+ NV_WRITE(0x40082c, 0x00000108);
+ break;
+ case 0x43:
+ NV_WRITE(0x400828, 0x0072cb77);
+ NV_WRITE(0x40082c, 0x00000108);
+ break;
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4a:
+ case 0x4c: /* G7x-based C51 */
+ case 0x4e:
+ NV_WRITE(0x400860, 0);
+ NV_WRITE(0x400864, 0);
+ break;
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ NV_WRITE(0x400828, 0x07830610);
+ NV_WRITE(0x40082c, 0x0000016A);
+ break;
+ default:
+ break;
+ }
+
+ NV_WRITE(0x400b38, 0x2ffff800);
+ NV_WRITE(0x400b3c, 0x00006000);
+
+ /* copy tile info from PFB */
+ switch (dev_priv->chipset) {
+ case 0x40: /* vanilla NV40 */
+ for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
+ tmp = NV_READ(NV10_PFB_TILE(i));
+ NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
+ tmp = NV_READ(NV10_PFB_TLIMIT(i));
+ NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = NV_READ(NV10_PFB_TSIZE(i));
+ NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = NV_READ(NV10_PFB_TSTATUS(i));
+ NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ case 0x44:
+ case 0x4a:
+ case 0x4e: /* NV44-based cores don't have 0x406900? */
+ for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = NV_READ(NV40_PFB_TILE(i));
+ NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
+ tmp = NV_READ(NV40_PFB_TLIMIT(i));
+ NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSIZE(i));
+ NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSTATUS(i));
+ NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
+ }
+ break;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b: /* G7X-based cores */
+ for (i=0; i<NV40_PFB_TILE__SIZE_1; i++) {
+ tmp = NV_READ(NV40_PFB_TILE(i));
+ NV_WRITE(NV47_PGRAPH_TILE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TLIMIT(i));
+ NV_WRITE(NV47_PGRAPH_TLIMIT0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSIZE(i));
+ NV_WRITE(NV47_PGRAPH_TSIZE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSTATUS(i));
+ NV_WRITE(NV47_PGRAPH_TSTATUS0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ default: /* everything else */
+ for (i=0; i<NV40_PFB_TILE__SIZE_0; i++) {
+ tmp = NV_READ(NV40_PFB_TILE(i));
+ NV_WRITE(NV40_PGRAPH_TILE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TILE1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TLIMIT(i));
+ NV_WRITE(NV40_PGRAPH_TLIMIT0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TLIMIT1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSIZE(i));
+ NV_WRITE(NV40_PGRAPH_TSIZE0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSIZE1(i), tmp);
+ tmp = NV_READ(NV40_PFB_TSTATUS(i));
+ NV_WRITE(NV40_PGRAPH_TSTATUS0(i), tmp);
+ NV_WRITE(NV40_PGRAPH_TSTATUS1(i), tmp);
+ }
+ break;
+ }
+
+ /* begin RAM config */
+ vramsz = drm_get_resource_len(dev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ NV_WRITE(0x4009A4, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4009A8, NV_READ(NV04_PFB_CFG1));
+ NV_WRITE(0x4069A4, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4069A8, NV_READ(NV04_PFB_CFG1));
+ NV_WRITE(0x400820, 0);
+ NV_WRITE(0x400824, 0);
+ NV_WRITE(0x400864, vramsz);
+ NV_WRITE(0x400868, vramsz);
+ break;
+ default:
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ NV_WRITE(0x400DF0, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x400DF4, NV_READ(NV04_PFB_CFG1));
+ break;
+ default:
+ NV_WRITE(0x4009F0, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4009F4, NV_READ(NV04_PFB_CFG1));
+ break;
+ }
+ NV_WRITE(0x4069F0, NV_READ(NV04_PFB_CFG0));
+ NV_WRITE(0x4069F4, NV_READ(NV04_PFB_CFG1));
+ NV_WRITE(0x400840, 0);
+ NV_WRITE(0x400844, 0);
+ NV_WRITE(0x4008A0, vramsz);
+ NV_WRITE(0x4008A4, vramsz);
+ break;
+ }
+
+ /* per-context state, doesn't belong here */
+ NV_WRITE(0x400B20, 0x00000000);
+ NV_WRITE(0x400B04, 0xFFFFFFFF);
+
+ tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
+ tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
+ NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
+
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ NV_WRITE(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+ return 0;
+}
+
+void nv40_graph_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
new file mode 100644
index 0000000..ead6f87
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -0,0 +1,38 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_mc_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp;
+
+	/* Power up everything; resetting each individual unit will
+ * be done later if needed.
+ */
+ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4e:
+ case 0x4c: /* C51_G7X */
+ tmp = NV_READ(NV40_PFB_020C);
+ NV_WRITE(NV40_PMC_1700, tmp);
+ NV_WRITE(NV40_PMC_1704, 0);
+ NV_WRITE(NV40_PMC_1708, 0);
+ NV_WRITE(NV40_PMC_170C, tmp);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv40_mc_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
new file mode 100644
index 0000000..d681066
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+struct nv50_fifo_priv {
+ struct nouveau_gpuobj_ref *thingo[2];
+ int cur_thingo;
+};
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_fifo_init_thingo(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
+ struct nouveau_gpuobj_ref *cur;
+ int i, nr;
+
+ DRM_DEBUG("\n");
+
+ cur = priv->thingo[priv->cur_thingo];
+ priv->cur_thingo = !priv->cur_thingo;
+
+ /* We never schedule channel 0 or 127 */
+ for (i = 1, nr = 0; i < 127; i++) {
+ if (dev_priv->fifos[i]) {
+ INSTANCE_WR(cur->gpuobj, nr++, i);
+ }
+ }
+ NV_WRITE(0x32f4, cur->instance >> 12);
+ NV_WRITE(0x32ec, nr);
+ NV_WRITE(0x2500, 0x101);
+}
+
+static int
+nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[channel];
+ uint32_t inst;
+
+ DRM_DEBUG("ch%d\n", channel);
+
+ if (!chan->ramfc)
+ return -EINVAL;
+
+ if (IS_G80) inst = chan->ramfc->instance >> 12;
+ else inst = chan->ramfc->instance >> 8;
+ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
+ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+
+ if (!nt) nv50_fifo_init_thingo(dev);
+ return 0;
+}
+
+static void
+nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
+
+ if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
+ else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
+ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
+
+ if (!nt) nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_fifo_init_intr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+}
+
+static void
+nv50_fifo_init_context_table(struct drm_device *dev)
+{
+ int i;
+
+ DRM_DEBUG("\n");
+
+ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++)
+ nv50_fifo_channel_disable(dev, i, 1);
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_regs__nv(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x250c, 0x6f3cfc34);
+}
+
+static void
+nv50_fifo_init_regs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x2500, 0);
+ NV_WRITE(0x3250, 0);
+ NV_WRITE(0x3220, 0);
+ NV_WRITE(0x3204, 0);
+ NV_WRITE(0x3210, 0);
+ NV_WRITE(0x3270, 0);
+
+	/* Enable dummy channels set up by nv50_instmem.c */
+ nv50_fifo_channel_enable(dev, 0, 1);
+ nv50_fifo_channel_enable(dev, 127, 1);
+}
+
+int
+nv50_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->Engine.fifo.priv = priv;
+
+ nv50_fifo_init_reset(dev);
+ nv50_fifo_init_intr(dev);
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+ if (ret) {
+ DRM_ERROR("error creating thingo0: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+ if (ret) {
+ DRM_ERROR("error creating thingo1: %d\n", ret);
+ return ret;
+ }
+
+ nv50_fifo_init_context_table(dev);
+ nv50_fifo_init_regs__nv(dev);
+ nv50_fifo_init_regs(dev);
+
+ return 0;
+}
+
+void
+nv50_fifo_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
+
+ DRM_DEBUG("\n");
+
+ if (!priv)
+ return;
+
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
+ nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
+
+ dev_priv->Engine.fifo.priv = NULL;
+ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
+}
+
+int
+nv50_fifo_channel_id(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) &
+ NV50_PFIFO_CACHE1_PUSH1_CHID_MASK);
+}
+
+int
+nv50_fifo_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = NULL;
+ int ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ if (IS_G80) {
+ uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
+ uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
+ ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset,
+ 0x100, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ramfc,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &chan->ramfc);
+ if (ret)
+ return ret;
+ ramfc = chan->ramfc->gpuobj;
+ }
+
+ INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4);
+ INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? */
+ INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff);
+ INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff);
+ INSTANCE_WR(ramfc, 0x10/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x08/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x40/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0);
+ INSTANCE_WR(ramfc, 0x54/4, 0x000f0000);
+ INSTANCE_WR(ramfc, 0x7c/4, 0x30000001);
+ INSTANCE_WR(ramfc, 0x78/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1);
+
+ if (!IS_G80) {
+ INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id);
+ INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
+
+		INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some VRAM address >> 10 */
+ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
+ }
+
+ ret = nv50_fifo_channel_enable(dev, chan->id, 0);
+ if (ret) {
+ DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nv50_fifo_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ nv50_fifo_channel_disable(dev, chan->id, 0);
+
+ /* Dummy channel, also used on ch 127 */
+ if (chan->id == 0)
+ nv50_fifo_channel_disable(dev, 127, 0);
+
+ if ((NV_READ(NV03_PFIFO_CACHE1_PUSH1) & 0xffff) == chan->id)
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 127);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+}
+
+int
+nv50_fifo_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ /*XXX: incomplete, only touches the regs that NV does */
+
+ NV_WRITE(0x3244, 0);
+ NV_WRITE(0x3240, 0);
+
+ NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4));
+ NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4));
+ NV_WRITE(0x3254, 1);
+ NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4));
+
+ if (!IS_G80) {
+ NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4));
+ NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
+ }
+
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
+ return 0;
+}
+
+int
+nv50_fifo_save_context(struct nouveau_channel *chan)
+{
+ DRM_DEBUG("ch%d\n", chan->id);
+ DRM_ERROR("stub!\n");
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
new file mode 100644
index 0000000..35e123c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -0,0 +1,2192 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
+static void
+nv50_graph_init_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e);
+ NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) | pmc_e);
+}
+
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+ NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff);
+ NV_WRITE(0x400138, 0xffffffff);
+ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x400804, 0xc0000000);
+ NV_WRITE(0x406800, 0xc0000000);
+ NV_WRITE(0x400c04, 0xc0000000);
+ NV_WRITE(0x401804, 0xc0000000);
+ NV_WRITE(0x405018, 0xc0000000);
+ NV_WRITE(0x402000, 0xc0000000);
+
+ NV_WRITE(0x400108, 0xffffffff);
+
+ NV_WRITE(0x400824, 0x00004000);
+ NV_WRITE(0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_regs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */);
+}
+
+static uint32_t nv84_ctx_voodoo[] = {
+ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
+ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
+ 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06,
+ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
+ 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801,
+ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
+ 0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d,
+ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
+ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
+ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007,
+ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
+ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007,
+ 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff,
+ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
+ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
+ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
+ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
+ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
+ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c,
+ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
+ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
+ 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4,
+ 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed,
+ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
+ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
+ 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1,
+ 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c,
+ 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500,
+ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
+ 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb,
+ 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0,
+ 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00,
+ 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3,
+ 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c,
+ 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00,
+ 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02,
+ 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389,
+ 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0,
+ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
+ 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5,
+ 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c,
+ 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500,
+ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
+ 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f,
+ 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb,
+ 0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080,
+ 0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb,
+ 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000,
+ 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d,
+ 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000,
+ 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916,
+ 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160,
+ 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb,
+ 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003,
+ 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,
+ 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0
+};
+
+static uint32_t nv86_ctx_voodoo[] = {
+ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
+ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
+ 0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,
+ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
+ 0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,
+ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
+ 0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,
+ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
+ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
+ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,
+ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
+ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,
+ 0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,
+ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
+ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
+ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
+ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
+ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
+ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
+ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,
+ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
+ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
+ 0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,
+ 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,
+ 0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 0x00122380,
+ 0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,
+ 0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,
+ 0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,
+ 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,
+ 0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,
+ 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,
+ 0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,
+ 0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,
+ 0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,
+ 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,
+ 0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,
+ 0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,
+ 0x0060000c, ~0
+};
+
+static int
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t *voodoo = NULL;
+
+ DRM_DEBUG("\n");
+
+ switch (dev_priv->chipset) {
+ case 0x84:
+ voodoo = nv84_ctx_voodoo;
+ break;
+ case 0x86:
+ voodoo = nv86_ctx_voodoo;
+ break;
+ default:
+ DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset);
+ return -EINVAL;
+ }
+
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ while (*voodoo != ~0) {
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo);
+ voodoo++;
+ }
+
+ NV_WRITE(0x400320, 4);
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+
+ return 0;
+}
+
+int
+nv50_graph_init(struct drm_device *dev)
+{
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ nv50_graph_init_reset(dev);
+ nv50_graph_init_intr(dev);
+ nv50_graph_init_regs__nv(dev);
+ nv50_graph_init_regs(dev);
+
+ ret = nv50_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void
+nv50_graph_takedown(struct drm_device *dev)
+{
+ DRM_DEBUG("\n");
+}
+
+static void
+nv86_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ctx = ref->gpuobj;
+
+ INSTANCE_WR(ctx, 0x10C/4, 0x30);
+ INSTANCE_WR(ctx, 0x1D4/4, 0x3);
+ INSTANCE_WR(ctx, 0x1D8/4, 0x1000);
+ INSTANCE_WR(ctx, 0x218/4, 0xFE0C);
+ INSTANCE_WR(ctx, 0x22C/4, 0x1000);
+ INSTANCE_WR(ctx, 0x258/4, 0x187);
+ INSTANCE_WR(ctx, 0x26C/4, 0x1018);
+ INSTANCE_WR(ctx, 0x270/4, 0xFF);
+ INSTANCE_WR(ctx, 0x2AC/4, 0x4);
+ INSTANCE_WR(ctx, 0x2B0/4, 0x44D00DF);
+ INSTANCE_WR(ctx, 0x2B8/4, 0x600);
+ INSTANCE_WR(ctx, 0x2D0/4, 0x1000000);
+ INSTANCE_WR(ctx, 0x2D4/4, 0xFF);
+ INSTANCE_WR(ctx, 0x2DC/4, 0x400);
+ INSTANCE_WR(ctx, 0x2F4/4, 0x1);
+ INSTANCE_WR(ctx, 0x2F8/4, 0x80);
+ INSTANCE_WR(ctx, 0x2FC/4, 0x4);
+ INSTANCE_WR(ctx, 0x318/4, 0x2);
+ INSTANCE_WR(ctx, 0x31C/4, 0x1);
+ INSTANCE_WR(ctx, 0x328/4, 0x1);
+ INSTANCE_WR(ctx, 0x32C/4, 0x100);
+ INSTANCE_WR(ctx, 0x344/4, 0x2);
+ INSTANCE_WR(ctx, 0x348/4, 0x1);
+ INSTANCE_WR(ctx, 0x34C/4, 0x1);
+ INSTANCE_WR(ctx, 0x35C/4, 0x1);
+ INSTANCE_WR(ctx, 0x360/4, 0x3FFFFF);
+ INSTANCE_WR(ctx, 0x364/4, 0x1FFF);
+ INSTANCE_WR(ctx, 0x36C/4, 0x1);
+ INSTANCE_WR(ctx, 0x370/4, 0x1);
+ INSTANCE_WR(ctx, 0x378/4, 0x1);
+ INSTANCE_WR(ctx, 0x37C/4, 0x1);
+ INSTANCE_WR(ctx, 0x380/4, 0x1);
+ INSTANCE_WR(ctx, 0x384/4, 0x4);
+ INSTANCE_WR(ctx, 0x388/4, 0x1);
+ INSTANCE_WR(ctx, 0x38C/4, 0x1);
+ INSTANCE_WR(ctx, 0x390/4, 0x1);
+ INSTANCE_WR(ctx, 0x394/4, 0x7);
+ INSTANCE_WR(ctx, 0x398/4, 0x1);
+ INSTANCE_WR(ctx, 0x39C/4, 0x7);
+ INSTANCE_WR(ctx, 0x3A0/4, 0x1);
+ INSTANCE_WR(ctx, 0x3A4/4, 0x1);
+ INSTANCE_WR(ctx, 0x3A8/4, 0x1);
+ INSTANCE_WR(ctx, 0x3BC/4, 0x1);
+ INSTANCE_WR(ctx, 0x3C0/4, 0x100);
+ INSTANCE_WR(ctx, 0x3C8/4, 0x1);
+ INSTANCE_WR(ctx, 0x3D4/4, 0x100);
+ INSTANCE_WR(ctx, 0x3D8/4, 0x1);
+ INSTANCE_WR(ctx, 0x3DC/4, 0x100);
+ INSTANCE_WR(ctx, 0x3E4/4, 0x1);
+ INSTANCE_WR(ctx, 0x3F0/4, 0x100);
+ INSTANCE_WR(ctx, 0x404/4, 0x4);
+ INSTANCE_WR(ctx, 0x408/4, 0x70);
+ INSTANCE_WR(ctx, 0x40C/4, 0x80);
+ INSTANCE_WR(ctx, 0x420/4, 0xC);
+ INSTANCE_WR(ctx, 0x428/4, 0x8);
+ INSTANCE_WR(ctx, 0x42C/4, 0x14);
+ INSTANCE_WR(ctx, 0x434/4, 0x29);
+ INSTANCE_WR(ctx, 0x438/4, 0x27);
+ INSTANCE_WR(ctx, 0x43C/4, 0x26);
+ INSTANCE_WR(ctx, 0x440/4, 0x8);
+ INSTANCE_WR(ctx, 0x444/4, 0x4);
+ INSTANCE_WR(ctx, 0x448/4, 0x27);
+ INSTANCE_WR(ctx, 0x454/4, 0x1);
+ INSTANCE_WR(ctx, 0x458/4, 0x2);
+ INSTANCE_WR(ctx, 0x45C/4, 0x3);
+ INSTANCE_WR(ctx, 0x460/4, 0x4);
+ INSTANCE_WR(ctx, 0x464/4, 0x5);
+ INSTANCE_WR(ctx, 0x468/4, 0x6);
+ INSTANCE_WR(ctx, 0x46C/4, 0x7);
+ INSTANCE_WR(ctx, 0x470/4, 0x1);
+ INSTANCE_WR(ctx, 0x4B4/4, 0xCF);
+ INSTANCE_WR(ctx, 0x4E4/4, 0x80);
+ INSTANCE_WR(ctx, 0x4E8/4, 0x4);
+ INSTANCE_WR(ctx, 0x4EC/4, 0x4);
+ INSTANCE_WR(ctx, 0x4F0/4, 0x3);
+ INSTANCE_WR(ctx, 0x4F4/4, 0x1);
+ INSTANCE_WR(ctx, 0x500/4, 0x12);
+ INSTANCE_WR(ctx, 0x504/4, 0x10);
+ INSTANCE_WR(ctx, 0x508/4, 0xC);
+ INSTANCE_WR(ctx, 0x50C/4, 0x1);
+ INSTANCE_WR(ctx, 0x51C/4, 0x4);
+ INSTANCE_WR(ctx, 0x520/4, 0x2);
+ INSTANCE_WR(ctx, 0x524/4, 0x4);
+ INSTANCE_WR(ctx, 0x530/4, 0x3FFFFF);
+ INSTANCE_WR(ctx, 0x534/4, 0x1FFF);
+ INSTANCE_WR(ctx, 0x55C/4, 0x4);
+ INSTANCE_WR(ctx, 0x560/4, 0x14);
+ INSTANCE_WR(ctx, 0x564/4, 0x1);
+ INSTANCE_WR(ctx, 0x570/4, 0x2);
+ INSTANCE_WR(ctx, 0x57C/4, 0x1);
+ INSTANCE_WR(ctx, 0x584/4, 0x2);
+ INSTANCE_WR(ctx, 0x588/4, 0x1000);
+ INSTANCE_WR(ctx, 0x58C/4, 0xE00);
+ INSTANCE_WR(ctx, 0x590/4, 0x1000);
+ INSTANCE_WR(ctx, 0x594/4, 0x1E00);
+ INSTANCE_WR(ctx, 0x59C/4, 0x1);
+ INSTANCE_WR(ctx, 0x5A0/4, 0x1);
+ INSTANCE_WR(ctx, 0x5A4/4, 0x1);
+ INSTANCE_WR(ctx, 0x5A8/4, 0x1);
+ INSTANCE_WR(ctx, 0x5AC/4, 0x1);
+ INSTANCE_WR(ctx, 0x5BC/4, 0x200);
+ INSTANCE_WR(ctx, 0x5C4/4, 0x1);
+ INSTANCE_WR(ctx, 0x5C8/4, 0x70);
+ INSTANCE_WR(ctx, 0x5CC/4, 0x80);
+ INSTANCE_WR(ctx, 0x5D8/4, 0x1);
+ INSTANCE_WR(ctx, 0x5DC/4, 0x70);
+ INSTANCE_WR(ctx, 0x5E0/4, 0x80);
+ INSTANCE_WR(ctx, 0x5F0/4, 0x1);
+ INSTANCE_WR(ctx, 0x5F4/4, 0xCF);
+ INSTANCE_WR(ctx, 0x5FC/4, 0x1);
+ INSTANCE_WR(ctx, 0x60C/4, 0xCF);
+ INSTANCE_WR(ctx, 0x614/4, 0x2);
+ INSTANCE_WR(ctx, 0x61C/4, 0x1);
+ INSTANCE_WR(ctx, 0x624/4, 0x1);
+ INSTANCE_WR(ctx, 0x62C/4, 0xCF);
+ INSTANCE_WR(ctx, 0x630/4, 0xCF);
+ INSTANCE_WR(ctx, 0x634/4, 0x1);
+ INSTANCE_WR(ctx, 0x63C/4, 0xF80);
+ INSTANCE_WR(ctx, 0x684/4, 0x7F0080);
+ INSTANCE_WR(ctx, 0x6C0/4, 0x7F0080);
+ INSTANCE_WR(ctx, 0x6E4/4, 0x3B74F821);
+ INSTANCE_WR(ctx, 0x6E8/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x6F0/4, 0x1000);
+ INSTANCE_WR(ctx, 0x6F4/4, 0x1F);
+ INSTANCE_WR(ctx, 0x6F8/4, 0x27C10FA);
+ INSTANCE_WR(ctx, 0x6FC/4, 0x400000C0);
+ INSTANCE_WR(ctx, 0x700/4, 0xB7892080);
+ INSTANCE_WR(ctx, 0x70C/4, 0x3B74F821);
+ INSTANCE_WR(ctx, 0x710/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x718/4, 0x1000);
+ INSTANCE_WR(ctx, 0x71C/4, 0x1F);
+ INSTANCE_WR(ctx, 0x720/4, 0x27C10FA);
+ INSTANCE_WR(ctx, 0x724/4, 0x400000C0);
+ INSTANCE_WR(ctx, 0x728/4, 0xB7892080);
+ INSTANCE_WR(ctx, 0x734/4, 0x10040);
+ INSTANCE_WR(ctx, 0x73C/4, 0x22);
+ INSTANCE_WR(ctx, 0x748/4, 0x10040);
+ INSTANCE_WR(ctx, 0x74C/4, 0x22);
+ INSTANCE_WR(ctx, 0x764/4, 0x1800000);
+ INSTANCE_WR(ctx, 0x768/4, 0x160000);
+ INSTANCE_WR(ctx, 0x76C/4, 0x1800000);
+ INSTANCE_WR(ctx, 0x77C/4, 0x3FFFF);
+ INSTANCE_WR(ctx, 0x780/4, 0x8C0000);
+ INSTANCE_WR(ctx, 0x7A4/4, 0x10401);
+ INSTANCE_WR(ctx, 0x7AC/4, 0x78);
+ INSTANCE_WR(ctx, 0x7B4/4, 0xBF);
+ INSTANCE_WR(ctx, 0x7BC/4, 0x1210);
+ INSTANCE_WR(ctx, 0x7C0/4, 0x8000080);
+ INSTANCE_WR(ctx, 0x7E4/4, 0x1800000);
+ INSTANCE_WR(ctx, 0x7E8/4, 0x160000);
+ INSTANCE_WR(ctx, 0x7EC/4, 0x1800000);
+ INSTANCE_WR(ctx, 0x7FC/4, 0x3FFFF);
+ INSTANCE_WR(ctx, 0x800/4, 0x8C0000);
+ INSTANCE_WR(ctx, 0x824/4, 0x10401);
+ INSTANCE_WR(ctx, 0x82C/4, 0x78);
+ INSTANCE_WR(ctx, 0x834/4, 0xBF);
+ INSTANCE_WR(ctx, 0x83C/4, 0x1210);
+ INSTANCE_WR(ctx, 0x840/4, 0x8000080);
+ INSTANCE_WR(ctx, 0x868/4, 0x27070);
+ INSTANCE_WR(ctx, 0x874/4, 0x3FFFFFF);
+ INSTANCE_WR(ctx, 0x88C/4, 0x120407);
+ INSTANCE_WR(ctx, 0x890/4, 0x5091507);
+ INSTANCE_WR(ctx, 0x894/4, 0x5010202);
+ INSTANCE_WR(ctx, 0x898/4, 0x30201);
+ INSTANCE_WR(ctx, 0x8B4/4, 0x40);
+ INSTANCE_WR(ctx, 0x8B8/4, 0xD0C0B0A);
+ INSTANCE_WR(ctx, 0x8BC/4, 0x141210);
+ INSTANCE_WR(ctx, 0x8C0/4, 0x1F0);
+ INSTANCE_WR(ctx, 0x8C4/4, 0x1);
+ INSTANCE_WR(ctx, 0x8C8/4, 0x3);
+ INSTANCE_WR(ctx, 0x8D4/4, 0x39E00);
+ INSTANCE_WR(ctx, 0x8D8/4, 0x100);
+ INSTANCE_WR(ctx, 0x8DC/4, 0x3800);
+ INSTANCE_WR(ctx, 0x8E0/4, 0x404040);
+ INSTANCE_WR(ctx, 0x8E4/4, 0xFF0A);
+ INSTANCE_WR(ctx, 0x8EC/4, 0x77F005);
+ INSTANCE_WR(ctx, 0x8F0/4, 0x3F7FFF);
+ INSTANCE_WR(ctx, 0x7BA0/4, 0x21);
+ INSTANCE_WR(ctx, 0x7BC0/4, 0x1);
+ INSTANCE_WR(ctx, 0x7BE0/4, 0x2);
+ INSTANCE_WR(ctx, 0x7C00/4, 0x100);
+ INSTANCE_WR(ctx, 0x7C20/4, 0x100);
+ INSTANCE_WR(ctx, 0x7C40/4, 0x1);
+ INSTANCE_WR(ctx, 0x7CA0/4, 0x1);
+ INSTANCE_WR(ctx, 0x7CC0/4, 0x2);
+ INSTANCE_WR(ctx, 0x7CE0/4, 0x100);
+ INSTANCE_WR(ctx, 0x7D00/4, 0x100);
+ INSTANCE_WR(ctx, 0x7D20/4, 0x1);
+ INSTANCE_WR(ctx, 0x11640/4, 0x4);
+ INSTANCE_WR(ctx, 0x11660/4, 0x4);
+ INSTANCE_WR(ctx, 0x49FE0/4, 0x4);
+ INSTANCE_WR(ctx, 0x4A000/4, 0x4);
+ INSTANCE_WR(ctx, 0x4A020/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x4A040/4, 0x3);
+ INSTANCE_WR(ctx, 0x4A080/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x4A0C0/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x4A0E0/4, 0x1);
+ INSTANCE_WR(ctx, 0x4A100/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x4A160/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x4A180/4, 0x27);
+ INSTANCE_WR(ctx, 0x4A1E0/4, 0x1);
+ INSTANCE_WR(ctx, 0x51A20/4, 0x1);
+ INSTANCE_WR(ctx, 0x51D00/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x51EA0/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x51EC0/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x51F00/4, 0x80);
+ INSTANCE_WR(ctx, 0x51F80/4, 0x80);
+ INSTANCE_WR(ctx, 0x51FC0/4, 0x3F);
+ INSTANCE_WR(ctx, 0x52120/4, 0x2);
+ INSTANCE_WR(ctx, 0x52140/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x52160/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x52280/4, 0x4);
+ INSTANCE_WR(ctx, 0x52300/4, 0x4);
+ INSTANCE_WR(ctx, 0x52540/4, 0x1);
+ INSTANCE_WR(ctx, 0x52560/4, 0x1001);
+ INSTANCE_WR(ctx, 0x52580/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x525A0/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x525C0/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x525E0/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x52A00/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52A20/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52A40/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52A60/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52A80/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52AA0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52AC0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52AE0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52B00/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52B20/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52B40/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52B60/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52B80/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52BA0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52BC0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52BE0/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x52C00/4, 0x10);
+ INSTANCE_WR(ctx, 0x52C60/4, 0x3);
+ INSTANCE_WR(ctx, 0xA84/4, 0xF);
+ INSTANCE_WR(ctx, 0xB24/4, 0x20);
+ INSTANCE_WR(ctx, 0xD04/4, 0x1A);
+ INSTANCE_WR(ctx, 0xEC4/4, 0x4);
+ INSTANCE_WR(ctx, 0xEE4/4, 0x4);
+ INSTANCE_WR(ctx, 0xF24/4, 0x4);
+ INSTANCE_WR(ctx, 0xF44/4, 0x8);
+ INSTANCE_WR(ctx, 0xF84/4, 0x7FF);
+ INSTANCE_WR(ctx, 0x1124/4, 0xF);
+ INSTANCE_WR(ctx, 0x3604/4, 0xF);
+ INSTANCE_WR(ctx, 0x3644/4, 0x1);
+ INSTANCE_WR(ctx, 0x41A4/4, 0xF);
+ INSTANCE_WR(ctx, 0x14844/4, 0xF);
+ INSTANCE_WR(ctx, 0x14AE4/4, 0x1);
+ INSTANCE_WR(ctx, 0x14B04/4, 0x100);
+ INSTANCE_WR(ctx, 0x14B24/4, 0x100);
+ INSTANCE_WR(ctx, 0x14B44/4, 0x11);
+ INSTANCE_WR(ctx, 0x14B84/4, 0x8);
+ INSTANCE_WR(ctx, 0x14C44/4, 0x1);
+ INSTANCE_WR(ctx, 0x14C84/4, 0x1);
+ INSTANCE_WR(ctx, 0x14CA4/4, 0x1);
+ INSTANCE_WR(ctx, 0x14CC4/4, 0x1);
+ INSTANCE_WR(ctx, 0x14CE4/4, 0xCF);
+ INSTANCE_WR(ctx, 0x14D04/4, 0x2);
+ INSTANCE_WR(ctx, 0x14DE4/4, 0x1);
+ INSTANCE_WR(ctx, 0x14E24/4, 0x1);
+ INSTANCE_WR(ctx, 0x14E44/4, 0x1);
+ INSTANCE_WR(ctx, 0x14E64/4, 0x1);
+ INSTANCE_WR(ctx, 0x14F04/4, 0x4);
+ INSTANCE_WR(ctx, 0x14F44/4, 0x1);
+ INSTANCE_WR(ctx, 0x14F64/4, 0x15);
+ INSTANCE_WR(ctx, 0x14FE4/4, 0x4444480);
+ INSTANCE_WR(ctx, 0x15764/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x15804/4, 0x100);
+ INSTANCE_WR(ctx, 0x15864/4, 0x10001);
+ INSTANCE_WR(ctx, 0x158A4/4, 0x10001);
+ INSTANCE_WR(ctx, 0x158C4/4, 0x1);
+ INSTANCE_WR(ctx, 0x158E4/4, 0x10001);
+ INSTANCE_WR(ctx, 0x15904/4, 0x1);
+ INSTANCE_WR(ctx, 0x15924/4, 0x4);
+ INSTANCE_WR(ctx, 0x15944/4, 0x2);
+ INSTANCE_WR(ctx, 0x166C4/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x166E4/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x16784/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x16904/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x16924/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x15948/4, 0x3FFFFF);
+ INSTANCE_WR(ctx, 0x159A8/4, 0x1FFF);
+ INSTANCE_WR(ctx, 0x15B88/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x15C68/4, 0x4);
+ INSTANCE_WR(ctx, 0x15C88/4, 0x1A);
+ INSTANCE_WR(ctx, 0x15CE8/4, 0x1);
+ INSTANCE_WR(ctx, 0x15F48/4, 0xFFFF00);
+ INSTANCE_WR(ctx, 0x16028/4, 0xF);
+ INSTANCE_WR(ctx, 0x16128/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x16148/4, 0x11);
+ INSTANCE_WR(ctx, 0x16348/4, 0x4);
+ INSTANCE_WR(ctx, 0x163E8/4, 0x2);
+ INSTANCE_WR(ctx, 0x16408/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x16428/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x164A8/4, 0x5);
+ INSTANCE_WR(ctx, 0x164C8/4, 0x52);
+ INSTANCE_WR(ctx, 0x16568/4, 0x1);
+ INSTANCE_WR(ctx, 0x16788/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x167A8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x167C8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x167E8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16808/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16828/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16848/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16868/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16888/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x168A8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x168C8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x168E8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16908/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16928/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16948/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16968/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16988/4, 0x10);
+ INSTANCE_WR(ctx, 0x16E68/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x16E88/4, 0x5);
+ INSTANCE_WR(ctx, 0x16EE8/4, 0x1);
+ INSTANCE_WR(ctx, 0x16F28/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x16F48/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x16F68/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x16F88/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x16FA8/4, 0x3);
+ INSTANCE_WR(ctx, 0x173A8/4, 0xFFFF00);
+ INSTANCE_WR(ctx, 0x173C8/4, 0x1A);
+ INSTANCE_WR(ctx, 0x17408/4, 0x3);
+ INSTANCE_WR(ctx, 0x178E8/4, 0x102);
+ INSTANCE_WR(ctx, 0x17928/4, 0x4);
+ INSTANCE_WR(ctx, 0x17948/4, 0x4);
+ INSTANCE_WR(ctx, 0x17968/4, 0x4);
+ INSTANCE_WR(ctx, 0x17988/4, 0x4);
+ INSTANCE_WR(ctx, 0x179A8/4, 0x4);
+ INSTANCE_WR(ctx, 0x179C8/4, 0x4);
+ INSTANCE_WR(ctx, 0x17A08/4, 0x7FF);
+ INSTANCE_WR(ctx, 0x17A48/4, 0x102);
+ INSTANCE_WR(ctx, 0x17B88/4, 0x4);
+ INSTANCE_WR(ctx, 0x17BA8/4, 0x4);
+ INSTANCE_WR(ctx, 0x17BC8/4, 0x4);
+ INSTANCE_WR(ctx, 0x17BE8/4, 0x4);
+ INSTANCE_WR(ctx, 0x18228/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x18288/4, 0x804);
+ INSTANCE_WR(ctx, 0x182C8/4, 0x4);
+ INSTANCE_WR(ctx, 0x182E8/4, 0x4);
+ INSTANCE_WR(ctx, 0x18308/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x18348/4, 0x4);
+ INSTANCE_WR(ctx, 0x18368/4, 0x4);
+ INSTANCE_WR(ctx, 0x183A8/4, 0x10);
+ INSTANCE_WR(ctx, 0x18448/4, 0x804);
+ INSTANCE_WR(ctx, 0x18468/4, 0x1);
+ INSTANCE_WR(ctx, 0x18488/4, 0x1A);
+ INSTANCE_WR(ctx, 0x184A8/4, 0x7F);
+ INSTANCE_WR(ctx, 0x184E8/4, 0x1);
+ INSTANCE_WR(ctx, 0x18508/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x18548/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x18568/4, 0x4);
+ INSTANCE_WR(ctx, 0x18588/4, 0x4);
+ INSTANCE_WR(ctx, 0x185C8/4, 0x10);
+ INSTANCE_WR(ctx, 0x18648/4, 0x1);
+ INSTANCE_WR(ctx, 0x18668/4, 0x8100C12);
+ INSTANCE_WR(ctx, 0x18748/4, 0x7FF);
+ INSTANCE_WR(ctx, 0x18768/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x18E88/4, 0x1);
+ INSTANCE_WR(ctx, 0x18EE8/4, 0x10);
+ INSTANCE_WR(ctx, 0x19608/4, 0x88);
+ INSTANCE_WR(ctx, 0x19628/4, 0x88);
+ INSTANCE_WR(ctx, 0x19688/4, 0x4);
+ INSTANCE_WR(ctx, 0x19968/4, 0x26);
+ INSTANCE_WR(ctx, 0x199C8/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x19A48/4, 0x1A);
+ INSTANCE_WR(ctx, 0x19A68/4, 0x10);
+ INSTANCE_WR(ctx, 0x19F88/4, 0x52);
+ INSTANCE_WR(ctx, 0x19FC8/4, 0x26);
+ INSTANCE_WR(ctx, 0x1A008/4, 0x4);
+ INSTANCE_WR(ctx, 0x1A028/4, 0x4);
+ INSTANCE_WR(ctx, 0x1A068/4, 0x1A);
+ INSTANCE_WR(ctx, 0x1A0C8/4, 0xFFFF00);
+ INSTANCE_WR(ctx, 0x1A108/4, 0x4);
+ INSTANCE_WR(ctx, 0x1A128/4, 0x4);
+ INSTANCE_WR(ctx, 0x1A168/4, 0x80);
+ INSTANCE_WR(ctx, 0x1A188/4, 0x4);
+ INSTANCE_WR(ctx, 0x1A1A8/4, 0x80C14);
+ INSTANCE_WR(ctx, 0x1A1E8/4, 0x7FF);
+ INSTANCE_WR(ctx, 0x24A48/4, 0x4);
+ INSTANCE_WR(ctx, 0x24A68/4, 0x4);
+ INSTANCE_WR(ctx, 0x24AA8/4, 0x80);
+ INSTANCE_WR(ctx, 0x24AC8/4, 0x4);
+ INSTANCE_WR(ctx, 0x24AE8/4, 0x1);
+ INSTANCE_WR(ctx, 0x24B28/4, 0x27);
+ INSTANCE_WR(ctx, 0x24B68/4, 0x26);
+ INSTANCE_WR(ctx, 0x24BE8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24C08/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24C28/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24C48/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24C68/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24C88/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24CA8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24CC8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24CE8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24D08/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24D28/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24D48/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24D68/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24D88/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24DA8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x24DC8/4, 0x4000000);
+ INSTANCE_WR(ctx, 0x25268/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x25288/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x252E8/4, 0x1FE21);
+ INSTANCE_WR(ctx, 0xB0C/4, 0x2);
+ INSTANCE_WR(ctx, 0xB4C/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0xCEC/4, 0x1);
+ INSTANCE_WR(ctx, 0xD0C/4, 0x10);
+ INSTANCE_WR(ctx, 0xD6C/4, 0x1);
+ INSTANCE_WR(ctx, 0xE0C/4, 0x4);
+ INSTANCE_WR(ctx, 0xE2C/4, 0x400);
+ INSTANCE_WR(ctx, 0xE4C/4, 0x300);
+ INSTANCE_WR(ctx, 0xE6C/4, 0x1001);
+ INSTANCE_WR(ctx, 0xE8C/4, 0x15);
+ INSTANCE_WR(ctx, 0xF4C/4, 0x2);
+ INSTANCE_WR(ctx, 0x106C/4, 0x1);
+ INSTANCE_WR(ctx, 0x108C/4, 0x10);
+ INSTANCE_WR(ctx, 0x10CC/4, 0x1);
+ INSTANCE_WR(ctx, 0x134C/4, 0x10);
+ INSTANCE_WR(ctx, 0x156C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x158C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x15AC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x15CC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x15EC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x160C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x162C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x164C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x166C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x168C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16AC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16CC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x16EC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x170C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x172C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x174C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x1A8C/4, 0x10);
+ INSTANCE_WR(ctx, 0x1ACC/4, 0x3F);
+ INSTANCE_WR(ctx, 0x1BAC/4, 0x1);
+ INSTANCE_WR(ctx, 0x1BEC/4, 0x1);
+ INSTANCE_WR(ctx, 0x1C2C/4, 0x1);
+ INSTANCE_WR(ctx, 0x1DCC/4, 0x11);
+ INSTANCE_WR(ctx, 0x1ECC/4, 0xF);
+ INSTANCE_WR(ctx, 0x1FCC/4, 0x11);
+ INSTANCE_WR(ctx, 0x20AC/4, 0x1);
+ INSTANCE_WR(ctx, 0x20CC/4, 0x1);
+ INSTANCE_WR(ctx, 0x20EC/4, 0x1);
+ INSTANCE_WR(ctx, 0x210C/4, 0x2);
+ INSTANCE_WR(ctx, 0x212C/4, 0x1);
+ INSTANCE_WR(ctx, 0x214C/4, 0x2);
+ INSTANCE_WR(ctx, 0x216C/4, 0x1);
+ INSTANCE_WR(ctx, 0x21AC/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x21EC/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x24AC/4, 0x1);
+ INSTANCE_WR(ctx, 0x24CC/4, 0x2);
+ INSTANCE_WR(ctx, 0x24EC/4, 0x1);
+ INSTANCE_WR(ctx, 0x250C/4, 0x1);
+ INSTANCE_WR(ctx, 0x252C/4, 0x2);
+ INSTANCE_WR(ctx, 0x254C/4, 0x1);
+ INSTANCE_WR(ctx, 0x256C/4, 0x1);
+ INSTANCE_WR(ctx, 0x25EC/4, 0x11);
+ INSTANCE_WR(ctx, 0x260C/4, 0x1);
+ INSTANCE_WR(ctx, 0x328C/4, 0x2);
+ INSTANCE_WR(ctx, 0x32CC/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x346C/4, 0x1);
+ INSTANCE_WR(ctx, 0x348C/4, 0x10);
+ INSTANCE_WR(ctx, 0x34EC/4, 0x1);
+ INSTANCE_WR(ctx, 0x358C/4, 0x4);
+ INSTANCE_WR(ctx, 0x35AC/4, 0x400);
+ INSTANCE_WR(ctx, 0x35CC/4, 0x300);
+ INSTANCE_WR(ctx, 0x35EC/4, 0x1001);
+ INSTANCE_WR(ctx, 0x360C/4, 0x15);
+ INSTANCE_WR(ctx, 0x36CC/4, 0x2);
+ INSTANCE_WR(ctx, 0x37EC/4, 0x1);
+ INSTANCE_WR(ctx, 0x380C/4, 0x10);
+ INSTANCE_WR(ctx, 0x384C/4, 0x1);
+ INSTANCE_WR(ctx, 0x3ACC/4, 0x10);
+ INSTANCE_WR(ctx, 0x3CEC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3D0C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3D2C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3D4C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3D6C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3D8C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3DAC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3DCC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3DEC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3E0C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3E2C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3E4C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3E6C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3E8C/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3EAC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x3ECC/4, 0x3F800000);
+ INSTANCE_WR(ctx, 0x420C/4, 0x10);
+ INSTANCE_WR(ctx, 0x424C/4, 0x3F);
+ INSTANCE_WR(ctx, 0x432C/4, 0x1);
+ INSTANCE_WR(ctx, 0x436C/4, 0x1);
+ INSTANCE_WR(ctx, 0x43AC/4, 0x1);
+ INSTANCE_WR(ctx, 0x454C/4, 0x11);
+ INSTANCE_WR(ctx, 0x464C/4, 0xF);
+ INSTANCE_WR(ctx, 0x474C/4, 0x11);
+ INSTANCE_WR(ctx, 0x482C/4, 0x1);
+ INSTANCE_WR(ctx, 0x484C/4, 0x1);
+ INSTANCE_WR(ctx, 0x486C/4, 0x1);
+ INSTANCE_WR(ctx, 0x488C/4, 0x2);
+ INSTANCE_WR(ctx, 0x48AC/4, 0x1);
+ INSTANCE_WR(ctx, 0x48CC/4, 0x2);
+ INSTANCE_WR(ctx, 0x48EC/4, 0x1);
+ INSTANCE_WR(ctx, 0x492C/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x496C/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x4C2C/4, 0x1);
+ INSTANCE_WR(ctx, 0x4C4C/4, 0x2);
+ INSTANCE_WR(ctx, 0x4C6C/4, 0x1);
+ INSTANCE_WR(ctx, 0x4C8C/4, 0x1);
+ INSTANCE_WR(ctx, 0x4CAC/4, 0x2);
+ INSTANCE_WR(ctx, 0x4CCC/4, 0x1);
+ INSTANCE_WR(ctx, 0x4CEC/4, 0x1);
+ INSTANCE_WR(ctx, 0x4D6C/4, 0x11);
+ INSTANCE_WR(ctx, 0x4D8C/4, 0x1);
+ INSTANCE_WR(ctx, 0xA30/4, 0x4);
+ INSTANCE_WR(ctx, 0xCF0/4, 0x4);
+ INSTANCE_WR(ctx, 0xD10/4, 0x4);
+ INSTANCE_WR(ctx, 0xD30/4, 0x608080);
+ INSTANCE_WR(ctx, 0xDD0/4, 0x4);
+ INSTANCE_WR(ctx, 0xE30/4, 0x4);
+ INSTANCE_WR(ctx, 0xE50/4, 0x4);
+ INSTANCE_WR(ctx, 0xE70/4, 0x80);
+ INSTANCE_WR(ctx, 0xE90/4, 0x1E00);
+ INSTANCE_WR(ctx, 0xEB0/4, 0x4);
+ INSTANCE_WR(ctx, 0x1350/4, 0x4);
+ INSTANCE_WR(ctx, 0x1370/4, 0x80);
+ INSTANCE_WR(ctx, 0x1390/4, 0x4);
+ INSTANCE_WR(ctx, 0x13B0/4, 0x3020100);
+ INSTANCE_WR(ctx, 0x13D0/4, 0x3);
+ INSTANCE_WR(ctx, 0x13F0/4, 0x1E00);
+ INSTANCE_WR(ctx, 0x1410/4, 0x4);
+ INSTANCE_WR(ctx, 0x14B0/4, 0x4);
+ INSTANCE_WR(ctx, 0x14D0/4, 0x3);
+ INSTANCE_WR(ctx, 0x1550/4, 0x4);
+ INSTANCE_WR(ctx, 0x159F0/4, 0x4);
+ INSTANCE_WR(ctx, 0x15A10/4, 0x3);
+ INSTANCE_WR(ctx, 0x15C50/4, 0xF);
+ INSTANCE_WR(ctx, 0x15DD0/4, 0x4);
+ INSTANCE_WR(ctx, 0x15DF0/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x15E10/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x15E30/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x15E50/4, 0xFFFF);
+ INSTANCE_WR(ctx, 0x15F70/4, 0x1);
+ INSTANCE_WR(ctx, 0x15FF0/4, 0x1);
+ INSTANCE_WR(ctx, 0x160B0/4, 0x1);
+ INSTANCE_WR(ctx, 0x16250/4, 0x1);
+ INSTANCE_WR(ctx, 0x16270/4, 0x1);
+ INSTANCE_WR(ctx, 0x16290/4, 0x2);
+ INSTANCE_WR(ctx, 0x162B0/4, 0x1);
+ INSTANCE_WR(ctx, 0x162D0/4, 0x1);
+ INSTANCE_WR(ctx, 0x162F0/4, 0x2);
+ INSTANCE_WR(ctx, 0x16310/4, 0x1);
+ INSTANCE_WR(ctx, 0x16350/4, 0x11);
+ INSTANCE_WR(ctx, 0x16450/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x164B0/4, 0x4);
+ INSTANCE_WR(ctx, 0x16530/4, 0x11);
+ INSTANCE_WR(ctx, 0x16550/4, 0x1);
+ INSTANCE_WR(ctx, 0x16590/4, 0xCF);
+ INSTANCE_WR(ctx, 0x165B0/4, 0xCF);
+ INSTANCE_WR(ctx, 0x165D0/4, 0xCF);
+ INSTANCE_WR(ctx, 0x16730/4, 0x1);
+ INSTANCE_WR(ctx, 0x16750/4, 0x1);
+ INSTANCE_WR(ctx, 0x16770/4, 0x2);
+ INSTANCE_WR(ctx, 0x16790/4, 0x1);
+ INSTANCE_WR(ctx, 0x167B0/4, 0x1);
+ INSTANCE_WR(ctx, 0x167D0/4, 0x2);
+ INSTANCE_WR(ctx, 0x167F0/4, 0x1);
+ INSTANCE_WR(ctx, 0x16830/4, 0x1);
+ INSTANCE_WR(ctx, 0x16850/4, 0x1);
+ INSTANCE_WR(ctx, 0x16870/4, 0x1);
+ INSTANCE_WR(ctx, 0x16890/4, 0x1);
+ INSTANCE_WR(ctx, 0x168B0/4, 0x1);
+ INSTANCE_WR(ctx, 0x168D0/4, 0x1);
+ INSTANCE_WR(ctx, 0x168F0/4, 0x1);
+ INSTANCE_WR(ctx, 0x16910/4, 0x1);
+ INSTANCE_WR(ctx, 0x16930/4, 0x11);
+ INSTANCE_WR(ctx, 0x16A30/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x16A50/4, 0xF);
+ INSTANCE_WR(ctx, 0x16B50/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x16BB0/4, 0x11);
+ INSTANCE_WR(ctx, 0x16BD0/4, 0x1);
+ INSTANCE_WR(ctx, 0x16C50/4, 0x4);
+ INSTANCE_WR(ctx, 0x16D10/4, 0x1);
+ INSTANCE_WR(ctx, 0x16DB0/4, 0x11);
+ INSTANCE_WR(ctx, 0x16EB0/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x16F30/4, 0x11);
+ INSTANCE_WR(ctx, 0x16F50/4, 0x1);
+ INSTANCE_WR(ctx, 0x16F90/4, 0x1);
+ INSTANCE_WR(ctx, 0x16FD0/4, 0x1);
+ INSTANCE_WR(ctx, 0x17010/4, 0x7FF);
+ INSTANCE_WR(ctx, 0x17050/4, 0x1);
+ INSTANCE_WR(ctx, 0x17090/4, 0x1);
+ INSTANCE_WR(ctx, 0x175F0/4, 0x8);
+ INSTANCE_WR(ctx, 0x17610/4, 0x8);
+ INSTANCE_WR(ctx, 0x17630/4, 0x8);
+ INSTANCE_WR(ctx, 0x17650/4, 0x8);
+ INSTANCE_WR(ctx, 0x17670/4, 0x8);
+ INSTANCE_WR(ctx, 0x17690/4, 0x8);
+ INSTANCE_WR(ctx, 0x176B0/4, 0x8);
+ INSTANCE_WR(ctx, 0x176D0/4, 0x8);
+ INSTANCE_WR(ctx, 0x176F0/4, 0x11);
+ INSTANCE_WR(ctx, 0x177F0/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x17810/4, 0x400);
+ INSTANCE_WR(ctx, 0x17830/4, 0x400);
+ INSTANCE_WR(ctx, 0x17850/4, 0x400);
+ INSTANCE_WR(ctx, 0x17870/4, 0x400);
+ INSTANCE_WR(ctx, 0x17890/4, 0x400);
+ INSTANCE_WR(ctx, 0x178B0/4, 0x400);
+ INSTANCE_WR(ctx, 0x178D0/4, 0x400);
+ INSTANCE_WR(ctx, 0x178F0/4, 0x400);
+ INSTANCE_WR(ctx, 0x17910/4, 0x300);
+ INSTANCE_WR(ctx, 0x17930/4, 0x300);
+ INSTANCE_WR(ctx, 0x17950/4, 0x300);
+ INSTANCE_WR(ctx, 0x17970/4, 0x300);
+ INSTANCE_WR(ctx, 0x17990/4, 0x300);
+ INSTANCE_WR(ctx, 0x179B0/4, 0x300);
+ INSTANCE_WR(ctx, 0x179D0/4, 0x300);
+ INSTANCE_WR(ctx, 0x179F0/4, 0x300);
+ INSTANCE_WR(ctx, 0x17A10/4, 0x1);
+ INSTANCE_WR(ctx, 0x17A30/4, 0xF);
+ INSTANCE_WR(ctx, 0x17B30/4, 0x20);
+ INSTANCE_WR(ctx, 0x17B50/4, 0x11);
+ INSTANCE_WR(ctx, 0x17B70/4, 0x100);
+ INSTANCE_WR(ctx, 0x17BB0/4, 0x1);
+ INSTANCE_WR(ctx, 0x17C10/4, 0x40);
+ INSTANCE_WR(ctx, 0x17C30/4, 0x100);
+ INSTANCE_WR(ctx, 0x17C70/4, 0x3);
+ INSTANCE_WR(ctx, 0x17D10/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x17D90/4, 0x2);
+ INSTANCE_WR(ctx, 0x17DB0/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x17EF0/4, 0x1);
+ INSTANCE_WR(ctx, 0x17F90/4, 0x4);
+ INSTANCE_WR(ctx, 0x17FD0/4, 0x1);
+ INSTANCE_WR(ctx, 0x17FF0/4, 0x400);
+ INSTANCE_WR(ctx, 0x18010/4, 0x300);
+ INSTANCE_WR(ctx, 0x18030/4, 0x1001);
+ INSTANCE_WR(ctx, 0x180B0/4, 0x11);
+ INSTANCE_WR(ctx, 0x181B0/4, 0xFAC6881);
+ INSTANCE_WR(ctx, 0x181D0/4, 0xF);
+ INSTANCE_WR(ctx, 0x184D0/4, 0x1FFE67);
+ INSTANCE_WR(ctx, 0x18550/4, 0x11);
+ INSTANCE_WR(ctx, 0x185B0/4, 0x4);
+ INSTANCE_WR(ctx, 0x185F0/4, 0x1);
+ INSTANCE_WR(ctx, 0x18610/4, 0x1);
+ INSTANCE_WR(ctx, 0x18690/4, 0x1);
+ INSTANCE_WR(ctx, 0x18730/4, 0x1);
+ INSTANCE_WR(ctx, 0x18770/4, 0x1);
+ INSTANCE_WR(ctx, 0x187F0/4, 0x2A712488);
+ INSTANCE_WR(ctx, 0x18830/4, 0x4085C000);
+ INSTANCE_WR(ctx, 0x18850/4, 0x40);
+ INSTANCE_WR(ctx, 0x18870/4, 0x100);
+ INSTANCE_WR(ctx, 0x18890/4, 0x10100);
+ INSTANCE_WR(ctx, 0x188B0/4, 0x2800000);
+ INSTANCE_WR(ctx, 0x18B10/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x18B30/4, 0x4E3BFDF);
+ INSTANCE_WR(ctx, 0x18B50/4, 0x1);
+ INSTANCE_WR(ctx, 0x18B90/4, 0xFFFF00);
+ INSTANCE_WR(ctx, 0x18BB0/4, 0x1);
+ INSTANCE_WR(ctx, 0x18C10/4, 0xFFFF00);
+ INSTANCE_WR(ctx, 0x18D30/4, 0x1);
+ INSTANCE_WR(ctx, 0x18D70/4, 0x1);
+ INSTANCE_WR(ctx, 0x18D90/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x18DB0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x18DD0/4, 0xB8A89888);
+ INSTANCE_WR(ctx, 0x18DF0/4, 0xF8E8D8C8);
+ INSTANCE_WR(ctx, 0x18E30/4, 0x1A);
+}
+
+
+static void
+nv84_graph_init_ctxvals(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ctx = ref->gpuobj;
+
+ INSTANCE_WR(ctx, 0x0010c/4, 0x00000030);
+ INSTANCE_WR(ctx, 0x00130/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x001d4/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x001d8/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x00218/4, 0x0000fe0c);
+ INSTANCE_WR(ctx, 0x0022c/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x00258/4, 0x00000187);
+ INSTANCE_WR(ctx, 0x0026c/4, 0x00001018);
+ INSTANCE_WR(ctx, 0x00270/4, 0x000000ff);
+ INSTANCE_WR(ctx, 0x002ac/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x002b0/4, 0x044d00df);
+ INSTANCE_WR(ctx, 0x002b8/4, 0x00000600);
+ INSTANCE_WR(ctx, 0x002d0/4, 0x01000000);
+ INSTANCE_WR(ctx, 0x002d4/4, 0x000000ff);
+ INSTANCE_WR(ctx, 0x002dc/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x002f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x002f8/4, 0x000e0080);
+ INSTANCE_WR(ctx, 0x002fc/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00318/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0031c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00328/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0032c/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00344/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00348/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0034c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0035c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00360/4, 0x003fffff);
+ INSTANCE_WR(ctx, 0x00364/4, 0x00001fff);
+ INSTANCE_WR(ctx, 0x0036c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00370/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00378/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0037c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00380/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00384/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00388/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0038c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00390/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00394/4, 0x00000007);
+ INSTANCE_WR(ctx, 0x00398/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0039c/4, 0x00000007);
+ INSTANCE_WR(ctx, 0x003a0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003a4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003a8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003bc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003c0/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x003c8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003d4/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x003d8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003dc/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x003e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x003f0/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00404/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00408/4, 0x00000070);
+ INSTANCE_WR(ctx, 0x0040c/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x00420/4, 0x0000000c);
+ INSTANCE_WR(ctx, 0x00428/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0042c/4, 0x00000014);
+ INSTANCE_WR(ctx, 0x00434/4, 0x00000029);
+ INSTANCE_WR(ctx, 0x00438/4, 0x00000027);
+ INSTANCE_WR(ctx, 0x0043c/4, 0x00000026);
+ INSTANCE_WR(ctx, 0x00440/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x00444/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00448/4, 0x00000027);
+ INSTANCE_WR(ctx, 0x00454/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00458/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0045c/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00460/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00464/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00468/4, 0x00000006);
+ INSTANCE_WR(ctx, 0x0046c/4, 0x00000007);
+ INSTANCE_WR(ctx, 0x00470/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x004b4/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x004e4/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x004e8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x004ec/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x004f0/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x004f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00500/4, 0x00000012);
+ INSTANCE_WR(ctx, 0x00504/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x00508/4, 0x0000000c);
+ INSTANCE_WR(ctx, 0x0050c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0051c/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00520/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00524/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00530/4, 0x003fffff);
+ INSTANCE_WR(ctx, 0x00534/4, 0x00001fff);
+ INSTANCE_WR(ctx, 0x0055c/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x00560/4, 0x00000014);
+ INSTANCE_WR(ctx, 0x00564/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00570/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0057c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00584/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x00588/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x0058c/4, 0x00000e00);
+ INSTANCE_WR(ctx, 0x00590/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x00594/4, 0x00001e00);
+ INSTANCE_WR(ctx, 0x0059c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005a0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005a4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005a8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005ac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005bc/4, 0x00000200);
+ INSTANCE_WR(ctx, 0x005c4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005c8/4, 0x00000070);
+ INSTANCE_WR(ctx, 0x005cc/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x005d8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005dc/4, 0x00000070);
+ INSTANCE_WR(ctx, 0x005e0/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x005f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x005f4/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x005fc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0060c/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x00614/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0061c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00624/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0062c/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x00630/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x00634/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0063c/4, 0x00000f80);
+ INSTANCE_WR(ctx, 0x00684/4, 0x007f0080);
+ INSTANCE_WR(ctx, 0x006c0/4, 0x007f0080);
+
+ INSTANCE_WR(ctx, 0x006e4/4, 0x3b74f821);
+ INSTANCE_WR(ctx, 0x006e8/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x006f0/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x006f4/4, 0x0000001f);
+ INSTANCE_WR(ctx, 0x006f8/4, 0x027c10fa);
+ INSTANCE_WR(ctx, 0x006fc/4, 0x400000c0);
+ INSTANCE_WR(ctx, 0x00700/4, 0xb7892080);
+
+ INSTANCE_WR(ctx, 0x0070c/4, 0x3b74f821);
+ INSTANCE_WR(ctx, 0x00710/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x00718/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x0071c/4, 0x0000001f);
+ INSTANCE_WR(ctx, 0x00720/4, 0x027c10fa);
+ INSTANCE_WR(ctx, 0x00724/4, 0x400000c0);
+ INSTANCE_WR(ctx, 0x00728/4, 0xb7892080);
+
+ INSTANCE_WR(ctx, 0x00734/4, 0x3b74f821);
+ INSTANCE_WR(ctx, 0x00738/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x00740/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x00744/4, 0x0000001f);
+ INSTANCE_WR(ctx, 0x00748/4, 0x027c10fa);
+ INSTANCE_WR(ctx, 0x0074c/4, 0x400000c0);
+ INSTANCE_WR(ctx, 0x00750/4, 0xb7892080);
+
+ INSTANCE_WR(ctx, 0x0075c/4, 0x3b74f821);
+ INSTANCE_WR(ctx, 0x00760/4, 0x89058001);
+ INSTANCE_WR(ctx, 0x00768/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x0076c/4, 0x0000001f);
+ INSTANCE_WR(ctx, 0x00770/4, 0x027c10fa);
+ INSTANCE_WR(ctx, 0x00774/4, 0x400000c0);
+ INSTANCE_WR(ctx, 0x00778/4, 0xb7892080);
+
+ INSTANCE_WR(ctx, 0x00784/4, 0x00010040);
+ INSTANCE_WR(ctx, 0x0078c/4, 0x00000022);
+ INSTANCE_WR(ctx, 0x00798/4, 0x00010040);
+ INSTANCE_WR(ctx, 0x0079c/4, 0x00000022);
+
+ INSTANCE_WR(ctx, 0x007b4/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x007b8/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x007bc/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x007cc/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x007d0/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x007f4/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x007fc/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00804/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x0080c/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00810/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00834/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00838/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x0083c/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x0084c/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00850/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00874/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x0087c/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00884/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x0088c/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00890/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x008b8/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x008c4/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x008dc/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x008e0/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x008e4/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x008e8/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x00904/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00908/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x0090c/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x00910/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x00914/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00918/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00924/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x00928/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x0092c/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x00930/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x00934/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x0093c/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x00940/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x00950/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00954/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00958/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00968/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x0096c/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00990/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00998/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x009a0/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x009a8/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x009ac/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x009d0/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x009d4/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x009d8/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x009e8/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x009ec/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00a10/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00a18/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00a20/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00a28/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00a2c/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00a54/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x00a60/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x00a78/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x00a7c/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x00a80/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x00a84/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x00aa0/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00aa4/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x00aa8/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x00aac/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x00ab0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00ab4/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00ac0/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x00ac4/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00ac8/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x00acc/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x00ad0/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x00ad8/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x00adc/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x00aec/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00af0/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00af4/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00b04/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00b08/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00b2c/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00b34/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00b3c/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00b44/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00b48/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00b6c/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00b70/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00b74/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00b84/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00b88/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00bac/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00bb4/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00bbc/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00bc4/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00bc8/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00bf0/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x00bfc/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x00c14/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x00c18/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x00c1c/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x00c20/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x00c3c/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00c40/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x00c44/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x00c48/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x00c4c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00c50/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00c5c/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x00c60/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00c64/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x00c68/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x00c6c/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x00c74/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x00c78/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x00c88/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00c8c/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00c90/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00ca0/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00ca4/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00cc8/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00cd0/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00cd8/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00ce0/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00ce4/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00d08/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00d0c/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00d10/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00d20/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00d24/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00d48/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00d50/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00d58/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00d60/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00d64/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00d8c/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x00d98/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x00db0/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x00db4/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x00db8/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x00dbc/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x00dd8/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00ddc/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x00de0/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x00de4/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x00de8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00dec/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00df8/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x00dfc/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00e00/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x00e04/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x00e08/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x00e10/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x00e14/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x00e24/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00e28/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00e2c/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00e3c/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00e40/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00e64/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00e6c/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00e74/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00e7c/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00e80/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00ea4/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00ea8/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00eac/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00ebc/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00ec0/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x00ee4/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x00eec/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x00ef4/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x00efc/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x00f00/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x00f28/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x00f34/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x00f4c/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x00f50/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x00f54/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x00f58/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x00f74/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00f78/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x00f7c/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x00f80/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x00f84/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00f88/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00f94/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x00f98/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x00f9c/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x00fa0/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x00fa4/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x00fac/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x00fb0/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x00fc0/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00fc4/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x00fc8/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x00fd8/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x00fdc/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x01000/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x01008/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x01010/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x01018/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x0101c/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x01040/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x01044/4, 0x00160000);
+ INSTANCE_WR(ctx, 0x01048/4, 0x01800000);
+ INSTANCE_WR(ctx, 0x01058/4, 0x0003ffff);
+ INSTANCE_WR(ctx, 0x0105c/4, 0x00880000);
+ INSTANCE_WR(ctx, 0x01080/4, 0x00010401);
+ INSTANCE_WR(ctx, 0x01088/4, 0x00000078);
+ INSTANCE_WR(ctx, 0x01090/4, 0x000000bf);
+ INSTANCE_WR(ctx, 0x01098/4, 0x00001210);
+ INSTANCE_WR(ctx, 0x0109c/4, 0x08000080);
+ INSTANCE_WR(ctx, 0x010c4/4, 0x00027070);
+ INSTANCE_WR(ctx, 0x010d0/4, 0x03ffffff);
+ INSTANCE_WR(ctx, 0x010e8/4, 0x00120407);
+ INSTANCE_WR(ctx, 0x010ec/4, 0x05091507);
+ INSTANCE_WR(ctx, 0x010f0/4, 0x05100202);
+ INSTANCE_WR(ctx, 0x010f4/4, 0x00030201);
+ INSTANCE_WR(ctx, 0x01110/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x01114/4, 0x0d0c0b0a);
+ INSTANCE_WR(ctx, 0x01118/4, 0x00141210);
+ INSTANCE_WR(ctx, 0x0111c/4, 0x000001f0);
+ INSTANCE_WR(ctx, 0x01120/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x01124/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x01130/4, 0x00039e00);
+ INSTANCE_WR(ctx, 0x01134/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x01138/4, 0x00003800);
+ INSTANCE_WR(ctx, 0x0113c/4, 0x00404040);
+ INSTANCE_WR(ctx, 0x01140/4, 0x0000ff0a);
+ INSTANCE_WR(ctx, 0x01148/4, 0x0077f005);
+ INSTANCE_WR(ctx, 0x0114c/4, 0x003f7fff);
+
+ INSTANCE_WR(ctx, 0x01230/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01284/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x0130c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x01324/4, 0x00000020);
+ INSTANCE_WR(ctx, 0x0134c/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x014ec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x014f0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01504/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x0150c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x01510/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01530/4, 0x00608080);
+ INSTANCE_WR(ctx, 0x0156c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x015d0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01630/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x0164c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x01650/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01670/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x01690/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x016c4/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x016e4/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01724/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01744/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0176c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x01784/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x0178c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x017cc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x01924/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x01a4c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x01b30/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01b50/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x01b70/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01b90/4, 0x03020100);
+ INSTANCE_WR(ctx, 0x01bb0/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x01bd0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01c6c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01c70/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01c8c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01c90/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x01cac/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01ccc/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01cec/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01d0c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01d10/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x01d2c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01d4c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01d6c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01d8c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01dac/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01dcc/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01dec/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01e0c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01e2c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x01e4c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0218c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x021cc/4, 0x0000003f);
+ INSTANCE_WR(ctx, 0x022ac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x022ec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0232c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x024cc/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x025cc/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x026cc/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x027ac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x027cc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x027ec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0280c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0282c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0284c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0286c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x028ac/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x028ec/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x02bac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x02bcc/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x02bec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x02c0c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x02c2c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x02c4c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x02c6c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x02cec/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x02d0c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0398c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x039cc/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x03b6c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x03b8c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x03bec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x03ccc/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x03dec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x03e04/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x03e0c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x03e44/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x03e4c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x040cc/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x042ec/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0430c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0432c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0434c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0436c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0438c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x043ac/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x043cc/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x043ec/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0440c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0442c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0444c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0446c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0448c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x044ac/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x044cc/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x0480c/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x0484c/4, 0x0000003f);
+ INSTANCE_WR(ctx, 0x0492c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0496c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x049a4/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x049ac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04b4c/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x04c4c/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x04d4c/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x04e2c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04e4c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04e6c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04e8c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x04eac/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04ecc/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x04eec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x04f2c/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x04f6c/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x0522c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0524c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x0526c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0528c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x052ac/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x052cc/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x052ec/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0536c/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x0538c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x083a0/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x083c0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x083e0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x08400/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x08420/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x08440/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x084a0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x084c0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x084e0/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x08500/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x08520/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x11e40/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x11e60/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x15044/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x152e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15304/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x15324/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x15344/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x15384/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x15444/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15484/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x154a4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x154c4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x154e4/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x15504/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x155e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15624/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15644/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15664/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15704/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x15744/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x15764/4, 0x00000015);
+ INSTANCE_WR(ctx, 0x157e4/4, 0x04444480);
+ INSTANCE_WR(ctx, 0x15f64/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x16004/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x16064/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x160a4/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x160c4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x160e4/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x16104/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16124/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x16144/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x161b0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x161c8/4, 0x003fffff);
+ INSTANCE_WR(ctx, 0x161d0/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x16228/4, 0x00001fff);
+ INSTANCE_WR(ctx, 0x16408/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x16410/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x164e8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x16508/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x16568/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16590/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x165b0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x165d0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x165f0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x16610/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x16730/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x167b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x167c8/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x16870/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x168a8/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x169a8/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x169c8/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x16a10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16a30/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16a50/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x16a70/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16a90/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16ab0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x16ad0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16b10/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x16bc8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x16c10/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x16c68/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x16c70/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x16c88/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x16ca8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x16cf0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x16d10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16d28/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x16d48/4, 0x00000052);
+ INSTANCE_WR(ctx, 0x16d50/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x16d70/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x16d90/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x16de8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16ef0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16f10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16f30/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x16f50/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16f70/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16f90/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x16fb0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x16ff0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17008/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17010/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17028/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17048/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17050/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17068/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17070/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17088/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17090/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x170a8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x170b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x170c8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x170d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x170e8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x170f0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x17108/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17128/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17148/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17168/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x17188/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x171a8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x171c8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x171e8/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x171f0/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x17208/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x17210/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x17310/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x17370/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x17390/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17410/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x174d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17570/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x17670/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x176e8/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x176f0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x17708/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x17710/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17750/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17768/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17790/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x177a8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x177c8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x177d0/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x177e8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x17808/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x17810/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17828/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x17850/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x17bc4/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x17be4/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x17c28/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x17c48/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x17c84/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x17c88/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x17db0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17dd0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17df0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17e04/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x17e10/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17e24/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x17e30/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17e50/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17e70/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17e90/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x17eb0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x17fb0/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x17fd0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x17ff0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x18010/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x18030/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x18050/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x18070/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x18090/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x180b0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x180d0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x180f0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x18110/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x18130/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x18150/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x18168/4, 0x00000102);
+ INSTANCE_WR(ctx, 0x18170/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x18190/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x181a8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x181b0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x181c8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x181d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x181e8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x181f0/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x18208/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18228/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18248/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18288/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x182c8/4, 0x00000102);
+ INSTANCE_WR(ctx, 0x182f0/4, 0x00000020);
+ INSTANCE_WR(ctx, 0x18310/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x18330/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x18370/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x183d0/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x183f0/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x18408/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18428/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18430/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x18448/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18468/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x184d0/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x18550/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x18570/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x186b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18750/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18790/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x187b0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x187d0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x187f0/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x18870/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x18970/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x18990/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x18aa8/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x18b08/4, 0x00000804);
+ INSTANCE_WR(ctx, 0x18b48/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18b68/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18b88/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x18bc8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18be8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18c28/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x18c90/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x18cc8/4, 0x00000804);
+ INSTANCE_WR(ctx, 0x18ce8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18d08/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x18d10/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x18d28/4, 0x0000007f);
+ INSTANCE_WR(ctx, 0x18d68/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18d70/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18d88/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x18db0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18dc8/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x18dd0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18de8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18e08/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x18e48/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x18e50/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18ec8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18ee8/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x18ef0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18f30/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x18fb0/4, 0x2a712488);
+ INSTANCE_WR(ctx, 0x18fc8/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x18fe8/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x18ff0/4, 0x4085c000);
+ INSTANCE_WR(ctx, 0x19010/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x19030/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x19050/4, 0x00010100);
+ INSTANCE_WR(ctx, 0x19070/4, 0x02800000);
+ INSTANCE_WR(ctx, 0x192d0/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x192f0/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x19310/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x19350/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x19370/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x193d0/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x194f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x19530/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x19550/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x19570/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x19590/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x195b0/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x195f0/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x19630/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19708/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x19768/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x198f0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19910/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19930/4, 0x00608080);
+ INSTANCE_WR(ctx, 0x199d0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19a30/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19a50/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19a70/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x19a90/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19e88/4, 0x00000088);
+ INSTANCE_WR(ctx, 0x19ea8/4, 0x00000088);
+ INSTANCE_WR(ctx, 0x19f08/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19f30/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19f50/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x19f70/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x19f90/4, 0x03020100);
+ INSTANCE_WR(ctx, 0x19fb0/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x19fd0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a070/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a090/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x1a110/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a1e8/4, 0x00000026);
+ INSTANCE_WR(ctx, 0x1a248/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x1a2c8/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x1a2e8/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x1a808/4, 0x00000052);
+ INSTANCE_WR(ctx, 0x1a848/4, 0x00000026);
+ INSTANCE_WR(ctx, 0x1a888/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a8a8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a8e8/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x1a948/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x1a988/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a9a8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1a9e8/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x1aa08/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x1aa28/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x1aa68/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x2d2c8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2d2e8/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2d328/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x2d348/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2d368/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2d3a8/4, 0x00000027);
+ INSTANCE_WR(ctx, 0x2d3e8/4, 0x00000026);
+ INSTANCE_WR(ctx, 0x2d468/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d488/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d4a8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d4c8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d4e8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d508/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d528/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d548/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d568/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d588/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d5a8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d5c8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d5e8/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d608/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d628/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2d648/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x2dae8/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x2db08/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x2db68/4, 0x0001fe21);
+ INSTANCE_WR(ctx, 0x2e5b0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2e5d0/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x2e810/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x2e990/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2e9b0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2e9d0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2e9f0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2ea10/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2eb30/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ebb0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ec70/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ee10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ee30/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ee50/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x2ee70/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ee90/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2eeb0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x2eed0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2ef10/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2f010/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x2f070/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2f0f0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2f110/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f150/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x2f170/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x2f190/4, 0x000000cf);
+ INSTANCE_WR(ctx, 0x2f2f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f310/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f330/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x2f350/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f370/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f390/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x2f3b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f3f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f410/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f430/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f450/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f470/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f490/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f4b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f4d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f4f0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2f5f0/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x2f610/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x2f710/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x2f770/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2f790/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f810/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x2f8d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2f970/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2fa70/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x2faf0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x2fb10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2fb50/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2fb90/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2fbd0/4, 0x000007ff);
+ INSTANCE_WR(ctx, 0x2fc10/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x2fc50/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x301b0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x301d0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x301f0/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x30210/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x30230/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x30250/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x30270/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x30290/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x302b0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x303b0/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x303d0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x303f0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30410/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30430/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30450/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30470/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30490/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x304b0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x304d0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x304f0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30510/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30530/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30550/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30570/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30590/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x305b0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x305d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x305f0/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x306f0/4, 0x00000020);
+ INSTANCE_WR(ctx, 0x30710/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x30730/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x30770/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x307d0/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x307f0/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x30830/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x308d0/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x30950/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x30970/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x30ab0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x30b50/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x30b90/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x30bb0/4, 0x00000400);
+ INSTANCE_WR(ctx, 0x30bd0/4, 0x00000300);
+ INSTANCE_WR(ctx, 0x30bf0/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x30c70/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x30d70/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x30d90/4, 0x0000000f);
+ INSTANCE_WR(ctx, 0x31090/4, 0x001ffe67);
+ INSTANCE_WR(ctx, 0x31110/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x31170/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x311b0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x311d0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x31250/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x312f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x31330/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x313b0/4, 0x2a712488);
+ INSTANCE_WR(ctx, 0x313f0/4, 0x4085c000);
+ INSTANCE_WR(ctx, 0x31410/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x31430/4, 0x00000100);
+ INSTANCE_WR(ctx, 0x31450/4, 0x00010100);
+ INSTANCE_WR(ctx, 0x31470/4, 0x02800000);
+ INSTANCE_WR(ctx, 0x316d0/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x316f0/4, 0x04e3bfdf);
+ INSTANCE_WR(ctx, 0x31710/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x31750/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x31770/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x317d0/4, 0x00ffff00);
+ INSTANCE_WR(ctx, 0x318f0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x31930/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x31950/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x31970/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x31990/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x319b0/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x319f0/4, 0x0000001a);
+ INSTANCE_WR(ctx, 0x4a7e0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x4a800/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x4a820/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x4a840/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x4a880/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x4a8c0/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x4a8e0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x4a900/4, 0x00080c14);
+ INSTANCE_WR(ctx, 0x4a960/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x4a980/4, 0x00000027);
+ INSTANCE_WR(ctx, 0x4a9e0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x52220/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x52500/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x526a0/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x526c0/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x52700/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x52780/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x527c0/4, 0x0000003f);
+ INSTANCE_WR(ctx, 0x52920/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x52940/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x52960/4, 0x04000000);
+ INSTANCE_WR(ctx, 0x52a80/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x52b00/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x52d40/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x52d60/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x52d80/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x52da0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x52dc0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x52de0/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53200/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53220/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53240/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53260/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53280/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x532a0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x532c0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x532e0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53300/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53320/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53340/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53360/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53380/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x533a0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x533c0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x533e0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x53400/4, 0x00000010);
+ INSTANCE_WR(ctx, 0x53460/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x53500/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x53524/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x53540/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x53544/4, 0x80007004);
+ INSTANCE_WR(ctx, 0x53560/4, 0x80007004);
+ INSTANCE_WR(ctx, 0x53564/4, 0x04000400);
+ INSTANCE_WR(ctx, 0x53580/4, 0x04000400);
+ INSTANCE_WR(ctx, 0x53584/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x535a0/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x535e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53600/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53644/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53660/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53684/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x536a0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x536a4/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x536c0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x53824/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x53840/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x53844/4, 0x80007004);
+ INSTANCE_WR(ctx, 0x53860/4, 0x80007004);
+ INSTANCE_WR(ctx, 0x53864/4, 0x04000400);
+ INSTANCE_WR(ctx, 0x53880/4, 0x04000400);
+ INSTANCE_WR(ctx, 0x53884/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x538a0/4, 0x00001000);
+ INSTANCE_WR(ctx, 0x538e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53900/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53944/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53960/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53984/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x539a0/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x539a4/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x539c0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x53b04/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x53b20/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x53be4/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c00/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c04/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c20/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c24/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c40/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c44/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c60/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x53c64/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53c80/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53c84/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x53ca0/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x53ca4/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x53cc0/4, 0x00010001);
+ INSTANCE_WR(ctx, 0x53cc4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53ce0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x53d04/4, 0x0001fe21);
+ INSTANCE_WR(ctx, 0x53d20/4, 0x0001fe21);
+ INSTANCE_WR(ctx, 0x53dc4/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x53de0/4, 0x08100c12);
+ INSTANCE_WR(ctx, 0x53de4/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x53e00/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x53e24/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x53e40/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x53e44/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x53e60/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x53f64/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x53f80/4, 0x0fac6881);
+ INSTANCE_WR(ctx, 0x54004/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x54020/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x54144/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x54160/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x54164/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54180/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54184/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x541a0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x541a4/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x541c0/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x541c4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x541e0/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x541e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54200/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54204/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54220/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x54244/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x54260/4, 0x00000004);
+ INSTANCE_WR(ctx, 0x5b6a4/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x5b6c0/4, 0x00000011);
+ INSTANCE_WR(ctx, 0x5b6e4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x5b700/4, 0x00000001);
+}
+
+int
+nv50_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ int grctx_size = 0x60000, hdr;
+ int ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
+ if (ret)
+ return ret;
+
+ hdr = IS_G80 ? 0x200 : 0x20;
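+	/* Write the per-channel graph context header into RAMIN; the words
+	 * at +0x04/+0x08 appear to be the limit and base of the grctx
+	 * buffer allocated above (destroy_context clears these again).
+	 */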
+ INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002);
+ INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
+ grctx_size - 1);
+ INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
+ INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0);
+ INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
+ INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
+
+ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4,
+ chan->ramin->instance >> 12);
+ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002);
+
+ switch (dev_priv->chipset) {
+ case 0x84:
+ nv84_graph_init_ctxvals(dev, chan->ramin_grctx);
+ break;
+ case 0x86:
+ nv86_graph_init_ctxvals(dev, chan->ramin_grctx);
+ break;
+ default:
+		/* This is complete crack; it accidentally used to make at
+		 * least some G8x cards work partially somehow, though there's
+		 * no good reason why - and it stopped working as the rest
+		 * of the code got off the drugs.
+		 */
+ ret = engine->graph.load_context(chan);
+ if (ret) {
+ DRM_ERROR("Error hacking up context: %d\n", ret);
+ return ret;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv50_graph_destroy_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, hdr;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ hdr = IS_G80 ? 0x200 : 0x20;
+ for (i=hdr; i<hdr+24; i+=4)
+ INSTANCE_WR(chan->ramin->gpuobj, i/4, 0);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
+}
+
+static int
+nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_cp, tv = 20000;
+ int i;
+
+ DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);
+
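+	/* Point PGRAPH at the target instance, request a context save or
+	 * load transfer, then poll CTXCTL_030C until it reads back zero
+	 * (or give up after 'tv' iterations).
+	 */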
+ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(0x400824, NV_READ(0x400824) |
+ (save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
+
+ for (i = 0; i < tv; i++) {
+ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
+ DRM_ERROR("0x40030C = 0x%08x\n",
+ NV_READ(NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+int
+nv50_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst = chan->ramin->instance >> 12;
+
+	DRM_DEBUG("ch%d\n", chan->id);
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(0x400320, 4);
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31));
+
+ return 0;
+}
+
+int
+nv50_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst = chan->ramin->instance >> 12;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ return nv50_graph_transfer_context(dev, inst, 1);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
new file mode 100644
index 0000000..b7a51f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -0,0 +1,324 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+typedef struct {
+ uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+ struct nouveau_gpuobj_ref *pramin_pt;
+ struct nouveau_gpuobj_ref *pramin_bar;
+} nv50_instmem_priv;
+
+#define NV50_INSTMEM_PAGE_SHIFT 12
+#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
+#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+
+/* NOTE: Assumes 0x1700 already covers the correct MiB of PRAMIN.
+ */
+#define BAR0_WI32(g,o,v) do { \
+ uint32_t offset; \
+ if ((g)->im_backing) { \
+ offset = (g)->im_backing->start; \
+ } else { \
+ offset = chan->ramin->gpuobj->im_backing->start; \
+ offset += (g)->im_pramin->start; \
+ } \
+ offset += (o); \
+ NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \
+} while(0)
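+/* Note: BAR0_WI32() also expects 'dev_priv' (used by NV_WRITE) and, for
+ * objects without their own backing, a 'chan' pointer to be in scope at
+ * the call site.
+ */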
+
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+ nv50_instmem_priv *priv;
+ int ret, i;
+ uint32_t v;
+
+ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->Engine.instmem.priv = priv;
+
+ /* Save state, will restore at takedown. */
+ for (i = 0x1700; i <= 0x1710; i+=4)
+ priv->save1700[(i-0x1700)/4] = NV_READ(i);
+
+	/* Reserve the last MiB of VRAM; we should probably try to avoid
+	 * setting up the tables below over the top of the VBIOS image at
+	 * some point.
+	 */
+ dev_priv->ramin_rsvd_vram = 1 << 20;
+ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+ c_size = 128 << 10;
+ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
+ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
+ c_base = c_vmpd + 0x4000;
+ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size);
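+	/* Rough layout of the reserved block: RAMFC at c_ramfc, the VM page
+	 * directory at c_vmpd, and the channel's private PRAMIN heap from
+	 * c_base (c_vmpd + 16KiB) up to c_size.
+	 */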
+
+ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset);
+ DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
+ DRM_DEBUG(" Aperture size: %d MiB\n",
+ (uint32_t)dev_priv->ramin->size >> 20);
+ DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10);
+
+ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
+
+ /* Create a fake channel, and use it as our "dummy" channels 0/127.
+ * The main reason for creating a channel is so we can use the gpuobj
+	 * code.  However, it's probably worth noting that NVIDIA also set up
+	 * their channels 0/127 with the same values configured here, so there
+	 * may be some other reason for doing this.
+	 *
+	 * We have to create the entire channel manually, as the real channel
+ * creation code assumes we have PRAMIN access, and we don't until
+ * we're done here.
+ */
+ chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER);
+ if (!chan)
+ return -ENOMEM;
+ chan->id = 0;
+ chan->dev = dev;
+ chan->file_priv = (struct drm_file *)-2;
+ dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
+ /* Channel's PRAMIN object + heap */
+ if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0,
+ NULL, &chan->ramin)))
+ return ret;
+
+ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ return -ENOMEM;
+
+ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
+ if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
+ 0x4000, 0, NULL, &chan->ramfc)))
+ return ret;
+
+ for (i = 0; i < c_vmpd; i += 4)
+ BAR0_WI32(chan->ramin->gpuobj, i, 0);
+
+ /* VM page directory */
+ if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
+ 0x4000, 0, &chan->vm_pd, NULL)))
+ return ret;
+ for (i = 0; i < 0x4000; i += 8) {
+ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
+ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
+ }
+
+	/* PRAMIN page table; cheat and map it into the VM at 0x0000000000.
+	 * We map the entire fake channel into the start of the PRAMIN BAR.
+	 */
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+ 0, &priv->pramin_pt)))
+ return ret;
+
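+	/* One 8-byte PTE per 4KiB page: pages backing the fake channel map
+	 * straight to VRAM (low bit set), everything else gets 0x00000009,
+	 * the same "unmapped" pattern the unbind path writes.
+	 */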
+ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) {
+ if (v < (c_offset + c_size))
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+ else
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ }
+
+ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+
+ /* DMA object for PRAMIN BAR */
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->pramin_bar)))
+ return ret;
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
+
+ /* Poke the relevant regs, and pray it works :) */
+ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ NV_WRITE(NV50_PUNK_UNK1710, 0);
+ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0);
+ NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+	/* Assume that praying isn't enough; check that we can re-read the
+	 * entire fake channel back from the PRAMIN BAR. */
+ for (i = 0; i < c_size; i+=4) {
+ if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) {
+ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Global PRAMIN heap */
+ if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ c_size, dev_priv->ramin->size - c_size)) {
+ dev_priv->ramin_heap = NULL;
+ DRM_ERROR("Failed to init RAMIN heap\n");
+ }
+
+	/* XXX: incorrect, but needed to make the hash func "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ return 0;
+}
+
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (!priv)
+ return;
+
+ /* Restore state from before init */
+ for (i = 0x1700; i <= 0x1710; i+=4)
+ NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
+
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+
+ /* Destroy dummy channel */
+ if (chan) {
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_mem_takedown(&chan->ramin_heap);
+
+ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
+ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
+ }
+
+ dev_priv->Engine.instmem.priv = NULL;
+ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
+}
+
+int
+nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ if (*sz == 0)
+ return -EINVAL;
+
+ gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
+ *sz, NOUVEAU_MEM_FB |
+ NOUVEAU_MEM_NOVM,
+ (struct drm_file *)-2);
+ if (!gpuobj->im_backing) {
+ DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->Engine.instmem.unbind(dev, gpuobj);
+ nouveau_mem_free(dev, gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ uint32_t pte, pte_end, vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ DRM_DEBUG("st=0x%0llx sz=0x%0llx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ vram = gpuobj->im_backing->start;
+
+ DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ DRM_DEBUG("first vram page: 0x%llx\n",
+ gpuobj->im_backing->start);
+
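+	/* Write one PTE per 4KiB page, pointing at the backing VRAM with the
+	 * low bit set, mirroring the mapping built at init time.
+	 */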
+ while (pte < pte_end) {
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+
+ pte += 8;
+ vram += NV50_INSTMEM_PAGE_SIZE;
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ while (pte < pte_end) {
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+ pte += 8;
+ }
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
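
Note on the bind/unbind pair above: it programs the NV50 PRAMIN page table, where every 4 KiB page of a PRAMIN allocation is described by one 8-byte PTE. A PRAMIN byte offset therefore maps to a PTE byte offset of (offset >> 12) << 3; binding writes the backing VRAM address with bit 0 set as a present bit, and unbinding rewrites the entry to 0x00000009. The standalone C sketch below only mirrors that offset arithmetic with made-up example values (pramin_start, pramin_size and vram are hypothetical), to make the loop in nv50_instmem_bind() easier to follow; it is not part of the patch.

#include <stdint.h>
#include <stdio.h>

#define INSTMEM_PAGE_SHIFT 12                       /* 4 KiB PRAMIN pages */
#define INSTMEM_PAGE_SIZE  (1u << INSTMEM_PAGE_SHIFT)

int main(void)
{
	uint64_t pramin_start = 0x20000;  /* example PRAMIN offset of an object */
	uint64_t pramin_size  = 0x3000;   /* three pages */
	uint64_t vram         = 0x100000; /* first backing VRAM page */

	/* One 8-byte PTE per page: pte/pte_end are byte offsets into the table. */
	uint32_t pte     = (pramin_start >> INSTMEM_PAGE_SHIFT) << 3;
	uint32_t pte_end = ((pramin_size >> INSTMEM_PAGE_SHIFT) << 3) + pte;

	while (pte < pte_end) {
		/* Low word: VRAM address | 1 (present bit); high word stays zero. */
		printf("PTE @0x%04x: 0x%08llx 0x00000000\n",
		       pte, (unsigned long long)(vram | 1));
		pte += 8;
		vram += INSTMEM_PAGE_SIZE;
	}
	return 0;
}
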
diff --git a/drivers/gpu/drm/nouveau/nv50_mc.c b/drivers/gpu/drm/nouveau/nv50_mc.c
new file mode 100644
index 0000000..b111826
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_mc.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nv50_mc_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ return 0;
+}
+
+void nv50_mc_takedown(struct drm_device *dev)
+{
+}
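
nv50_mc_init()/nv50_mc_takedown(), like the instmem routines earlier in this file set, are reached through per-engine function tables: the takedown path above calls dev_priv->Engine.instmem.unbind() and keeps its state in dev_priv->Engine.instmem.priv. The sketch below shows roughly what such a table looks like for the instmem engine; the struct and field names are assumptions inferred from those call sites, not copied from the driver's headers.

#include <stdint.h>

struct drm_device;
struct nouveau_gpuobj;

/* Hypothetical vtable shape, inferred from call sites such as
 * dev_priv->Engine.instmem.unbind(dev, gpuobj) in nv50_instmem_clear();
 * the real driver's struct and field names may differ. */
struct example_instmem_engine {
	void *priv;                       /* e.g. the nv50_instmem_priv state */
	int  (*init)(struct drm_device *);
	void (*takedown)(struct drm_device *);
	int  (*populate)(struct drm_device *, struct nouveau_gpuobj *, uint32_t *);
	void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
	int  (*bind)(struct drm_device *, struct nouveau_gpuobj *);
	int  (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
};
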
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index 82b6983..d65553c 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -8,3 +8,4 @@ unifdef-y += radeon_drm.h
unifdef-y += sis_drm.h
unifdef-y += savage_drm.h
unifdef-y += via_drm.h
+unifdef-y += nouveau_drm.h
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 31e2f17..c6ba6a9 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1136,6 +1136,8 @@ extern void drm_idlelock_release(struct drm_lock_data *lock_data);
extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv);
/* Buffer management support (drm_bufs.h) */
+extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ drm_local_map_t *map);
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
extern int drm_addmap(struct drm_device *dev, unsigned int offset,
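
The drmP.h hunk above only exposes drm_find_matching_map(), previously static in drm_bufs.c, so a driver can check whether a mapping for a given offset has already been registered before creating another one. A minimal kernel-side sketch of that usage follows; the helper name and the choice of fields to match on are assumptions, not code taken from this patch.

#include "drmP.h"

/* Hypothetical helper: reuse an existing register mapping if one matches,
 * otherwise register a new one via drm_addmap(). */
static int example_find_or_add_map(struct drm_device *dev,
				   unsigned long offset, unsigned long size,
				   drm_local_map_t **map_out)
{
	drm_local_map_t wanted = {
		.offset = offset,
		.size   = size,
		.type   = _DRM_REGISTERS,
	};
	struct drm_map_list *entry = drm_find_matching_map(dev, &wanted);

	if (entry) {
		*map_out = entry->map;  /* already registered: reuse it */
		return 0;
	}
	return drm_addmap(dev, offset, size, _DRM_REGISTERS, 0, map_out);
}
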
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
new file mode 100644
index 0000000..a99c615
--- /dev/null
+++ b/include/drm/nouveau_drm.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2005 Stephane Marchesin.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_DRM_H__
+#define __NOUVEAU_DRM_H__
+
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 11
+
+struct drm_nouveau_channel_alloc {
+ uint32_t fb_ctxdma_handle;
+ uint32_t tt_ctxdma_handle;
+
+ int channel;
+ uint32_t put_base;
+ /* FIFO control regs */
+ drm_handle_t ctrl;
+ int ctrl_size;
+ /* DMA command buffer */
+ drm_handle_t cmdbuf;
+ int cmdbuf_size;
+ /* Notifier memory */
+ drm_handle_t notifier;
+ int notifier_size;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
+
+struct drm_nouveau_grobj_alloc {
+ int channel;
+ uint32_t handle;
+ int class;
+};
+
+#define NOUVEAU_MEM_ACCESS_RO 1
+#define NOUVEAU_MEM_ACCESS_WO 2
+#define NOUVEAU_MEM_ACCESS_RW 3
+struct drm_nouveau_notifierobj_alloc {
+ int channel;
+ uint32_t handle;
+ int count;
+
+ uint32_t offset;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
+
+/* A safety margin of command slots, needed to avoid a race condition:
+ * without it the CPU could be writing into the area the GPU is still
+ * fetching.  Is this large enough?  It is only 32 bytes, while the
+ * maximum fetch size is 256 bytes. */
+#define NOUVEAU_DMA_SKIPS 8
+
+#define NOUVEAU_MEM_FB 0x00000001
+#define NOUVEAU_MEM_AGP 0x00000002
+#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004
+#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008
+#define NOUVEAU_MEM_PCI 0x00000010
+#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020
+#define NOUVEAU_MEM_PINNED 0x00000040
+#define NOUVEAU_MEM_USER_BACKED 0x00000080
+#define NOUVEAU_MEM_MAPPED 0x00000100
+#define NOUVEAU_MEM_TILE 0x00000200
+#define NOUVEAU_MEM_TILE_ZETA 0x00000400
+#define NOUVEAU_MEM_INSTANCE 0x01000000 /* internal */
+#define NOUVEAU_MEM_NOTIFIER 0x02000000 /* internal */
+#define NOUVEAU_MEM_NOVM 0x04000000 /* internal */
+#define NOUVEAU_MEM_USER 0x08000000 /* internal */
+#define NOUVEAU_MEM_INTERNAL (NOUVEAU_MEM_INSTANCE | \
+ NOUVEAU_MEM_NOTIFIER | \
+ NOUVEAU_MEM_NOVM | \
+ NOUVEAU_MEM_USER)
+
+struct drm_nouveau_mem_alloc {
+ int flags;
+ int alignment;
+ uint64_t size; /* in bytes */
+ uint64_t offset;
+ drm_handle_t map_handle;
+};
+
+struct drm_nouveau_mem_free {
+ uint64_t offset;
+ int flags;
+};
+
+struct drm_nouveau_mem_tile {
+ uint64_t offset;
+ uint64_t delta;
+ uint64_t size;
+ int flags;
+};
+
+/* FIXME: maybe unify {GET,SET}PARAMs */
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
+#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+struct drm_nouveau_getparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1
+#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2
+struct drm_nouveau_setparam {
+ uint64_t param;
+ uint64_t value;
+};
+
+enum nouveau_card_type {
+ NV_UNKNOWN =0,
+ NV_04 =4,
+ NV_05 =5,
+ NV_10 =10,
+ NV_11 =11,
+ NV_17 =17,
+ NV_20 =20,
+ NV_30 =30,
+ NV_40 =40,
+ NV_44 =44,
+ NV_50 =50,
+ NV_LAST =0xffff,
+};
+
+enum nouveau_bus_type {
+ NV_AGP =0,
+ NV_PCI =1,
+ NV_PCIE =2,
+};
+
+#define NOUVEAU_MAX_SAREA_CLIPRECTS 16
+
+struct drm_nouveau_sarea {
+ /* the cliprects */
+ struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
+ unsigned int nbox;
+};
+
+#define DRM_NOUVEAU_CARD_INIT 0x00
+#define DRM_NOUVEAU_GETPARAM 0x01
+#define DRM_NOUVEAU_SETPARAM 0x02
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
+#define DRM_NOUVEAU_CHANNEL_FREE 0x04
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
+#define DRM_NOUVEAU_MEM_ALLOC 0x08
+#define DRM_NOUVEAU_MEM_FREE 0x09
+#define DRM_NOUVEAU_MEM_TILE 0x0a
+#define DRM_NOUVEAU_SUSPEND 0x0b
+#define DRM_NOUVEAU_RESUME 0x0c
+
+#endif /* __NOUVEAU_DRM_H__ */
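
The DRM_NOUVEAU_* values above are per-driver command indices (offsets from the DRM command base), and the structures earlier in the header are their request blocks. Below is a hedged userspace sketch of a VRAM allocation through DRM_NOUVEAU_MEM_ALLOC; it assumes libdrm's drmCommandWriteRead() wrapper and a file descriptor already opened on the nouveau device, and example_alloc_vram() is a made-up name. On success the kernel is expected to fill in the offset and map_handle fields of the request; freeing would presumably pass the returned offset and flags back through DRM_NOUVEAU_MEM_FREE in a struct drm_nouveau_mem_free.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>            /* drmCommandWriteRead() */
#include "nouveau_drm.h"

/* Ask the kernel for 'size' bytes of VRAM; fills *req on success. */
static int example_alloc_vram(int fd, uint64_t size,
			      struct drm_nouveau_mem_alloc *req)
{
	memset(req, 0, sizeof(*req));
	req->flags     = NOUVEAU_MEM_FB;   /* VRAM only, no AGP/PCI fallback */
	req->alignment = 0;                /* default alignment */
	req->size      = size;             /* in bytes */

	/* DRM_NOUVEAU_MEM_ALLOC is the per-driver command index defined above. */
	return drmCommandWriteRead(fd, DRM_NOUVEAU_MEM_ALLOC,
				   req, sizeof(*req));
}
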