From c4030c794274b22ba6ccb7c919900b41f5c723f2 Mon Sep 17 00:00:00 2001
From: Dave Airlie <airlied@redhat.com>
Date: Wed, 4 Mar 2009 16:51:14 +1000
Subject: [PATCH] radeon/r100/r200: import latest merge

---
 src/mesa/drivers/dri/radeon/radeon_bo_drm.h        |  182 ++++
 src/mesa/drivers/dri/radeon/radeon_bo_legacy.c     |  825 +++++++++++++++++
 src/mesa/drivers/dri/radeon/radeon_bo_legacy.h     |   47 +
 src/mesa/drivers/dri/radeon/radeon_bocs_wrapper.h  |   67 ++
 src/mesa/drivers/dri/radeon/radeon_cmdbuf.h        |  143 +++
 src/mesa/drivers/dri/radeon/radeon_common.c        |  849 +++++++++++++++++
 src/mesa/drivers/dri/radeon/radeon_common.h        |   55 ++
 .../drivers/dri/radeon/radeon_common_context.c     |  589 ++++++++++
 .../drivers/dri/radeon/radeon_common_context.h     |  508 ++++++++++
 src/mesa/drivers/dri/radeon/radeon_cs_drm.h        |  207 +++++
 src/mesa/drivers/dri/radeon/radeon_cs_legacy.c     |  504 ++++++++++
 src/mesa/drivers/dri/radeon/radeon_cs_legacy.h     |   40 +
 src/mesa/drivers/dri/radeon/radeon_dma.c           |  323 +++++++
 src/mesa/drivers/dri/radeon/radeon_dma.h           |   51 +
 src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c   |  360 ++++++++
 src/mesa/drivers/dri/radeon/radeon_mipmap_tree.h   |   97 ++
 src/mesa/drivers/dri/radeon/radeon_texture.c       |  966 ++++++++++++++++++++
 src/mesa/drivers/dri/radeon/radeon_texture.h       |  118 +++
 18 files changed, 5931 insertions(+), 0 deletions(-)
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_bo_drm.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_bo_legacy.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_bocs_wrapper.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_cmdbuf.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_common.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_common.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_common_context.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_common_context.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_cs_drm.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_cs_legacy.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_cs_legacy.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_dma.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_dma.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_mipmap_tree.h
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_texture.c
 create mode 100644 src/mesa/drivers/dri/radeon/radeon_texture.h

diff --git a/src/mesa/drivers/dri/radeon/radeon_bo_drm.h b/src/mesa/drivers/dri/radeon/radeon_bo_drm.h
new file mode 100644
index 0000000..1ed13f1
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_bo_drm.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ *      Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_H
+#define RADEON_BO_H
+
+#include <stdio.h>
+#include <stdint.h>
+//#include "radeon_track.h"
+
+/* bo object */
+#define RADEON_BO_FLAGS_MACRO_TILE 1
+#define RADEON_BO_FLAGS_MICRO_TILE 2
+
+struct radeon_bo_manager;
+
+struct radeon_bo {
+    uint32_t alignment;
+    uint32_t handle;
+    uint32_t size;
+    uint32_t domains;
+    uint32_t flags;
+    unsigned cref;
+#ifdef RADEON_BO_TRACK
+    struct radeon_track *track;
+#endif
+    void *ptr;
+    struct radeon_bo_manager *bom;
+    uint32_t space_accounted;
+};
+
+/* bo functions */
+struct radeon_bo_funcs {
+    struct radeon_bo *(*bo_open)(struct radeon_bo_manager *bom,
+                                 uint32_t handle,
+                                 uint32_t size,
+                                 uint32_t alignment,
+                                 uint32_t domains,
+                                 uint32_t flags);
+    void (*bo_ref)(struct radeon_bo *bo);
+    struct radeon_bo *(*bo_unref)(struct radeon_bo *bo);
+    int (*bo_map)(struct radeon_bo *bo, int write);
+    int (*bo_unmap)(struct radeon_bo *bo);
+    int (*bo_wait)(struct radeon_bo *bo);
+};
+
+struct radeon_bo_manager {
+    struct radeon_bo_funcs *funcs;
+    int fd;
+
+#ifdef RADEON_BO_TRACK
+    struct radeon_tracker tracker;
+#endif
+};
+
+static inline void _radeon_bo_debug(struct radeon_bo *bo,
+                                    const char *op,
+                                    const char *file,
+                                    const char *func,
+                                    int line)
+{
+    fprintf(stderr, "%s %p 0x%08X 0x%08X 0x%08X [%s %s %d]\n",
+            op, bo, bo->handle, bo->size, bo->cref, file, func, line);
+}
+
+static inline struct radeon_bo *_radeon_bo_open(struct radeon_bo_manager *bom,
+                                                uint32_t handle,
+                                                uint32_t size,
+                                                uint32_t alignment,
+                                                uint32_t domains,
+                                                uint32_t flags,
+                                                const char *file,
+                                                const char *func,
+                                                int line)
+{
+    struct radeon_bo *bo;
+
+    bo = bom->funcs->bo_open(bom, handle, size, alignment, domains, flags);
+#ifdef RADEON_BO_TRACK
+    if (bo) {
+        bo->track = radeon_tracker_add_track(&bom->tracker, bo->handle);
+        radeon_track_add_event(bo->track, file, func, "open", line);
+    }
+#endif
+    return bo;
+}
+
+static inline void _radeon_bo_ref(struct radeon_bo *bo,
+                                  const char *file,
+                                  const char *func,
+                                  int line)
+{
+    bo->cref++;
+#ifdef RADEON_BO_TRACK
+    radeon_track_add_event(bo->track, file, func, "ref", line);
+#endif
+    bo->bom->funcs->bo_ref(bo);
+}
+
+static inline struct radeon_bo *_radeon_bo_unref(struct radeon_bo *bo,
+                                                 const char *file,
+                                                 const char *func,
+                                                 int line)
+{
+    bo->cref--;
+#ifdef RADEON_BO_TRACK
+    radeon_track_add_event(bo->track, file, func, "unref", line);
+    if (bo->cref <= 0) {
+        radeon_tracker_remove_track(&bo->bom->tracker, bo->track);
+        bo->track = NULL;
+    }
+#endif
+    return bo->bom->funcs->bo_unref(bo);
+}
+
+static inline int _radeon_bo_map(struct radeon_bo *bo,
+                                 int write,
+                                 const char *file,
+                                 const char *func,
+                                 int line)
+{
+    return bo->bom->funcs->bo_map(bo, write);
+}
+
+static inline int _radeon_bo_unmap(struct radeon_bo *bo,
+                                   const char *file,
+                                   const char *func,
+                                   int line)
+{
+    return bo->bom->funcs->bo_unmap(bo);
+}
+
+static inline int _radeon_bo_wait(struct radeon_bo *bo,
+                                  const char *file,
+                                  const char *func,
+                                  int line)
+{
+    return bo->bom->funcs->bo_wait(bo);
+}
+
+#define radeon_bo_open(bom, h, s, a, d, f)\
+    _radeon_bo_open(bom, h, s, a, d, f, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_ref(bo)\
+    _radeon_bo_ref(bo, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_unref(bo)\
+    _radeon_bo_unref(bo, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_map(bo, w)\
+    _radeon_bo_map(bo, w, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_unmap(bo)\
+    _radeon_bo_unmap(bo, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_debug(bo, opcode)\
+    _radeon_bo_debug(bo, opcode, __FILE__, __FUNCTION__, __LINE__)
+#define radeon_bo_wait(bo) \
+    _radeon_bo_wait(bo, __FILE__, __func__, __LINE__)
+
+#endif
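
As a usage sketch (illustrative only, not part of the patch): the radeon_bo_* macros above route every call through the manager's vtable and append __FILE__/__FUNCTION__/__LINE__, so RADEON_BO_TRACK builds can log each open/ref/unref event. A hypothetical caller, assuming <string.h> for memset, a manager constructed elsewhere (e.g. by the legacy constructor added later in this patch), and the RADEON_GEM_DOMAIN_GTT flag from radeon_bocs_wrapper.h, might look like:

static void example_bo_usage(struct radeon_bo_manager *bom)
{
    struct radeon_bo *bo;

    /* handle == 0 asks for a new buffer: 64 KiB, 4 KiB aligned, in GTT */
    bo = radeon_bo_open(bom, 0, 64 * 1024, 4096, RADEON_GEM_DOMAIN_GTT, 0);
    if (bo == NULL)
        return;
    if (radeon_bo_map(bo, 1) == 0) {   /* second argument: map for write */
        memset(bo->ptr, 0, bo->size);  /* CPU access goes through bo->ptr */
        radeon_bo_unmap(bo);
    }
    radeon_bo_unref(bo);               /* drops the reference taken by open */
}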
diff --git a/src/mesa/drivers/dri/radeon/radeon_bo_legacy.c b/src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
new file mode 100644
index 0000000..03a6299
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_bo_legacy.c
@@ -0,0 +1,825 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Dave Airlie
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ *      Aapo Tahkola <aet@rasterburn.org>
+ *      Nicolai Haehnle <prefect_@gmx.net>
+ *      Dave Airlie
+ *      Jérôme Glisse <glisse@freedesktop.org>
+ */
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include "xf86drm.h"
+#include "texmem.h"
+#include "main/simple_list.h"
+
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_common.h"
+#include "radeon_bocs_wrapper.h"
+
+/* no seriously texmem.c is this screwed up */
+struct bo_legacy_texture_object {
+    driTextureObject base;
+    struct bo_legacy *parent;
+};
+
+struct bo_legacy {
+    struct radeon_bo base;
+    int map_count;
+    uint32_t pending;
+    int is_pending;
+    int static_bo;
+    uint32_t offset;
+    struct bo_legacy_texture_object *tobj;
+    int validated;
+    int dirty;
+    void *ptr;
+    struct bo_legacy *next, *prev;
+    struct bo_legacy *pnext, *pprev;
+};
+
+struct bo_manager_legacy {
+    struct radeon_bo_manager base;
+    unsigned nhandle;
+    unsigned nfree_handles;
+    unsigned cfree_handles;
+    uint32_t current_age;
+    struct bo_legacy bos;
+    struct bo_legacy pending_bos;
+    uint32_t fb_location;
+    uint32_t texture_offset;
+    unsigned dma_alloc_size;
+    uint32_t dma_buf_count;
+    unsigned cpendings;
+    driTextureObject texture_swapped;
+    driTexHeap *texture_heap;
+    struct radeon_screen *screen;
+    unsigned *free_handles;
+};
+
+static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
+{
+    struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;
+
+    if (tobj->parent) {
+        tobj->parent->tobj = NULL;
+        tobj->parent->validated = 0;
+    }
+}
+
+static void inline clean_handles(struct bo_manager_legacy *bom)
+{
+    while (bom->cfree_handles > 0 &&
+           !bom->free_handles[bom->cfree_handles - 1])
+        bom->cfree_handles--;
+
+}
+static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
+{
+    uint32_t tmp;
+
+    *handle = 0;
+    if (bom->nhandle == 0xFFFFFFFF) {
+        return -EINVAL;
+    }
+    if (bom->cfree_handles > 0) {
+        tmp = bom->free_handles[--bom->cfree_handles];
+        clean_handles(bom);
+    } else {
+        bom->cfree_handles = 0;
+        tmp = bom->nhandle++;
+    }
+    assert(tmp);
+    *handle = tmp;
+    return 0;
+}
+
+static int legacy_free_handle(struct bo_manager_legacy *bom, uint32_t handle)
+{
+    uint32_t *handles;
+
+    if (!handle) {
+        return 0;
+    }
+    if (handle == (bom->nhandle - 1)) {
+        int i;
+
+        bom->nhandle--;
+        for (i = bom->cfree_handles - 1; i >= 0; i--) {
+            if (bom->free_handles[i] == (bom->nhandle - 1)) {
+                bom->nhandle--;
+                bom->free_handles[i] = 0;
+            }
+        }
+        clean_handles(bom);
+        return 0;
+    }
+    if (bom->cfree_handles < bom->nfree_handles) {
+        bom->free_handles[bom->cfree_handles++] = handle;
+        return 0;
+    }
+    bom->nfree_handles += 0x100;
+    handles = (uint32_t*)realloc(bom->free_handles, bom->nfree_handles * 4);
+    if (handles == NULL) {
+        bom->nfree_handles -= 0x100;
+        return -ENOMEM;
+    }
+    bom->free_handles = handles;
+    bom->free_handles[bom->cfree_handles++] = handle;
+    return 0;
+}
+
+static void legacy_get_current_age(struct bo_manager_legacy *boml)
+{
+    drm_radeon_getparam_t gp;
+    int r;
+
+    if (IS_R300_CLASS(boml->screen)) {
+        gp.param = RADEON_PARAM_LAST_CLEAR;
+        gp.value = (int *)&boml->current_age;
+        r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
+                                &gp, sizeof(gp));
+        if (r) {
+            fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
+            exit(1);
+        }
+    } else
+        boml->current_age = boml->screen->scratch[3];
+}
+
+static int legacy_is_pending(struct radeon_bo *bo)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    if (bo_legacy->is_pending <= 0) {
+        bo_legacy->is_pending = 0;
+        return 0;
+    }
+    if (boml->current_age >= bo_legacy->pending) {
+        if (boml->pending_bos.pprev == bo_legacy) {
+            boml->pending_bos.pprev = bo_legacy->pprev;
+        }
+        bo_legacy->pprev->pnext = bo_legacy->pnext;
+        if (bo_legacy->pnext) {
+            bo_legacy->pnext->pprev = bo_legacy->pprev;
+        }
+        assert(bo_legacy->is_pending <= bo->cref);
+        while (bo_legacy->is_pending--) {
+            bo = radeon_bo_unref(bo);
+            if (!bo)
+                break;
+        }
+        if (bo)
+            bo_legacy->is_pending = 0;
+        boml->cpendings--;
+        return 0;
+    }
+    return 1;
+}
+
+static int legacy_wait_pending(struct radeon_bo *bo)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    if (!bo_legacy->is_pending) {
+        return 0;
+    }
+    /* FIXME: lockup and userspace busy looping that's all the folks */
+    legacy_get_current_age(boml);
+    while (legacy_is_pending(bo)) {
+        usleep(10);
+        legacy_get_current_age(boml);
+    }
+    return 0;
+}
+
+static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
+{
+    struct bo_legacy *bo_legacy;
+    struct bo_legacy *next;
+
+    legacy_get_current_age(boml);
+    bo_legacy = boml->pending_bos.pnext;
+    while (bo_legacy) {
+        if (debug)
+            fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
+                    boml->current_age, bo_legacy->pending);
+        next = bo_legacy->pnext;
+        if (legacy_is_pending(&(bo_legacy->base))) {
+        }
+        bo_legacy = next;
+    }
+}
+
+static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
+{
+    struct bo_legacy *bo_legacy;
+
+    legacy_get_current_age(boml);
+    bo_legacy = boml->pending_bos.pnext;
+    if (!bo_legacy)
+        return -1;
+    legacy_wait_pending(&bo_legacy->base);
+    return 0;
+}
+
+static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
+{
+    struct bo_legacy *legacy;
+
+    legacy = boml->bos.next;
+    while (legacy != &boml->bos) {
+        if (legacy->tobj) {
+            if (legacy->validated) {
+                driDestroyTextureObject(&legacy->tobj->base);
+                legacy->tobj = 0;
+                legacy->validated = 0;
+            }
+        }
+        legacy = legacy->next;
+    }
+}
+
+static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
+                                     uint32_t size,
+                                     uint32_t alignment,
+                                     uint32_t domains,
+                                     uint32_t flags)
+{
+    struct bo_legacy *bo_legacy;
+    static int pgsize;
+
+    if (pgsize == 0)
+        pgsize = getpagesize() - 1;
+
+    size = (size + pgsize) & ~pgsize;
+
+    bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
+    if (bo_legacy == NULL) {
+        return NULL;
+    }
+    bo_legacy->base.bom = (struct radeon_bo_manager*)boml;
+    bo_legacy->base.handle = 0;
+    bo_legacy->base.size = size;
+    bo_legacy->base.alignment = alignment;
+    bo_legacy->base.domains = domains;
+    bo_legacy->base.flags = flags;
+    bo_legacy->base.ptr = NULL;
+    bo_legacy->map_count = 0;
+    bo_legacy->next = NULL;
+    bo_legacy->prev = NULL;
+    bo_legacy->pnext = NULL;
+    bo_legacy->pprev = NULL;
+    bo_legacy->next = boml->bos.next;
+    bo_legacy->prev = &boml->bos;
+    boml->bos.next = bo_legacy;
+    if (bo_legacy->next) {
+        bo_legacy->next->prev = bo_legacy;
+    }
+    return bo_legacy;
+}
+
+static int bo_dma_alloc(struct radeon_bo *bo)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+    drm_radeon_mem_alloc_t alloc;
+    unsigned size;
+    int base_offset;
+    int r;
+
+    /* align size on 4Kb */
+    size = (((4 * 1024) - 1) + bo->size) & ~((4 * 1024) - 1);
+    alloc.region = RADEON_MEM_REGION_GART;
+    alloc.alignment = bo_legacy->base.alignment;
+    alloc.size = size;
+    alloc.region_offset = &base_offset;
+    r = drmCommandWriteRead(bo->bom->fd,
+                            DRM_RADEON_ALLOC,
+                            &alloc,
+                            sizeof(alloc));
+    if (r) {
+        /* ptr is set to NULL if dma allocation failed */
+        bo_legacy->ptr = NULL;
+        return r;
+    }
+    bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
+    bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
+    bo->size = size;
+    boml->dma_alloc_size += size;
+    boml->dma_buf_count++;
+    return 0;
+}
+
+static int bo_dma_free(struct radeon_bo *bo)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+    drm_radeon_mem_free_t memfree;
+    int r;
+
+    if (bo_legacy->ptr == NULL) {
+        /* ptr is set to NULL if dma allocation failed */
+        return 0;
+    }
+    legacy_get_current_age(boml);
+    memfree.region = RADEON_MEM_REGION_GART;
+    memfree.region_offset = bo_legacy->offset;
+    memfree.region_offset -= boml->screen->gart_texture_offset;
+    r = drmCommandWrite(boml->base.fd,
+                        DRM_RADEON_FREE,
+                        &memfree,
+                        sizeof(memfree));
+    if (r) {
+        fprintf(stderr, "Failed to free bo[%p] at %08x\n",
+                &bo_legacy->base, memfree.region_offset);
+        fprintf(stderr, "ret = %s\n", strerror(-r));
+        return r;
+    }
+    boml->dma_alloc_size -= bo_legacy->base.size;
+    boml->dma_buf_count--;
+    return 0;
+}
+
+static void bo_free(struct bo_legacy *bo_legacy)
+{
+    struct bo_manager_legacy *boml;
+
+    if (bo_legacy == NULL) {
+        return;
+    }
+    boml = (struct bo_manager_legacy *)bo_legacy->base.bom;
+    bo_legacy->prev->next = bo_legacy->next;
+    if (bo_legacy->next) {
+        bo_legacy->next->prev = bo_legacy->prev;
+    }
+    if (!bo_legacy->static_bo) {
+        legacy_free_handle(boml, bo_legacy->base.handle);
+        if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
+            /* dma buffers */
+            bo_dma_free(&bo_legacy->base);
+        } else {
+            driDestroyTextureObject(&bo_legacy->tobj->base);
+            bo_legacy->tobj = NULL;
+            /* free backing store */
+            free(bo_legacy->ptr);
+        }
+    }
+    memset(bo_legacy, 0, sizeof(struct bo_legacy));
+    free(bo_legacy);
+}
+
+static struct radeon_bo *bo_open(struct radeon_bo_manager *bom,
+                                 uint32_t handle,
+                                 uint32_t size,
+                                 uint32_t alignment,
+                                 uint32_t domains,
+                                 uint32_t flags)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
+    struct bo_legacy *bo_legacy;
+    int r;
+
+    if (handle) {
+        bo_legacy = boml->bos.next;
+        while (bo_legacy) {
+            if (bo_legacy->base.handle == handle) {
+                radeon_bo_ref(&(bo_legacy->base));
+                return (struct radeon_bo*)bo_legacy;
+            }
+            bo_legacy = bo_legacy->next;
+        }
+        return NULL;
+    }
+
+    bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
+    bo_legacy->static_bo = 0;
+    r = legacy_new_handle(boml, &bo_legacy->base.handle);
+    if (r) {
+        bo_free(bo_legacy);
+        return NULL;
+    }
+    if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
+ retry:
+        legacy_track_pending(boml, 0);
+        /* dma buffers */
+
+        r = bo_dma_alloc(&(bo_legacy->base));
+        if (r) {
+            if (legacy_wait_any_pending(boml) == -1) {
+                bo_free(bo_legacy);
+                return NULL;
+            }
+            goto retry;
+            return NULL;
+        }
+    } else {
+        bo_legacy->ptr = malloc(bo_legacy->base.size);
+        if (bo_legacy->ptr == NULL) {
+            bo_free(bo_legacy);
+            return NULL;
+        }
+    }
+    radeon_bo_ref(&(bo_legacy->base));
+    return (struct radeon_bo*)bo_legacy;
+}
+
+static void bo_ref(struct radeon_bo *bo)
+{
+}
+
+static struct radeon_bo *bo_unref(struct radeon_bo *bo)
+{
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    if (bo->cref <= 0) {
+        bo_legacy->prev->next = bo_legacy->next;
+        if (bo_legacy->next) {
+            bo_legacy->next->prev = bo_legacy->prev;
+        }
+        if (!bo_legacy->is_pending) {
+            bo_free(bo_legacy);
+        }
+        return NULL;
+    }
+    return bo;
+}
+
+static int bo_map(struct radeon_bo *bo, int write)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    legacy_wait_pending(bo);
+    bo_legacy->validated = 0;
+    bo_legacy->dirty = 1;
+    bo_legacy->map_count++;
+    bo->ptr = bo_legacy->ptr;
+    /* Read the first pixel in the frame buffer. This should
+     * be a noop, right? In fact without this conform fails as reading
+     * from the framebuffer sometimes produces old results -- the
+     * on-card read cache gets mixed up and doesn't notice that the
+     * framebuffer has been updated.
+     *
+     * Note that we should probably be reading some otherwise unused
+     * region of VRAM, otherwise we might get incorrect results when
+     * reading pixels from the top left of the screen.
+     *
+     * I found this problem on an R420 with glean's texCube test.
+     * Note that the R200 span code also *writes* the first pixel in the
+     * framebuffer, but I've found this to be unnecessary.
+     *  -- Nicolai Hähnle, June 2008
+     */
+    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
+        int p;
+        volatile int *buf = (int*)boml->screen->driScreen->pFB;
+        p = *buf;
+    }
+    return 0;
+}
+
+static int bo_unmap(struct radeon_bo *bo)
+{
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    if (--bo_legacy->map_count > 0) {
+        return 0;
+    }
+    bo->ptr = NULL;
+    return 0;
+}
+
+static struct radeon_bo_funcs bo_legacy_funcs = {
+    bo_open,
+    bo_ref,
+    bo_unref,
+    bo_map,
+    bo_unmap
+};
+
+static int bo_vram_validate(struct radeon_bo *bo,
+                            uint32_t *soffset,
+                            uint32_t *eoffset)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+    int r;
+    int retry_count = 0, pending_retry = 0;
+
+    if (!bo_legacy->tobj) {
+        bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
+        bo_legacy->tobj->parent = bo_legacy;
+        make_empty_list(&bo_legacy->tobj->base);
+        bo_legacy->tobj->base.totalSize = bo->size;
+ retry:
+        r = driAllocateTexture(&boml->texture_heap, 1,
+                               &bo_legacy->tobj->base);
+        if (r) {
+            pending_retry = 0;
+            while(boml->cpendings && pending_retry++ < 10000) {
+                legacy_track_pending(boml, 0);
+                retry_count++;
+                if (retry_count > 2) {
+                    free(bo_legacy->tobj);
+                    bo_legacy->tobj = NULL;
+                    fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
+                    return -1;
+                }
+                goto retry;
+            }
+        }
+        bo_legacy->offset = boml->texture_offset +
+                            bo_legacy->tobj->base.memBlock->ofs;
+        bo_legacy->dirty = 1;
+    }
+
+    assert(bo_legacy->tobj->base.memBlock);
+
+    if (bo_legacy->tobj)
+        driUpdateTextureLRU(&bo_legacy->tobj->base);
+
+    if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
+        /* Copy to VRAM using a blit.
+         * All memory is 4K aligned. We're using 1024 pixels wide blits.
+         */
+        drm_radeon_texture_t tex;
+        drm_radeon_tex_image_t tmp;
+        int ret;
+
+        tex.offset = bo_legacy->offset;
+        tex.image = &tmp;
+        assert(!(tex.offset & 1023));
+
+        tmp.x = 0;
+        tmp.y = 0;
+        if (bo->size < 4096) {
+            tmp.width = (bo->size + 3) / 4;
+            tmp.height = 1;
+        } else {
+            tmp.width = 1024;
+            tmp.height = (bo->size + 4095) / 4096;
+        }
+        tmp.data = bo_legacy->ptr;
+        tex.format = RADEON_TXFORMAT_ARGB8888;
+        tex.width = tmp.width;
+        tex.height = tmp.height;
+        tex.pitch = MAX2(tmp.width / 16, 1);
+        do {
+            ret = drmCommandWriteRead(bo->bom->fd,
+                                      DRM_RADEON_TEXTURE,
+                                      &tex,
+                                      sizeof(drm_radeon_texture_t));
+            if (ret) {
+                if (RADEON_DEBUG & DEBUG_IOCTL)
+                    fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
+                usleep(1);
+            }
+        } while (ret == -EAGAIN);
+        bo_legacy->dirty = 0;
+        bo_legacy->tobj->base.dirty_images[0] = 0;
+    }
+    return 0;
+}
+
+/*
+ * radeon_bo_legacy_validate -
+ * returns:
+ * 0 - all good
+ * -EINVAL - mapped buffer can't be validated
+ * -EAGAIN - restart validation, we've kicked all the buffers out
+ */
+int radeon_bo_legacy_validate(struct radeon_bo *bo,
+                              uint32_t *soffset,
+                              uint32_t *eoffset)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+    int r;
+    int retries = 0;
+
+    if (bo_legacy->map_count) {
+        fprintf(stderr, "bo(%p, %d) is mapped (%d) can't validate it.\n",
+                bo, bo->size, bo_legacy->map_count);
+        return -EINVAL;
+    }
+    if (bo_legacy->static_bo || bo_legacy->validated) {
+        *soffset = bo_legacy->offset;
+        *eoffset = bo_legacy->offset + bo->size;
+        return 0;
+    }
+    if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
+
+        r = bo_vram_validate(bo, soffset, eoffset);
+        if (r) {
+            legacy_track_pending(boml, 0);
+            legacy_kick_all_buffers(boml);
+            retries++;
+            if (retries == 2) {
+                fprintf(stderr,"legacy bo: failed to get relocations into aperture\n");
+                assert(0);
+                exit(-1);
+            }
+            return -EAGAIN;
+        }
+    }
+    *soffset = bo_legacy->offset;
+    *eoffset = bo_legacy->offset + bo->size;
+    bo_legacy->validated = 1;
+    return 0;
+}
+
+void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    bo_legacy->pending = pending;
+    bo_legacy->is_pending++;
+    /* add to pending list */
+    radeon_bo_ref(bo);
+    if (bo_legacy->is_pending > 1) {
+        return;
+    }
+    bo_legacy->pprev = boml->pending_bos.pprev;
+    bo_legacy->pnext = NULL;
+    bo_legacy->pprev->pnext = bo_legacy;
+    boml->pending_bos.pprev = bo_legacy;
+    boml->cpendings++;
+}
+
+void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
+    struct bo_legacy *bo_legacy;
+
+    if (bom == NULL) {
+        return;
+    }
+    bo_legacy = boml->bos.next;
+    while (bo_legacy) {
+        struct bo_legacy *next;
+
+        next = bo_legacy->next;
+        bo_free(bo_legacy);
+        bo_legacy = next;
+    }
+    driDestroyTextureHeap(boml->texture_heap);
+    free(boml->free_handles);
+    free(boml);
+}
+
+static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
+                                                       int size, uint32_t offset)
+{
+    struct bo_legacy *bo;
+
+    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+    if (bo == NULL)
+        return NULL;
+    bo->static_bo = 1;
+    bo->offset = offset + bom->fb_location;
+    bo->base.handle = bo->offset;
+    bo->ptr = bom->screen->driScreen->pFB + offset;
+    if (bo->base.handle > bom->nhandle) {
+        bom->nhandle = bo->base.handle + 1;
+    }
+    radeon_bo_ref(&(bo->base));
+    return bo;
+}
+
+struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
+{
+    struct bo_manager_legacy *bom;
+    struct bo_legacy *bo;
+    unsigned size;
+
+    bom = (struct bo_manager_legacy*)
+          calloc(1, sizeof(struct bo_manager_legacy));
+    if (bom == NULL) {
+        return NULL;
+    }
+
+    make_empty_list(&bom->texture_swapped);
+
+    bom->texture_heap = driCreateTextureHeap(0,
+                                             bom,
+                                             scrn->texSize[0],
+                                             12,
+                                             RADEON_NR_TEX_REGIONS,
+                                             (drmTextureRegionPtr)scrn->sarea->tex_list[0],
+                                             &scrn->sarea->tex_age[0],
+                                             &bom->texture_swapped,
+                                             sizeof(struct bo_legacy_texture_object),
+                                             &bo_legacy_tobj_destroy);
+    bom->texture_offset = scrn->texOffset[0];
+
+    bom->base.funcs = &bo_legacy_funcs;
+    bom->base.fd = scrn->driScreen->fd;
+    bom->bos.next = NULL;
+    bom->bos.prev = NULL;
+    bom->pending_bos.pprev = &bom->pending_bos;
+    bom->pending_bos.pnext = NULL;
+    bom->screen = scrn;
+    bom->fb_location = scrn->fbLocation;
+    bom->nhandle = 1;
+    bom->cfree_handles = 0;
+    bom->nfree_handles = 0x400;
+    bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
+    if (bom->free_handles == NULL) {
+        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
+        return NULL;
+    }
+
+    /* biggest framebuffer size */
+    size = 4096*4096*4;
+
+    /* allocate front */
+    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
+    if (!bo) {
+        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
+        return NULL;
+    }
+    if (scrn->sarea->tiling_enabled) {
+        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
+    }
+
+    /* allocate back */
+    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
+    if (!bo) {
+        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
+        return NULL;
+    }
+    if (scrn->sarea->tiling_enabled) {
+        bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
+    }
+
+    /* allocate depth */
+    bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
+    if (!bo) {
+        radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
+        return NULL;
+    }
+    bo->base.flags = 0;
+    if (scrn->sarea->tiling_enabled) {
+        bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
+        bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
+    }
+    return (struct radeon_bo_manager*)bom;
+}
+
+void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom)
+{
+    struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
+    DRI_AGE_TEXTURES(boml->texture_heap);
+}
+
+unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo)
+{
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+
+    if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
+        return 0;
+    }
+    return bo->size;
+}
+
+int radeon_legacy_bo_is_static(struct radeon_bo *bo)
+{
+    struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+    return bo_legacy->static_bo;
+}
+
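
One point worth illustrating (a sketch only, not part of the patch): radeon_bo_legacy_validate() above returns -EAGAIN after it has kicked every buffer out of the texture aperture, so a caller has to restart its whole relocation pass rather than retry the single failing buffer. A hypothetical caller honoring that contract:

static int example_validate_all(struct radeon_bo **bos, int nbo)
{
    uint32_t soffset, eoffset;
    int i, r;

again:
    for (i = 0; i < nbo; i++) {
        r = radeon_bo_legacy_validate(bos[i], &soffset, &eoffset);
        if (r == -EAGAIN)
            goto again;  /* aperture was flushed; revalidate everything */
        if (r)
            return r;    /* e.g. -EINVAL for a still-mapped buffer */
    }
    return 0;
}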
diff --git a/src/mesa/drivers/dri/radeon/radeon_bo_legacy.h b/src/mesa/drivers/dri/radeon/radeon_bo_legacy.h
new file mode 100644
index 0000000..9187cd7
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_bo_legacy.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ *      Aapo Tahkola <aet@rasterburn.org>
+ *      Nicolai Haehnle <prefect_@gmx.net>
+ *      Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_BO_LEGACY_H
+#define RADEON_BO_LEGACY_H
+
+#include "radeon_screen.h"
+
+void radeon_bo_legacy_pending(struct radeon_bo *bo, uint32_t pending);
+int radeon_bo_legacy_validate(struct radeon_bo *bo,
+                              uint32_t *soffset,
+                              uint32_t *eoffset);
+struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn);
+void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom);
+void radeon_bo_legacy_texture_age(struct radeon_bo_manager *bom);
+unsigned radeon_bo_legacy_relocs_size(struct radeon_bo *bo);
+
+int radeon_legacy_bo_is_static(struct radeon_bo *bo);
+#endif
diff --git a/src/mesa/drivers/dri/radeon/radeon_bocs_wrapper.h b/src/mesa/drivers/dri/radeon/radeon_bocs_wrapper.h
new file mode 100644
index 0000000..f80f0f7
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_bocs_wrapper.h
@@ -0,0 +1,67 @@
+#ifndef RADEON_CS_WRAPPER_H
+#define RADEON_CS_WRAPPER_H
+
+#ifndef RADEON_PARAM_DEVICE_ID
+#define RADEON_PARAM_DEVICE_ID 17
+#endif
+
+#ifdef HAVE_LIBDRM_RADEON
+
+#include "radeon_bo.h"
+#include "radeon_bo_gem.h"
+#include "radeon_cs.h"
+#include "radeon_cs_gem.h"
+
+#else
+#include <stdint.h>
+
+#define RADEON_GEM_DOMAIN_CPU  0x1   // Cached CPU domain
+#define RADEON_GEM_DOMAIN_GTT  0x2   // GTT or cache flushed
+#define RADEON_GEM_DOMAIN_VRAM 0x4   // VRAM domain
+
+/* to be used to build locally in mesa with no libdrm bits */
+#include "../radeon/radeon_bo_drm.h"
+#include "../radeon/radeon_cs_drm.h"
+
+#ifndef DRM_RADEON_GEM_INFO
+#define DRM_RADEON_GEM_INFO 0x1c
+
+struct drm_radeon_gem_info {
+    uint64_t gart_start;
+    uint64_t gart_size;
+    uint64_t vram_start;
+    uint64_t vram_size;
+    uint64_t vram_visible;
+};
+#endif
+
+
+
+
+static inline void *radeon_bo_manager_gem_ctor(int fd)
+{
+    return NULL;
+}
+
+static inline void radeon_bo_manager_gem_dtor(void *dummy)
+{
+}
+
+static inline void *radeon_cs_manager_gem_ctor(int fd)
+{
+    return NULL;
+}
+
+static inline void radeon_cs_manager_gem_dtor(void *dummy)
+{
+}
+
+static inline void radeon_tracker_print(void *ptr, int io)
+{
+}
+#endif
+
+#include "radeon_bo_legacy.h"
+#include "radeon_cs_legacy.h"
+
+#endif
diff --git a/src/mesa/drivers/dri/radeon/radeon_cmdbuf.h b/src/mesa/drivers/dri/radeon/radeon_cmdbuf.h
new file mode 100644
index 0000000..4b5116c
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_cmdbuf.h
@@ -0,0 +1,143 @@
+#ifndef COMMON_CMDBUF_H
+#define COMMON_CMDBUF_H
+
+#include "radeon_bocs_wrapper.h"
+
+void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller);
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller);
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller);
+void rcommonInitCmdBuf(radeonContextPtr rmesa);
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa);
+
+void rcommonBeginBatch(radeonContextPtr rmesa,
+                       int n,
+                       int dostate,
+                       const char *file,
+                       const char *function,
+                       int line);
+
+#define RADEON_CP_PACKET3_NOP                       0xC0001000
+#define RADEON_CP_PACKET3_NEXT_CHAR                 0xC0001900
+#define RADEON_CP_PACKET3_PLY_NEXTSCAN              0xC0001D00
+#define RADEON_CP_PACKET3_SET_SCISSORS              0xC0001E00
+#define RADEON_CP_PACKET3_3D_RNDR_GEN_INDX_PRIM     0xC0002300
+#define RADEON_CP_PACKET3_LOAD_MICROCODE            0xC0002400
+#define RADEON_CP_PACKET3_WAIT_FOR_IDLE             0xC0002600
+#define RADEON_CP_PACKET3_3D_DRAW_VBUF              0xC0002800
+#define RADEON_CP_PACKET3_3D_DRAW_IMMD              0xC0002900
+#define RADEON_CP_PACKET3_3D_DRAW_INDX              0xC0002A00
+#define RADEON_CP_PACKET3_LOAD_PALETTE              0xC0002C00
+#define RADEON_CP_PACKET3_3D_LOAD_VBPNTR            0xC0002F00
+#define RADEON_CP_PACKET3_CNTL_PAINT                0xC0009100
+#define RADEON_CP_PACKET3_CNTL_BITBLT               0xC0009200
+#define RADEON_CP_PACKET3_CNTL_SMALLTEXT            0xC0009300
+#define RADEON_CP_PACKET3_CNTL_HOSTDATA_BLT         0xC0009400
+#define RADEON_CP_PACKET3_CNTL_POLYLINE             0xC0009500
+#define RADEON_CP_PACKET3_CNTL_POLYSCANLINES        0xC0009800
+#define RADEON_CP_PACKET3_CNTL_PAINT_MULTI          0xC0009A00
+#define RADEON_CP_PACKET3_CNTL_BITBLT_MULTI         0xC0009B00
+#define RADEON_CP_PACKET3_CNTL_TRANS_BITBLT         0xC0009C00
+
+#define CP_PACKET2  (2 << 30)
+#define CP_PACKET0(reg, n)  (RADEON_CP_PACKET0 | ((n)<<16) | ((reg)>>2))
+#define CP_PACKET0_ONE(reg, n)  (RADEON_CP_PACKET0 | RADEON_CP_PACKET0_ONE_REG_WR | ((n)<<16) | ((reg)>>2))
+#define CP_PACKET3( pkt, n ) \
+    (RADEON_CP_PACKET3 | (pkt) | ((n) << 16))
+
+/**
+ * Every function writing to the command buffer needs to declare this
+ * to get the necessary local variables.
+ */
+#define BATCH_LOCALS(rmesa) \
+    const radeonContextPtr b_l_rmesa = rmesa
+
+/**
+ * Prepare writing n dwords to the command buffer,
+ * including producing any necessary state emits on buffer wraparound.
+ */
+#define BEGIN_BATCH(n) rcommonBeginBatch(b_l_rmesa, n, 1, __FILE__, __FUNCTION__, __LINE__)
+
+/**
+ * Same as BEGIN_BATCH, but do not cause automatic state emits.
+ */
+#define BEGIN_BATCH_NO_AUTOSTATE(n) rcommonBeginBatch(b_l_rmesa, n, 0, __FILE__, __FUNCTION__, __LINE__)
+
+/**
+ * Write one dword to the command buffer.
+ */
+#define OUT_BATCH(data) \
+    do { \
+        radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, data);\
+    } while(0)
+
+/**
+ * Write a relocated dword to the command buffer.
+ */
+#define OUT_BATCH_RELOC(data, bo, offset, rd, wd, flags) \
+    do { \
+        if (0 && offset) { \
+            fprintf(stderr, "(%s:%s:%d) offset : %d\n", \
+                    __FILE__, __FUNCTION__, __LINE__, offset); \
+        } \
+        radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, offset); \
+        radeon_cs_write_reloc(b_l_rmesa->cmdbuf.cs, \
+                              bo, rd, wd, flags); \
+        if (!b_l_rmesa->radeonScreen->kernel_mm) \
+            b_l_rmesa->cmdbuf.cs->section_cdw += 2; \
+    } while(0)
+
+
+/**
+ * Write n dwords from ptr to the command buffer.
+ */
+#define OUT_BATCH_TABLE(ptr,n) \
+    do { \
+        int _i; \
+        for (_i=0; _i < n; _i++) {\
+            radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, ptr[_i]);\
+        }\
+    } while(0)
+
+/**
+ * Finish writing dwords to the command buffer.
+ * The number of (direct or indirect) OUT_BATCH calls between the previous
+ * BEGIN_BATCH and END_BATCH must match the number specified at BEGIN_BATCH time.
+ */
+#define END_BATCH() \
+    do { \
+        radeon_cs_end(b_l_rmesa->cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
+    } while(0)
+
+/**
+ * After the last END_BATCH() of rendering, this indicates that flushing
+ * the command buffer now is okay.
+ */
+#define COMMIT_BATCH() \
+    do { \
+    } while(0)
+
+
+/** Single register write to command buffer; requires 2 dwords. */
+#define OUT_BATCH_REGVAL(reg, val) \
+    OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), 1)); \
+    OUT_BATCH((val))
+
+/** Continuous register range write to command buffer; requires 1 dword,
+ * expects count dwords afterwards for register contents. */
+#define OUT_BATCH_REGSEQ(reg, count) \
+    OUT_BATCH(cmdpacket0(b_l_rmesa->radeonScreen, (reg), (count)));
+
+/** Write a 32 bit float to the ring; requires 1 dword. */
+#define OUT_BATCH_FLOAT32(f) \
+    OUT_BATCH(radeonPackFloat32((f)));
+
+
+/* Fire the buffered vertices no matter what.
+ */
+static INLINE void radeon_firevertices(radeonContextPtr radeon)
+{
+    if (radeon->cmdbuf.cs->cdw || radeon->dma.flush)
+        radeonFlush(radeon->glCtx);
+}
+
+#endif
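
For context, a sketch of how the batch macros above fit together (illustrative only, not part of the patch; cmdpacket0() is supplied by the driver elsewhere, and RADEON_RE_TOP_LEFT stands in for whichever register is being written):

static void example_emit_register(radeonContextPtr rmesa, uint32_t value)
{
    BATCH_LOCALS(rmesa);            /* declares b_l_rmesa for the macros */

    BEGIN_BATCH(2);                 /* reserve exactly 2 dwords */
    OUT_BATCH_REGVAL(RADEON_RE_TOP_LEFT, value); /* header dword + value dword */
    END_BATCH();                    /* emitted count must match the reserve */
    COMMIT_BATCH();                 /* buffer may be flushed from here on */
}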
diff --git a/src/mesa/drivers/dri/radeon/radeon_common.c b/src/mesa/drivers/dri/radeon/radeon_common.c
|
|
|
|
new file mode 100644
|
|
|
|
index 0000000..f7c0d7d
|
|
|
|
--- /dev/null
|
|
|
|
+++ b/src/mesa/drivers/dri/radeon/radeon_common.c
|
|
|
|
@@ -0,0 +1,849 @@
|
|
|
|
+/**************************************************************************
|
|
|
|
+
|
|
|
|
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
|
|
|
|
+
|
|
|
|
+The Weather Channel (TM) funded Tungsten Graphics to develop the
|
|
|
|
+initial release of the Radeon 8500 driver under the XFree86 license.
|
|
|
|
+This notice must be preserved.
|
|
|
|
+
|
|
|
|
+Permission is hereby granted, free of charge, to any person obtaining
|
|
|
|
+a copy of this software and associated documentation files (the
|
|
|
|
+"Software"), to deal in the Software without restriction, including
|
|
|
|
+without limitation the rights to use, copy, modify, merge, publish,
|
|
|
|
+distribute, sublicense, and/or sell copies of the Software, and to
|
|
|
|
+permit persons to whom the Software is furnished to do so, subject to
|
|
|
|
+the following conditions:
|
|
|
|
+
|
|
|
|
+The above copyright notice and this permission notice (including the
|
|
|
|
+next paragraph) shall be included in all copies or substantial
|
|
|
|
+portions of the Software.
|
|
|
|
+
|
|
|
|
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
|
|
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
|
|
|
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
|
|
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
|
|
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
+
|
|
|
|
+**************************************************************************/
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ * Authors:
|
|
|
|
+ * Keith Whitwell <keith@tungstengraphics.com>
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+/*
|
|
|
|
+ - Scissor implementation
|
|
|
|
+ - buffer swap/copy ioctls
|
|
|
|
+ - finish/flush
|
|
|
|
+ - state emission
|
|
|
|
+ - cmdbuffer management
|
|
|
|
+*/
|
|
|
|
+
|
|
|
|
+#include <errno.h>
|
|
|
|
+#include "main/glheader.h"
|
|
|
|
+#include "main/imports.h"
|
|
|
|
+#include "main/context.h"
|
|
|
|
+#include "main/api_arrayelt.h"
|
|
|
|
+#include "main/enums.h"
|
|
|
|
+#include "main/colormac.h"
|
|
|
|
+#include "main/light.h"
|
|
|
|
+#include "main/framebuffer.h"
|
|
|
|
+#include "main/simple_list.h"
|
|
|
|
+
|
|
|
|
+#include "swrast/swrast.h"
|
|
|
|
+#include "vbo/vbo.h"
|
|
|
|
+#include "tnl/tnl.h"
|
|
|
|
+#include "tnl/t_pipeline.h"
|
|
|
|
+#include "swrast_setup/swrast_setup.h"
|
|
|
|
+
|
|
|
|
+#include "dri_util.h"
|
|
|
|
+#include "vblank.h"
|
|
|
|
+
|
|
|
|
+#include "radeon_common.h"
|
|
|
|
+#include "radeon_bocs_wrapper.h"
|
|
|
|
+#include "radeon_lock.h"
|
|
|
|
+#include "radeon_drm.h"
|
|
|
|
+#include "radeon_mipmap_tree.h"
|
|
|
|
+
|
|
|
|
+#define DEBUG_CMDBUF 0
|
|
|
|
+
|
|
|
|
+/* =============================================================
|
|
|
|
+ * Scissoring
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+static GLboolean intersect_rect(drm_clip_rect_t * out,
|
|
|
|
+ drm_clip_rect_t * a, drm_clip_rect_t * b)
|
|
|
|
+{
|
|
|
|
+ *out = *a;
|
|
|
|
+ if (b->x1 > out->x1)
|
|
|
|
+ out->x1 = b->x1;
|
|
|
|
+ if (b->y1 > out->y1)
|
|
|
|
+ out->y1 = b->y1;
|
|
|
|
+ if (b->x2 < out->x2)
|
|
|
|
+ out->x2 = b->x2;
|
|
|
|
+ if (b->y2 < out->y2)
|
|
|
|
+ out->y2 = b->y2;
|
|
|
|
+ if (out->x1 >= out->x2)
|
|
|
|
+ return GL_FALSE;
|
|
|
|
+ if (out->y1 >= out->y2)
|
|
|
|
+ return GL_FALSE;
|
|
|
|
+ return GL_TRUE;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void radeonRecalcScissorRects(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ drm_clip_rect_t *out;
|
|
|
|
+ int i;
|
|
|
|
+
|
|
|
|
+ /* Grow cliprect store?
|
|
|
|
+ */
|
|
|
|
+ if (radeon->state.scissor.numAllocedClipRects < radeon->numClipRects) {
|
|
|
|
+ while (radeon->state.scissor.numAllocedClipRects <
|
|
|
|
+ radeon->numClipRects) {
|
|
|
|
+ radeon->state.scissor.numAllocedClipRects += 1; /* zero case */
|
|
|
|
+ radeon->state.scissor.numAllocedClipRects *= 2;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (radeon->state.scissor.pClipRects)
|
|
|
|
+ FREE(radeon->state.scissor.pClipRects);
|
|
|
|
+
|
|
|
|
+ radeon->state.scissor.pClipRects =
|
|
|
|
+ MALLOC(radeon->state.scissor.numAllocedClipRects *
|
|
|
|
+ sizeof(drm_clip_rect_t));
|
|
|
|
+
|
|
|
|
+ if (radeon->state.scissor.pClipRects == NULL) {
|
|
|
|
+ radeon->state.scissor.numAllocedClipRects = 0;
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ out = radeon->state.scissor.pClipRects;
|
|
|
|
+ radeon->state.scissor.numClipRects = 0;
|
|
|
|
+
|
|
|
|
+ for (i = 0; i < radeon->numClipRects; i++) {
|
|
|
|
+ if (intersect_rect(out,
|
|
|
|
+ &radeon->pClipRects[i],
|
|
|
|
+ &radeon->state.scissor.rect)) {
|
|
|
|
+ radeon->state.scissor.numClipRects++;
|
|
|
|
+ out++;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Update cliprects and scissors.
|
|
|
|
+ */
|
|
|
|
+void radeonSetCliprects(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ __DRIdrawablePrivate *const drawable = radeon->dri.drawable;
|
|
|
|
+ __DRIdrawablePrivate *const readable = radeon->dri.readable;
|
|
|
|
+ GLframebuffer *const draw_fb = (GLframebuffer*)drawable->driverPrivate;
|
|
|
|
+ GLframebuffer *const read_fb = (GLframebuffer*)readable->driverPrivate;
|
|
|
|
+
|
|
|
|
+ if (!radeon->radeonScreen->driScreen->dri2.enabled) {
|
|
|
|
+ if (draw_fb->_ColorDrawBufferIndexes[0] == BUFFER_BACK_LEFT) {
|
|
|
|
+ /* Can't ignore 2d windows if we are page flipping. */
|
|
|
|
+ if (drawable->numBackClipRects == 0 || radeon->doPageFlip ||
|
|
|
|
+ radeon->sarea->pfCurrentPage == 1) {
|
|
|
|
+ radeon->numClipRects = drawable->numClipRects;
|
|
|
|
+ radeon->pClipRects = drawable->pClipRects;
|
|
|
|
+ } else {
|
|
|
|
+ radeon->numClipRects = drawable->numBackClipRects;
|
|
|
|
+ radeon->pClipRects = drawable->pBackClipRects;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ /* front buffer (or none, or multiple buffers */
|
|
|
|
+ radeon->numClipRects = drawable->numClipRects;
|
|
|
|
+ radeon->pClipRects = drawable->pClipRects;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if ((draw_fb->Width != drawable->w) ||
|
|
|
|
+ (draw_fb->Height != drawable->h)) {
|
|
|
|
+ _mesa_resize_framebuffer(radeon->glCtx, draw_fb,
|
|
|
|
+ drawable->w, drawable->h);
|
|
|
|
+ draw_fb->Initialized = GL_TRUE;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (drawable != readable) {
|
|
|
|
+ if ((read_fb->Width != readable->w) ||
|
|
|
|
+ (read_fb->Height != readable->h)) {
|
|
|
|
+ _mesa_resize_framebuffer(radeon->glCtx, read_fb,
|
|
|
|
+ readable->w, readable->h);
|
|
|
|
+ read_fb->Initialized = GL_TRUE;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (radeon->state.scissor.enabled)
|
|
|
|
+ radeonRecalcScissorRects(radeon);
|
|
|
|
+
|
|
|
|
+ radeon->lastStamp = drawable->lastStamp;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void radeonUpdateScissor( GLcontext *ctx )
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
|
|
|
|
+
|
|
|
|
+ if ( rmesa->dri.drawable ) {
|
|
|
|
+ __DRIdrawablePrivate *dPriv = rmesa->dri.drawable;
|
|
|
|
+
|
|
|
|
+ int x = ctx->Scissor.X;
|
|
|
|
+ int y = dPriv->h - ctx->Scissor.Y - ctx->Scissor.Height;
|
|
|
|
+ int w = ctx->Scissor.X + ctx->Scissor.Width - 1;
|
|
|
|
+ int h = dPriv->h - ctx->Scissor.Y - 1;
|
|
|
|
+
|
|
|
|
+ rmesa->state.scissor.rect.x1 = x + dPriv->x;
|
|
|
|
+ rmesa->state.scissor.rect.y1 = y + dPriv->y;
|
|
|
|
+ rmesa->state.scissor.rect.x2 = w + dPriv->x + 1;
|
|
|
|
+ rmesa->state.scissor.rect.y2 = h + dPriv->y + 1;
|
|
|
|
+
|
|
|
|
+ radeonRecalcScissorRects( rmesa );
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* =============================================================
|
|
|
|
+ * Scissoring
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
|
|
|
|
+ if (ctx->Scissor.Enabled) {
|
|
|
|
+ /* We don't pipeline cliprect changes */
|
|
|
|
+ radeon_firevertices(radeon);
|
|
|
|
+ radeonUpdateScissor(ctx);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/* ================================================================
|
|
|
|
+ * SwapBuffers with client-side throttling
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
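+/* Query a scalar from the DRM via the GETPARAM ioctl.  The kernel writes
+ * the result through gp.value; an error here is treated as fatal because
+ * the swap throttling below depends on an accurate frame count.
+ */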
+static uint32_t radeonGetLastFrame(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ drm_radeon_getparam_t gp;
|
|
|
|
+ int ret;
|
|
|
|
+ uint32_t frame;
|
|
|
|
+
|
|
|
|
+ gp.param = RADEON_PARAM_LAST_FRAME;
|
|
|
|
+ gp.value = (int *)&frame;
|
|
|
|
+ ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
|
|
|
|
+ &gp, sizeof(gp));
|
|
|
|
+ if (ret) {
|
|
|
|
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
|
|
|
|
+ ret);
|
|
|
|
+ exit(1);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return frame;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+uint32_t radeonGetAge(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ drm_radeon_getparam_t gp;
|
|
|
|
+ int ret;
|
|
|
|
+ uint32_t age;
|
|
|
|
+
|
|
|
|
+ gp.param = RADEON_PARAM_LAST_CLEAR;
|
|
|
|
+ gp.value = (int *)&age;
|
|
|
|
+ ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
|
|
|
|
+ &gp, sizeof(gp));
|
|
|
|
+ if (ret) {
|
|
|
|
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
|
|
|
|
+ ret);
|
|
|
|
+ exit(1);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return age;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void radeonEmitIrqLocked(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ drm_radeon_irq_emit_t ie;
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ ie.irq_seq = &radeon->iw.irq_seq;
|
|
|
|
+ ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_IRQ_EMIT,
|
|
|
|
+ &ie, sizeof(ie));
|
|
|
|
+ if (ret) {
|
|
|
|
+ fprintf(stderr, "%s: drmRadeonIrqEmit: %d\n", __FUNCTION__,
|
|
|
|
+ ret);
|
|
|
|
+ exit(1);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void radeonWaitIrq(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ do {
|
|
|
|
+ ret = drmCommandWrite(radeon->dri.fd, DRM_RADEON_IRQ_WAIT,
|
|
|
|
+ &radeon->iw, sizeof(radeon->iw));
|
|
|
|
+ } while (ret && (errno == EINTR || errno == EBUSY));
|
|
|
|
+
|
|
|
|
+ if (ret) {
|
|
|
|
+ fprintf(stderr, "%s: drmRadeonIrqWait: %d\n", __FUNCTION__,
|
|
|
|
+ ret);
|
|
|
|
+ exit(1);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
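+/* Throttle SwapBuffers: block until the CP has retired the previously
+ * requested frame (sarea->last_frame).  With IRQs enabled we sleep on a
+ * previously emitted IRQ and keep a small pool of emitted IRQs primed;
+ * otherwise we poll RADEON_PARAM_LAST_FRAME, optionally usleep()ing
+ * between polls to avoid burning the CPU.
+ */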
+static void radeonWaitForFrameCompletion(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ drm_radeon_sarea_t *sarea = radeon->sarea;
|
|
|
|
+
|
|
|
|
+ if (radeon->do_irqs) {
|
|
|
|
+ if (radeonGetLastFrame(radeon) < sarea->last_frame) {
|
|
|
|
+ if (!radeon->irqsEmitted) {
|
|
|
|
+ while (radeonGetLastFrame(radeon) <
|
|
|
|
+ sarea->last_frame) ;
|
|
|
|
+ } else {
|
|
|
|
+ UNLOCK_HARDWARE(radeon);
|
|
|
|
+ radeonWaitIrq(radeon);
|
|
|
|
+ LOCK_HARDWARE(radeon);
|
|
|
|
+ }
|
|
|
|
+ radeon->irqsEmitted = 10;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (radeon->irqsEmitted) {
|
|
|
|
+ radeonEmitIrqLocked(radeon);
|
|
|
|
+ radeon->irqsEmitted--;
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ while (radeonGetLastFrame(radeon) < sarea->last_frame) {
|
|
|
|
+ UNLOCK_HARDWARE(radeon);
|
|
|
|
+ if (radeon->do_usleeps)
|
|
|
|
+ DO_USLEEP(1);
|
|
|
|
+ LOCK_HARDWARE(radeon);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* wait for idle */
|
|
|
|
+void radeonWaitForIdleLocked(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ int ret;
|
|
|
|
+ int i = 0;
|
|
|
|
+
|
|
|
|
+ do {
|
|
|
|
+ ret = drmCommandNone(radeon->dri.fd, DRM_RADEON_CP_IDLE);
|
|
|
|
+ if (ret)
|
|
|
|
+ DO_USLEEP(1);
|
|
|
|
+ } while (ret && ++i < 100);
|
|
|
|
+
|
|
|
|
+ if (ret < 0) {
|
|
|
|
+ UNLOCK_HARDWARE(radeon);
|
|
|
|
+ fprintf(stderr, "Error: R300 timed out... exiting\n");
|
|
|
|
+ exit(-1);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static void radeonWaitForIdle(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ LOCK_HARDWARE(radeon);
|
|
|
|
+ radeonWaitForIdleLocked(radeon);
|
|
|
|
+ UNLOCK_HARDWARE(radeon);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/* Copy the back color buffer to the front color buffer.
|
|
|
|
+ */
|
|
|
|
+void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
|
|
|
|
+ const drm_clip_rect_t *rect)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr rmesa;
|
|
|
|
+ GLint nbox, i, ret;
|
|
|
|
+ GLboolean missed_target;
|
|
|
|
+ int64_t ust;
|
|
|
|
+ __DRIscreenPrivate *psp;
|
|
|
|
+
|
|
|
|
+ assert(dPriv);
|
|
|
|
+ assert(dPriv->driContextPriv);
|
|
|
|
+ assert(dPriv->driContextPriv->driverPrivate);
|
|
|
|
+
|
|
|
|
+ rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
|
|
|
+
|
|
|
|
+ if ( RADEON_DEBUG & DEBUG_IOCTL ) {
|
|
|
|
+ fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ radeon_firevertices(rmesa);
|
|
|
|
+ LOCK_HARDWARE( rmesa );
|
|
|
|
+
|
|
|
|
+ /* Throttle the frame rate -- only allow one pending swap buffers
|
|
|
|
+ * request at a time.
|
|
|
|
+ */
|
|
|
|
+ radeonWaitForFrameCompletion( rmesa );
|
|
|
|
+ if (!rect)
|
|
|
|
+ {
|
|
|
|
+ UNLOCK_HARDWARE( rmesa );
|
|
|
|
+ driWaitForVBlank( dPriv, & missed_target );
|
|
|
|
+ LOCK_HARDWARE( rmesa );
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ nbox = dPriv->numClipRects; /* must be in locked region */
|
|
|
|
+
|
|
|
|
+ for ( i = 0 ; i < nbox ; ) {
|
|
|
|
+ GLint nr = MIN2( i + RADEON_NR_SAREA_CLIPRECTS , nbox );
|
|
|
|
+ drm_clip_rect_t *box = dPriv->pClipRects;
|
|
|
|
+ drm_clip_rect_t *b = rmesa->sarea->boxes;
|
|
|
|
+ GLint n = 0;
|
|
|
|
+
|
|
|
|
+ for ( ; i < nr ; i++ ) {
|
|
|
|
+
|
|
|
|
+ *b = box[i];
|
|
|
|
+
|
|
|
|
+ if (rect)
|
|
|
|
+ {
|
|
|
|
+ if (rect->x1 > b->x1)
|
|
|
|
+ b->x1 = rect->x1;
|
|
|
|
+ if (rect->y1 > b->y1)
|
|
|
|
+ b->y1 = rect->y1;
|
|
|
|
+ if (rect->x2 < b->x2)
|
|
|
|
+ b->x2 = rect->x2;
|
|
|
|
+ if (rect->y2 < b->y2)
|
|
|
|
+ b->y2 = rect->y2;
|
|
|
|
+
|
|
|
|
+ if (b->x1 >= b->x2 || b->y1 >= b->y2)
|
|
|
|
+ continue;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ b++;
|
|
|
|
+ n++;
|
|
|
|
+ }
|
|
|
|
+ rmesa->sarea->nbox = n;
|
|
|
|
+
|
|
|
|
+ if (!n)
|
|
|
|
+ continue;
|
|
|
|
+
|
|
|
|
+ ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_SWAP );
|
|
|
|
+
|
|
|
|
+ if ( ret ) {
|
|
|
|
+ fprintf( stderr, "DRM_RADEON_SWAP_BUFFERS: return = %d\n", ret );
|
|
|
|
+ UNLOCK_HARDWARE( rmesa );
|
|
|
|
+ exit( 1 );
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ UNLOCK_HARDWARE( rmesa );
|
|
|
|
+ if (!rect)
|
|
|
|
+ {
|
|
|
|
+ psp = dPriv->driScreenPriv;
|
|
|
|
+ rmesa->swap_count++;
|
|
|
|
+ (*psp->systemTime->getUST)( & ust );
|
|
|
|
+ if ( missed_target ) {
|
|
|
|
+ rmesa->swap_missed_count++;
|
|
|
|
+ rmesa->swap_missed_ust = ust - rmesa->swap_ust;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ rmesa->swap_ust = ust;
|
|
|
|
+ rmesa->hw.all_dirty = GL_TRUE;
|
|
|
|
+
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void radeonPageFlip( __DRIdrawablePrivate *dPriv )
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr rmesa;
|
|
|
|
+ GLint ret;
|
|
|
|
+ GLboolean missed_target;
|
|
|
|
+ __DRIscreenPrivate *psp;
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+ GLframebuffer *fb = dPriv->driverPrivate;
|
|
|
|
+
|
|
|
|
+ assert(dPriv);
|
|
|
|
+ assert(dPriv->driContextPriv);
|
|
|
|
+ assert(dPriv->driContextPriv->driverPrivate);
|
|
|
|
+
|
|
|
|
+ rmesa = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
|
|
|
+ rrb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
|
|
|
+
|
|
|
|
+ psp = dPriv->driScreenPriv;
|
|
|
|
+
|
|
|
|
+ if ( RADEON_DEBUG & DEBUG_IOCTL ) {
|
|
|
|
+ fprintf(stderr, "%s: pfCurrentPage: %d\n", __FUNCTION__,
|
|
|
|
+ rmesa->sarea->pfCurrentPage);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ radeon_firevertices(rmesa);
|
|
|
|
+
|
|
|
|
+ LOCK_HARDWARE( rmesa );
|
|
|
|
+
|
|
|
|
+ if (!dPriv->numClipRects) {
|
|
|
|
+ UNLOCK_HARDWARE(rmesa);
|
|
|
|
+ usleep(10000); /* throttle invisible client 10ms */
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ drm_clip_rect_t *box = dPriv->pClipRects;
|
|
|
|
+ drm_clip_rect_t *b = rmesa->sarea->boxes;
|
|
|
|
+ b[0] = box[0];
|
|
|
|
+ rmesa->sarea->nbox = 1;
|
|
|
|
+
|
|
|
|
+ /* Throttle the frame rate -- only allow a few pending swap buffers
|
|
|
|
+ * requests at a time.
|
|
|
|
+ */
|
|
|
|
+ radeonWaitForFrameCompletion( rmesa );
|
|
|
|
+ UNLOCK_HARDWARE( rmesa );
|
|
|
|
+ driWaitForVBlank( dPriv, & missed_target );
|
|
|
|
+ if ( missed_target ) {
|
|
|
|
+ rmesa->swap_missed_count++;
|
|
|
|
+ (void) (*psp->systemTime->getUST)( & rmesa->swap_missed_ust );
|
|
|
|
+ }
|
|
|
|
+ LOCK_HARDWARE( rmesa );
|
|
|
|
+
|
|
|
|
+ ret = drmCommandNone( rmesa->dri.fd, DRM_RADEON_FLIP );
|
|
|
|
+
|
|
|
|
+ UNLOCK_HARDWARE( rmesa );
|
|
|
|
+
|
|
|
|
+ if ( ret ) {
|
|
|
|
+ fprintf( stderr, "DRM_RADEON_FLIP: return = %d\n", ret );
|
|
|
|
+ exit( 1 );
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ rmesa->swap_count++;
|
|
|
|
+ (void) (*psp->systemTime->getUST)( & rmesa->swap_ust );
|
|
|
|
+
|
|
|
|
+ /* Get ready for drawing next frame. Update the renderbuffers'
|
|
|
|
+ * flippedOffset/Pitch fields so we draw into the right place.
|
|
|
|
+ */
|
|
|
|
+ // driFlipRenderbuffers(rmesa->glCtx->WinSysDrawBuffer,
|
|
|
|
+ // rmesa->sarea->pfCurrentPage);
|
|
|
|
+
|
|
|
|
+ rmesa->state.color.rrb = rrb;
|
|
|
|
+
|
|
|
|
+ if (rmesa->vtbl.update_draw_buffer)
|
|
|
|
+ rmesa->vtbl.update_draw_buffer(rmesa->glCtx);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Swap front and back buffer.
|
|
|
|
+ */
|
|
|
|
+void radeonSwapBuffers(__DRIdrawablePrivate * dPriv)
|
|
|
|
+{
|
|
|
|
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
|
|
|
|
+ radeonContextPtr radeon;
|
|
|
|
+ GLcontext *ctx;
|
|
|
|
+
|
|
|
|
+ radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
|
|
|
+ ctx = radeon->glCtx;
|
|
|
|
+
|
|
|
|
+ if (ctx->Visual.doubleBufferMode) {
|
|
|
|
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
|
|
|
|
+ if (radeon->doPageFlip) {
|
|
|
|
+ radeonPageFlip(dPriv);
|
|
|
|
+ } else {
|
|
|
|
+ radeonCopyBuffer(dPriv, NULL);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ /* XXX this shouldn't be an error but we can't handle it for now */
|
|
|
|
+ _mesa_problem(NULL, "%s: drawable has no context!",
|
|
|
|
+ __FUNCTION__);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
|
|
|
|
+ int x, int y, int w, int h )
|
|
|
|
+{
|
|
|
|
+ if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
|
|
|
|
+ radeonContextPtr radeon;
|
|
|
|
+ GLcontext *ctx;
|
|
|
|
+
|
|
|
|
+ radeon = (radeonContextPtr) dPriv->driContextPriv->driverPrivate;
|
|
|
|
+ ctx = radeon->glCtx;
|
|
|
|
+
|
|
|
|
+ if (ctx->Visual.doubleBufferMode) {
|
|
|
|
+ drm_clip_rect_t rect;
|
|
|
|
+ rect.x1 = x + dPriv->x;
|
|
|
|
+ rect.y1 = (dPriv->h - y - h) + dPriv->y;
|
|
|
|
+ rect.x2 = rect.x1 + w;
|
|
|
|
+ rect.y2 = rect.y1 + h;
|
|
|
|
+ _mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
|
|
|
|
+ radeonCopyBuffer(dPriv, &rect);
|
|
|
|
+ }
|
|
|
|
+ } else {
|
|
|
|
+ /* XXX this shouldn't be an error but we can't handle it for now */
|
|
|
|
+ _mesa_problem(NULL, "%s: drawable has no context!",
|
|
|
|
+ __FUNCTION__);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state )
|
|
|
|
+{
|
|
|
|
+ int i;
|
|
|
|
+ int dwords = (*state->check)(radeon->glCtx, state);
|
|
|
|
+
|
|
|
|
+ fprintf(stderr, "emit %s %d/%d\n", state->name, state->cmd_size, dwords);
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_VERBOSE)
|
|
|
|
+ for (i = 0 ; i < dwords; i++)
|
|
|
|
+ fprintf(stderr, "\t%s[%d]: %x\n", state->name, i, state->cmd[i]);
|
|
|
|
+
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
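+/* Walk the atom list and emit every atom whose dirty state matches the
+ * `dirty' argument.  radeonEmitState() below calls this twice: once with
+ * GL_FALSE to re-emit the still-clean atoms after the command buffer has
+ * been flushed (a fresh buffer carries no state), and once with GL_TRUE
+ * for the atoms that actually changed.
+ */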
+static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean dirty)
|
|
|
|
+{
|
|
|
|
+ BATCH_LOCALS(radeon);
|
|
|
|
+ struct radeon_state_atom *atom;
|
|
|
|
+ int dwords;
|
|
|
|
+
|
|
|
|
+ if (radeon->vtbl.pre_emit_atoms)
|
|
|
|
+ radeon->vtbl.pre_emit_atoms(radeon);
|
|
|
|
+
|
|
|
|
+ /* Emit actual atoms */
|
|
|
|
+ foreach(atom, &radeon->hw.atomlist) {
|
|
|
|
+ if ((atom->dirty || radeon->hw.all_dirty) == dirty) {
|
|
|
|
+ dwords = (*atom->check) (radeon->glCtx, atom);
|
|
|
|
+ if (dwords) {
|
|
|
|
+ if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
|
|
|
|
+ radeon_print_state_atom(radeon, atom);
|
|
|
|
+ }
|
|
|
|
+ if (atom->emit) {
|
|
|
|
+ (*atom->emit)(radeon->glCtx, atom);
|
|
|
|
+ } else {
|
|
|
|
+ BEGIN_BATCH_NO_AUTOSTATE(dwords);
|
|
|
|
+ OUT_BATCH_TABLE(atom->cmd, dwords);
|
|
|
|
+ END_BATCH();
|
|
|
|
+ }
|
|
|
|
+ atom->dirty = GL_FALSE;
|
|
|
|
+ } else {
|
|
|
|
+ if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
|
|
|
|
+ fprintf(stderr, " skip state %s\n",
|
|
|
|
+ atom->name);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ COMMIT_BATCH();
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void radeonEmitState(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+ if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
|
|
|
|
+ fprintf(stderr, "%s\n", __FUNCTION__);
|
|
|
|
+
|
|
|
|
+ if (radeon->vtbl.pre_emit_state)
|
|
|
|
+ radeon->vtbl.pre_emit_state(radeon);
|
|
|
|
+
|
|
|
|
+ /* this code used to return here but now it emits zbs */
|
|
|
|
+ if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ /* To avoid going across the entire set of states multiple times, just check
|
|
|
|
+ * for enough space for the case of emitting all state, and inline the
|
|
|
|
+ * radeonAllocCmdBuf code here without all the checks.
|
|
|
|
+ */
|
|
|
|
+ rcommonEnsureCmdBufSpace(radeon, radeon->hw.max_state_size, __FUNCTION__);
|
|
|
|
+
|
|
|
|
+ if (!radeon->cmdbuf.cs->cdw) {
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_STATE)
|
|
|
|
+ fprintf(stderr, "Begin reemit state\n");
|
|
|
|
+
|
|
|
|
+ radeonEmitAtoms(radeon, GL_FALSE);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_STATE)
|
|
|
|
+ fprintf(stderr, "Begin dirty state\n");
|
|
|
|
+
|
|
|
|
+ radeonEmitAtoms(radeon, GL_TRUE);
|
|
|
|
+ radeon->hw.is_dirty = GL_FALSE;
|
|
|
|
+ radeon->hw.all_dirty = GL_FALSE;
|
|
|
|
+
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
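+/* Flush in dependency order: pending DMA/vertex data first (which may
+ * append to the command stream), then any outstanding state atoms, and
+ * finally the command buffer itself.
+ */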
+void radeonFlush(GLcontext *ctx)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_IOCTL)
|
|
|
|
+ fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
|
|
|
|
+
|
|
|
|
+ if (radeon->dma.flush)
|
|
|
|
+ radeon->dma.flush( ctx );
|
|
|
|
+
|
|
|
|
+ radeonEmitState(radeon);
|
|
|
|
+
|
|
|
|
+ if (radeon->cmdbuf.cs->cdw)
|
|
|
|
+ rcommonFlushCmdBuf(radeon, __FUNCTION__);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* Make sure all commands have been sent to the hardware and have
|
|
|
|
+ * completed processing.
|
|
|
|
+ */
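+/* Three strategies are used below: with the kernel memory manager each
+ * color renderbuffer's BO is waited on directly; otherwise an IRQ is
+ * emitted and waited on when IRQs are enabled, with a busy CP_IDLE poll
+ * as the last resort.
+ */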
|
|
|
|
+void radeonFinish(GLcontext * ctx)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
|
|
|
|
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
|
|
|
|
+ int i;
|
|
|
|
+
|
|
|
|
+ radeonFlush(ctx);
|
|
|
|
+
|
|
|
|
+ if (radeon->radeonScreen->kernel_mm) {
|
|
|
|
+ for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+ rrb = (struct radeon_renderbuffer *)fb->_ColorDrawBuffers[i];
|
|
|
|
+ if (rrb->bo)
|
|
|
|
+ radeon_bo_wait(rrb->bo);
|
|
|
|
+ }
|
|
|
|
+ } else if (radeon->do_irqs) {
|
|
|
|
+ LOCK_HARDWARE(radeon);
|
|
|
|
+ radeonEmitIrqLocked(radeon);
|
|
|
|
+ UNLOCK_HARDWARE(radeon);
|
|
|
|
+ radeonWaitIrq(radeon);
|
|
|
|
+ } else {
|
|
|
|
+ radeonWaitForIdle(radeon);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* cmdbuffer */
|
|
|
|
+/**
|
|
|
|
+ * Send the current command buffer via ioctl to the hardware.
|
|
|
|
+ */
|
|
|
|
+int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
|
|
|
|
+{
|
|
|
|
+ int ret = 0;
|
|
|
|
+
|
|
|
|
+ if (rmesa->cmdbuf.flushing) {
|
|
|
|
+ fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
|
|
|
|
+ exit(-1);
|
|
|
|
+ }
|
|
|
|
+ rmesa->cmdbuf.flushing = 1;
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_IOCTL) {
|
|
|
|
+ fprintf(stderr, "%s from %s - %i cliprects\n",
|
|
|
|
+ __FUNCTION__, caller, rmesa->numClipRects);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (rmesa->cmdbuf.cs->cdw) {
|
|
|
|
+ ret = radeon_cs_emit(rmesa->cmdbuf.cs);
|
|
|
|
+ rmesa->hw.all_dirty = GL_TRUE;
|
|
|
|
+ }
|
|
|
|
+ radeon_cs_erase(rmesa->cmdbuf.cs);
|
|
|
|
+ rmesa->cmdbuf.flushing = 0;
|
|
|
|
+ return ret;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
|
|
|
|
+{
|
|
|
|
+ int ret;
|
|
|
|
+
|
|
|
|
+ radeonReleaseDmaRegion(rmesa);
|
|
|
|
+
|
|
|
|
+ LOCK_HARDWARE(rmesa);
|
|
|
|
+ ret = rcommonFlushCmdBufLocked(rmesa, caller);
|
|
|
|
+ UNLOCK_HARDWARE(rmesa);
|
|
|
|
+
|
|
|
|
+ if (ret) {
|
|
|
|
+ fprintf(stderr, "drmRadeonCmdBuffer: %d\n", ret);
|
|
|
|
+ _mesa_exit(ret);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ return ret;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Make sure that enough space is available in the command buffer
|
|
|
|
+ * by flushing if necessary.
|
|
|
|
+ *
|
|
|
|
+ * \param dwords The number of dwords we need to be free on the command buffer
|
|
|
|
+ */
|
|
|
|
+void rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
|
|
|
|
+{
|
|
|
|
+ if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size ||
|
|
|
|
+ radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
|
|
|
|
+ rcommonFlushCmdBuf(rmesa, caller);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
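+/* The command_buffer_size driconf option is scaled by 256 below, so it is
+ * expressed in units of 256 dwords, and the result is capped at
+ * 64 * 256 = 16384 dwords.  Note the lower bound of
+ * 2 * max_state_size + 65535 always exceeds that cap, so an undersized
+ * request effectively selects the maximum.
+ */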
+void rcommonInitCmdBuf(radeonContextPtr rmesa)
|
|
|
|
+{
|
|
|
|
+ GLuint size;
|
|
|
|
+ /* Initialize command buffer */
|
|
|
|
+ size = 256 * driQueryOptioni(&rmesa->optionCache,
|
|
|
|
+ "command_buffer_size");
|
|
|
|
+ if (size < 2 * rmesa->hw.max_state_size) {
|
|
|
|
+ size = 2 * rmesa->hw.max_state_size + 65535;
|
|
|
|
+ }
|
|
|
|
+ if (size > 64 * 256)
|
|
|
|
+ size = 64 * 256;
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
|
|
|
|
+ fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
|
|
|
|
+ sizeof(drm_r300_cmd_header_t));
|
|
|
|
+ fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
|
|
|
|
+ sizeof(drm_radeon_cmd_buffer_t));
|
|
|
|
+ fprintf(stderr,
|
|
|
|
+ "Allocating %d bytes command buffer (max state is %d bytes)\n",
|
|
|
|
+ size * 4, rmesa->hw.max_state_size * 4);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (rmesa->radeonScreen->kernel_mm) {
|
|
|
|
+ int fd = rmesa->radeonScreen->driScreen->fd;
|
|
|
|
+ rmesa->cmdbuf.csm = radeon_cs_manager_gem_ctor(fd);
|
|
|
|
+ } else {
|
|
|
|
+ rmesa->cmdbuf.csm = radeon_cs_manager_legacy_ctor(rmesa);
|
|
|
|
+ }
|
|
|
|
+ if (rmesa->cmdbuf.csm == NULL) {
|
|
|
|
+ /* FIXME: fatal error */
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
|
|
|
|
+ assert(rmesa->cmdbuf.cs != NULL);
|
|
|
|
+ rmesa->cmdbuf.size = size;
|
|
|
|
+
|
|
|
|
+ if (!rmesa->radeonScreen->kernel_mm) {
|
|
|
|
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
|
|
|
|
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
|
|
|
|
+ } else {
|
|
|
|
+ struct drm_radeon_gem_info mminfo;
|
|
|
|
+
|
|
|
|
+ if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO, &mminfo, sizeof(mminfo)))
|
|
|
|
+ {
|
|
|
|
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, mminfo.vram_size);
|
|
|
|
+ radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, mminfo.gart_size);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+}
|
|
|
|
+/**
|
|
|
|
+ * Destroy the command buffer
|
|
|
|
+ */
|
|
|
|
+void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
|
|
|
|
+{
|
|
|
|
+ radeon_cs_destroy(rmesa->cmdbuf.cs);
|
|
|
|
+ if (rmesa->radeonScreen->driScreen->dri2.enabled || rmesa->radeonScreen->kernel_mm) {
|
|
|
|
+ radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
|
|
|
|
+ } else {
|
|
|
|
+ radeon_cs_manager_legacy_dtor(rmesa->cmdbuf.csm);
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
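+/* Open a batch of `n' dwords: make room (flushing if needed), re-emit the
+ * saved state when we find ourselves at the start of a fresh command
+ * buffer and the caller asked for it, then hand off to radeon_cs_begin().
+ */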
+void rcommonBeginBatch(radeonContextPtr rmesa, int n,
|
|
|
|
+ int dostate,
|
|
|
|
+ const char *file,
|
|
|
|
+ const char *function,
|
|
|
|
+ int line)
|
|
|
|
+{
|
|
|
|
+ rcommonEnsureCmdBufSpace(rmesa, n, function);
|
|
|
|
+ if (!rmesa->cmdbuf.cs->cdw && dostate) {
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_IOCTL)
|
|
|
|
+ fprintf(stderr, "Reemit state after flush (from %s)\n", function);
|
|
|
|
+ radeonEmitState(rmesa);
|
|
|
|
+ }
|
|
|
|
+ radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
|
|
|
|
+
|
|
|
|
+ if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
|
|
|
|
+ fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
|
|
|
|
+ n, rmesa->cmdbuf.cs->cdw, function, line);
|
|
|
|
+
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
diff --git a/src/mesa/drivers/dri/radeon/radeon_common.h b/src/mesa/drivers/dri/radeon/radeon_common.h
new file mode 100644
index 0000000..ead0f55
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_common.h
@@ -0,0 +1,55 @@
+#ifndef COMMON_MISC_H
|
|
|
|
+#define COMMON_MISC_H
|
|
|
|
+
|
|
|
|
+#include "radeon_common_context.h"
|
|
|
|
+#include "radeon_dma.h"
|
|
|
|
+#include "radeon_texture.h"
|
|
|
|
+
|
|
|
|
+void radeonRecalcScissorRects(radeonContextPtr radeon);
|
|
|
|
+void radeonSetCliprects(radeonContextPtr radeon);
|
|
|
|
+void radeonUpdateScissor( GLcontext *ctx );
|
|
|
|
+void radeonScissor(GLcontext* ctx, GLint x, GLint y, GLsizei w, GLsizei h);
|
|
|
|
+
|
|
|
|
+void radeonWaitForIdleLocked(radeonContextPtr radeon);
|
|
|
|
+extern uint32_t radeonGetAge(radeonContextPtr radeon);
|
|
|
|
+void radeonCopyBuffer( __DRIdrawablePrivate *dPriv,
|
|
|
|
+ const drm_clip_rect_t *rect);
|
|
|
|
+void radeonPageFlip( __DRIdrawablePrivate *dPriv );
|
|
|
|
+void radeonSwapBuffers(__DRIdrawablePrivate * dPriv);
|
|
|
|
+void radeonCopySubBuffer(__DRIdrawablePrivate * dPriv,
|
|
|
|
+ int x, int y, int w, int h );
|
|
|
|
+
|
|
|
|
+void radeonUpdatePageFlipping(radeonContextPtr rmesa);
|
|
|
|
+
|
|
|
|
+void radeonFlush(GLcontext *ctx);
|
|
|
|
+void radeonFinish(GLcontext * ctx);
|
|
|
|
+void radeonEmitState(radeonContextPtr radeon);
|
|
|
|
+
|
|
|
|
+static inline struct radeon_renderbuffer *radeon_get_depthbuffer(radeonContextPtr rmesa)
|
|
|
|
+{
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+ rrb = rmesa->state.depth.rrb;
|
|
|
|
+ if (!rrb)
|
|
|
|
+ return NULL;
|
|
|
|
+
|
|
|
|
+ return rrb;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+static inline struct radeon_renderbuffer *radeon_get_colorbuffer(radeonContextPtr rmesa)
|
|
|
|
+{
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+ GLframebuffer *fb = rmesa->dri.drawable->driverPrivate;
|
|
|
|
+
|
|
|
|
+ rrb = rmesa->state.color.rrb;
|
|
|
|
+ if (rmesa->radeonScreen->driScreen->dri2.enabled) {
|
|
|
|
+ rrb = (struct radeon_renderbuffer *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
|
|
|
+ }
|
|
|
|
+ if (!rrb)
|
|
|
|
+ return NULL;
|
|
|
|
+ return rrb;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+#include "radeon_cmdbuf.h"
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+#endif
diff --git a/src/mesa/drivers/dri/radeon/radeon_common_context.c b/src/mesa/drivers/dri/radeon/radeon_common_context.c
new file mode 100644
index 0000000..1b8a05d
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_common_context.c
@@ -0,0 +1,589 @@
+/**************************************************************************
+
+Copyright 2000, 2001 ATI Technologies Inc., Ontario, Canada, and
+ VA Linux Systems Inc., Fremont, California.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
|
|
|
|
+#include "radeon_common.h"
|
|
|
|
+#include "xmlpool.h" /* for symbolic values of enum-type options */
|
|
|
|
+#include "utils.h"
|
|
|
|
+#include "drirenderbuffer.h"
|
|
|
|
+#include "vblank.h"
|
|
|
|
+#include "main/state.h"
|
|
|
|
+
|
|
|
|
+#define DRIVER_DATE "20090101"
|
|
|
|
+
|
|
|
|
+#ifndef RADEON_DEBUG
|
|
|
|
+int RADEON_DEBUG = (0);
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+/* Return various strings for glGetString().
|
|
|
|
+ */
|
|
|
|
+static const GLubyte *radeonGetString(GLcontext * ctx, GLenum name)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon = RADEON_CONTEXT(ctx);
|
|
|
|
+ static char buffer[128];
|
|
|
|
+
|
|
|
|
+ switch (name) {
|
|
|
|
+ case GL_VENDOR:
|
|
|
|
+ if (IS_R300_CLASS(radeon->radeonScreen))
|
|
|
|
+ return (GLubyte *) "DRI R300 Project";
|
|
|
|
+ else
|
|
|
|
+ return (GLubyte *) "Tungsten Graphics, Inc.";
|
|
|
|
+
|
|
|
|
+ case GL_RENDERER:
|
|
|
|
+ {
|
|
|
|
+ unsigned offset;
|
|
|
|
+ GLuint agp_mode = (radeon->radeonScreen->card_type==RADEON_CARD_PCI) ? 0 :
|
|
|
|
+ radeon->radeonScreen->AGPMode;
|
|
|
|
+ const char* chipname;
|
|
|
|
+
|
|
|
|
+ if (IS_R300_CLASS(radeon->radeonScreen))
|
|
|
|
+ chipname = "R300";
|
|
|
|
+ else if (IS_R200_CLASS(radeon->radeonScreen))
|
|
|
|
+ chipname = "R200";
|
|
|
|
+ else
|
|
|
|
+ chipname = "R100";
|
|
|
|
+
|
|
|
|
+ offset = driGetRendererString(buffer, chipname, DRIVER_DATE,
|
|
|
|
+ agp_mode);
|
|
|
|
+
|
|
|
|
+ if (IS_R300_CLASS(radeon->radeonScreen)) {
|
|
|
|
+ sprintf(&buffer[offset], " %sTCL",
|
|
|
|
+ (radeon->radeonScreen->chip_flags & RADEON_CHIPSET_TCL)
|
|
|
|
+ ? "" : "NO-");
|
|
|
|
+ } else {
|
|
|
|
+ sprintf(&buffer[offset], " %sTCL",
|
|
|
|
+ !(radeon->TclFallback & RADEON_TCL_FALLBACK_TCL_DISABLE)
|
|
|
|
+ ? "" : "NO-");
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (radeon->radeonScreen->driScreen->dri2.enabled)
|
|
|
|
+ strcat(buffer, " DRI2");
|
|
|
|
+
|
|
|
|
+ return (GLubyte *) buffer;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ default:
|
|
|
|
+ return NULL;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* Initialize the driver's misc functions.
|
|
|
|
+ */
|
|
|
|
+static void radeonInitDriverFuncs(struct dd_function_table *functions)
|
|
|
|
+{
|
|
|
|
+ functions->GetString = radeonGetString;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Create and initialize all common fields of the context,
|
|
|
|
+ * including the Mesa context itself.
|
|
|
|
+ */
|
|
|
|
+GLboolean radeonInitContext(radeonContextPtr radeon,
|
|
|
|
+ struct dd_function_table* functions,
|
|
|
|
+ const __GLcontextModes * glVisual,
|
|
|
|
+ __DRIcontextPrivate * driContextPriv,
|
|
|
|
+ void *sharedContextPrivate)
|
|
|
|
+{
|
|
|
|
+ __DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
|
|
|
|
+ radeonScreenPtr screen = (radeonScreenPtr) (sPriv->private);
|
|
|
|
+ GLcontext* ctx;
|
|
|
|
+ GLcontext* shareCtx;
|
|
|
|
+ int fthrottle_mode;
|
|
|
|
+
|
|
|
|
+ /* Fill in additional standard functions. */
|
|
|
|
+ radeonInitDriverFuncs(functions);
|
|
|
|
+
|
|
|
|
+ radeon->radeonScreen = screen;
|
|
|
|
+ /* Allocate and initialize the Mesa context */
|
|
|
|
+ if (sharedContextPrivate)
|
|
|
|
+ shareCtx = ((radeonContextPtr)sharedContextPrivate)->glCtx;
|
|
|
|
+ else
|
|
|
|
+ shareCtx = NULL;
|
|
|
|
+ radeon->glCtx = _mesa_create_context(glVisual, shareCtx,
|
|
|
|
+ functions, (void *)radeon);
|
|
|
|
+ if (!radeon->glCtx)
|
|
|
|
+ return GL_FALSE;
|
|
|
|
+
|
|
|
|
+ ctx = radeon->glCtx;
|
|
|
|
+ driContextPriv->driverPrivate = radeon;
|
|
|
|
+
|
|
|
|
+ /* DRI fields */
|
|
|
|
+ radeon->dri.context = driContextPriv;
|
|
|
|
+ radeon->dri.screen = sPriv;
|
|
|
|
+ radeon->dri.drawable = NULL;
|
|
|
|
+ radeon->dri.readable = NULL;
|
|
|
|
+ radeon->dri.hwContext = driContextPriv->hHWContext;
|
|
|
|
+ radeon->dri.hwLock = &sPriv->pSAREA->lock;
|
|
|
|
+ radeon->dri.fd = sPriv->fd;
|
|
|
|
+ radeon->dri.drmMinor = sPriv->drm_version.minor;
|
|
|
|
+
|
|
|
|
+ radeon->sarea = (drm_radeon_sarea_t *) ((GLubyte *) sPriv->pSAREA +
|
|
|
|
+ screen->sarea_priv_offset);
|
|
|
|
+
|
|
|
|
+ /* Setup IRQs */
|
|
|
|
+ fthrottle_mode = driQueryOptioni(&radeon->optionCache, "fthrottle_mode");
|
|
|
|
+ radeon->iw.irq_seq = -1;
|
|
|
|
+ radeon->irqsEmitted = 0;
|
|
|
|
+ radeon->do_irqs = (fthrottle_mode == DRI_CONF_FTHROTTLE_IRQS &&
|
|
|
|
+ radeon->radeonScreen->irq);
|
|
|
|
+
|
|
|
|
+ radeon->do_usleeps = (fthrottle_mode == DRI_CONF_FTHROTTLE_USLEEPS);
|
|
|
|
+
|
|
|
|
+ if (!radeon->do_irqs)
|
|
|
|
+ fprintf(stderr,
|
|
|
|
+ "IRQ's not enabled, falling back to %s: %d %d\n",
|
|
|
|
+ radeon->do_usleeps ? "usleeps" : "busy waits",
|
|
|
|
+ fthrottle_mode, radeon->radeonScreen->irq);
|
|
|
|
+
|
|
|
|
+ (*sPriv->systemTime->getUST) (&radeon->swap_ust);
|
|
|
|
+
|
|
|
|
+ return GL_TRUE;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * Cleanup common context fields.
|
|
|
|
+ * Called by r200DestroyContext/r300DestroyContext
|
|
|
|
+ */
|
|
|
|
+void radeonCleanupContext(radeonContextPtr radeon)
|
|
|
|
+{
|
|
|
|
+#ifdef RADEON_BO_TRACK
|
|
|
|
+ FILE *track;
|
|
|
|
+#endif
|
|
|
|
+ struct radeon_renderbuffer *rb;
|
|
|
|
+ GLframebuffer *fb;
|
|
|
|
+
|
|
|
|
+ /* free the Mesa context */
|
|
|
|
+ _mesa_destroy_context(radeon->glCtx);
|
|
|
|
+
|
|
|
|
+ fb = (void*)radeon->dri.drawable->driverPrivate;
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ fb = (void*)radeon->dri.readable->driverPrivate;
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb = (void *)fb->Attachment[BUFFER_DEPTH].Renderbuffer;
|
|
|
|
+ if (rb && rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ /* _mesa_destroy_context() might result in calls to functions that
|
|
|
|
+ * depend on the DriverCtx, so don't set it to NULL before.
|
|
|
|
+ *
|
|
|
|
+ * radeon->glCtx->DriverCtx = NULL;
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ /* free the option cache */
|
|
|
|
+ driDestroyOptionCache(&radeon->optionCache);
|
|
|
|
+
|
|
|
|
+ rcommonDestroyCmdBuf(radeon);
|
|
|
|
+
|
|
|
|
+ if (radeon->state.scissor.pClipRects) {
|
|
|
|
+ FREE(radeon->state.scissor.pClipRects);
|
|
|
|
+ radeon->state.scissor.pClipRects = 0;
|
|
|
|
+ }
|
|
|
|
+#ifdef RADEON_BO_TRACK
|
|
|
|
+ track = fopen("/tmp/tracklog", "w");
|
|
|
|
+ if (track) {
|
|
|
|
+ radeon_tracker_print(&radeon->radeonScreen->bom->tracker, track);
|
|
|
|
+ fclose(track);
|
|
|
|
+ }
|
|
|
|
+#endif
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* Force the context `c' to be unbound from its buffer.
|
|
|
|
+ */
|
|
|
|
+GLboolean radeonUnbindContext(__DRIcontextPrivate * driContextPriv)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_DRI)
|
|
|
|
+ fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
|
|
|
|
+ radeon->glCtx);
|
|
|
|
+
|
|
|
|
+ return GL_TRUE;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+static void
|
|
|
|
+radeon_make_kernel_renderbuffer_current(radeonContextPtr radeon,
|
|
|
|
+ GLframebuffer *draw)
|
|
|
|
+{
|
|
|
|
+ /* if radeon->fake */
|
|
|
|
+ struct radeon_renderbuffer *rb;
|
|
|
|
+
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->frontOffset,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->frontPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->backOffset,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->backPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->depthOffset,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_STENCIL].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->depthOffset,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
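+/* Legacy (non-GEM) path: wrap the fixed front/back/depth regions of the
+ * framebuffer aperture in buffer objects at their well-known offsets.
+ * The stencil attachment aliases the depth buffer.
+ */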
+static void
|
|
|
|
+radeon_make_renderbuffer_current(radeonContextPtr radeon,
|
|
|
|
+ GLframebuffer *draw)
|
|
|
|
+{
|
|
|
|
+ int size = 4096*4096*4; /* covers up to a 4096x4096 buffer at 4 bytes per pixel */
|
|
|
|
+ /* if radeon->fake */
|
|
|
|
+ struct radeon_renderbuffer *rb;
|
|
|
|
+
|
|
|
|
+ if (radeon->radeonScreen->kernel_mm) {
|
|
|
|
+ radeon_make_kernel_renderbuffer_current(radeon, draw);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->frontOffset +
|
|
|
|
+ radeon->radeonScreen->fbLocation,
|
|
|
|
+ size,
|
|
|
|
+ 4096,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->frontPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->backOffset +
|
|
|
|
+ radeon->radeonScreen->fbLocation,
|
|
|
|
+ size,
|
|
|
|
+ 4096,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->backPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->depthOffset +
|
|
|
|
+ radeon->radeonScreen->fbLocation,
|
|
|
|
+ size,
|
|
|
|
+ 4096,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_STENCIL].Renderbuffer)) {
|
|
|
|
+ if (!rb->bo) {
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ radeon->radeonScreen->depthOffset +
|
|
|
|
+ radeon->radeonScreen->fbLocation,
|
|
|
|
+ size,
|
|
|
|
+ 4096,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ 0);
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = radeon->radeonScreen->cpp;
|
|
|
|
+ rb->pitch = radeon->radeonScreen->depthPitch * rb->cpp;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
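+/* DRI2 path: ask the loader for the current set of buffers for this
+ * drawable, wrap each returned handle in a buffer object, and synthesize
+ * a single cliprect covering the whole drawable (DRI2 has no SAREA
+ * cliprects).
+ */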
+void
|
|
|
|
+radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable)
|
|
|
|
+{
|
|
|
|
+ unsigned int attachments[10];
|
|
|
|
+ __DRIbuffer *buffers;
|
|
|
|
+ __DRIscreen *screen;
|
|
|
|
+ struct radeon_renderbuffer *rb;
|
|
|
|
+ int i, count;
|
|
|
|
+ GLframebuffer *draw;
|
|
|
|
+ radeonContextPtr radeon;
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_DRI)
|
|
|
|
+ fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
|
|
|
|
+
|
|
|
|
+ draw = drawable->driverPrivate;
|
|
|
|
+ screen = context->driScreenPriv;
|
|
|
|
+ radeon = (radeonContextPtr) context->driverPrivate;
|
|
|
|
+ i = 0;
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer)) {
|
|
|
|
+ attachments[i++] = __DRI_BUFFER_FRONT_LEFT;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer)) {
|
|
|
|
+ attachments[i++] = __DRI_BUFFER_BACK_LEFT;
|
|
|
|
+ }
|
|
|
|
+ if ((rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer)) {
|
|
|
|
+ attachments[i++] = __DRI_BUFFER_DEPTH;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ buffers = (*screen->dri2.loader->getBuffers)(drawable,
|
|
|
|
+ &drawable->w,
|
|
|
|
+ &drawable->h,
|
|
|
|
+ attachments, i,
|
|
|
|
+ &count,
|
|
|
|
+ drawable->loaderPrivate);
|
|
|
|
+ if (buffers == NULL)
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ /* set one cliprect to cover the whole drawable */
|
|
|
|
+ drawable->x = 0;
|
|
|
|
+ drawable->y = 0;
|
|
|
|
+ drawable->backX = 0;
|
|
|
|
+ drawable->backY = 0;
|
|
|
|
+ drawable->numClipRects = 1;
|
|
|
|
+ drawable->pClipRects[0].x1 = 0;
|
|
|
|
+ drawable->pClipRects[0].y1 = 0;
|
|
|
|
+ drawable->pClipRects[0].x2 = drawable->w;
|
|
|
|
+ drawable->pClipRects[0].y2 = drawable->h;
|
|
|
|
+ drawable->numBackClipRects = 1;
|
|
|
|
+ drawable->pBackClipRects[0].x1 = 0;
|
|
|
|
+ drawable->pBackClipRects[0].y1 = 0;
|
|
|
|
+ drawable->pBackClipRects[0].x2 = drawable->w;
|
|
|
|
+ drawable->pBackClipRects[0].y2 = drawable->h;
|
|
|
|
+ for (i = 0; i < count; i++) {
|
|
|
|
+ switch (buffers[i].attachment) {
|
|
|
|
+ case __DRI_BUFFER_FRONT_LEFT:
|
|
|
|
+ rb = (void *)draw->Attachment[BUFFER_FRONT_LEFT].Renderbuffer;
|
|
|
|
+ if (rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = buffers[i].cpp;
|
|
|
|
+ rb->pitch = buffers[i].pitch;
|
|
|
|
+ rb->width = drawable->w;
|
|
|
|
+ rb->height = drawable->h;
|
|
|
|
+ rb->has_surface = 0;
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ buffers[i].name,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ buffers[i].flags);
|
|
|
|
+ if (rb->bo == NULL) {
|
|
|
|
+ fprintf(stderr, "failled to attach front %d\n",
|
|
|
|
+ buffers[i].name);
|
|
|
|
+ }
|
|
|
|
+ break;
|
|
|
|
+ case __DRI_BUFFER_BACK_LEFT:
|
|
|
|
+ rb = (void *)draw->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
|
|
|
+ if (rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = buffers[i].cpp;
|
|
|
|
+ rb->pitch = buffers[i].pitch;
|
|
|
|
+ rb->width = drawable->w;
|
|
|
|
+ rb->height = drawable->h;
|
|
|
|
+ rb->has_surface = 0;
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ buffers[i].name,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ buffers[i].flags);
|
|
|
|
+ break;
|
|
|
|
+ case __DRI_BUFFER_DEPTH:
|
|
|
|
+ rb = (void *)draw->Attachment[BUFFER_DEPTH].Renderbuffer;
|
|
|
|
+ if (rb->bo) {
|
|
|
|
+ radeon_bo_unref(rb->bo);
|
|
|
|
+ rb->bo = NULL;
|
|
|
|
+ }
|
|
|
|
+ rb->cpp = buffers[i].cpp;
|
|
|
|
+ rb->pitch = buffers[i].pitch;
|
|
|
|
+ rb->width = drawable->w;
|
|
|
|
+ rb->height = drawable->h;
|
|
|
|
+ rb->has_surface = 0;
|
|
|
|
+ rb->bo = radeon_bo_open(radeon->radeonScreen->bom,
|
|
|
|
+ buffers[i].name,
|
|
|
|
+ 0,
|
|
|
|
+ 0,
|
|
|
|
+ RADEON_GEM_DOMAIN_VRAM,
|
|
|
|
+ buffers[i].flags);
|
|
|
|
+ break;
|
|
|
|
+ case __DRI_BUFFER_STENCIL:
|
|
|
|
+ break;
|
|
|
|
+ case __DRI_BUFFER_ACCUM:
|
|
|
|
+ default:
|
|
|
|
+ fprintf(stderr,
|
|
|
|
+ "unhandled buffer attach event, attacment type %d\n",
|
|
|
|
+ buffers[i].attachment);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+ radeon = (radeonContextPtr) context->driverPrivate;
|
|
|
|
+ driUpdateFramebufferSize(radeon->glCtx, drawable);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* Force the context `c' to be the current context and associate with it
|
|
|
|
+ * buffer `b'.
|
|
|
|
+ */
|
|
|
|
+GLboolean radeonMakeCurrent(__DRIcontextPrivate * driContextPriv,
|
|
|
|
+ __DRIdrawablePrivate * driDrawPriv,
|
|
|
|
+ __DRIdrawablePrivate * driReadPriv)
|
|
|
|
+{
|
|
|
|
+ radeonContextPtr radeon;
|
|
|
|
+ GLframebuffer *dfb, *rfb;
|
|
|
|
+
|
|
|
|
+ if (!driContextPriv) {
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_DRI)
|
|
|
|
+ fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
|
|
|
|
+ _mesa_make_current(NULL, NULL, NULL);
|
|
|
|
+ return GL_TRUE;
|
|
|
|
+ }
|
|
|
|
+ radeon = (radeonContextPtr) driContextPriv->driverPrivate;
|
|
|
|
+ dfb = driDrawPriv->driverPrivate;
|
|
|
|
+ rfb = driReadPriv->driverPrivate;
|
|
|
|
+
|
|
|
|
+ if (driContextPriv->driScreenPriv->dri2.enabled) {
|
|
|
|
+ radeon_update_renderbuffers(driContextPriv, driDrawPriv);
|
|
|
|
+ if (driDrawPriv != driReadPriv)
|
|
|
|
+ radeon_update_renderbuffers(driContextPriv, driReadPriv);
|
|
|
|
+ radeon->state.color.rrb =
|
|
|
|
+ (void *)dfb->Attachment[BUFFER_BACK_LEFT].Renderbuffer;
|
|
|
|
+ radeon->state.depth.rrb =
|
|
|
|
+ (void *)dfb->Attachment[BUFFER_DEPTH].Renderbuffer;
|
|
|
|
+ } else {
|
|
|
|
+ radeon_make_renderbuffer_current(radeon, dfb);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_DRI)
|
|
|
|
+ fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, radeon->glCtx, dfb, rfb);
|
|
|
|
+
|
|
|
|
+ driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
|
|
|
|
+ if (driReadPriv != driDrawPriv)
|
|
|
|
+ driUpdateFramebufferSize(radeon->glCtx, driReadPriv);
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+ _mesa_make_current(radeon->glCtx, dfb, rfb);
|
|
|
|
+
|
|
|
|
+ if (radeon->dri.drawable != driDrawPriv) {
|
|
|
|
+ if (driDrawPriv->swap_interval == (unsigned)-1) {
|
|
|
|
+ driDrawPriv->vblFlags =
|
|
|
|
+ (radeon->radeonScreen->irq != 0)
|
|
|
|
+ ? driGetDefaultVBlankFlags(&radeon->
|
|
|
|
+ optionCache)
|
|
|
|
+ : VBLANK_FLAG_NO_IRQ;
|
|
|
|
+
|
|
|
|
+ driDrawableInitVBlank(driDrawPriv);
|
|
|
|
+ }
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ radeon->dri.readable = driReadPriv;
|
|
|
|
+
|
|
|
|
+ if (radeon->dri.drawable != driDrawPriv ||
|
|
|
|
+ radeon->lastStamp != driDrawPriv->lastStamp) {
|
|
|
|
+ radeon->dri.drawable = driDrawPriv;
|
|
|
|
+
|
|
|
|
+ radeonSetCliprects(radeon);
|
|
|
|
+ radeon->vtbl.update_viewport_offset(radeon->glCtx);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ _mesa_update_state(radeon->glCtx);
|
|
|
|
+
|
|
|
|
+ if (!driContextPriv->driScreenPriv->dri2.enabled) {
|
|
|
|
+ radeonUpdatePageFlipping(radeon);
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ if (RADEON_DEBUG & DEBUG_DRI)
|
|
|
|
+ fprintf(stderr, "End %s\n", __FUNCTION__);
|
|
|
|
+ return GL_TRUE;
|
|
|
|
+}
|
|
|
|
+
diff --git a/src/mesa/drivers/dri/radeon/radeon_common_context.h b/src/mesa/drivers/dri/radeon/radeon_common_context.h
new file mode 100644
index 0000000..a200e90
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_common_context.h
@@ -0,0 +1,508 @@
+
|
|
|
|
+#ifndef COMMON_CONTEXT_H
|
|
|
|
+#define COMMON_CONTEXT_H
|
|
|
|
+
|
|
|
|
+#include "main/mm.h"
|
|
|
|
+#include "math/m_vector.h"
|
|
|
|
+#include "texmem.h"
|
|
|
|
+#include "tnl/t_context.h"
|
|
|
|
+#include "main/colormac.h"
|
|
|
|
+
|
|
|
|
+#include "radeon_screen.h"
|
|
|
|
+#include "radeon_drm.h"
|
|
|
|
+#include "dri_util.h"
|
|
|
|
+#include "tnl/t_vertex.h"
|
|
|
|
+
|
|
|
|
+/* This union is used to avoid warnings/miscompilation
|
|
|
|
+ with float to uint32_t casts due to strict-aliasing */
|
|
|
|
+typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
|
|
|
|
+
|
|
|
|
+struct radeon_context;
|
|
|
|
+typedef struct radeon_context radeonContextRec;
|
|
|
|
+typedef struct radeon_context *radeonContextPtr;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+#define TEX_0 0x1
|
|
|
|
+#define TEX_1 0x2
|
|
|
|
+#define TEX_2 0x4
|
|
|
|
+#define TEX_3 0x8
|
|
|
|
+#define TEX_4 0x10
|
|
|
|
+#define TEX_5 0x20
|
|
|
|
+
|
|
|
|
+/* Rasterizing fallbacks */
|
|
|
|
+/* See corresponding strings in r200_swtcl.c */
|
|
|
|
+#define RADEON_FALLBACK_TEXTURE 0x0001
|
|
|
|
+#define RADEON_FALLBACK_DRAW_BUFFER 0x0002
|
|
|
|
+#define RADEON_FALLBACK_STENCIL 0x0004
|
|
|
|
+#define RADEON_FALLBACK_RENDER_MODE 0x0008
|
|
|
|
+#define RADEON_FALLBACK_BLEND_EQ 0x0010
|
|
|
|
+#define RADEON_FALLBACK_BLEND_FUNC 0x0020
|
|
|
|
+#define RADEON_FALLBACK_DISABLE 0x0040
|
|
|
|
+#define RADEON_FALLBACK_BORDER_MODE 0x0080
|
|
|
|
+
|
|
|
|
+#define R200_FALLBACK_TEXTURE 0x01
|
|
|
|
+#define R200_FALLBACK_DRAW_BUFFER 0x02
|
|
|
|
+#define R200_FALLBACK_STENCIL 0x04
|
|
|
|
+#define R200_FALLBACK_RENDER_MODE 0x08
|
|
|
|
+#define R200_FALLBACK_DISABLE 0x10
|
|
|
|
+#define R200_FALLBACK_BORDER_MODE 0x20
|
|
|
|
+
|
|
|
|
+#define RADEON_TCL_FALLBACK_RASTER 0x1 /* rasterization */
|
|
|
|
+#define RADEON_TCL_FALLBACK_UNFILLED 0x2 /* unfilled tris */
|
|
|
|
+#define RADEON_TCL_FALLBACK_LIGHT_TWOSIDE 0x4 /* twoside tris */
|
|
|
|
+#define RADEON_TCL_FALLBACK_MATERIAL 0x8 /* material in vb */
|
|
|
|
+#define RADEON_TCL_FALLBACK_TEXGEN_0 0x10 /* texgen, unit 0 */
|
|
|
|
+#define RADEON_TCL_FALLBACK_TEXGEN_1 0x20 /* texgen, unit 1 */
|
|
|
|
+#define RADEON_TCL_FALLBACK_TEXGEN_2 0x40 /* texgen, unit 2 */
|
|
|
|
+#define RADEON_TCL_FALLBACK_TCL_DISABLE 0x80 /* user disable */
|
|
|
|
+#define RADEON_TCL_FALLBACK_FOGCOORDSPEC 0x100 /* fogcoord, sep. spec light */
|
|
|
|
+
|
|
|
|
+/* The blit width for texture uploads
|
|
|
|
+ */
|
|
|
|
+#define BLIT_WIDTH_BYTES 1024
|
|
|
|
+
|
|
|
|
+/* Use the templated vertex format:
|
|
|
|
+ */
|
|
|
|
+#define COLOR_IS_RGBA
|
|
|
|
+#define TAG(x) radeon##x
|
|
|
|
+#include "tnl_dd/t_dd_vertex.h"
|
|
|
|
+#undef TAG
|
|
|
|
+
|
|
|
|
+struct radeon_renderbuffer
|
|
|
|
+{
|
|
|
|
+ struct gl_renderbuffer base;
|
|
|
|
+ struct radeon_bo *bo;
|
|
|
|
+ unsigned int cpp;
|
|
|
|
+ /* unsigned int offset; */
|
|
|
|
+ unsigned int pitch;
|
|
|
|
+ unsigned int width;
|
|
|
|
+ unsigned int height;
|
|
|
|
+
|
|
|
|
+ /* boo Xorg 6.8.2 compat */
|
|
|
|
+ int has_surface;
|
|
|
|
+
|
|
|
|
+ __DRIdrawablePrivate *dPriv;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_colorbuffer_state {
|
|
|
|
+ GLuint clear;
|
|
|
|
+ int roundEnable;
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_depthbuffer_state {
|
|
|
|
+ GLuint clear;
|
|
|
|
+ GLfloat scale;
|
|
|
|
+ struct radeon_renderbuffer *rrb;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_scissor_state {
|
|
|
|
+ drm_clip_rect_t rect;
|
|
|
|
+ GLboolean enabled;
|
|
|
|
+
|
|
|
|
+ GLuint numClipRects; /* Cliprects active */
|
|
|
|
+ GLuint numAllocedClipRects; /* Cliprects available */
|
|
|
|
+ drm_clip_rect_t *pClipRects;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_stencilbuffer_state {
|
|
|
|
+ GLboolean hwBuffer;
|
|
|
|
+ GLuint clear; /* rb3d_stencilrefmask value */
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_stipple_state {
|
|
|
|
+ GLuint mask[32];
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_state_atom {
|
|
|
|
+ struct radeon_state_atom *next, *prev;
|
|
|
|
+ const char *name; /* for debug */
|
|
|
|
+ int cmd_size; /* size in bytes */
|
|
|
|
+ GLuint idx;
|
|
|
|
+ GLuint is_tcl;
|
|
|
|
+ GLuint *cmd; /* one or more cmd's */
|
|
|
|
+ GLuint *lastcmd; /* one or more cmd's */
|
|
|
|
+ GLboolean dirty; /* dirty-mark in emit_state_list */
|
|
|
|
+ int (*check) (GLcontext *, struct radeon_state_atom *atom); /* is this state active? */
|
|
|
|
+ void (*emit) (GLcontext *, struct radeon_state_atom *atom);
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_hw_state {
|
|
|
|
+ /* Head of the linked list of state atoms. */
|
|
|
|
+ struct radeon_state_atom atomlist;
|
|
|
|
+ int max_state_size; /* Number of dwords necessary for a full state emit. */
|
|
|
|
+ GLboolean is_dirty, all_dirty;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/* Texture related */
|
|
|
|
+typedef struct _radeon_texture_image radeon_texture_image;
|
|
|
|
+
|
|
|
|
+struct _radeon_texture_image {
|
|
|
|
+ struct gl_texture_image base;
|
|
|
|
+
|
|
|
|
+ /**
|
|
|
|
+ * If mt != 0, the image is stored in hardware format in the
|
|
|
|
+ * given mipmap tree. In this case, base.Data may point into the
|
|
|
|
+ * mapping of the buffer object that contains the mipmap tree.
|
|
|
|
+ *
|
|
|
|
+ * If mt == 0, the image is stored in normal memory pointed to
|
|
|
|
+ * by base.Data.
|
|
|
|
+ */
|
|
|
|
+ struct _radeon_mipmap_tree *mt;
|
|
|
|
+ struct radeon_bo *bo;
|
|
|
|
+
|
|
|
|
+ int mtlevel; /** if mt != 0, this is the image's level in the mipmap tree */
|
|
|
|
+ int mtface; /** if mt != 0, this is the image's face in the mipmap tree */
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+static INLINE radeon_texture_image *get_radeon_texture_image(struct gl_texture_image *image)
|
|
|
|
+{
|
|
|
|
+ return (radeon_texture_image*)image;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+typedef struct radeon_tex_obj radeonTexObj, *radeonTexObjPtr;
|
|
|
|
+
|
|
|
|
+#define RADEON_TXO_MICRO_TILE (1 << 3)
|
|
|
|
+
|
|
|
|
+/* Texture object in locally shared texture space.
|
|
|
|
+ */
|
|
|
|
+struct radeon_tex_obj {
|
|
|
|
+ struct gl_texture_object base;
|
|
|
|
+ struct _radeon_mipmap_tree *mt;
|
|
|
|
+
|
|
|
|
+ /**
|
|
|
|
+ * This is true if we've verified that the mipmap tree above is complete
|
|
|
|
+ * and so on.
|
|
|
|
+ */
|
|
|
|
+ GLboolean validated;
|
|
|
|
+
|
|
|
|
+ GLuint override_offset;
|
|
|
|
+ GLboolean image_override; /* Image overridden by GLX_EXT_tfp */
|
|
|
|
+ GLuint tile_bits; /* hw texture tile bits used on this texture */
|
|
|
|
+ struct radeon_bo *bo;
|
|
|
|
+
|
|
|
|
+ GLuint pp_txfilter; /* hardware register values */
|
|
|
|
+ GLuint pp_txformat;
|
|
|
|
+ GLuint pp_txformat_x;
|
|
|
|
+ GLuint pp_txsize; /* npot only */
|
|
|
|
+ GLuint pp_txpitch; /* npot only */
|
|
|
|
+ GLuint pp_border_color;
|
|
|
|
+ GLuint pp_cubic_faces; /* cube face 1,2,3,4 log2 sizes */
|
|
|
|
+
|
|
|
|
+ GLuint pp_txfilter_1; /* r300 */
|
|
|
|
+
|
|
|
|
+ GLboolean border_fallback;
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+static INLINE radeonTexObj* radeon_tex_obj(struct gl_texture_object *texObj)
|
|
|
|
+{
|
|
|
|
+ return (radeonTexObj*)texObj;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* Need refcounting on dma buffers:
|
|
|
|
+ */
|
|
|
|
+struct radeon_dma_buffer {
|
|
|
|
+ int refcount; /* the number of retained regions in buf */
|
|
|
|
+ drmBufPtr buf;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_aos {
|
|
|
|
+ struct radeon_bo *bo; /** Buffer object where vertex data is stored */
|
|
|
|
+ int offset; /** Offset into buffer object, in bytes */
|
|
|
|
+ int components; /** Number of components per vertex */
|
|
|
|
+ int stride; /** Stride in dwords (may be 0 for repeating) */
|
|
|
|
+ int count; /** Number of vertices */
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_dma {
|
|
|
|
+ /* Active dma region. Allocations for vertices and retained
|
|
|
|
+ * regions come from here. Also used for emitting random vertices,
|
|
|
|
+ * these may be flushed by calling flush_current();
|
|
|
|
+ */
|
|
|
|
+ struct radeon_bo *current; /** Buffer that DMA memory is allocated from */
|
|
|
|
+ int current_used; /** Number of bytes allocated and forgotten about */
|
|
|
|
+ int current_vertexptr; /** End of active vertex region */
|
|
|
|
+
|
|
|
|
+ /**
|
|
|
|
+ * If current_vertexptr != current_used then flush must be non-zero.
|
|
|
|
+ * flush must be called before non-active vertex allocations can be
|
|
|
|
+ * performed.
|
|
|
|
+ */
|
|
|
|
+ void (*flush) (GLcontext *);
|
|
|
|
+
|
|
|
|
+ /* Number of "in-flight" DMA buffers, i.e. the number of buffers
|
|
|
|
+ * for which a DISCARD command is currently queued in the command buffer.
|
|
|
|
+ */
|
|
|
|
+ GLuint nr_released_bufs;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+/* radeon_swtcl.c
|
|
|
|
+ */
|
|
|
|
+struct radeon_swtcl_info {
|
|
|
|
+
|
|
|
|
+ GLuint RenderIndex;
|
|
|
|
+ GLuint vertex_size;
|
|
|
|
+ GLubyte *verts;
|
|
|
|
+
|
|
|
|
+ /* Fallback rasterization functions
|
|
|
|
+ */
|
|
|
|
+ GLuint hw_primitive;
|
|
|
|
+ GLenum render_primitive;
|
|
|
|
+ GLuint numverts;
|
|
|
|
+
|
|
|
|
+ struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
|
|
|
|
+ GLuint vertex_attr_count;
|
|
|
|
+
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_ioctl {
|
|
|
|
+ GLuint vertex_offset;
|
|
|
|
+ struct radeon_bo *bo;
|
|
|
|
+ GLuint vertex_size;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+#define RADEON_MAX_PRIMS 64
|
|
|
|
+
|
|
|
|
+struct radeon_prim {
|
|
|
|
+ GLuint start;
|
|
|
|
+ GLuint end;
|
|
|
|
+ GLuint prim;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
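+/* Pack an RGBA color for the given bytes-per-pixel.  A worked example,
+ * assuming Mesa's PACK_COLOR_8888(x,y,z,w) yields x<<24|y<<16|z<<8|w:
+ * opaque red radeonPackColor(4, 255, 0, 0, 255) packs to ARGB 0xffff0000,
+ * while radeonPackColor(2, 255, 0, 0, 255) packs to RGB565 0xf800.
+ */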
+static INLINE GLuint radeonPackColor(GLuint cpp,
|
|
|
|
+ GLubyte r, GLubyte g,
|
|
|
|
+ GLubyte b, GLubyte a)
|
|
|
|
+{
|
|
|
|
+ switch (cpp) {
|
|
|
|
+ case 2:
|
|
|
|
+ return PACK_COLOR_565(r, g, b);
|
|
|
|
+ case 4:
|
|
|
|
+ return PACK_COLOR_8888(a, r, g, b);
|
|
|
|
+ default:
|
|
|
|
+ return 0;
|
|
|
|
+ }
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+#define MAX_CMD_BUF_SZ (16*1024)
|
|
|
|
+
|
|
|
|
+#define MAX_DMA_BUF_SZ (64*1024)
|
|
|
|
+
|
|
|
|
+struct radeon_store {
|
|
|
|
+ GLuint statenr;
|
|
|
|
+ GLuint primnr;
|
|
|
|
+ char cmd_buf[MAX_CMD_BUF_SZ];
|
|
|
|
+ int cmd_used;
|
|
|
|
+ int elts_start;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_dri_mirror {
|
|
|
|
+ __DRIcontextPrivate *context; /* DRI context */
|
|
|
|
+ __DRIscreenPrivate *screen; /* DRI screen */
|
|
|
|
+
|
|
|
|
+ /**
|
|
|
|
+ * DRI drawable bound to this context for drawing.
|
|
|
|
+ */
|
|
|
|
+ __DRIdrawablePrivate *drawable;
|
|
|
|
+
|
|
|
|
+ /**
|
|
|
|
+ * DRI drawable bound to this context for reading.
|
|
|
|
+ */
|
|
|
|
+ __DRIdrawablePrivate *readable;
|
|
|
|
+
|
|
|
|
+ drm_context_t hwContext;
|
|
|
|
+ drm_hw_lock_t *hwLock;
|
|
|
|
+ int fd;
|
|
|
|
+ int drmMinor;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+#define DEBUG_TEXTURE 0x001
|
|
|
|
+#define DEBUG_STATE 0x002
|
|
|
|
+#define DEBUG_IOCTL 0x004
|
|
|
|
+#define DEBUG_PRIMS 0x008
|
|
|
|
+#define DEBUG_VERTS 0x010
|
|
|
|
+#define DEBUG_FALLBACKS 0x020
|
|
|
|
+#define DEBUG_VFMT 0x040
|
|
|
|
+#define DEBUG_CODEGEN 0x080
|
|
|
|
+#define DEBUG_VERBOSE 0x100
|
|
|
|
+#define DEBUG_DRI 0x200
|
|
|
|
+#define DEBUG_DMA 0x400
|
|
|
|
+#define DEBUG_SANITY 0x800
|
|
|
|
+#define DEBUG_SYNC 0x1000
|
|
|
|
+#define DEBUG_PIXEL 0x2000
|
|
|
|
+#define DEBUG_MEMORY 0x4000
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+typedef void (*radeon_tri_func) (radeonContextPtr,
|
|
|
|
+ radeonVertex *,
|
|
|
|
+ radeonVertex *, radeonVertex *);
|
|
|
|
+
|
|
|
|
+typedef void (*radeon_line_func) (radeonContextPtr,
|
|
|
|
+ radeonVertex *, radeonVertex *);
|
|
|
|
+
|
|
|
|
+typedef void (*radeon_point_func) (radeonContextPtr, radeonVertex *);
|
|
|
|
+
|
|
|
|
+struct radeon_state {
|
|
|
|
+ struct radeon_colorbuffer_state color;
|
|
|
|
+ struct radeon_depthbuffer_state depth;
|
|
|
|
+ struct radeon_scissor_state scissor;
|
|
|
|
+ struct radeon_stencilbuffer_state stencil;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * This structure holds the command buffer while it is being constructed.
|
|
|
|
+ *
|
|
|
|
+ * The first batch of commands in the buffer is always the state that needs
|
|
|
|
+ * to be re-emitted when the context is lost. This batch can be skipped
|
|
|
|
+ * otherwise.
|
|
|
|
+ */
|
|
|
|
+struct radeon_cmdbuf {
|
|
|
|
+ struct radeon_cs_manager *csm;
|
|
|
|
+ struct radeon_cs *cs;
|
|
|
|
+ int size; /** # of dwords total */
|
|
|
|
+ unsigned int flushing:1; /** whether we're currently in FlushCmdBufLocked */
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct radeon_context {
|
|
|
|
+ GLcontext *glCtx;
|
|
|
|
+ radeonScreenPtr radeonScreen; /* Screen private DRI data */
|
|
|
|
+
|
|
|
|
+ /* Texture object bookkeeping
|
|
|
|
+ */
|
|
|
|
+ int texture_depth;
|
|
|
|
+ float initialMaxAnisotropy;
|
|
|
|
+
|
|
|
|
+ struct radeon_dma dma;
|
|
|
|
+ struct radeon_hw_state hw;
|
|
|
|
+ /* Rasterization and vertex state:
|
|
|
|
+ */
|
|
|
|
+ GLuint TclFallback;
|
|
|
|
+ GLuint Fallback;
|
|
|
|
+ GLuint NewGLState;
|
|
|
|
+ DECLARE_RENDERINPUTS(tnl_index_bitset); /* index of bits for last tnl_install_attrs */
|
|
|
|
+
|
|
|
|
+ /* Page flipping */
|
|
|
|
+ GLuint doPageFlip;
|
|
|
|
+
|
|
|
|
+ /* Drawable, cliprect and scissor information */
|
|
|
|
+ GLuint numClipRects; /* Cliprects for the draw buffer */
|
|
|
|
+ drm_clip_rect_t *pClipRects;
|
|
|
|
+ unsigned int lastStamp;
|
|
|
|
+ GLboolean lost_context;
|
|
|
|
+ drm_radeon_sarea_t *sarea; /* Private SAREA data */
|
|
|
|
+
|
|
|
|
+ /* Mirrors of some DRI state */
|
|
|
|
+ struct radeon_dri_mirror dri;
|
|
|
|
+
|
|
|
|
+ /* Busy waiting */
|
|
|
|
+ GLuint do_usleeps;
|
|
|
|
+ GLuint do_irqs;
|
|
|
|
+ GLuint irqsEmitted;
|
|
|
|
+ drm_radeon_irq_wait_t iw;
|
|
|
|
+
|
|
|
|
+ /* buffer swap */
|
|
|
|
+ int64_t swap_ust;
|
|
|
|
+ int64_t swap_missed_ust;
|
|
|
|
+
|
|
|
|
+ GLuint swap_count;
|
|
|
|
+ GLuint swap_missed_count;
|
|
|
|
+
|
|
|
|
+ /* Derived state - for r300 only */
|
|
|
|
+ struct radeon_state state;
|
|
|
|
+
|
|
|
|
+ struct radeon_swtcl_info swtcl;
|
|
|
|
+ /* Configuration cache
|
|
|
|
+ */
|
|
|
|
+ driOptionCache optionCache;
|
|
|
|
+
|
|
|
|
+ struct radeon_cmdbuf cmdbuf;
|
|
|
|
+
|
|
|
|
+ struct {
|
|
|
|
+ void (*get_lock)(radeonContextPtr radeon);
|
|
|
|
+ void (*update_viewport_offset)(GLcontext *ctx);
|
|
|
|
+ void (*update_draw_buffer)(GLcontext *ctx);
|
|
|
|
+ void (*emit_cs_header)(struct radeon_cs *cs, radeonContextPtr rmesa);
|
|
|
|
+ void (*swtcl_flush)(GLcontext *ctx, uint32_t offset);
|
|
|
|
+ void (*pre_emit_atoms)(radeonContextPtr rmesa);
|
|
|
|
+ void (*pre_emit_state)(radeonContextPtr rmesa);
|
|
|
|
+ } vtbl;
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+#define RADEON_CONTEXT(glctx) ((radeonContextPtr)(ctx->DriverCtx))
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * This function takes a float and packs it into a uint32_t
|
|
|
|
+ */
|
|
|
|
+static INLINE uint32_t radeonPackFloat32(float fl)
|
|
|
|
+{
|
|
|
|
+ union {
|
|
|
|
+ float fl;
|
|
|
|
+ uint32_t u;
|
|
|
|
+ } u;
|
|
|
|
+
|
|
|
|
+ u.fl = fl;
|
|
|
|
+ return u.u;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+/* This is probably wrong for some values, I need to test this
|
|
|
|
+ * some more. Range checking would be a good idea also..
|
|
|
|
+ *
|
|
|
|
+ * But it works for most things. I'll fix it later if someone
|
|
|
|
+ * else with a better clue doesn't
|
|
|
|
+ */
|
|
|
|
+static INLINE uint32_t radeonPackFloat24(float f)
|
|
|
|
+{
|
|
|
|
+ float mantissa;
|
|
|
|
+ int exponent;
|
|
|
|
+ uint32_t float24 = 0;
|
|
|
|
+
|
|
|
|
+ if (f == 0.0)
|
|
|
|
+ return 0;
|
|
|
|
+
|
|
|
|
+ mantissa = frexpf(f, &exponent);
|
|
|
|
+
|
|
|
|
+ /* Handle -ve */
|
|
|
|
+ if (mantissa < 0) {
|
|
|
|
+ float24 |= (1 << 23);
|
|
|
|
+ mantissa = mantissa * -1.0;
|
|
|
|
+ }
|
|
|
|
+ /* Handle exponent, bias of 63 */
|
|
|
|
+ exponent += 62;
|
|
|
|
+ float24 |= (exponent << 16);
|
|
|
|
+ /* Kill 7 LSB of mantissa */
|
|
|
|
+ float24 |= (radeonPackFloat32(mantissa) & 0x7FFFFF) >> 7;
|
|
|
|
+
|
|
|
|
+ return float24;
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+GLboolean radeonInitContext(radeonContextPtr radeon,
|
|
|
|
+ struct dd_function_table* functions,
|
|
|
|
+ const __GLcontextModes * glVisual,
|
|
|
|
+ __DRIcontextPrivate * driContextPriv,
|
|
|
|
+ void *sharedContextPrivate);
|
|
|
|
+
|
|
|
|
+void radeonCleanupContext(radeonContextPtr radeon);
|
|
|
|
+GLboolean radeonUnbindContext(__DRIcontextPrivate * driContextPriv);
|
|
|
|
+void radeon_update_renderbuffers(__DRIcontext *context, __DRIdrawable *drawable);
|
|
|
|
+GLboolean radeonMakeCurrent(__DRIcontextPrivate * driContextPriv,
|
|
|
|
+ __DRIdrawablePrivate * driDrawPriv,
|
|
|
|
+ __DRIdrawablePrivate * driReadPriv);
|
|
|
|
+
|
|
|
|
+/* ================================================================
|
|
|
|
+ * Debugging:
|
|
|
|
+ */
|
|
|
|
+#define DO_DEBUG 1
|
|
|
|
+
|
|
|
|
+#if DO_DEBUG
|
|
|
|
+extern int RADEON_DEBUG;
|
|
|
|
+#else
|
|
|
|
+#define RADEON_DEBUG 0
|
|
|
|
+#endif
|
|
|
|
+
|
|
|
|
+#endif
|
|
|
|
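
As a quick sanity check on the packing above (commentary, not part of the patch), here is what radeonPackFloat24() produces for 1.0f:

    /* frexpf(1.0f, &e) yields mantissa 0.5f with e == 1; the sign bit
     * (bit 23) stays clear; e += 62 puts a biased exponent of 63 into
     * bits 16..22; 0.5f is 0x3F000000 in IEEE-754, so its fraction
     * field is zero and bits 0..15 of the result are 0.
     */
    uint32_t one_as_float24 = radeonPackFloat24(1.0f);  /* == 0x003F0000 */
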
diff --git a/src/mesa/drivers/dri/radeon/radeon_cs_drm.h b/src/mesa/drivers/dri/radeon/radeon_cs_drm.h
new file mode 100644
index 0000000..984725a
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_cs_drm.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_H
+#define RADEON_CS_H
+
+#include <stdint.h>
+#include <string.h>
+#include "drm.h"
+#include "radeon_drm.h"
+
+struct radeon_cs_reloc {
+ struct radeon_bo *bo;
+ uint32_t read_domain;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+
+#define RADEON_CS_SPACE_OK 0
+#define RADEON_CS_SPACE_OP_TO_BIG 1
+#define RADEON_CS_SPACE_FLUSH 2
+
+struct radeon_cs_space_check {
+ struct radeon_bo *bo;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t new_accounted;
+};
+
+struct radeon_cs_manager;
+
+struct radeon_cs {
+ struct radeon_cs_manager *csm;
+ void *relocs;
+ uint32_t *packets;
+ unsigned crelocs;
+ unsigned relocs_total_size;
+ unsigned cdw;
+ unsigned ndw;
+ int section;
+ unsigned section_ndw;
+ unsigned section_cdw;
+ const char *section_file;
+ const char *section_func;
+ int section_line;
+
+};
+
+/* cs functions */
+struct radeon_cs_funcs {
+ struct radeon_cs *(*cs_create)(struct radeon_cs_manager *csm,
+ uint32_t ndw);
+ int (*cs_write_reloc)(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags);
+ int (*cs_begin)(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line);
+ int (*cs_end)(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line);
+ int (*cs_emit)(struct radeon_cs *cs);
+ int (*cs_destroy)(struct radeon_cs *cs);
+ int (*cs_erase)(struct radeon_cs *cs);
+ int (*cs_need_flush)(struct radeon_cs *cs);
+ void (*cs_print)(struct radeon_cs *cs, FILE *file);
+ int (*cs_space_check)(struct radeon_cs *cs, struct radeon_cs_space_check *bos,
+ int num_bo);
+};
+
+struct radeon_cs_manager {
+ struct radeon_cs_funcs *funcs;
+ int fd;
+ uint32_t vram_limit, gart_limit;
+ uint32_t vram_write_used, gart_write_used;
+ uint32_t read_used;
+};
+
+static inline struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
+ uint32_t ndw)
+{
+ return csm->funcs->cs_create(csm, ndw);
+}
+
+static inline int radeon_cs_write_reloc(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags)
+{
+ return cs->csm->funcs->cs_write_reloc(cs,
+ bo,
+ read_domain,
+ write_domain,
+ flags);
+}
+
+static inline int radeon_cs_begin(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line)
+{
+ return cs->csm->funcs->cs_begin(cs, ndw, file, func, line);
+}
+
+static inline int radeon_cs_end(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line)
+{
+ return cs->csm->funcs->cs_end(cs, file, func, line);
+}
+
+static inline int radeon_cs_emit(struct radeon_cs *cs)
+{
+ return cs->csm->funcs->cs_emit(cs);
+}
+
+static inline int radeon_cs_destroy(struct radeon_cs *cs)
+{
+ return cs->csm->funcs->cs_destroy(cs);
+}
+
+static inline int radeon_cs_erase(struct radeon_cs *cs)
+{
+ return cs->csm->funcs->cs_erase(cs);
+}
+
+static inline int radeon_cs_need_flush(struct radeon_cs *cs)
+{
+ return cs->csm->funcs->cs_need_flush(cs);
+}
+
+static inline void radeon_cs_print(struct radeon_cs *cs, FILE *file)
+{
+ cs->csm->funcs->cs_print(cs, file);
+}
+
+static inline int radeon_cs_space_check(struct radeon_cs *cs,
+ struct radeon_cs_space_check *bos,
+ int num_bo)
+{
+ return cs->csm->funcs->cs_space_check(cs, bos, num_bo);
+}
+
+static inline void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
+{
+
+ if (domain == RADEON_GEM_DOMAIN_VRAM)
+ cs->csm->vram_limit = limit;
+ else
+ cs->csm->gart_limit = limit;
+}
+
+static inline void radeon_cs_write_dword(struct radeon_cs *cs, uint32_t dword)
+{
+ cs->packets[cs->cdw++] = dword;
+ if (cs->section) {
+ cs->section_cdw++;
+ }
+}
+
+static inline void radeon_cs_write_qword(struct radeon_cs *cs, uint64_t qword)
+{
+
+ memcpy(cs->packets + cs->cdw, &qword, sizeof(qword));
+ cs->cdw+=2;
+ if (cs->section) {
+ cs->section_cdw+=2;
+ }
+}
+#endif
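
Not part of the patch: a minimal usage sketch of the wrapper API above. It assumes a manager csm such as the one returned by radeon_cs_manager_legacy_ctor() later in this series; the header and payload dwords are hypothetical placeholders:

    struct radeon_cs *cs = radeon_cs_create(csm, 1024);   /* request ~1k dwords */
    radeon_cs_begin(cs, 2, __FILE__, __FUNCTION__, __LINE__);
    radeon_cs_write_dword(cs, some_packet_header);        /* hypothetical header dword */
    radeon_cs_write_dword(cs, some_payload);              /* hypothetical payload dword */
    radeon_cs_end(cs, __FILE__, __FUNCTION__, __LINE__);  /* checks the section used exactly 2 dwords */
    radeon_cs_emit(cs);                                   /* hand the buffer to the kernel */
    radeon_cs_erase(cs);                                  /* recycle the cs for the next batch */
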
diff --git a/src/mesa/drivers/dri/radeon/radeon_cs_legacy.c b/src/mesa/drivers/dri/radeon/radeon_cs_legacy.c
new file mode 100644
index 0000000..b47b095
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_cs_legacy.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#include <errno.h>
+
+#include "radeon_bocs_wrapper.h"
+
+struct cs_manager_legacy {
+ struct radeon_cs_manager base;
+ struct radeon_context *ctx;
+ /* hack for scratch stuff */
+ uint32_t pending_age;
+ uint32_t pending_count;
+
+
+};
+
+struct cs_reloc_legacy {
+ struct radeon_cs_reloc base;
+ uint32_t cindices;
+ uint32_t *indices;
+};
+
+
+static struct radeon_cs *cs_create(struct radeon_cs_manager *csm,
+ uint32_t ndw)
+{
+ struct radeon_cs *cs;
+
+ cs = (struct radeon_cs*)calloc(1, sizeof(struct radeon_cs));
+ if (cs == NULL) {
+ return NULL;
+ }
+ cs->csm = csm;
+ cs->ndw = (ndw + 0x3FF) & (~0x3FF);
+ cs->packets = (uint32_t*)malloc(4*cs->ndw);
+ if (cs->packets == NULL) {
+ free(cs);
+ return NULL;
+ }
+ cs->relocs_total_size = 0;
+ return cs;
+}
+
+static int cs_write_reloc(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domain,
+ uint32_t write_domain,
+ uint32_t flags)
+{
+ struct cs_reloc_legacy *relocs;
+ int i;
+
+ relocs = (struct cs_reloc_legacy *)cs->relocs;
+ /* check domains */
+ if ((read_domain && write_domain) || (!read_domain && !write_domain)) {
+ /* in one CS a bo can only be in read or write domain but not
+ * in read & write domain at the same time
+ */
+ return -EINVAL;
+ }
+ if (read_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+ if (write_domain == RADEON_GEM_DOMAIN_CPU) {
+ return -EINVAL;
+ }
+ /* check if bo is already referenced */
+ for(i = 0; i < cs->crelocs; i++) {
+ uint32_t *indices;
+
+ if (relocs[i].base.bo->handle == bo->handle) {
+ /* Domains must be read or write. We already checked above
+ * that one of the read or write domains was set in the
+ * arguments, so here we only need to check that if the
+ * previous reloc had the read domain set, read_domain must
+ * also be set for this new relocation.
+ */
+ if (relocs[i].base.read_domain && !read_domain) {
+ return -EINVAL;
+ }
+ if (relocs[i].base.write_domain && !write_domain) {
+ return -EINVAL;
+ }
+ relocs[i].base.read_domain |= read_domain;
+ relocs[i].base.write_domain |= write_domain;
+ /* save index */
+ relocs[i].cindices++;
+ indices = (uint32_t*)realloc(relocs[i].indices,
+ relocs[i].cindices * 4);
+ if (indices == NULL) {
+ relocs[i].cindices -= 1;
+ return -ENOMEM;
+ }
+ relocs[i].indices = indices;
+ relocs[i].indices[relocs[i].cindices - 1] = cs->cdw - 1;
+ return 0;
+ }
+ }
+ /* add bo to reloc */
+ relocs = (struct cs_reloc_legacy*)
+ realloc(cs->relocs,
+ sizeof(struct cs_reloc_legacy) * (cs->crelocs + 1));
+ if (relocs == NULL) {
+ return -ENOMEM;
+ }
+ cs->relocs = relocs;
+ relocs[cs->crelocs].base.bo = bo;
+ relocs[cs->crelocs].base.read_domain = read_domain;
+ relocs[cs->crelocs].base.write_domain = write_domain;
+ relocs[cs->crelocs].base.flags = flags;
+ relocs[cs->crelocs].indices = (uint32_t*)malloc(4);
+ if (relocs[cs->crelocs].indices == NULL) {
+ return -ENOMEM;
+ }
+ relocs[cs->crelocs].indices[0] = cs->cdw - 1;
+ relocs[cs->crelocs].cindices = 1;
+ cs->relocs_total_size += radeon_bo_legacy_relocs_size(bo);
+ cs->crelocs++;
+ radeon_bo_ref(bo);
+ return 0;
+}
+
+static int cs_begin(struct radeon_cs *cs,
+ uint32_t ndw,
+ const char *file,
+ const char *func,
+ int line)
+{
+ if (cs->section) {
+ fprintf(stderr, "CS already in a section(%s,%s,%d)\n",
+ cs->section_file, cs->section_func, cs->section_line);
+ fprintf(stderr, "CS can't start section(%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ cs->section = 1;
+ cs->section_ndw = ndw;
+ cs->section_cdw = 0;
+ cs->section_file = file;
+ cs->section_func = func;
+ cs->section_line = line;
+
+
+ if (cs->cdw + ndw > cs->ndw) {
+ uint32_t tmp, *ptr;
+ int num = (ndw > 0x3FF) ? ndw : 0x3FF;
+
+ tmp = (cs->cdw + 1 + num) & (~num);
+ ptr = (uint32_t*)realloc(cs->packets, 4 * tmp);
+ if (ptr == NULL) {
+ return -ENOMEM;
+ }
+ cs->packets = ptr;
+ cs->ndw = tmp;
+ }
+
+ return 0;
+}
+
+static int cs_end(struct radeon_cs *cs,
+ const char *file,
+ const char *func,
+ int line)
+
+{
+ if (!cs->section) {
+ fprintf(stderr, "CS no section to end at (%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ cs->section = 0;
+ if (cs->section_ndw != cs->section_cdw) {
+ fprintf(stderr, "CS section size mismatch start at (%s,%s,%d) %d vs %d\n",
+ cs->section_file, cs->section_func, cs->section_line, cs->section_ndw, cs->section_cdw);
+ fprintf(stderr, "CS section end at (%s,%s,%d)\n",
+ file, func, line);
+ return -EPIPE;
+ }
+ return 0;
+}
+
+static int cs_process_relocs(struct radeon_cs *cs)
+{
+ struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
+ struct cs_reloc_legacy *relocs;
+ int i, j, r;
+
+ csm = (struct cs_manager_legacy*)cs->csm;
+ relocs = (struct cs_reloc_legacy *)cs->relocs;
+ restart:
+ for (i = 0; i < cs->crelocs; i++) {
+ for (j = 0; j < relocs[i].cindices; j++) {
+ uint32_t soffset, eoffset;
+
+ r = radeon_bo_legacy_validate(relocs[i].base.bo,
+ &soffset, &eoffset);
+ if (r == -EAGAIN)
+ goto restart;
+ if (r) {
+ fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
+ relocs[i].base.bo, soffset, eoffset);
+ return r;
+ }
+ cs->packets[relocs[i].indices[j]] += soffset;
+ if (cs->packets[relocs[i].indices[j]] >= eoffset) {
+ /* radeon_bo_debug(relocs[i].base.bo, 12); */
+ fprintf(stderr, "validated %p [0x%08X, 0x%08X]\n",
+ relocs[i].base.bo, soffset, eoffset);
+ fprintf(stderr, "above end: %p 0x%08X 0x%08X\n",
+ relocs[i].base.bo,
+ cs->packets[relocs[i].indices[j]],
+ eoffset);
+ exit(0);
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
+
+static int cs_set_age(struct radeon_cs *cs)
+{
+ struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
+ struct cs_reloc_legacy *relocs;
+ int i;
+
+ relocs = (struct cs_reloc_legacy *)cs->relocs;
+ for (i = 0; i < cs->crelocs; i++) {
+ radeon_bo_legacy_pending(relocs[i].base.bo, csm->pending_age);
+ radeon_bo_unref(relocs[i].base.bo);
+ }
+ return 0;
+}
+
+static void dump_cmdbuf(struct radeon_cs *cs)
+{
+ int i;
+ for (i = 0; i < cs->cdw; i++){
+ fprintf(stderr,"%x: %08x\n", i, cs->packets[i]);
+ }
+
+}
+static int cs_emit(struct radeon_cs *cs)
+{
+ struct cs_manager_legacy *csm = (struct cs_manager_legacy*)cs->csm;
+ drm_radeon_cmd_buffer_t cmd;
+ drm_r300_cmd_header_t age;
+ uint64_t ull;
+ int r;
+
+ csm->ctx->vtbl.emit_cs_header(cs, csm->ctx);
+
+ /* append buffer age */
+ if (IS_R300_CLASS(csm->ctx->radeonScreen)) {
+ age.scratch.cmd_type = R300_CMD_SCRATCH;
+ /* Scratch register 2 corresponds to what radeonGetAge polls */
+ csm->pending_age = 0;
+ csm->pending_count = 1;
+ ull = (uint64_t) (intptr_t) &csm->pending_age;
+ age.scratch.reg = 2;
+ age.scratch.n_bufs = 1;
+ age.scratch.flags = 0;
+ radeon_cs_write_dword(cs, age.u);
+ radeon_cs_write_qword(cs, ull);
+ radeon_cs_write_dword(cs, 0);
+ }
+
+ r = cs_process_relocs(cs);
+ if (r) {
+ return r;
+ }
+
+ cmd.buf = (char *)cs->packets;
+ cmd.bufsz = cs->cdw * 4;
+ if (csm->ctx->state.scissor.enabled) {
+ cmd.nbox = csm->ctx->state.scissor.numClipRects;
+ cmd.boxes = (drm_clip_rect_t *) csm->ctx->state.scissor.pClipRects;
+ } else {
+ cmd.nbox = csm->ctx->numClipRects;
+ cmd.boxes = (drm_clip_rect_t *) csm->ctx->pClipRects;
+ }
+
+ //dump_cmdbuf(cs);
+
+ r = drmCommandWrite(cs->csm->fd, DRM_RADEON_CMDBUF, &cmd, sizeof(cmd));
+ if (r) {
+ return r;
+ }
+ if (!IS_R300_CLASS(csm->ctx->radeonScreen)) {
+ drm_radeon_irq_emit_t emit_cmd;
+ emit_cmd.irq_seq = &csm->pending_age;
+ r = drmCommandWrite(cs->csm->fd, DRM_RADEON_IRQ_EMIT, &emit_cmd, sizeof(emit_cmd));
+ if (r) {
+ return r;
+ }
+ }
+ cs_set_age(cs);
+
+ cs->csm->read_used = 0;
+ cs->csm->vram_write_used = 0;
+ cs->csm->gart_write_used = 0;
+ return 0;
+}
+
+static inline void cs_free_reloc(void *relocs_p, int crelocs)
+{
+ struct cs_reloc_legacy *relocs = relocs_p;
+ int i;
+ if (!relocs_p)
+ return;
+ for (i = 0; i < crelocs; i++)
+ free(relocs[i].indices);
+}
+
+static int cs_destroy(struct radeon_cs *cs)
+{
+ cs_free_reloc(cs->relocs, cs->crelocs);
+ free(cs->relocs);
+ free(cs->packets);
+ free(cs);
+ return 0;
+}
+
+static int cs_erase(struct radeon_cs *cs)
+{
+ cs_free_reloc(cs->relocs, cs->crelocs);
+ free(cs->relocs);
+ cs->relocs_total_size = 0;
+ cs->relocs = NULL;
+ cs->crelocs = 0;
+ cs->cdw = 0;
+ cs->section = 0;
+ return 0;
+}
+
+static int cs_need_flush(struct radeon_cs *cs)
+{
+ /* this function used to flush when the BO usage got to
+ * a certain size, now the higher levels handle this better */
+ return 0;
+}
+
+static void cs_print(struct radeon_cs *cs, FILE *file)
+{
+}
+
+static int cs_check_space(struct radeon_cs *cs, struct radeon_cs_space_check *bos, int num_bo)
+{
+ struct radeon_cs_manager *csm = cs->csm;
+ int this_op_read = 0, this_op_gart_write = 0, this_op_vram_write = 0;
+ uint32_t read_domains, write_domain;
+ int i;
+ struct radeon_bo *bo;
+
+ /* check the totals for this operation */
+
+ if (num_bo == 0)
+ return 0;
+
+ /* prepare */
+ for (i = 0; i < num_bo; i++) {
+ bo = bos[i].bo;
+
+ bos[i].new_accounted = 0;
+ read_domains = bos[i].read_domains;
+ write_domain = bos[i].write_domain;
+
+ /* pinned bos don't count */
+ if (radeon_legacy_bo_is_static(bo))
+ continue;
+
+ /* already accounted this bo */
+ if (write_domain && (write_domain == bo->space_accounted))
+ continue;
+
+ if (read_domains && ((read_domains << 16) == bo->space_accounted))
+ continue;
+
+ if (bo->space_accounted == 0) {
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM)
+ this_op_vram_write += bo->size;
+ else if (write_domain == RADEON_GEM_DOMAIN_GTT)
+ this_op_gart_write += bo->size;
+ else
+ this_op_read += bo->size;
+ bos[i].new_accounted = (read_domains << 16) | write_domain;
+ } else {
+ uint16_t old_read, old_write;
+
+ old_read = bo->space_accounted >> 16;
+ old_write = bo->space_accounted & 0xffff;
+
+ if (write_domain && (old_read & write_domain)) {
+ bos[i].new_accounted = write_domain;
+ /* moving from read to a write domain */
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
+ this_op_read -= bo->size;
+ this_op_vram_write += bo->size;
+ } else if (write_domain == RADEON_GEM_DOMAIN_GTT) {
+ this_op_read -= bo->size;
+ this_op_gart_write += bo->size;
+ }
+ } else if (read_domains & old_write) {
+ bos[i].new_accounted = bo->space_accounted & 0xffff;
+ } else {
+ /* rewrite the domains */
+ if (write_domain != old_write)
+ fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
+ if (read_domains != old_read)
+ fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
+ return RADEON_CS_SPACE_FLUSH;
+ }
+ }
+ }
+
+ if (this_op_read < 0)
+ this_op_read = 0;
+
+ /* check sizes - operation first */
+ if ((this_op_read + this_op_gart_write > csm->gart_limit) ||
+ (this_op_vram_write > csm->vram_limit)) {
+ return RADEON_CS_SPACE_OP_TO_BIG;
+ }
+
+ if (((csm->vram_write_used + this_op_vram_write) > csm->vram_limit) ||
+ ((csm->read_used + csm->gart_write_used + this_op_gart_write + this_op_read) > csm->gart_limit)) {
+ return RADEON_CS_SPACE_FLUSH;
+ }
+
+ csm->gart_write_used += this_op_gart_write;
+ csm->vram_write_used += this_op_vram_write;
+ csm->read_used += this_op_read;
+ /* commit */
+ for (i = 0; i < num_bo; i++) {
+ bo = bos[i].bo;
+ bo->space_accounted = bos[i].new_accounted;
+ }
+
+ return RADEON_CS_SPACE_OK;
+}
+
+static struct radeon_cs_funcs radeon_cs_legacy_funcs = {
+ cs_create,
+ cs_write_reloc,
+ cs_begin,
+ cs_end,
+ cs_emit,
+ cs_destroy,
+ cs_erase,
+ cs_need_flush,
+ cs_print,
+ cs_check_space
+};
+
+struct radeon_cs_manager *radeon_cs_manager_legacy_ctor(struct radeon_context *ctx)
+{
+ struct cs_manager_legacy *csm;
+
+ csm = (struct cs_manager_legacy*)
+ calloc(1, sizeof(struct cs_manager_legacy));
+ if (csm == NULL) {
+ return NULL;
+ }
+ csm->base.funcs = &radeon_cs_legacy_funcs;
+ csm->base.fd = ctx->dri.fd;
+ csm->ctx = ctx;
+ csm->pending_age = 1;
+ return (struct radeon_cs_manager*)csm;
+}
+
+void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager *csm)
+{
+ free(csm);
+}
+
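
One detail of cs_check_space() worth spelling out (commentary, not part of the patch): per-BO accounting is packed into a single uint32_t, read domains in the upper 16 bits and the write domain in the lower 16, mirroring the bos[i].new_accounted assignments above. A hypothetical helper showing the encoding:

    static uint32_t encode_space_accounting(uint32_t read_domains,
                                            uint32_t write_domain)
    {
        /* matches bos[i].new_accounted = (read_domains << 16) | write_domain */
        return (read_domains << 16) | write_domain;
    }
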
diff --git a/src/mesa/drivers/dri/radeon/radeon_cs_legacy.h b/src/mesa/drivers/dri/radeon/radeon_cs_legacy.h
new file mode 100644
index 0000000..e177b4b
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_cs_legacy.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright © 2008 Nicolai Haehnle
+ * Copyright © 2008 Jérôme Glisse
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+/*
+ * Authors:
+ * Aapo Tahkola <aet@rasterburn.org>
+ * Nicolai Haehnle <prefect_@gmx.net>
+ * Jérôme Glisse <glisse@freedesktop.org>
+ */
+#ifndef RADEON_CS_LEGACY_H
+#define RADEON_CS_LEGACY_H
+
+#include "radeon_common.h"
+
+struct radeon_cs_manager *radeon_cs_manager_legacy_ctor(struct radeon_context *ctx);
+void radeon_cs_manager_legacy_dtor(struct radeon_cs_manager *csm);
+
+#endif
diff --git a/src/mesa/drivers/dri/radeon/radeon_dma.c b/src/mesa/drivers/dri/radeon/radeon_dma.c
new file mode 100644
index 0000000..393b121
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_dma.c
@@ -0,0 +1,323 @@
+/**************************************************************************
+
+Copyright (C) 2004 Nicolai Haehnle.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#include "radeon_common.h"
+
+#if defined(USE_X86_ASM)
+#define COPY_DWORDS( dst, src, nr ) \
+do { \
+ int __tmp; \
+ __asm__ __volatile__( "rep ; movsl" \
+ : "=%c" (__tmp), "=D" (dst), "=S" (__tmp) \
+ : "0" (nr), \
+ "D" ((long)dst), \
+ "S" ((long)src) ); \
+} while (0)
+#else
+#define COPY_DWORDS( dst, src, nr ) \
+do { \
+ int j; \
+ for ( j = 0 ; j < nr ; j++ ) \
+ dst[j] = ((int *)src)[j]; \
+ dst += nr; \
+} while (0)
+#endif
+
+static void radeonEmitVec4(uint32_t *out, GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & DEBUG_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 4)
+ COPY_DWORDS(out, data, count);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out++;
+ data += stride;
+ }
+}
+
+void radeonEmitVec8(uint32_t *out, GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & DEBUG_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 8)
+ COPY_DWORDS(out, data, count * 2);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out += 2;
+ data += stride;
+ }
+}
+
+void radeonEmitVec12(uint32_t *out, GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & DEBUG_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 12) {
+ COPY_DWORDS(out, data, count * 3);
+ }
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out[2] = *(int *)(data + 8);
+ out += 3;
+ data += stride;
+ }
+}
+
+static void radeonEmitVec16(uint32_t *out, GLvoid * data, int stride, int count)
+{
+ int i;
+
+ if (RADEON_DEBUG & DEBUG_VERTS)
+ fprintf(stderr, "%s count %d stride %d out %p data %p\n",
+ __FUNCTION__, count, stride, (void *)out, (void *)data);
+
+ if (stride == 16)
+ COPY_DWORDS(out, data, count * 4);
+ else
+ for (i = 0; i < count; i++) {
+ out[0] = *(int *)data;
+ out[1] = *(int *)(data + 4);
+ out[2] = *(int *)(data + 8);
+ out[3] = *(int *)(data + 12);
+ out += 4;
+ data += stride;
+ }
+}
+
+void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
+ GLvoid * data, int size, int stride, int count)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ uint32_t *out;
+
+ if (stride == 0) {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * 4, 32);
+ count = 1;
+ aos->stride = 0;
+ } else {
+ radeonAllocDmaRegion(rmesa, &aos->bo, &aos->offset, size * count * 4, 32);
+ aos->stride = size;
+ }
+
+ aos->components = size;
+ aos->count = count;
+
+ out = (uint32_t*)((char*)aos->bo->ptr + aos->offset);
+ switch (size) {
+ case 1: radeonEmitVec4(out, data, stride, count); break;
+ case 2: radeonEmitVec8(out, data, stride, count); break;
+ case 3: radeonEmitVec12(out, data, stride, count); break;
+ case 4: radeonEmitVec16(out, data, stride, count); break;
+ default:
+ assert(0);
+ break;
+ }
+}
+
+void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size)
+{
+ struct radeon_cs_space_check bos[1];
+ int flushed = 0, ret;
+
+ size = MAX2(size, MAX_DMA_BUF_SZ * 16);
+
+ if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA))
+ fprintf(stderr, "%s\n", __FUNCTION__);
+
+ if (rmesa->dma.flush) {
+ rmesa->dma.flush(rmesa->glCtx);
+ }
+
+ if (rmesa->dma.nr_released_bufs > 4) {
+ rcommonFlushCmdBuf(rmesa, __FUNCTION__);
+ rmesa->dma.nr_released_bufs = 0;
+ }
+
+ if (rmesa->dma.current) {
+ radeon_bo_unmap(rmesa->dma.current);
+ radeon_bo_unref(rmesa->dma.current);
+ rmesa->dma.current = 0;
+ }
+
+again_alloc:
+ rmesa->dma.current = radeon_bo_open(rmesa->radeonScreen->bom,
+ 0, size, 4, RADEON_GEM_DOMAIN_GTT,
+ 0);
+
+ if (!rmesa->dma.current) {
+ rcommonFlushCmdBuf(rmesa, __FUNCTION__);
+ rmesa->dma.nr_released_bufs = 0;
+ goto again_alloc;
+ }
+
+ rmesa->dma.current_used = 0;
+ rmesa->dma.current_vertexptr = 0;
+
+ bos[0].bo = rmesa->dma.current;
+ bos[0].read_domains = RADEON_GEM_DOMAIN_GTT;
+ bos[0].write_domain = 0;
+ bos[0].new_accounted = 0;
+
+ ret = radeon_cs_space_check(rmesa->cmdbuf.cs, bos, 1);
+ if (ret == RADEON_CS_SPACE_OP_TO_BIG) {
+ fprintf(stderr,"Got OPERATION TOO BIG ILLEGAL - this cannot happen");
+ assert(0);
+ } else if (ret == RADEON_CS_SPACE_FLUSH) {
+ rcommonFlushCmdBuf(rmesa, __FUNCTION__);
+ if (flushed) {
+ fprintf(stderr,"flushed but still no space\n");
+ assert(0);
+ }
+ flushed = 1;
+ goto again_alloc;
+ }
+ radeon_bo_map(rmesa->dma.current, 1);
+}
+
+/* Allocates a region from rmesa->dma.current. If there isn't enough
+ * space in current, grab a new buffer (and discard what was left of current)
+ */
+void radeonAllocDmaRegion(radeonContextPtr rmesa,
+ struct radeon_bo **pbo, int *poffset,
+ int bytes, int alignment)
+{
+ if (RADEON_DEBUG & DEBUG_IOCTL)
+ fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
+
+ if (rmesa->dma.flush)
+ rmesa->dma.flush(rmesa->glCtx);
+
+ assert(rmesa->dma.current_used == rmesa->dma.current_vertexptr);
+
+ alignment--;
+ rmesa->dma.current_used = (rmesa->dma.current_used + alignment) & ~alignment;
+
+ if (!rmesa->dma.current || rmesa->dma.current_used + bytes > rmesa->dma.current->size)
+ radeonRefillCurrentDmaRegion(rmesa, (bytes + 15) & ~15);
+
+ *poffset = rmesa->dma.current_used;
+ *pbo = rmesa->dma.current;
+ radeon_bo_ref(*pbo);
+
+ /* Always align to at least 16 bytes */
+ rmesa->dma.current_used = (rmesa->dma.current_used + bytes + 15) & ~15;
+ rmesa->dma.current_vertexptr = rmesa->dma.current_used;
+
+ assert(rmesa->dma.current_used <= rmesa->dma.current->size);
+}
+
+void radeonReleaseDmaRegion(radeonContextPtr rmesa)
+{
+ if (RADEON_DEBUG & DEBUG_IOCTL)
+ fprintf(stderr, "%s %p\n", __FUNCTION__, rmesa->dma.current);
+ if (rmesa->dma.current) {
+ rmesa->dma.nr_released_bufs++;
+ radeon_bo_unmap(rmesa->dma.current);
+ radeon_bo_unref(rmesa->dma.current);
+ }
+ rmesa->dma.current = NULL;
+}
+
+
+/* Flush vertices in the current dma region.
+ */
+void rcommon_flush_last_swtcl_prim( GLcontext *ctx )
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ struct radeon_dma *dma = &rmesa->dma;
+
+
+ if (RADEON_DEBUG & DEBUG_IOCTL)
+ fprintf(stderr, "%s\n", __FUNCTION__);
+ dma->flush = NULL;
+
+ if (dma->current) {
+ GLuint current_offset = dma->current_used;
+
+ assert (dma->current_used +
+ rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
+ dma->current_vertexptr);
+
+ if (dma->current_used != dma->current_vertexptr) {
+ dma->current_used = dma->current_vertexptr;
+
+ rmesa->vtbl.swtcl_flush(ctx, current_offset);
+ }
+ rmesa->swtcl.numverts = 0;
+ }
+}
+/* Alloc space in the current dma region.
+ */
+void *
+rcommonAllocDmaLowVerts( radeonContextPtr rmesa, int nverts, int vsize )
+{
+ GLuint bytes = vsize * nverts;
+ void *head;
+
+ if (!rmesa->dma.current || rmesa->dma.current_vertexptr + bytes > rmesa->dma.current->size) {
+ radeonRefillCurrentDmaRegion(rmesa, bytes);
+ }
+
+ if (!rmesa->dma.flush) {
+ rmesa->glCtx->Driver.NeedFlush |= FLUSH_STORED_VERTICES;
+ rmesa->dma.flush = rcommon_flush_last_swtcl_prim;
+ }
+
+ ASSERT( vsize == rmesa->swtcl.vertex_size * 4 );
+ ASSERT( rmesa->dma.flush == rcommon_flush_last_swtcl_prim );
+ ASSERT( rmesa->dma.current_used +
+ rmesa->swtcl.numverts * rmesa->swtcl.vertex_size * 4 ==
+ rmesa->dma.current_vertexptr );
+
+ head = (rmesa->dma.current->ptr + rmesa->dma.current_vertexptr);
+ rmesa->dma.current_vertexptr += bytes;
+ rmesa->swtcl.numverts += nverts;
+ return head;
+}
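
The allocator above leans on the usual power-of-two round-up idiom; a standalone restatement (commentary, not part of the patch):

    /* Round value up to the next multiple of align, where align is a
     * power of two: radeonAllocDmaRegion() does alignment-- and then
     * (current_used + alignment) & ~alignment, which is the same thing.
     * Example: value 13, align 16 -> (13 + 15) & ~15 == 16.
     */
    static int round_up_pow2(int value, int align)
    {
        return (value + align - 1) & ~(align - 1);
    }
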
diff --git a/src/mesa/drivers/dri/radeon/radeon_dma.h b/src/mesa/drivers/dri/radeon/radeon_dma.h
new file mode 100644
index 0000000..cee3744
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_dma.h
@@ -0,0 +1,51 @@
+/**************************************************************************
+
+Copyright (C) 2004 Nicolai Haehnle.
+Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+
+The Weather Channel (TM) funded Tungsten Graphics to develop the
+initial release of the Radeon 8500 driver under the XFree86 license.
+This notice must be preserved.
+
+All Rights Reserved.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ATI, VA LINUX SYSTEMS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef RADEON_DMA_H
+#define RADEON_DMA_H
+
+void radeonEmitVec8(uint32_t *out, GLvoid * data, int stride, int count);
+void radeonEmitVec12(uint32_t *out, GLvoid * data, int stride, int count);
+
+void rcommon_emit_vector(GLcontext * ctx, struct radeon_aos *aos,
+ GLvoid * data, int size, int stride, int count);
+
+void radeonRefillCurrentDmaRegion(radeonContextPtr rmesa, int size);
+void radeonAllocDmaRegion(radeonContextPtr rmesa,
+ struct radeon_bo **pbo, int *poffset,
+ int bytes, int alignment);
+void radeonReleaseDmaRegion(radeonContextPtr rmesa);
+
+void rcommon_flush_last_swtcl_prim(GLcontext *ctx);
+
+void *rcommonAllocDmaLowVerts(radeonContextPtr rmesa, int nverts, int vsize);
+#endif
diff --git a/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c
new file mode 100644
index 0000000..3203ee1
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2008 Nicolai Haehnle.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "radeon_mipmap_tree.h"
+
+#include <errno.h>
+#include <unistd.h>
+
+#include "main/simple_list.h"
+#include "main/texcompress.h"
+#include "main/texformat.h"
+
+static GLuint radeon_compressed_texture_size(GLcontext *ctx,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLuint mesaFormat)
+{
+ GLuint size = _mesa_compressed_texture_size(ctx, width, height, depth, mesaFormat);
+
+ if (mesaFormat == MESA_FORMAT_RGB_DXT1 ||
+ mesaFormat == MESA_FORMAT_RGBA_DXT1) {
+ if (width + 3 < 8) /* width one block */
+ size = size * 4;
+ else if (width + 3 < 16)
+ size = size * 2;
+ } else {
+ /* DXT3/5, 16 bytes per block */
+ // WARN_ONCE("DXT 3/5 suffers from multitexturing problems!\n");
+ if (width + 3 < 8)
+ size = size * 2;
+ }
+
+ return size;
+}
+
+
+static int radeon_compressed_num_bytes(GLuint mesaFormat)
+{
+ int bytes = 0;
+ switch(mesaFormat) {
+
+ case MESA_FORMAT_RGB_FXT1:
+ case MESA_FORMAT_RGBA_FXT1:
+ case MESA_FORMAT_RGB_DXT1:
+ case MESA_FORMAT_RGBA_DXT1:
+ bytes = 2;
+ break;
+
+ case MESA_FORMAT_RGBA_DXT3:
+ case MESA_FORMAT_RGBA_DXT5:
+ bytes = 4;
+ default:
+ break;
+ }
+
+ return bytes;
+}
+
+/**
+ * Compute sizes and fill in offset and blit information for the given
+ * image (determined by \p face and \p level).
+ *
+ * \param curOffset points to the offset at which the image is to be stored
+ * and is updated by this function according to the size of the image.
+ */
+static void compute_tex_image_offset(radeon_mipmap_tree *mt,
+ GLuint face, GLuint level, GLuint* curOffset)
+{
+ radeon_mipmap_level *lvl = &mt->levels[level];
+
+ /* Find image size in bytes */
+ if (mt->compressed) {
+ /* TODO: Is this correct? Need test cases for compressed textures! */
+ GLuint align;
+
+ lvl->rowstride = (lvl->width * mt->bpp + 63) & ~63;
+ lvl->size = radeon_compressed_texture_size(mt->radeon->glCtx,
+ lvl->width, lvl->height, lvl->depth, mt->compressed);
+ } else if (mt->target == GL_TEXTURE_RECTANGLE_NV) {
+ lvl->rowstride = (lvl->width * mt->bpp + 63) & ~63;
+ lvl->size = lvl->rowstride * lvl->height;
+ } else if (mt->tilebits & RADEON_TXO_MICRO_TILE) {
+ /* tile pattern is 16 bytes x2. mipmaps stay 32 byte aligned,
+ * though the actual offset may be different (if texture is less than
+ * 32 bytes width) to the untiled case */
+ lvl->rowstride = (lvl->width * mt->bpp * 2 + 31) & ~31;
+ lvl->size = lvl->rowstride * ((lvl->height + 1) / 2) * lvl->depth;
+ } else {
+ lvl->rowstride = (lvl->width * mt->bpp + 31) & ~31;
+ lvl->size = lvl->rowstride * lvl->height * lvl->depth;
+ }
+ assert(lvl->size > 0);
+
+ /* All images are aligned to a 32-byte offset */
+ *curOffset = (*curOffset + 0x1f) & ~0x1f;
+ lvl->faces[face].offset = *curOffset;
+ *curOffset += lvl->size;
+
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr,
+ "level %d, face %d: rs:%d %dx%d at %d\n",
+ level, face, lvl->rowstride, lvl->width, lvl->height, lvl->faces[face].offset);
+}
+
+static GLuint minify(GLuint size, GLuint levels)
+{
+ size = size >> levels;
+ if (size < 1)
+ size = 1;
+ return size;
+}
+
+static void calculate_miptree_layout(radeon_mipmap_tree *mt)
+{
+ GLuint curOffset;
+ GLuint numLevels;
+ GLuint i;
+
+ numLevels = mt->lastLevel - mt->firstLevel + 1;
+ assert(numLevels <= RADEON_MAX_TEXTURE_LEVELS);
+
+ curOffset = 0;
+ for(i = 0; i < numLevels; i++) {
+ GLuint face;
+
+ mt->levels[i].width = minify(mt->width0, i);
+ mt->levels[i].height = minify(mt->height0, i);
+ mt->levels[i].depth = minify(mt->depth0, i);
+
+ for(face = 0; face < mt->faces; face++)
+ compute_tex_image_offset(mt, face, i, &curOffset);
+ }
+
+ /* Note the required size in memory */
+ mt->totalsize = (curOffset + RADEON_OFFSET_MASK) & ~RADEON_OFFSET_MASK;
+}
+
+
+/**
+ * Create a new mipmap tree, calculate its layout and allocate memory.
+ */
+radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa, radeonTexObj *t,
+ GLenum target, GLuint firstLevel, GLuint lastLevel,
+ GLuint width0, GLuint height0, GLuint depth0,
+ GLuint bpp, GLuint tilebits, GLuint compressed)
+{
+ radeon_mipmap_tree *mt = CALLOC_STRUCT(_radeon_mipmap_tree);
+
+ mt->radeon = rmesa;
+ mt->refcount = 1;
+ mt->t = t;
+ mt->target = target;
+ mt->faces = (target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ mt->firstLevel = firstLevel;
+ mt->lastLevel = lastLevel;
+ mt->width0 = width0;
+ mt->height0 = height0;
+ mt->depth0 = depth0;
+ mt->bpp = compressed ? radeon_compressed_num_bytes(compressed) : bpp;
+ mt->tilebits = tilebits;
+ mt->compressed = compressed;
+
+ calculate_miptree_layout(mt);
+
+ mt->bo = radeon_bo_open(rmesa->radeonScreen->bom,
+ 0, mt->totalsize, 1024,
+ RADEON_GEM_DOMAIN_VRAM,
+ 0);
+
+ return mt;
+}
+
+void radeon_miptree_reference(radeon_mipmap_tree *mt)
+{
+ mt->refcount++;
+ assert(mt->refcount > 0);
+}
+
+void radeon_miptree_unreference(radeon_mipmap_tree *mt)
+{
+ if (!mt)
+ return;
+
+ assert(mt->refcount > 0);
+ mt->refcount--;
+ if (!mt->refcount) {
+ radeon_bo_unref(mt->bo);
+ free(mt);
+ }
+}
+
+
+/**
+ * Calculate first and last mip levels for the given texture object,
+ * where the dimensions are taken from the given texture image at
+ * the given level.
+ *
+ * Note: level is the OpenGL level number, which is not necessarily the same
+ * as the first level that is actually present.
+ *
+ * The base level image of the given texture face must be non-null,
+ * or this will fail.
+ */
+static void calculate_first_last_level(struct gl_texture_object *tObj,
+ GLuint *pfirstLevel, GLuint *plastLevel,
+ GLuint face, GLuint level)
+{
+ const struct gl_texture_image * const baseImage =
+ tObj->Image[face][level];
+
+ assert(baseImage);
+
+ /* These must be signed values. MinLod and MaxLod can be negative numbers,
+ * and having firstLevel and lastLevel as signed prevents the need for
+ * extra sign checks.
+ */
+ int firstLevel;
+ int lastLevel;
+
+ /* Yes, this looks overly complicated, but it's all needed.
+ */
+ switch (tObj->Target) {
+ case GL_TEXTURE_1D:
+ case GL_TEXTURE_2D:
+ case GL_TEXTURE_3D:
+ case GL_TEXTURE_CUBE_MAP:
+ if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
+ /* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
+ */
+ firstLevel = lastLevel = tObj->BaseLevel;
+ } else {
+ firstLevel = tObj->BaseLevel + (GLint)(tObj->MinLod + 0.5);
+ firstLevel = MAX2(firstLevel, tObj->BaseLevel);
+ firstLevel = MIN2(firstLevel, level + baseImage->MaxLog2);
+ lastLevel = tObj->BaseLevel + (GLint)(tObj->MaxLod + 0.5);
+ lastLevel = MAX2(lastLevel, tObj->BaseLevel);
+ lastLevel = MIN2(lastLevel, level + baseImage->MaxLog2);
+ lastLevel = MIN2(lastLevel, tObj->MaxLevel);
+ lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
+ }
+ break;
+ case GL_TEXTURE_RECTANGLE_NV:
+ case GL_TEXTURE_4D_SGIS:
+ firstLevel = lastLevel = 0;
+ break;
+ default:
+ return;
+ }
+
+ /* save these values */
+ *pfirstLevel = firstLevel;
+ *plastLevel = lastLevel;
+}
+
+
+/**
+ * Checks whether the given miptree can hold the given texture image at the
+ * given face and level.
+ */
+GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
+ struct gl_texture_image *texImage, GLuint face, GLuint level)
+{
+ radeon_mipmap_level *lvl;
+
+ if (face >= mt->faces || level < mt->firstLevel || level > mt->lastLevel)
+ return GL_FALSE;
+
+ if (texImage->IsCompressed != mt->compressed)
+ return GL_FALSE;
+
+ if (!texImage->IsCompressed &&
+ !mt->compressed &&
+ texImage->TexFormat->TexelBytes != mt->bpp)
+ return GL_FALSE;
+
+ lvl = &mt->levels[level - mt->firstLevel];
+ if (lvl->width != texImage->Width ||
+ lvl->height != texImage->Height ||
+ lvl->depth != texImage->Depth)
+ return GL_FALSE;
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Checks whether the given miptree has the right format to store the given texture object.
+ */
+GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj)
+{
+ struct gl_texture_image *firstImage;
+ GLuint compressed;
+ GLuint numfaces = 1;
+ GLuint firstLevel, lastLevel;
+
+ calculate_first_last_level(texObj, &firstLevel, &lastLevel, 0, texObj->BaseLevel);
+ if (texObj->Target == GL_TEXTURE_CUBE_MAP)
+ numfaces = 6;
+
+ firstImage = texObj->Image[0][firstLevel];
+ compressed = firstImage->IsCompressed ? firstImage->TexFormat->MesaFormat : 0;
+
+ return (mt->firstLevel == firstLevel &&
+ mt->lastLevel == lastLevel &&
+ mt->width0 == firstImage->Width &&
+ mt->height0 == firstImage->Height &&
+ mt->depth0 == firstImage->Depth &&
+ mt->bpp == firstImage->TexFormat->TexelBytes &&
+ mt->compressed == compressed);
+}
+
+
+/**
+ * Try to allocate a mipmap tree for the given texture that will fit the
+ * given image in the given position.
+ */
+void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t,
+ struct gl_texture_image *texImage, GLuint face, GLuint level)
+{
+ GLuint compressed = texImage->IsCompressed ? texImage->TexFormat->MesaFormat : 0;
+ GLuint numfaces = 1;
+ GLuint firstLevel, lastLevel;
+
+ assert(!t->mt);
+
+ calculate_first_last_level(&t->base, &firstLevel, &lastLevel, face, level);
+ if (t->base.Target == GL_TEXTURE_CUBE_MAP)
+ numfaces = 6;
+
+ if (level != firstLevel || face >= numfaces)
+ return;
+
+ t->mt = radeon_miptree_create(rmesa, t, t->base.Target,
+ firstLevel, lastLevel,
+ texImage->Width, texImage->Height, texImage->Depth,
+ texImage->TexFormat->TexelBytes, t->tile_bits, compressed);
+}
diff --git a/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.h b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.h
|
|
|
|
new file mode 100644
|
|
|
|
index 0000000..43dfa48
|
|
|
|
--- /dev/null
|
|
|
|
+++ b/src/mesa/drivers/dri/radeon/radeon_mipmap_tree.h
|
|
|
|
@@ -0,0 +1,97 @@
|
|
|
|
+/*
|
|
|
|
+ * Copyright (C) 2008 Nicolai Haehnle.
|
|
|
|
+ *
|
|
|
|
+ * All Rights Reserved.
|
|
|
|
+ *
|
|
|
|
+ * Permission is hereby granted, free of charge, to any person obtaining
|
|
|
|
+ * a copy of this software and associated documentation files (the
|
|
|
|
+ * "Software"), to deal in the Software without restriction, including
|
|
|
|
+ * without limitation the rights to use, copy, modify, merge, publish,
|
|
|
|
+ * distribute, sublicense, and/or sell copies of the Software, and to
|
|
|
|
+ * permit persons to whom the Software is furnished to do so, subject to
|
|
|
|
+ * the following conditions:
|
|
|
|
+ *
|
|
|
|
+ * The above copyright notice and this permission notice (including the
|
|
|
|
+ * next paragraph) shall be included in all copies or substantial
|
|
|
|
+ * portions of the Software.
|
|
|
|
+ *
|
|
|
|
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
|
|
|
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
|
|
|
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
|
|
|
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
|
|
|
|
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
|
|
|
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
|
|
|
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
|
|
+ *
|
|
|
|
+ */
|
|
|
|
+
|
|
|
|
+#ifndef __RADEON_MIPMAP_TREE_H_
|
|
|
|
+#define __RADEON_MIPMAP_TREE_H_
|
|
|
|
+
|
|
|
|
+#include "radeon_common.h"
|
|
|
|
+
|
|
|
|
+typedef struct _radeon_mipmap_tree radeon_mipmap_tree;
|
|
|
|
+typedef struct _radeon_mipmap_level radeon_mipmap_level;
|
|
|
|
+typedef struct _radeon_mipmap_image radeon_mipmap_image;
|
|
|
|
+
|
|
|
|
+struct _radeon_mipmap_image {
|
|
|
|
+ GLuint offset; /** Offset of this image from the start of mipmap tree buffer, in bytes */
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+struct _radeon_mipmap_level {
|
|
|
|
+ GLuint width;
|
|
|
|
+ GLuint height;
|
|
|
|
+ GLuint depth;
|
|
|
|
+ GLuint size; /** Size of each image, in bytes */
|
|
|
|
+ GLuint rowstride; /** in bytes */
|
|
|
|
+ radeon_mipmap_image faces[6];
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+/**
|
|
|
|
+ * A mipmap tree contains texture images in the layout that the hardware
|
|
|
|
+ * expects.
|
|
|
|
+ *
|
|
|
|
+ * The meta-data of mipmap trees is immutable, i.e. you cannot change the
|
|
|
|
+ * layout on-the-fly; however, the texture contents (i.e. texels) can be
|
|
|
|
+ * changed.
|
|
|
|
+ */
|
|
|
|
+struct _radeon_mipmap_tree {
|
|
|
|
+ radeonContextPtr radeon;
|
|
|
|
+ radeonTexObj *t;
|
|
|
|
+ struct radeon_bo *bo;
|
|
|
|
+ GLuint refcount;
|
|
|
|
+
|
|
|
|
+ GLuint totalsize; /** total size of the miptree, in bytes */
|
|
|
|
+
|
|
|
|
+ GLenum target; /** GL_TEXTURE_xxx */
|
|
|
|
+ GLuint faces; /** # of faces: 6 for cubemaps, 1 otherwise */
|
|
|
|
+ GLuint firstLevel; /** First mip level stored in this mipmap tree */
|
|
|
|
+ GLuint lastLevel; /** Last mip level stored in this mipmap tree */
|
|
|
|
+
|
|
|
|
+ GLuint width0; /** Width of firstLevel image */
|
|
|
|
+ GLuint height0; /** Height of firstLevel image */
|
|
|
|
+ GLuint depth0; /** Depth of firstLevel image */
|
|
|
|
+
|
|
|
|
+ GLuint bpp; /** Bytes per texel */
|
|
|
|
+ GLuint tilebits; /** RADEON_TXO_xxx_TILE */
|
|
|
|
+ GLuint compressed; /** MESA_FORMAT_xxx indicating a compressed format, or 0 if uncompressed */
|
|
|
|
+
|
|
|
|
+ radeon_mipmap_level levels[RADEON_MAX_TEXTURE_LEVELS];
|
|
|
|
+};
|
|
|
|
+
|
|
|
|
+radeon_mipmap_tree* radeon_miptree_create(radeonContextPtr rmesa, radeonTexObj *t,
|
|
|
|
+ GLenum target, GLuint firstLevel, GLuint lastLevel,
|
|
|
|
+ GLuint width0, GLuint height0, GLuint depth0,
|
|
|
|
+ GLuint bpp, GLuint tilebits, GLuint compressed);
|
|
|
|
+void radeon_miptree_reference(radeon_mipmap_tree *mt);
|
|
|
|
+void radeon_miptree_unreference(radeon_mipmap_tree *mt);
|
|
|
|
+
|
|
|
|
+GLboolean radeon_miptree_matches_image(radeon_mipmap_tree *mt,
|
|
|
|
+ struct gl_texture_image *texImage, GLuint face, GLuint level);
|
|
|
|
+GLboolean radeon_miptree_matches_texture(radeon_mipmap_tree *mt, struct gl_texture_object *texObj);
|
|
|
|
+void radeon_try_alloc_miptree(radeonContextPtr rmesa, radeonTexObj *t,
|
|
|
|
+ struct gl_texture_image *texImage, GLuint face, GLuint level);
|
|
|
|
+
|
|
|
|
+
|
|
|
|
+#endif /* __RADEON_MIPMAP_TREE_H_ */
|
|
|
|
diff --git a/src/mesa/drivers/dri/radeon/radeon_texture.c b/src/mesa/drivers/dri/radeon/radeon_texture.c
new file mode 100644
index 0000000..63680b4
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_texture.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2008 Nicolai Haehnle.
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "main/glheader.h"
+#include "main/imports.h"
+#include "main/context.h"
+#include "main/convolve.h"
+#include "main/mipmap.h"
+#include "main/texcompress.h"
+#include "main/texformat.h"
+#include "main/texstore.h"
+#include "main/teximage.h"
+#include "main/texobj.h"
+
+#include "xmlpool.h" /* for symbolic values of enum-type options */
+
+#include "radeon_common.h"
+
+#include "radeon_mipmap_tree.h"
+
+
+static void copy_rows(void* dst, GLuint dststride, const void* src, GLuint srcstride,
+ GLuint numrows, GLuint rowsize)
+{
+ assert(rowsize <= dststride);
+ assert(rowsize <= srcstride);
+
+ if (rowsize == srcstride && rowsize == dststride) {
+ memcpy(dst, src, numrows*rowsize);
+ } else {
+ GLuint i;
+ for(i = 0; i < numrows; ++i) {
+ memcpy(dst, src, rowsize);
+ dst += dststride;
+ src += srcstride;
+ }
+ }
+}
+
+/* textures */
+/**
+ * Allocate an empty texture image object.
+ */
+struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx)
+{
+ return CALLOC(sizeof(radeon_texture_image));
+}
+
+/**
+ * Free memory associated with this texture image.
+ */
+void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage)
+{
+ radeon_texture_image* image = get_radeon_texture_image(timage);
+
+ if (image->mt) {
+ radeon_miptree_unreference(image->mt);
+ image->mt = 0;
+ assert(!image->base.Data);
+ } else {
+ _mesa_free_texture_image_data(ctx, timage);
+ }
+ if (image->bo) {
+ radeon_bo_unref(image->bo);
+ image->bo = NULL;
+ }
+ if (timage->Data) {
+ _mesa_free_texmemory(timage->Data);
+ timage->Data = NULL;
+ }
+}
+
+/* Set Data pointer and additional data for mapped texture image */
+static void teximage_set_map_data(radeon_texture_image *image)
+{
+ radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
+
+ image->base.Data = image->mt->bo->ptr + lvl->faces[image->mtface].offset;
+ image->base.RowStride = lvl->rowstride / image->mt->bpp;
+}
+
+
+/**
+ * Map a single texture image for glTexImage and friends.
+ */
+void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable)
+{
+ if (image->mt) {
+ assert(!image->base.Data);
+
+ radeon_bo_map(image->mt->bo, write_enable);
+ teximage_set_map_data(image);
+ }
+}
+
+
+void radeon_teximage_unmap(radeon_texture_image *image)
+{
+ if (image->mt) {
+ assert(image->base.Data);
+
+ image->base.Data = 0;
+ radeon_bo_unmap(image->mt->bo);
+ }
+}
+
+/**
+ * Map a validated texture for reading during software rendering.
+ */
+void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
+{
+ radeonTexObj* t = radeon_tex_obj(texObj);
+ int face, level;
+
+ /* for r100 3D sw fallbacks don't have mt */
+ if (!t->mt)
+ return;
+
+ radeon_bo_map(t->mt->bo, GL_FALSE);
+ for(face = 0; face < t->mt->faces; ++face) {
+ for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
+ teximage_set_map_data(get_radeon_texture_image(texObj->Image[face][level]));
+ }
+}
+
+void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj)
+{
+ radeonTexObj* t = radeon_tex_obj(texObj);
+ int face, level;
+
+ /* for r100 3D sw fallbacks don't have mt */
+ if (!t->mt)
+ return;
+
+ for(face = 0; face < t->mt->faces; ++face) {
+ for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level)
+ texObj->Image[face][level]->Data = 0;
+ }
+ radeon_bo_unmap(t->mt->bo);
+}
+
+GLuint radeon_face_for_target(GLenum target)
+{
+ switch (target) {
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_X:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_X:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y:
+ case GL_TEXTURE_CUBE_MAP_POSITIVE_Z:
+ case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z:
+ return (GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * Wraps Mesa's implementation to ensure that the base level image is mapped.
+ *
+ * This relies on internal details of _mesa_generate_mipmap, in particular
+ * the fact that the memory for recreated texture images is always freed.
+ */
+static void radeon_generate_mipmap(GLcontext *ctx, GLenum target,
+ struct gl_texture_object *texObj)
+{
+ radeonTexObj* t = radeon_tex_obj(texObj);
+ GLuint nr_faces = (t->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
+ int i, face;
+
+
+ _mesa_generate_mipmap(ctx, target, texObj);
+
+ for (face = 0; face < nr_faces; face++) {
+ for (i = texObj->BaseLevel + 1; i < texObj->MaxLevel; i++) {
+ radeon_texture_image *image;
+
+ image = get_radeon_texture_image(texObj->Image[face][i]);
+
+ if (image == NULL)
+ break;
+
+ image->mtlevel = i;
+ image->mtface = face;
+
+ radeon_miptree_unreference(image->mt);
+ image->mt = NULL;
+ }
+ }
+
+}
+
+void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj)
+{
+ GLuint face = radeon_face_for_target(target);
+ radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[face][texObj->BaseLevel]);
+
+ radeon_teximage_map(baseimage, GL_FALSE);
+ radeon_generate_mipmap(ctx, target, texObj);
+ radeon_teximage_unmap(baseimage);
+}
+
+
+/* try to find a format which will only need a memcpy */
+static const struct gl_texture_format *radeonChoose8888TexFormat(radeonContextPtr rmesa,
+ GLenum srcFormat,
+ GLenum srcType)
+{
+ const GLuint ui = 1;
+ const GLubyte littleEndian = *((const GLubyte *)&ui);
+
+ /* r100 can only do this */
+ if (IS_R100_CLASS(rmesa->radeonScreen))
+ return _dri_texformat_argb8888;
+
+ if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
+ (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian)) {
+ return &_mesa_texformat_rgba8888;
+ } else if ((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
+ (srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
+ (srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian)) {
+ return &_mesa_texformat_rgba8888_rev;
+ } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
+ srcType == GL_UNSIGNED_INT_8_8_8_8)) {
+ return &_mesa_texformat_argb8888_rev;
+ } else if (srcFormat == GL_BGRA && ((srcType == GL_UNSIGNED_BYTE && littleEndian) ||
+ srcType == GL_UNSIGNED_INT_8_8_8_8_REV)) {
+ return &_mesa_texformat_argb8888;
+ } else
+ return _dri_texformat_argb8888;
+}
+
+const struct gl_texture_format *radeonChooseTextureFormat(GLcontext * ctx,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ const GLboolean do32bpt =
+ (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_32);
+ const GLboolean force16bpt =
+ (rmesa->texture_depth == DRI_CONF_TEXTURE_DEPTH_FORCE_16);
+ (void)format;
+
+#if 0
+ fprintf(stderr, "InternalFormat=%s(%d) type=%s format=%s\n",
+ _mesa_lookup_enum_by_nr(internalFormat), internalFormat,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+ fprintf(stderr, "do32bpt=%d force16bpt=%d\n", do32bpt, force16bpt);
+#endif
+
+ switch (internalFormat) {
+ case 4:
+ case GL_RGBA:
+ case GL_COMPRESSED_RGBA:
+ switch (type) {
+ case GL_UNSIGNED_INT_10_10_10_2:
+ case GL_UNSIGNED_INT_2_10_10_10_REV:
+ return do32bpt ? _dri_texformat_argb8888 :
+ _dri_texformat_argb1555;
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_4_4_4_4_REV:
+ return _dri_texformat_argb4444;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ return _dri_texformat_argb1555;
+ default:
+ return do32bpt ? radeonChoose8888TexFormat(rmesa, format, type) :
+ _dri_texformat_argb4444;
+ }
+
+ case 3:
+ case GL_RGB:
+ case GL_COMPRESSED_RGB:
+ switch (type) {
+ case GL_UNSIGNED_SHORT_4_4_4_4:
+ case GL_UNSIGNED_SHORT_4_4_4_4_REV:
+ return _dri_texformat_argb4444;
+ case GL_UNSIGNED_SHORT_5_5_5_1:
+ case GL_UNSIGNED_SHORT_1_5_5_5_REV:
+ return _dri_texformat_argb1555;
+ case GL_UNSIGNED_SHORT_5_6_5:
+ case GL_UNSIGNED_SHORT_5_6_5_REV:
+ return _dri_texformat_rgb565;
+ default:
+ return do32bpt ? _dri_texformat_argb8888 :
+ _dri_texformat_rgb565;
+ }
+
+ case GL_RGBA8:
+ case GL_RGB10_A2:
+ case GL_RGBA12:
+ case GL_RGBA16:
+ return !force16bpt ?
+ radeonChoose8888TexFormat(rmesa, format,type) :
+ _dri_texformat_argb4444;
+
+ case GL_RGBA4:
+ case GL_RGBA2:
+ return _dri_texformat_argb4444;
+
+ case GL_RGB5_A1:
+ return _dri_texformat_argb1555;
+
+ case GL_RGB8:
+ case GL_RGB10:
+ case GL_RGB12:
+ case GL_RGB16:
+ return !force16bpt ? _dri_texformat_argb8888 :
+ _dri_texformat_rgb565;
+
+ case GL_RGB5:
+ case GL_RGB4:
+ case GL_R3_G3_B2:
+ return _dri_texformat_rgb565;
+
+ case GL_ALPHA:
+ case GL_ALPHA4:
+ case GL_ALPHA8:
+ case GL_ALPHA12:
+ case GL_ALPHA16:
+ case GL_COMPRESSED_ALPHA:
+ return _dri_texformat_a8;
+
+ case 1:
+ case GL_LUMINANCE:
+ case GL_LUMINANCE4:
+ case GL_LUMINANCE8:
+ case GL_LUMINANCE12:
+ case GL_LUMINANCE16:
+ case GL_COMPRESSED_LUMINANCE:
+ return _dri_texformat_l8;
+
+ case 2:
+ case GL_LUMINANCE_ALPHA:
+ case GL_LUMINANCE4_ALPHA4:
+ case GL_LUMINANCE6_ALPHA2:
+ case GL_LUMINANCE8_ALPHA8:
+ case GL_LUMINANCE12_ALPHA4:
+ case GL_LUMINANCE12_ALPHA12:
+ case GL_LUMINANCE16_ALPHA16:
+ case GL_COMPRESSED_LUMINANCE_ALPHA:
+ return _dri_texformat_al88;
+
+ case GL_INTENSITY:
+ case GL_INTENSITY4:
+ case GL_INTENSITY8:
+ case GL_INTENSITY12:
+ case GL_INTENSITY16:
+ case GL_COMPRESSED_INTENSITY:
+ return _dri_texformat_i8;
+
+ case GL_YCBCR_MESA:
+ if (type == GL_UNSIGNED_SHORT_8_8_APPLE ||
+ type == GL_UNSIGNED_BYTE)
+ return &_mesa_texformat_ycbcr;
+ else
+ return &_mesa_texformat_ycbcr_rev;
+
+ case GL_RGB_S3TC:
+ case GL_RGB4_S3TC:
+ case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgb_dxt1;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
+ return &_mesa_texformat_rgba_dxt1;
+
+ case GL_RGBA_S3TC:
+ case GL_RGBA4_S3TC:
+ case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
+ return &_mesa_texformat_rgba_dxt3;
+
+ case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
+ return &_mesa_texformat_rgba_dxt5;
+
+ case GL_ALPHA16F_ARB:
+ return &_mesa_texformat_alpha_float16;
+ case GL_ALPHA32F_ARB:
+ return &_mesa_texformat_alpha_float32;
+ case GL_LUMINANCE16F_ARB:
+ return &_mesa_texformat_luminance_float16;
+ case GL_LUMINANCE32F_ARB:
+ return &_mesa_texformat_luminance_float32;
+ case GL_LUMINANCE_ALPHA16F_ARB:
+ return &_mesa_texformat_luminance_alpha_float16;
+ case GL_LUMINANCE_ALPHA32F_ARB:
+ return &_mesa_texformat_luminance_alpha_float32;
+ case GL_INTENSITY16F_ARB:
+ return &_mesa_texformat_intensity_float16;
+ case GL_INTENSITY32F_ARB:
+ return &_mesa_texformat_intensity_float32;
+ case GL_RGB16F_ARB:
+ return &_mesa_texformat_rgba_float16;
+ case GL_RGB32F_ARB:
+ return &_mesa_texformat_rgba_float32;
+ case GL_RGBA16F_ARB:
+ return &_mesa_texformat_rgba_float16;
+ case GL_RGBA32F_ARB:
+ return &_mesa_texformat_rgba_float32;
+
+ case GL_DEPTH_COMPONENT:
+ case GL_DEPTH_COMPONENT16:
+ case GL_DEPTH_COMPONENT24:
+ case GL_DEPTH_COMPONENT32:
+#if 0
+ switch (type) {
+ case GL_UNSIGNED_BYTE:
+ case GL_UNSIGNED_SHORT:
+ return &_mesa_texformat_z16;
+ case GL_UNSIGNED_INT:
+ return &_mesa_texformat_z32;
+ case GL_UNSIGNED_INT_24_8_EXT:
+ default:
+ return &_mesa_texformat_z24_s8;
+ }
+#else
+ return &_mesa_texformat_z16;
+#endif
+
+ default:
+ _mesa_problem(ctx,
+ "unexpected internalFormat 0x%x in radeonChooseTextureFormat",
+ (int)internalFormat);
+ return NULL;
+ }
+
+ return NULL; /* never get here */
+}
+
+/**
+ * All glTexImage calls go through this function.
+ */
+static void radeon_teximage(
+ GLcontext *ctx, int dims,
+ GLint face, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLsizei imageSize,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ int compressed)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeonTexObj* t = radeon_tex_obj(texObj);
+ radeon_texture_image* image = get_radeon_texture_image(texImage);
+ GLuint dstRowStride;
+ GLint postConvWidth = width;
+ GLint postConvHeight = height;
+ GLuint texelBytes;
+
+ radeon_firevertices(rmesa);
+
+ t->validated = GL_FALSE;
+
+ if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
+ _mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
+ &postConvHeight);
+ }
+
+ /* Choose and fill in the texture format for this image */
+ texImage->TexFormat = radeonChooseTextureFormat(ctx, internalFormat, format, type);
+ _mesa_set_fetch_functions(texImage, dims);
+
+ if (texImage->TexFormat->TexelBytes == 0) {
+ texelBytes = 0;
+ texImage->IsCompressed = GL_TRUE;
+ texImage->CompressedSize =
+ ctx->Driver.CompressedTextureSize(ctx, texImage->Width,
+ texImage->Height, texImage->Depth,
+ texImage->TexFormat->MesaFormat);
+ } else {
+ texImage->IsCompressed = GL_FALSE;
+ texImage->CompressedSize = 0;
+
+ texelBytes = texImage->TexFormat->TexelBytes;
+ /* Minimum pitch of 32 bytes */
+ if (postConvWidth * texelBytes < 32) {
+ postConvWidth = 32 / texelBytes;
+ texImage->RowStride = postConvWidth;
+ }
+ if (!image->mt) {
+ assert(texImage->RowStride == postConvWidth);
+ }
+ }
+
+ /* Allocate memory for image */
+ radeonFreeTexImageData(ctx, texImage); /* Mesa core only clears texImage->Data but not image->mt */
+
+ if (t->mt &&
+ t->mt->firstLevel == level &&
+ t->mt->lastLevel == level &&
+ t->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
+ !radeon_miptree_matches_image(t->mt, texImage, face, level)) {
+ radeon_miptree_unreference(t->mt);
+ t->mt = NULL;
+ }
+
+ if (!t->mt)
+ radeon_try_alloc_miptree(rmesa, t, texImage, face, level);
+ if (t->mt && radeon_miptree_matches_image(t->mt, texImage, face, level)) {
+ radeon_mipmap_level *lvl;
+ image->mt = t->mt;
+ image->mtlevel = level - t->mt->firstLevel;
+ image->mtface = face;
+ radeon_miptree_reference(t->mt);
+ lvl = &image->mt->levels[image->mtlevel];
+ dstRowStride = lvl->rowstride;
+ } else {
+ int size;
+ if (texImage->IsCompressed) {
+ size = texImage->CompressedSize;
+ } else {
+ size = texImage->Width * texImage->Height * texImage->Depth * texImage->TexFormat->TexelBytes;
+ }
+ texImage->Data = _mesa_alloc_texmemory(size);
+ }
+
+ /* Upload texture image; note that the spec allows pixels to be NULL */
+ if (compressed) {
+ pixels = _mesa_validate_pbo_compressed_teximage(
+ ctx, imageSize, pixels, packing, "glCompressedTexImage");
+ } else {
+ pixels = _mesa_validate_pbo_teximage(
+ ctx, dims, width, height, depth,
+ format, type, pixels, packing, "glTexImage");
+ }
+
+ if (pixels) {
+ radeon_teximage_map(image, GL_TRUE);
+
+ if (compressed) {
+ memcpy(texImage->Data, pixels, imageSize);
+ } else {
+ GLuint dstRowStride;
+ if (image->mt) {
+ radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
+ dstRowStride = lvl->rowstride;
+ } else {
+ dstRowStride = texImage->Width * texImage->TexFormat->TexelBytes;
+ }
+
+ if (!texImage->TexFormat->StoreImage(ctx, dims,
+ texImage->_BaseFormat,
+ texImage->TexFormat,
+ texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, packing))
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
+ }
+
+ }
+
+ /* SGIS_generate_mipmap */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ radeon_generate_mipmap(ctx, texObj->Target, texObj);
+ }
+
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+ if (pixels)
+ radeon_teximage_unmap(image);
+
+
+}
+
+void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_teximage(ctx, 1, 0, level, internalFormat, width, 1, 1,
+ 0, format, type, pixels, packing, texObj, texImage, 0);
+}
+
+void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+
+{
+ GLuint face = radeon_face_for_target(target);
+
+ radeon_teximage(ctx, 2, face, level, internalFormat, width, height, 1,
+ 0, format, type, pixels, packing, texObj, texImage, 0);
+}
+
+void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
+ GLint level, GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLsizei imageSize, const GLvoid * data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ GLuint face = radeon_face_for_target(target);
+
+ radeon_teximage(ctx, 2, face, level, internalFormat, width, height, 1,
+ imageSize, 0, 0, data, &ctx->Unpack, texObj, texImage, 1);
+}
+
+void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_teximage(ctx, 3, 0, level, internalFormat, width, height, depth,
+ 0, format, type, pixels, packing, texObj, texImage, 0);
+}
+
+/**
+ * Update a subregion of the given texture image.
+ */
+static void radeon_texsubimage(GLcontext* ctx, int dims, int level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLsizei imageSize,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage,
+ int compressed)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeonTexObj* t = radeon_tex_obj(texObj);
+ radeon_texture_image* image = get_radeon_texture_image(texImage);
+
+ radeon_firevertices(rmesa);
+
+ t->validated = GL_FALSE;
+ if (compressed) {
+ pixels = _mesa_validate_pbo_compressed_teximage(
+ ctx, imageSize, pixels, packing, "glCompressedTexSubImage");
+ } else {
+ pixels = _mesa_validate_pbo_teximage(ctx, dims,
+ width, height, depth, format, type, pixels, packing, "glTexSubImage1D");
+ }
+
+ if (pixels) {
+ GLint dstRowStride;
+ radeon_teximage_map(image, GL_TRUE);
+
+ if (image->mt) {
+ radeon_mipmap_level *lvl = &image->mt->levels[image->mtlevel];
+ dstRowStride = lvl->rowstride;
+ } else {
+ dstRowStride = texImage->RowStride * texImage->TexFormat->TexelBytes;
+ }
+
+ if (compressed) {
+ uint32_t srcRowStride, bytesPerRow, rows;
+ dstRowStride = _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, texImage->Width);
+ srcRowStride = _mesa_compressed_row_stride(texImage->TexFormat->MesaFormat, width);
+ bytesPerRow = srcRowStride;
+ rows = height / 4;
+
+ copy_rows(texImage->Data, dstRowStride, image->base.Data, srcRowStride, rows,
+ bytesPerRow);
+
+ } else {
+ if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
+ texImage->TexFormat, texImage->Data,
+ xoffset, yoffset, zoffset,
+ dstRowStride,
+ texImage->ImageOffsets,
+ width, height, depth,
+ format, type, pixels, packing))
+ _mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexSubImage");
+ }
+
+ }
+
+ /* GL_SGIS_generate_mipmap */
+ if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
+ radeon_generate_mipmap(ctx, texObj->Target, texObj);
+ }
+ radeon_teximage_unmap(image);
+
+ _mesa_unmap_teximage_pbo(ctx, packing);
+
+
+}
+
+void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_texsubimage(ctx, 1, level, xoffset, 0, 0, width, 1, 1, 0,
+ format, type, pixels, packing, texObj, texImage, 0);
+}
+
+void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_texsubimage(ctx, 2, level, xoffset, yoffset, 0, width, height, 1,
+ 0, format, type, pixels, packing, texObj, texImage,
+ 0);
+}
+
+void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
+ GLint level, GLint xoffset,
+ GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format,
+ GLsizei imageSize, const GLvoid * data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_texsubimage(ctx, 2, level, xoffset, yoffset, 0, width, height, 1,
+ imageSize, format, 0, data, &ctx->Unpack, texObj, texImage, 1);
+}
+
+
+void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_texsubimage(ctx, 3, level, xoffset, yoffset, zoffset, width, height, depth, 0,
+ format, type, pixels, packing, texObj, texImage, 0);
+}
+
+
+
+/**
+ * Ensure that the given image is stored in the given miptree from now on.
+ */
+static void migrate_image_to_miptree(radeon_mipmap_tree *mt, radeon_texture_image *image, int face, int level)
+{
+ radeon_mipmap_level *dstlvl = &mt->levels[level - mt->firstLevel];
+ unsigned char *dest;
+
+ assert(image->mt != mt);
+ assert(dstlvl->width == image->base.Width);
+ assert(dstlvl->height == image->base.Height);
+ assert(dstlvl->depth == image->base.Depth);
+
+
+ radeon_bo_map(mt->bo, GL_TRUE);
+ dest = mt->bo->ptr + dstlvl->faces[face].offset;
+
+ if (image->mt) {
+ /* Format etc. should match, so we really just need a memcpy().
+ * In fact, that memcpy() could be done by the hardware in many
+ * cases, provided that we have a proper memory manager.
+ */
+ radeon_mipmap_level *srclvl = &image->mt->levels[image->mtlevel];
+
+ assert(srclvl->size == dstlvl->size);
+ assert(srclvl->rowstride == dstlvl->rowstride);
+
+ radeon_bo_map(image->mt->bo, GL_FALSE);
+
+ memcpy(dest,
+ image->mt->bo->ptr + srclvl->faces[face].offset,
+ dstlvl->size);
+ radeon_bo_unmap(image->mt->bo);
+
+ radeon_miptree_unreference(image->mt);
+ } else {
+ uint32_t srcrowstride;
+ uint32_t height;
+ /* need to confirm this value is correct */
+ if (mt->compressed) {
+ height = image->base.Height / 4;
+ srcrowstride = image->base.RowStride * mt->bpp;
+ } else {
+ height = image->base.Height * image->base.Depth;
+ srcrowstride = image->base.Width * image->base.TexFormat->TexelBytes;
+ }
+
+// if (mt->tilebits)
+// WARN_ONCE("%s: tiling not supported yet", __FUNCTION__);
+
+ copy_rows(dest, dstlvl->rowstride, image->base.Data, srcrowstride,
+ height, srcrowstride);
+
+ _mesa_free_texmemory(image->base.Data);
+ image->base.Data = 0;
+ }
+
+ radeon_bo_unmap(mt->bo);
+
+ image->mt = mt;
+ image->mtface = face;
+ image->mtlevel = level;
+ radeon_miptree_reference(image->mt);
+}
+
+int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj)
+{
+ radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
+ radeonTexObj *t = radeon_tex_obj(texObj);
+ radeon_texture_image *baseimage = get_radeon_texture_image(texObj->Image[0][texObj->BaseLevel]);
+ int face, level;
+
+ if (t->validated || t->image_override)
+ return GL_TRUE;
+
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);
+
+ if (baseimage->base.Border > 0)
+ return GL_FALSE;
+
+ /* Ensure a matching miptree exists.
+ *
+ * Differing mipmap trees can result when the app uses TexImage to
+ * change texture dimensions.
+ *
+ * Prefer to use base image's miptree if it
+ * exists, since that most likely contains more valid data (remember
+ * that the base level is usually significantly larger than the rest
+ * of the miptree, so cubemaps are the only possible exception).
+ */
+ if (baseimage->mt &&
+ baseimage->mt != t->mt &&
+ radeon_miptree_matches_texture(baseimage->mt, &t->base)) {
+ radeon_miptree_unreference(t->mt);
+ t->mt = baseimage->mt;
+ radeon_miptree_reference(t->mt);
+ } else if (t->mt && !radeon_miptree_matches_texture(t->mt, &t->base)) {
+ radeon_miptree_unreference(t->mt);
+ t->mt = 0;
+ }
+
+ if (!t->mt) {
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, " Allocate new miptree\n");
+ radeon_try_alloc_miptree(rmesa, t, &baseimage->base, 0, texObj->BaseLevel);
+ if (!t->mt) {
+ _mesa_problem(ctx, "radeon_validate_texture_miptree failed to alloc miptree");
+ return GL_FALSE;
+ }
+ }
+
+ /* Ensure all images are stored in the single main miptree */
+ for(face = 0; face < t->mt->faces; ++face) {
+ for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
+ radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
+ if (t->mt == image->mt) {
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, "OK\n");
+ continue;
+ }
+
+ if (RADEON_DEBUG & DEBUG_TEXTURE)
+ fprintf(stderr, "migrating\n");
+ migrate_image_to_miptree(t->mt, image, face, level);
+ }
+ }
+
+ return GL_TRUE;
+}
+
+
+/**
+ * Need to map texture image into memory before copying image data,
+ * then unmap it.
+ */
+static void
+radeon_get_tex_image(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage, int compressed)
+{
+ radeon_texture_image *image = get_radeon_texture_image(texImage);
+
+ if (image->mt) {
+ /* Map the texture image read-only */
+ radeon_teximage_map(image, GL_FALSE);
+ } else {
+ /* Image hasn't been uploaded to a miptree yet */
+ assert(image->base.Data);
+ }
+
+ if (compressed) {
+ _mesa_get_compressed_teximage(ctx, target, level, pixels,
+ texObj, texImage);
+ } else {
+ _mesa_get_teximage(ctx, target, level, format, type, pixels,
+ texObj, texImage);
+ }
+
+ if (image->mt) {
+ radeon_teximage_unmap(image);
+ }
+}
+
+void
+radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_get_tex_image(ctx, target, level, format, type, pixels,
+ texObj, texImage, 0);
+}
+
+void
+radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
+ GLvoid *pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage)
+{
+ radeon_get_tex_image(ctx, target, level, 0, 0, pixels,
+ texObj, texImage, 1);
+}
diff --git a/src/mesa/drivers/dri/radeon/radeon_texture.h b/src/mesa/drivers/dri/radeon/radeon_texture.h
new file mode 100644
index 0000000..d90fda7
--- /dev/null
+++ b/src/mesa/drivers/dri/radeon/radeon_texture.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Nicolai Haehnle.
+ * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef RADEON_TEXTURE_H
+#define RADEON_TEXTURE_H
+struct gl_texture_image *radeonNewTextureImage(GLcontext *ctx);
+void radeonFreeTexImageData(GLcontext *ctx, struct gl_texture_image *timage);
+
+void radeon_teximage_map(radeon_texture_image *image, GLboolean write_enable);
+void radeon_teximage_unmap(radeon_texture_image *image);
+void radeonMapTexture(GLcontext *ctx, struct gl_texture_object *texObj);
+void radeonUnmapTexture(GLcontext *ctx, struct gl_texture_object *texObj);
+void radeonGenerateMipmap(GLcontext* ctx, GLenum target, struct gl_texture_object *texObj);
+int radeon_validate_texture_miptree(GLcontext * ctx, struct gl_texture_object *texObj);
+GLuint radeon_face_for_target(GLenum target);
+const struct gl_texture_format *radeonChooseTextureFormat(GLcontext * ctx,
+ GLint internalFormat,
+ GLenum format,
+ GLenum type);
+
+void radeonTexImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonTexImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonCompressedTexImage2D(GLcontext * ctx, GLenum target,
+ GLint level, GLint internalFormat,
+ GLint width, GLint height, GLint border,
+ GLsizei imageSize, const GLvoid * data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonTexImage3D(GLcontext * ctx, GLenum target, GLint level,
+ GLint internalFormat,
+ GLint width, GLint height, GLint depth,
+ GLint border,
+ GLenum format, GLenum type, const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset,
+ GLsizei width,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset,
+ GLsizei width, GLsizei height,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonCompressedTexSubImage2D(GLcontext * ctx, GLenum target,
+ GLint level, GLint xoffset,
+ GLint yoffset, GLsizei width,
+ GLsizei height, GLenum format,
+ GLsizei imageSize, const GLvoid * data,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void radeonTexSubImage3D(GLcontext * ctx, GLenum target, GLint level,
+ GLint xoffset, GLint yoffset, GLint zoffset,
+ GLsizei width, GLsizei height, GLsizei depth,
+ GLenum format, GLenum type,
+ const GLvoid * pixels,
+ const struct gl_pixelstore_attrib *packing,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+void radeonGetTexImage(GLcontext * ctx, GLenum target, GLint level,
+ GLenum format, GLenum type, GLvoid * pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+void radeonGetCompressedTexImage(GLcontext *ctx, GLenum target, GLint level,
+ GLvoid *pixels,
+ struct gl_texture_object *texObj,
+ struct gl_texture_image *texImage);
+
+#endif
--
1.6.0.3